From a03030270decf530fa9c598eb95c274aa49a3e3a Mon Sep 17 00:00:00 2001 From: ZahraAssadizadeh <152871406+ZahraAssdizadeh@users.noreply.github.com> Date: Mon, 24 Jun 2024 06:02:46 +0200 Subject: [PATCH] Release 20.14.0 (#8) * PE-2406: Upgrade to v20.14.0 * PE-2467: Re-add old IRSA policy --- .gitattributes | 1 + .github/workflows/lock.yml | 2 +- .github/workflows/pr-title.yml | 2 +- .github/workflows/pre-commit.yml | 36 +- .github/workflows/release.yml | 12 +- .github/workflows/stale-actions.yaml | 2 +- .pre-commit-config.yaml | 10 +- .spacelift/config.yml | 2 +- CHANGELOG.md | 291 ++++++++ README.md | 255 +++---- docs/README.md | 2 +- docs/UPGRADE-18.0.md | 8 + docs/UPGRADE-19.0.md | 10 +- docs/UPGRADE-20.0.md | 270 ++++++++ docs/compute_resources.md | 9 +- docs/faq.md | 248 ++++++- docs/irsa_integration.md | 84 --- docs/user_data.md | 17 +- examples/complete/README.md | 107 --- examples/complete/main.tf | 469 ------------- examples/complete/outputs.tf | 192 ----- examples/complete/versions.tf | 14 - examples/eks_managed_node_group/README.md | 20 +- examples/eks_managed_node_group/main.tf | 287 ++++---- examples/eks_managed_node_group/outputs.tf | 28 +- examples/eks_managed_node_group/versions.tf | 8 +- examples/fargate_profile/README.md | 16 +- examples/fargate_profile/main.tf | 55 +- examples/fargate_profile/outputs.tf | 28 +- examples/fargate_profile/versions.tf | 12 +- examples/karpenter/README.md | 82 ++- examples/karpenter/main.tf | 223 +++--- examples/karpenter/outputs.tf | 60 +- examples/karpenter/versions.tf | 16 +- examples/outposts/README.md | 18 +- examples/outposts/main.tf | 21 +- examples/outposts/outputs.tf | 28 +- examples/outposts/prerequisites/main.tf | 6 +- examples/outposts/prerequisites/versions.tf | 4 +- examples/outposts/versions.tf | 6 +- examples/self_managed_node_group/README.md | 17 +- examples/self_managed_node_group/main.tf | 187 +++-- examples/self_managed_node_group/outputs.tf | 28 +- examples/self_managed_node_group/versions.tf | 8 
+- examples/user_data/README.md | 88 ++- examples/user_data/main.tf | 458 ++++++++++-- examples/user_data/outputs.tf | 212 ++++-- .../rendered/al2/eks-mng-additional.txt | 11 + .../rendered/al2/eks-mng-custom-ami-ipv6.sh | 8 + .../rendered/al2/eks-mng-custom-ami.sh | 8 + .../rendered/al2/eks-mng-custom-template.sh | 12 + .../rendered/al2/eks-mng-no-op.sh} | 0 .../rendered/al2/self-mng-bootstrap-ipv6.sh | 9 + .../rendered/al2/self-mng-bootstrap.sh | 9 + .../rendered/al2/self-mng-custom-template.sh | 12 + .../user_data/rendered/al2/self-mng-no-op.sh | 0 .../rendered/al2023/eks-mng-additional.txt | 19 + .../rendered/al2023/eks-mng-custom-ami.txt | 41 ++ .../al2023/eks-mng-custom-template.txt | 45 ++ .../rendered/al2023/eks-mng-no-op.txt | 0 .../rendered/al2023/self-mng-bootstrap.txt | 41 ++ .../al2023/self-mng-custom-template.txt | 45 ++ .../rendered/al2023/self-mng-no-op.txt | 0 .../bottlerocket/eks-mng-additional.toml | 3 + .../bottlerocket/eks-mng-custom-ami.toml | 8 + .../bottlerocket/eks-mng-custom-template.toml | 9 + .../rendered/bottlerocket/eks-mng-no-op.toml | 0 .../bottlerocket/self-mng-bootstrap.toml | 8 + .../self-mng-custom-template.toml | 9 + .../rendered/bottlerocket/self-mng-no-op.toml | 0 .../rendered/windows/eks-mng-additional.ps1 | 1 + .../rendered/windows/eks-mng-custom-ami.ps1 | 9 + .../windows/eks-mng-custom-template.ps1 | 10 + .../rendered/windows/eks-mng-no-op.ps1 | 0 .../rendered/windows/self-mng-bootstrap.ps1 | 9 + .../windows/self-mng-custom-template.ps1 | 10 + .../rendered/windows/self-mng-no-op.ps1 | 0 .../user_data/templates/al2023_custom.tpl | 15 + examples/user_data/templates/linux_custom.tpl | 3 +- examples/user_data/versions.tf | 9 +- main.tf | 308 ++++---- modules/_user_data/README.md | 23 +- modules/_user_data/main.tf | 148 ++-- modules/_user_data/outputs.tf | 7 +- modules/_user_data/variables.tf | 57 +- modules/_user_data/versions.tf | 6 +- modules/aws-auth/README.md | 81 +++ modules/aws-auth/main.tf | 47 ++ 
modules/aws-auth/outputs.tf | 0 modules/aws-auth/variables.tf | 39 ++ modules/aws-auth/versions.tf | 10 + modules/eks-managed-node-group/README.md | 35 +- modules/eks-managed-node-group/main.tf | 191 ++++- modules/eks-managed-node-group/migrations.tf | 20 + modules/eks-managed-node-group/outputs.tf | 9 + modules/eks-managed-node-group/variables.tf | 69 +- modules/eks-managed-node-group/versions.tf | 4 +- modules/fargate-profile/README.md | 9 +- modules/fargate-profile/main.tf | 37 +- modules/fargate-profile/migrations.tf | 15 + modules/fargate-profile/variables.tf | 2 +- modules/fargate-profile/versions.tf | 4 +- modules/karpenter/README.md | 155 ++--- modules/karpenter/main.tf | 655 ++++++++++++++---- modules/karpenter/migrations.tf | 77 ++ modules/karpenter/outputs.tf | 61 +- modules/karpenter/variables.tf | 175 +++-- modules/karpenter/versions.tf | 4 +- modules/self-managed-node-group/README.md | 35 +- modules/self-managed-node-group/main.tf | 291 ++++++-- modules/self-managed-node-group/migrations.tf | 20 + modules/self-managed-node-group/outputs.tf | 31 +- modules/self-managed-node-group/variables.tf | 128 +++- modules/self-managed-node-group/versions.tf | 4 +- node_groups.tf | 88 ++- outputs.tf | 60 +- templates/al2023_user_data.tpl | 11 + templates/aws_auth_cm.tpl | 37 - templates/bottlerocket_user_data.tpl | 1 + templates/linux_user_data.tpl | 6 +- templates/windows_user_data.tpl | 4 + variables.tf | 116 ++-- versions.tf | 8 +- 123 files changed, 4894 insertions(+), 2516 deletions(-) create mode 100644 .gitattributes create mode 100644 docs/UPGRADE-20.0.md delete mode 100644 docs/irsa_integration.md delete mode 100644 examples/complete/README.md delete mode 100644 examples/complete/main.tf delete mode 100644 examples/complete/outputs.tf delete mode 100644 examples/complete/versions.tf create mode 100755 examples/user_data/rendered/al2/eks-mng-additional.txt create mode 100755 examples/user_data/rendered/al2/eks-mng-custom-ami-ipv6.sh create mode 100755 
examples/user_data/rendered/al2/eks-mng-custom-ami.sh create mode 100755 examples/user_data/rendered/al2/eks-mng-custom-template.sh rename examples/{complete/variables.tf => user_data/rendered/al2/eks-mng-no-op.sh} (100%) mode change 100644 => 100755 create mode 100755 examples/user_data/rendered/al2/self-mng-bootstrap-ipv6.sh create mode 100755 examples/user_data/rendered/al2/self-mng-bootstrap.sh create mode 100755 examples/user_data/rendered/al2/self-mng-custom-template.sh create mode 100755 examples/user_data/rendered/al2/self-mng-no-op.sh create mode 100755 examples/user_data/rendered/al2023/eks-mng-additional.txt create mode 100755 examples/user_data/rendered/al2023/eks-mng-custom-ami.txt create mode 100755 examples/user_data/rendered/al2023/eks-mng-custom-template.txt create mode 100755 examples/user_data/rendered/al2023/eks-mng-no-op.txt create mode 100755 examples/user_data/rendered/al2023/self-mng-bootstrap.txt create mode 100755 examples/user_data/rendered/al2023/self-mng-custom-template.txt create mode 100755 examples/user_data/rendered/al2023/self-mng-no-op.txt create mode 100755 examples/user_data/rendered/bottlerocket/eks-mng-additional.toml create mode 100755 examples/user_data/rendered/bottlerocket/eks-mng-custom-ami.toml create mode 100755 examples/user_data/rendered/bottlerocket/eks-mng-custom-template.toml create mode 100755 examples/user_data/rendered/bottlerocket/eks-mng-no-op.toml create mode 100755 examples/user_data/rendered/bottlerocket/self-mng-bootstrap.toml create mode 100755 examples/user_data/rendered/bottlerocket/self-mng-custom-template.toml create mode 100755 examples/user_data/rendered/bottlerocket/self-mng-no-op.toml create mode 100755 examples/user_data/rendered/windows/eks-mng-additional.ps1 create mode 100755 examples/user_data/rendered/windows/eks-mng-custom-ami.ps1 create mode 100755 examples/user_data/rendered/windows/eks-mng-custom-template.ps1 create mode 100755 examples/user_data/rendered/windows/eks-mng-no-op.ps1 create 
mode 100755 examples/user_data/rendered/windows/self-mng-bootstrap.ps1 create mode 100755 examples/user_data/rendered/windows/self-mng-custom-template.ps1 create mode 100755 examples/user_data/rendered/windows/self-mng-no-op.ps1 create mode 100644 examples/user_data/templates/al2023_custom.tpl create mode 100644 modules/aws-auth/README.md create mode 100644 modules/aws-auth/main.tf create mode 100644 modules/aws-auth/outputs.tf create mode 100644 modules/aws-auth/variables.tf create mode 100644 modules/aws-auth/versions.tf create mode 100644 modules/eks-managed-node-group/migrations.tf create mode 100644 modules/fargate-profile/migrations.tf create mode 100644 modules/karpenter/migrations.tf create mode 100644 modules/self-managed-node-group/migrations.tf create mode 100644 templates/al2023_user_data.tpl delete mode 100644 templates/aws_auth_cm.tpl diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000000..176a458f94 --- /dev/null +++ b/.gitattributes @@ -0,0 +1 @@ +* text=auto diff --git a/.github/workflows/lock.yml b/.github/workflows/lock.yml index 6b6c9cec02..bd5f2df7cb 100644 --- a/.github/workflows/lock.yml +++ b/.github/workflows/lock.yml @@ -8,7 +8,7 @@ jobs: lock: runs-on: ubuntu-latest steps: - - uses: dessant/lock-threads@v4 + - uses: dessant/lock-threads@v5 with: github-token: ${{ secrets.GITHUB_TOKEN }} issue-comment: > diff --git a/.github/workflows/pr-title.yml b/.github/workflows/pr-title.yml index cb32a0f815..3973df4438 100644 --- a/.github/workflows/pr-title.yml +++ b/.github/workflows/pr-title.yml @@ -14,7 +14,7 @@ jobs: steps: # Please look up the latest version from # https://github.com/amannn/action-semantic-pull-request/releases - - uses: amannn/action-semantic-pull-request@v5.0.2 + - uses: amannn/action-semantic-pull-request@v5.4.0 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml index 58820b6342..c2632d1a44 100644 --- 
a/.github/workflows/pre-commit.yml +++ b/.github/workflows/pre-commit.yml @@ -8,7 +8,7 @@ on: env: TERRAFORM_DOCS_VERSION: v0.16.0 - TFLINT_VERSION: v0.44.1 + TFLINT_VERSION: v0.50.3 jobs: collectInputs: @@ -18,11 +18,11 @@ jobs: directories: ${{ steps.dirs.outputs.directories }} steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Get root directories id: dirs - uses: clowdhaus/terraform-composite-actions/directories@v1.8.0 + uses: clowdhaus/terraform-composite-actions/directories@v1.9.0 preCommitMinVersions: name: Min TF pre-commit @@ -32,19 +32,27 @@ jobs: matrix: directory: ${{ fromJson(needs.collectInputs.outputs.directories) }} steps: + # https://github.com/orgs/community/discussions/25678#discussioncomment-5242449 + - name: Delete huge unnecessary tools folder + run: | + rm -rf /opt/hostedtoolcache/CodeQL + rm -rf /opt/hostedtoolcache/Java_Temurin-Hotspot_jdk + rm -rf /opt/hostedtoolcache/Ruby + rm -rf /opt/hostedtoolcache/go + - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Terraform min/max versions id: minMax - uses: clowdhaus/terraform-min-max@v1.2.0 + uses: clowdhaus/terraform-min-max@v1.3.0 with: directory: ${{ matrix.directory }} - name: Pre-commit Terraform ${{ steps.minMax.outputs.minVersion }} # Run only validate pre-commit check on min version supported if: ${{ matrix.directory != '.' }} - uses: clowdhaus/terraform-composite-actions/pre-commit@v1.8.0 + uses: clowdhaus/terraform-composite-actions/pre-commit@v1.9.0 with: terraform-version: ${{ steps.minMax.outputs.minVersion }} tflint-version: ${{ env.TFLINT_VERSION }} @@ -53,7 +61,7 @@ jobs: - name: Pre-commit Terraform ${{ steps.minMax.outputs.minVersion }} # Run only validate pre-commit check on min version supported if: ${{ matrix.directory == '.' 
}} - uses: clowdhaus/terraform-composite-actions/pre-commit@v1.8.0 + uses: clowdhaus/terraform-composite-actions/pre-commit@v1.9.0 with: terraform-version: ${{ steps.minMax.outputs.minVersion }} tflint-version: ${{ env.TFLINT_VERSION }} @@ -64,18 +72,26 @@ jobs: runs-on: ubuntu-latest needs: collectInputs steps: + # https://github.com/orgs/community/discussions/25678#discussioncomment-5242449 + - name: Delete huge unnecessary tools folder + run: | + rm -rf /opt/hostedtoolcache/CodeQL + rm -rf /opt/hostedtoolcache/Java_Temurin-Hotspot_jdk + rm -rf /opt/hostedtoolcache/Ruby + rm -rf /opt/hostedtoolcache/go + - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: ref: ${{ github.event.pull_request.head.ref }} repository: ${{github.event.pull_request.head.repo.full_name}} - name: Terraform min/max versions id: minMax - uses: clowdhaus/terraform-min-max@v1.2.0 + uses: clowdhaus/terraform-min-max@v1.3.0 - name: Pre-commit Terraform ${{ steps.minMax.outputs.maxVersion }} - uses: clowdhaus/terraform-composite-actions/pre-commit@v1.8.0 + uses: clowdhaus/terraform-composite-actions/pre-commit@v1.9.0 with: terraform-version: ${{ steps.minMax.outputs.maxVersion }} tflint-version: ${{ env.TFLINT_VERSION }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 81f674740b..4a9422614e 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -20,18 +20,18 @@ jobs: if: github.repository_owner == 'terraform-aws-modules' steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: persist-credentials: false fetch-depth: 0 - name: Release - uses: cycjimmy/semantic-release-action@v3 + uses: cycjimmy/semantic-release-action@v4 with: - semantic_version: 18.0.0 + semantic_version: 23.0.2 extra_plugins: | - @semantic-release/changelog@6.0.0 - @semantic-release/git@10.0.0 - conventional-changelog-conventionalcommits@4.6.3 + @semantic-release/changelog@6.0.3 + @semantic-release/git@10.0.1 + 
conventional-changelog-conventionalcommits@7.0.2 env: GITHUB_TOKEN: ${{ secrets.SEMANTIC_RELEASE_TOKEN }} diff --git a/.github/workflows/stale-actions.yaml b/.github/workflows/stale-actions.yaml index 50379957fd..6ccd0ed856 100644 --- a/.github/workflows/stale-actions.yaml +++ b/.github/workflows/stale-actions.yaml @@ -7,7 +7,7 @@ jobs: stale: runs-on: ubuntu-latest steps: - - uses: actions/stale@v6 + - uses: actions/stale@v9 with: repo-token: ${{ secrets.GITHUB_TOKEN }} # Staling issues and PR's diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index dabb150840..06efda4811 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,9 +1,8 @@ repos: - repo: https://github.com/antonbabenko/pre-commit-terraform - rev: v1.80.0 + rev: v1.91.0 hooks: - id: terraform_fmt - - id: terraform_validate - id: terraform_docs args: - '--args=--lockfile=false' @@ -22,8 +21,13 @@ repos: - '--args=--only=terraform_required_providers' - '--args=--only=terraform_standard_module_structure' - '--args=--only=terraform_workspace_remote' + - '--args=--only=terraform_unused_required_providers' + - id: terraform_validate - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.4.0 + rev: v4.6.0 hooks: - id: check-merge-conflict - id: end-of-file-fixer + - id: trailing-whitespace + - id: mixed-line-ending + args: [--fix=lf] diff --git a/.spacelift/config.yml b/.spacelift/config.yml index da9e57ea69..34748135dc 100644 --- a/.spacelift/config.yml +++ b/.spacelift/config.yml @@ -1,2 +1,2 @@ version: 1 -module_version: 19.15.3 +module_version: 20.14.0 diff --git a/CHANGELOG.md b/CHANGELOG.md index 062b48ec60..d0b114602b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,297 @@ All notable changes to this project will be documented in this file. 
+## [20.14.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.13.1...v20.14.0) (2024-06-13) + + +### Features + +* Require users to supply OS via `ami_type` and not via `platform` which is unable to distinquish between the number of variants supported today ([#3068](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3068)) ([ef657bf](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/ef657bfcb51296841f14cf514ffefb1066f810ee)) + +## [20.13.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.13.0...v20.13.1) (2024-06-04) + + +### Bug Fixes + +* Correct syntax for correctly ignoring `bootstrap_cluster_creator_admin_permissions` and not all of `access_config` ([#3056](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3056)) ([1e31929](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/1e319290445a6eb50b53dfb89c9ae9f2949d38d7)) + +## [20.13.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.12.0...v20.13.0) (2024-05-31) + + +### Features + +* Starting with `1.30`, do not use the cluster OIDC issuer URL by default in the identity provider config ([#3055](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3055)) ([00f076a](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/00f076ada4cd78c5c34b8be6e8eba44b628b629a)) + +## [20.12.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.11.1...v20.12.0) (2024-05-28) + + +### Features + +* Support additional cluster DNS IPs with Bottlerocket based AMIs ([#3051](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3051)) ([541dbb2](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/541dbb29f12bb763a34b32acdaea9cea12d7f543)) + +## [20.11.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.11.0...v20.11.1) (2024-05-21) + + +### Bug Fixes + +* Ignore changes to `bootstrap_cluster_creator_admin_permissions` 
which is disabled by default ([#3042](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3042)) ([c65d308](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/c65d3085037d9c1c87f4fd3a5be1ca1d732dbf7a)) + +## [20.11.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.10.0...v20.11.0) (2024-05-16) + + +### Features + +* Add `SourceArn` condition to Fargate profile trust policy ([#3039](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3039)) ([a070d7b](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/a070d7b2bd92866b91e0963a0f819eec9839ed03)) + +## [20.10.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.9.0...v20.10.0) (2024-05-09) + + +### Features + +* Add support for Pod Identity assocation on Karpenter sub-module ([#3031](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3031)) ([cfcaf27](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/cfcaf27ac78278916ebf3d51dc64a20fe0d7bf01)) + +## [20.9.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.8.5...v20.9.0) (2024-05-08) + + +### Features + +* Propagate `ami_type` to self-managed node group; allow using `ami_type` only ([#3030](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3030)) ([74d3918](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/74d39187d855932dd976da6180eda42dcfe09873)) + +## [20.8.5](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.8.4...v20.8.5) (2024-04-08) + + +### Bug Fixes + +* Forces cluster outputs to wait until access entries are complete ([#3000](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/3000)) ([e2a39c0](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/e2a39c0f261d776e4e18a650aa9068429c4f5ef4)) + +## [20.8.4](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.8.3...v20.8.4) (2024-03-21) + + +### Bug Fixes + +* Pass 
nodeadm user data variables from root module down to nodegroup sub-modules ([#2981](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2981)) ([84effa0](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/84effa0e30f64ba2fceb7f89c2a822e92f1ee1ea)) + +## [20.8.3](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.8.2...v20.8.3) (2024-03-12) + + +### Bug Fixes + +* Ensure the correct service CIDR and IP family is used in the rendered user data ([#2963](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2963)) ([aeb9f0c](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/aeb9f0c990b259320a6c3e5ff93be3f064bb9238)) + +## [20.8.2](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.8.1...v20.8.2) (2024-03-11) + + +### Bug Fixes + +* Ensure a default `ip_family` value is provided to guarantee a CNI policy is attached to nodes ([#2967](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2967)) ([29dcca3](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/29dcca335d80e248c57b8efa2c36aaef2e1b1bd2)) + +## [20.8.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.8.0...v20.8.1) (2024-03-10) + + +### Bug Fixes + +* Do not attach policy if Karpenter node role is not created by module ([#2964](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2964)) ([3ad19d7](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/3ad19d7435f34600e4872fd131e155583e498cd9)) + +## [20.8.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.7.0...v20.8.0) (2024-03-10) + + +### Features + +* Replace the use of `toset()` with static keys for node IAM role policy attachment ([#2962](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2962)) ([57f5130](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/57f5130132ca11fd3e478a61a8fc082a929540c2)) + +## 
[20.7.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.6.0...v20.7.0) (2024-03-09) + + +### Features + +* Add supprot for creating placement group for managed node group ([#2959](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2959)) ([3031631](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/30316312f33fe7fd09faf86fdb1b01ab2a377b2a)) + +## [20.6.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.5.3...v20.6.0) (2024-03-09) + + +### Features + +* Add support for tracking latest AMI release version on managed nodegroups ([#2951](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2951)) ([393da7e](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/393da7ec0ed158cf783356ab10959d91430c1d80)) + +## [20.5.3](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.5.2...v20.5.3) (2024-03-08) + + +### Bug Fixes + +* Update AWS provider version to support `AL2023_*` AMI types; ensure AL2023 user data receives cluster service CIDR ([#2960](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2960)) ([dfe4114](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/dfe41141c2385db783d97494792c8f2e227cfc7c)) + +## [20.5.2](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.5.1...v20.5.2) (2024-03-07) + + +### Bug Fixes + +* Use the `launch_template_tags` on the launch template ([#2957](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2957)) ([0ed32d7](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/0ed32d7b291513f34775ca85b0aa33da085d09fa)) + +## [20.5.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.5.0...v20.5.1) (2024-03-07) + + +### Bug Fixes + +* Update CI workflow versions to remove deprecated runtime warnings ([#2956](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2956)) 
([d14cc92](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/d14cc925c450451b023407d05a2516d7682d1617)) + +## [20.5.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.4.0...v20.5.0) (2024-03-01) + + +### Features + +* Add support for AL2023 `nodeadm` user data ([#2942](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2942)) ([7c99bb1](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/7c99bb19cdbf1eb4f4543f9b8e6d29c3a6734a55)) + +## [20.4.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.3.0...v20.4.0) (2024-02-23) + + +### Features + +* Add support for enabling EFA resources ([#2936](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2936)) ([7f472ec](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/7f472ec660049d4ca85de039cb3015c1b1d12fb8)) + +## [20.3.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.2.2...v20.3.0) (2024-02-21) + + +### Features + +* Add support for addon and identity provider custom tags ([#2938](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2938)) ([f6255c4](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/f6255c49e47d44bd62bb2b4e1e448ac80ceb2b3a)) + +### [20.2.2](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.2.1...v20.2.2) (2024-02-21) + + +### Bug Fixes + +* Replace Karpenter SQS policy dynamic service princpal DNS suffixes with static `amazonaws.com` ([#2941](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2941)) ([081c762](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/081c7624a5a4f2b039370ae8eb9ee8e445d01c48)) + +### [20.2.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.2.0...v20.2.1) (2024-02-08) + + +### Bug Fixes + +* Karpenter `enable_spot_termination = false` should not result in an error 
([#2907](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2907)) ([671fc6e](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/671fc6e627d957ada47ef3f33068d715e79d25d6)) + +## [20.2.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.1.1...v20.2.0) (2024-02-06) + + +### Features + +* Allow enable/disable of EKS pod identity for the Karpenter controller ([#2902](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2902)) ([cc6919d](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/cc6919de811f3972815d4ca26e5e0c8f64c2b894)) + +### [20.1.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.1.0...v20.1.1) (2024-02-06) + + +### Bug Fixes + +* Update access entries `kubernetes_groups` default value to `null` ([#2897](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2897)) ([1e32e6a](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/1e32e6a9f8a389b1a4969dde697d34ba4e3c85ac)) + +## [20.1.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.0.1...v20.1.0) (2024-02-06) + + +### Features + +* Add output for `access_policy_associations` ([#2904](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2904)) ([0d2a4c2](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/0d2a4c2af3d7c8593226bbccbf8753950e741b15)) + +### [20.0.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v20.0.0...v20.0.1) (2024-02-03) + + +### Bug Fixes + +* Correct cluster access entry to create multiple policy associations per access entry ([#2892](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2892)) ([4177913](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/417791374cf72dfb673105359463398eb4a75d6e)) + +## [20.0.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.21.0...v20.0.0) (2024-02-02) + + +### ⚠ BREAKING CHANGES + +* Replace the use of 
`aws-auth` configmap with EKS cluster access entry (#2858) + +### Features + +* Replace the use of `aws-auth` configmap with EKS cluster access entry ([#2858](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2858)) ([6b40bdb](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/6b40bdbb1d283d9259f43b03d24dca99cc1eceff)) + +## [19.21.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.20.0...v19.21.0) (2023-12-11) + + +### Features + +* Add tags for CloudWatch log group only ([#2841](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2841)) ([4c5c97b](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/4c5c97b5d404a4e46945e3b6228d469743669937)) + +## [19.20.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.19.1...v19.20.0) (2023-11-14) + + +### Features + +* Allow OIDC root CA thumbprint to be included/excluded ([#2778](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2778)) ([091c680](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/091c68051d9cbf24644121a24c715307f00c44b3)) + +### [19.19.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.19.0...v19.19.1) (2023-11-10) + + +### Bug Fixes + +* Remove additional conditional on Karpenter instance profile creation to support upgrading ([#2812](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2812)) ([c36c8dc](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/c36c8dc825aa09e2ded20ff675905aa8857853cf)) + +## [19.19.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.18.0...v19.19.0) (2023-11-04) + + +### Features + +* Update KMS module to avoid calling data sources when `create_kms_key = false` ([#2804](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2804)) ([0732bea](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/0732bea85f46fd2629705f9ee5f87cb695ee95e5)) + +## 
[19.18.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.17.4...v19.18.0) (2023-11-01) + + +### Features + +* Add Karpenter v1beta1 compatibility ([#2800](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2800)) ([aec2bab](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/aec2bab1d8da89b65b84d11fef77cbc969fccc91)) + +### [19.17.4](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.17.3...v19.17.4) (2023-10-30) + + +### Bug Fixes + +* Updating license_specification result type ([#2798](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2798)) ([ba0ebeb](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/ba0ebeb11a64a6400a3666165509975d5cdfea43)) + +### [19.17.3](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.17.2...v19.17.3) (2023-10-30) + + +### Bug Fixes + +* Correct key used on `license_configuration_arn` ([#2796](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2796)) ([bd4bda2](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/bd4bda266e23635c7ca09b6e9d307b29ef6b8579)) + +### [19.17.2](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.17.1...v19.17.2) (2023-10-10) + + +### Bug Fixes + +* Karpenter node IAM role policies variable should be a map of strings, not list ([#2771](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2771)) ([f4766e5](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/f4766e5c27f060e8c7f5950cf82d1fe59c3231af)) + +### [19.17.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.17.0...v19.17.1) (2023-10-06) + + +### Bug Fixes + +* Only include CA thumbprint in OIDC provider list ([#2769](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2769)) ([7e5de15](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/7e5de1566c7e1330c05c5e6c51f5ab4690001915)), closes 
[#2732](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2732) [#32847](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/32847) + +## [19.17.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.16.0...v19.17.0) (2023-10-06) + + +### Features + +* Add support for `allowed_instance_types` on self-managed nodegroup ASG ([#2757](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2757)) ([feee18d](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/feee18dd423b1e76f8a5119206f23306e5879b26)) + +## [19.16.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.15.4...v19.16.0) (2023-08-03) + + +### Features + +* Add `node_iam_role_arns` local variable to check for Windows platform on EKS managed nodegroups ([#2477](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2477)) ([adb47f4](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/adb47f46dc53b1a0c18691a59dc58401c327c0be)) + +### [19.15.4](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.15.3...v19.15.4) (2023-07-27) + + +### Bug Fixes + +* Use `coalesce` when desired default value is not `null` ([#2696](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/2696)) ([c86f8d4](https://github.com/terraform-aws-modules/terraform-aws-eks/commit/c86f8d4db3236e7dae59ef9142da4d7e496138c8)) + ### [19.15.3](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v19.15.2...v19.15.3) (2023-06-09) diff --git a/README.md b/README.md index a87400e1c0..124c57a830 100644 --- a/README.md +++ b/README.md @@ -8,17 +8,18 @@ Terraform module which creates AWS EKS (Kubernetes) resources - [Frequently Asked Questions](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/faq.md) - [Compute Resources](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/compute_resources.md) -- [IRSA 
Integration](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/irsa_integration.md) - [User Data](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/user_data.md) - [Network Connectivity](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/network_connectivity.md) - Upgrade Guides - [Upgrade to v17.x](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/UPGRADE-17.0.md) - [Upgrade to v18.x](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/UPGRADE-18.0.md) - [Upgrade to v19.x](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/UPGRADE-19.0.md) + - [Upgrade to v20.x](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/UPGRADE-20.0.md) ### External Documentation Please note that we strive to provide a comprehensive suite of documentation for __*configuring and utilizing the module(s)*__ defined here, and that documentation regarding EKS (including EKS managed node group, self managed node group, and Fargate profile) and/or Kubernetes features, usage, etc. are better left up to their respective sources: + - [AWS EKS Documentation](https://docs.aws.amazon.com/eks/latest/userguide/getting-started.html) - [Kubernetes Documentation](https://kubernetes.io/docs/home/) @@ -27,58 +28,17 @@ Please note that we strive to provide a comprehensive suite of documentation for The examples provided under `examples/` provide a comprehensive suite of configurations that demonstrate nearly all of the possible different configurations and settings that can be used with this module. However, these examples are not representative of clusters that you would normally find in use for production workloads. 
For reference architectures that utilize this module, please see the following: - [EKS Reference Architecture](https://github.com/clowdhaus/eks-reference-architecture) - -## Available Features - -- AWS EKS Cluster Addons -- AWS EKS Identity Provider Configuration -- [AWS EKS on Outposts support](https://aws.amazon.com/blogs/aws/deploy-your-amazon-eks-clusters-locally-on-aws-outposts/) -- All [node types](https://docs.aws.amazon.com/eks/latest/userguide/eks-compute.html) are supported: - - [EKS Managed Node Group](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) - - [Self Managed Node Group](https://docs.aws.amazon.com/eks/latest/userguide/worker.html) - - [Fargate Profile](https://docs.aws.amazon.com/eks/latest/userguide/fargate.html) -- Support for creating Karpenter related AWS infrastructure resources (e.g. IAM roles, SQS queue, EventBridge rules, etc.) -- Support for custom AMI, custom launch template, and custom user data including custom user data template -- Support for Amazon Linux 2 EKS Optimized AMI and Bottlerocket nodes - - Windows based node support is limited to a default user data template that is provided due to the lack of Windows support and manual steps required to provision Windows based EKS nodes -- Support for module created security group, bring your own security groups, as well as adding additional security group rules to the module created security group(s) -- Support for creating node groups/profiles separate from the cluster through the use of sub-modules (same as what is used by root module) -- Support for node group/profile "default" settings - useful for when creating multiple node groups/Fargate profiles where you want to set a common set of configurations once, and then individually control only select features on certain node groups/profiles - -### [IRSA Terraform Module](https://github.com/terraform-aws-modules/terraform-aws-iam/tree/master/modules/iam-role-for-service-accounts-eks) - -An IAM role for 
service accounts (IRSA) sub-module has been created to make deploying common addons/controllers easier. Instead of users having to create a custom IAM role with the necessary federated role assumption required for IRSA plus find and craft the associated policy required for the addon/controller, users can create the IRSA role and policy with a few lines of code. See the [`terraform-aws-iam/examples/iam-role-for-service-accounts`](https://github.com/terraform-aws-modules/terraform-aws-iam/blob/master/examples/iam-role-for-service-accounts-eks/main.tf) directory for examples on how to use the IRSA sub-module in conjunction with this (`terraform-aws-eks`) module. - -Some of the addon/controller policies that are currently supported include: - -- [Cert-Manager](https://cert-manager.io/docs/configuration/acme/dns01/route53/#set-up-an-iam-role) -- [Cluster Autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md) -- [EBS CSI Driver](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/blob/master/docs/example-iam-policy.json) -- [EFS CSI Driver](https://github.com/kubernetes-sigs/aws-efs-csi-driver/blob/master/docs/iam-policy-example.json) -- [External DNS](https://github.com/kubernetes-sigs/external-dns/blob/master/docs/tutorials/aws.md#iam-policy) -- [External Secrets](https://github.com/external-secrets/kubernetes-external-secrets#add-a-secret) -- [FSx for Lustre CSI Driver](https://github.com/kubernetes-sigs/aws-fsx-csi-driver/blob/master/docs/README.md) -- [Karpenter](https://github.com/aws/karpenter/blob/main/website/content/en/preview/getting-started/cloudformation.yaml) -- [Load Balancer Controller](https://github.com/kubernetes-sigs/aws-load-balancer-controller/blob/main/docs/install/iam_policy.json) - - [Load Balancer Controller Target Group Binding 
Only](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.4/deploy/installation/#iam-permission-subset-for-those-who-use-targetgroupbinding-only-and-dont-plan-to-use-the-aws-load-balancer-controller-to-manage-security-group-rules) -- [App Mesh Controller](https://github.com/aws/aws-app-mesh-controller-for-k8s/blob/master/config/iam/controller-iam-policy.json) - - [App Mesh Envoy Proxy](https://raw.githubusercontent.com/aws/aws-app-mesh-controller-for-k8s/master/config/iam/envoy-iam-policy.json) -- [Managed Service for Prometheus](https://docs.aws.amazon.com/prometheus/latest/userguide/set-up-irsa.html) -- [Node Termination Handler](https://github.com/aws/aws-node-termination-handler#5-create-an-iam-role-for-the-pods) -- [Velero](https://github.com/vmware-tanzu/velero-plugin-for-aws#option-1-set-permissions-with-an-iam-user) -- [VPC CNI](https://docs.aws.amazon.com/eks/latest/userguide/cni-iam-role.html) - -See [terraform-aws-iam/modules/iam-role-for-service-accounts](https://github.com/terraform-aws-modules/terraform-aws-iam/tree/master/modules/iam-role-for-service-accounts-eks) for current list of supported addon/controller policies as more are added to the project. 
+- [EKS Blueprints](https://github.com/aws-ia/terraform-aws-eks-blueprints) ## Usage ```hcl module "eks" { source = "terraform-aws-modules/eks/aws" - version = "~> 19.0" + version = "~> 20.0" cluster_name = "my-cluster" - cluster_version = "1.27" + cluster_version = "1.29" cluster_endpoint_public_access = true @@ -98,51 +58,13 @@ module "eks" { subnet_ids = ["subnet-abcde012", "subnet-bcde012a", "subnet-fghi345a"] control_plane_subnet_ids = ["subnet-xyzde987", "subnet-slkjf456", "subnet-qeiru789"] - # Self Managed Node Group(s) - self_managed_node_group_defaults = { - instance_type = "m6i.large" - update_launch_template_default_version = true - iam_role_additional_policies = { - AmazonSSMManagedInstanceCore = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore" - } - } - - self_managed_node_groups = { - one = { - name = "mixed-1" - max_size = 5 - desired_size = 2 - - use_mixed_instances_policy = true - mixed_instances_policy = { - instances_distribution = { - on_demand_base_capacity = 0 - on_demand_percentage_above_base_capacity = 10 - spot_allocation_strategy = "capacity-optimized" - } - - override = [ - { - instance_type = "m5.large" - weighted_capacity = "1" - }, - { - instance_type = "m6i.large" - weighted_capacity = "2" - }, - ] - } - } - } - # EKS Managed Node Group(s) eks_managed_node_group_defaults = { instance_types = ["m6i.large", "m5.large", "m5n.large", "m5zn.large"] } eks_managed_node_groups = { - blue = {} - green = { + example = { min_size = 1 max_size = 10 desired_size = 1 @@ -152,47 +74,28 @@ module "eks" { } } - # Fargate Profile(s) - fargate_profiles = { - default = { - name = "default" - selectors = [ - { - namespace = "default" + # Cluster access entry + # To add the current caller identity as an administrator + enable_cluster_creator_admin_permissions = true + + access_entries = { + # One access entry with a policy associated + example = { + kubernetes_groups = [] + principal_arn = "arn:aws:iam::123456789012:role/something" + + 
policy_associations = { + example = { + policy_arn = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSViewPolicy" + access_scope = { + namespaces = ["default"] + type = "namespace" + } } - ] + } } } - # aws-auth configmap - manage_aws_auth_configmap = true - - aws_auth_roles = [ - { - rolearn = "arn:aws:iam::66666666666:role/role1" - username = "role1" - groups = ["system:masters"] - }, - ] - - aws_auth_users = [ - { - userarn = "arn:aws:iam::66666666666:user/user1" - username = "user1" - groups = ["system:masters"] - }, - { - userarn = "arn:aws:iam::66666666666:user/user2" - username = "user2" - groups = ["system:masters"] - }, - ] - - aws_auth_accounts = [ - "777777777777", - "888888888888", - ] - tags = { Environment = "dev" Terraform = "true" @@ -200,9 +103,72 @@ module "eks" { } ``` +### Cluster Access Entry + +When enabling `authentication_mode = "API_AND_CONFIG_MAP"`, EKS will automatically create an access entry for the IAM role(s) used by managed nodegroup(s) and Fargate profile(s). There are no additional actions required by users. For self-managed nodegroups and the Karpenter sub-module, this project automatically adds the access entry on behalf of users so there are no additional actions required by users. + +On clusters that were created prior to CAM support, there will be an existing access entry for the cluster creator. This was previously not visible when using `aws-auth` ConfigMap, but will become visible when access entry is enabled. + +### Bootstrap Cluster Creator Admin Permissions + +Setting the `bootstrap_cluster_creator_admin_permissions` is a one time operation when the cluster is created; it cannot be modified later through the EKS API. In this project we are hardcoding this to `false`. 
If users wish to achieve the same functionality, this can be done through an access entry which can be enabled or disabled at any time of their choosing using the variable `enable_cluster_creator_admin_permissions` + +### Enabling EFA Support + +When enabling EFA support via `enable_efa_support = true`, there are two locations this can be specified - one at the cluster level, and one at the nodegroup level. Enabling at the cluster level will add the EFA required ingress/egress rules to the shared security group created for the nodegroup(s). Enabling at the nodegroup level will do the following (per nodegroup where enabled): + +1. All EFA interfaces supported by the instance will be exposed on the launch template used by the nodegroup +2. A placement group with `strategy = "cluster"` per EFA requirements is created and passed to the launch template used by the nodegroup +3. Data sources will reverse lookup the availability zones that support the instance type selected based on the subnets provided, ensuring that only the associated subnets are passed to the launch template and therefore used by the placement group. This avoids the placement group being created in an availability zone that does not support the instance type selected. + +> [!TIP] +> Use the [aws-efa-k8s-device-plugin](https://github.com/aws/eks-charts/tree/master/stable/aws-efa-k8s-device-plugin) Helm chart to expose the EFA interfaces on the nodes as an extended resource, and allow pods to request the interfaces be mounted to their containers. +> +> The EKS AL2 GPU AMI comes with the necessary EFA components pre-installed - you just need to expose the EFA devices on the nodes via their launch templates, ensure the required EFA security group rules are in place, and deploy the `aws-efa-k8s-device-plugin` in order to start utilizing EFA within your cluster. 
Your application container will need to have the necessary libraries and runtime in order to utilize communication over the EFA interfaces (NCCL, aws-ofi-nccl, hwloc, libfabric, aws-neuronx-collectives, CUDA, etc.). + +If you disable the creation and use of the managed nodegroup custom launch template (`create_launch_template = false` and/or `use_custom_launch_template = false`), this will interfere with the EFA functionality provided. In addition, if you do not supply an `instance_type` for self-managed nodegroup(s), or `instance_types` for the managed nodegroup(s), this will also interfere with the functionality. In order to support the EFA functionality provided by `enable_efa_support = true`, you must utilize the custom launch template created/provided by this module, and supply an `instance_type`/`instance_types` for the respective nodegroup. + +The logic behind supporting EFA uses a data source to lookup the instance type to retrieve the number of interfaces that the instance supports in order to enumerate and expose those interfaces on the launch template created. For managed nodegroups where a list of instance types are supported, the first instance type in the list is used to calculate the number of EFA interfaces supported. Mixing instance types with varying number of interfaces is not recommended for EFA (or in some cases, mixing instance types is not supported - i.e. - p5.48xlarge and p4d.24xlarge). In addition to exposing the EFA interfaces and updating the security group rules, a placement group is created per the EFA requirements and only the availability zones that support the instance type selected are used in the subnets provided to the nodegroup. + +In order to enable EFA support, you will have to specify `enable_efa_support = true` on both the cluster and each nodegroup that you wish to enable EFA support for: + +```hcl +module "eks" { + source = "terraform-aws-modules/eks/aws" + version = "~> 20.0" + + # Truncated for brevity ... 
+ + # Adds the EFA required security group rules to the shared + # security group created for the nodegroup(s) + enable_efa_support = true + + eks_managed_node_groups = { + example = { + instance_types = ["p5.48xlarge"] + + # Exposes all EFA interfaces on the launch template created by the nodegroup(s) + # This would expose all 32 EFA interfaces for the p5.48xlarge instance type + enable_efa_support = true + + pre_bootstrap_user_data = <<-EOT + # Mount NVME instance store volumes since they are typically + # available on instance types that support EFA + setup-local-disks raid0 + EOT + + # EFA should only be enabled when connecting 2 or more nodes + # Do not use EFA on a single node workload + min_size = 2 + max_size = 10 + desired_size = 2 + } + } +} +``` + ## Examples -- [Complete](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/complete): EKS Cluster using all available node group types in various combinations demonstrating many of the supported features and configurations - [EKS Managed Node Group](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/eks_managed_node_group): EKS Cluster using EKS managed node groups - [Fargate Profile](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/fargate_profile): EKS cluster using [Fargate Profiles](https://docs.aws.amazon.com/eks/latest/userguide/fargate.html) - [Karpenter](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/karpenter): EKS Cluster with [Karpenter](https://karpenter.sh/) provisioned for intelligent data plane management @@ -222,9 +188,8 @@ We are grateful to the community for contributing bugfixes and improvements! 
Ple | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | >= 4.47 | -| [kubernetes](#requirement\_kubernetes) | >= 2.10 | +| [terraform](#requirement\_terraform) | >= 1.3.2 | +| [aws](#requirement\_aws) | >= 5.40 | | [time](#requirement\_time) | >= 0.9 | | [tls](#requirement\_tls) | >= 3.0 | @@ -232,8 +197,7 @@ We are grateful to the community for contributing bugfixes and improvements! Ple | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 4.47 | -| [kubernetes](#provider\_kubernetes) | >= 2.10 | +| [aws](#provider\_aws) | >= 5.40 | | [time](#provider\_time) | >= 0.9 | | [tls](#provider\_tls) | >= 3.0 | @@ -243,7 +207,7 @@ We are grateful to the community for contributing bugfixes and improvements! Ple |------|--------|---------| | [eks\_managed\_node\_group](#module\_eks\_managed\_node\_group) | ./modules/eks-managed-node-group | n/a | | [fargate\_profile](#module\_fargate\_profile) | ./modules/fargate-profile | n/a | -| [kms](#module\_kms) | terraform-aws-modules/kms/aws | 1.1.0 | +| [kms](#module\_kms) | terraform-aws-modules/kms/aws | 2.1.0 | | [self\_managed\_node\_group](#module\_self\_managed\_node\_group) | ./modules/self-managed-node-group | n/a | ## Resources @@ -252,6 +216,8 @@ We are grateful to the community for contributing bugfixes and improvements! 
Ple |------|------| | [aws_cloudwatch_log_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_log_group) | resource | | [aws_ec2_tag.cluster_primary_security_group](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ec2_tag) | resource | +| [aws_eks_access_entry.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_access_entry) | resource | +| [aws_eks_access_policy_association.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_access_policy_association) | resource | | [aws_eks_addon.before_compute](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_addon) | resource | | [aws_eks_addon.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_addon) | resource | | [aws_eks_cluster.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_cluster) | resource | @@ -267,8 +233,6 @@ We are grateful to the community for contributing bugfixes and improvements! 
Ple | [aws_security_group.node](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | | [aws_security_group_rule.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource | | [aws_security_group_rule.node](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource | -| [kubernetes_config_map.aws_auth](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/config_map) | resource | -| [kubernetes_config_map_v1_data.aws_auth](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/config_map_v1_data) | resource | | [time_sleep.this](https://registry.terraform.io/providers/hashicorp/time/latest/docs/resources/sleep) | resource | | [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | | [aws_eks_addon_version.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_addon_version) | data source | @@ -282,15 +246,13 @@ We are grateful to the community for contributing bugfixes and improvements! 
Ple | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| +| [access\_entries](#input\_access\_entries) | Map of access entries to add to the cluster | `any` | `{}` | no | | [attach\_cluster\_encryption\_policy](#input\_attach\_cluster\_encryption\_policy) | Indicates whether or not to attach an additional policy for the cluster IAM role to utilize the encryption key provided | `bool` | `true` | no | -| [aws\_auth\_accounts](#input\_aws\_auth\_accounts) | List of account maps to add to the aws-auth configmap | `list(any)` | `[]` | no | -| [aws\_auth\_fargate\_profile\_pod\_execution\_role\_arns](#input\_aws\_auth\_fargate\_profile\_pod\_execution\_role\_arns) | List of Fargate profile pod execution role ARNs to add to the aws-auth configmap | `list(string)` | `[]` | no | -| [aws\_auth\_node\_iam\_role\_arns\_non\_windows](#input\_aws\_auth\_node\_iam\_role\_arns\_non\_windows) | List of non-Windows based node IAM role ARNs to add to the aws-auth configmap | `list(string)` | `[]` | no | -| [aws\_auth\_node\_iam\_role\_arns\_windows](#input\_aws\_auth\_node\_iam\_role\_arns\_windows) | List of Windows based node IAM role ARNs to add to the aws-auth configmap | `list(string)` | `[]` | no | -| [aws\_auth\_roles](#input\_aws\_auth\_roles) | List of role maps to add to the aws-auth configmap | `list(any)` | `[]` | no | -| [aws\_auth\_users](#input\_aws\_auth\_users) | List of user maps to add to the aws-auth configmap | `list(any)` | `[]` | no | +| [authentication\_mode](#input\_authentication\_mode) | The authentication mode for the cluster. Valid values are `CONFIG_MAP`, `API` or `API_AND_CONFIG_MAP` | `string` | `"API_AND_CONFIG_MAP"` | no | +| [cloudwatch\_log\_group\_class](#input\_cloudwatch\_log\_group\_class) | Specified the log class of the log group. 
Possible values are: `STANDARD` or `INFREQUENT_ACCESS` | `string` | `null` | no | | [cloudwatch\_log\_group\_kms\_key\_id](#input\_cloudwatch\_log\_group\_kms\_key\_id) | If a KMS Key ARN is set, this key will be used to encrypt the corresponding log group. Please be sure that the KMS Key has an appropriate key policy (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/encrypt-log-data-kms.html) | `string` | `null` | no | | [cloudwatch\_log\_group\_retention\_in\_days](#input\_cloudwatch\_log\_group\_retention\_in\_days) | Number of days to retain log events. Default retention - 90 days | `number` | `90` | no | +| [cloudwatch\_log\_group\_tags](#input\_cloudwatch\_log\_group\_tags) | A map of additional tags to add to the cloudwatch log group created | `map(string)` | `{}` | no | | [cluster\_additional\_security\_group\_ids](#input\_cluster\_additional\_security\_group\_ids) | List of additional, externally created security group IDs to attach to the cluster control plane | `list(string)` | `[]` | no | | [cluster\_addons](#input\_cluster\_addons) | Map of cluster addon configurations to enable for the cluster. Addon name can be the map keys or set with `name` | `any` | `{}` | no | | [cluster\_addons\_timeouts](#input\_cluster\_addons\_timeouts) | Create, update, and delete timeout configurations for the cluster addons | `map(string)` | `{}` | no | @@ -304,9 +266,8 @@ We are grateful to the community for contributing bugfixes and improvements! 
Ple | [cluster\_endpoint\_private\_access](#input\_cluster\_endpoint\_private\_access) | Indicates whether or not the Amazon EKS private API server endpoint is enabled | `bool` | `true` | no | | [cluster\_endpoint\_public\_access](#input\_cluster\_endpoint\_public\_access) | Indicates whether or not the Amazon EKS public API server endpoint is enabled | `bool` | `false` | no | | [cluster\_endpoint\_public\_access\_cidrs](#input\_cluster\_endpoint\_public\_access\_cidrs) | List of CIDR blocks which can access the Amazon EKS public API server endpoint | `list(string)` |
[
"0.0.0.0/0"
]
| no | -| [cluster\_iam\_role\_dns\_suffix](#input\_cluster\_iam\_role\_dns\_suffix) | Base DNS domain name for the current partition (e.g., amazonaws.com in AWS Commercial, amazonaws.com.cn in AWS China) | `string` | `null` | no | | [cluster\_identity\_providers](#input\_cluster\_identity\_providers) | Map of cluster identity provider configurations to enable for the cluster. Note - this is different/separate from IRSA | `any` | `{}` | no | -| [cluster\_ip\_family](#input\_cluster\_ip\_family) | The IP family used to assign Kubernetes pod and service addresses. Valid values are `ipv4` (default) and `ipv6`. You can only specify an IP family when you create a cluster, changing this value will force a new cluster to be created | `string` | `null` | no | +| [cluster\_ip\_family](#input\_cluster\_ip\_family) | The IP family used to assign Kubernetes pod and service addresses. Valid values are `ipv4` (default) and `ipv6`. You can only specify an IP family when you create a cluster, changing this value will force a new cluster to be created | `string` | `"ipv4"` | no | | [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster | `string` | `""` | no | | [cluster\_security\_group\_additional\_rules](#input\_cluster\_security\_group\_additional\_rules) | List of additional security group rules to add to the cluster security group created. Set `source_node_security_group = true` inside rules to set the `node_security_group` as source | `any` | `{}` | no | | [cluster\_security\_group\_description](#input\_cluster\_security\_group\_description) | Description of the cluster security group created | `string` | `"EKS cluster security group"` | no | @@ -320,8 +281,7 @@ We are grateful to the community for contributing bugfixes and improvements! 
Ple | [cluster\_timeouts](#input\_cluster\_timeouts) | Create, update, and delete timeout configurations for the cluster | `map(string)` | `{}` | no | | [cluster\_version](#input\_cluster\_version) | Kubernetes `.` version to use for the EKS cluster (i.e.: `1.27`) | `string` | `null` | no | | [control\_plane\_subnet\_ids](#input\_control\_plane\_subnet\_ids) | A list of subnet IDs where the EKS cluster control plane (ENIs) will be provisioned. Used for expanding the pool of subnets used by nodes/node groups without replacing the EKS control plane | `list(string)` | `[]` | no | -| [create](#input\_create) | Controls if EKS resources should be created (affects nearly all resources) | `bool` | `true` | no | -| [create\_aws\_auth\_configmap](#input\_create\_aws\_auth\_configmap) | Determines whether to create the aws-auth configmap. NOTE - this is only intended for scenarios where the configmap does not exist (i.e. - when using only self-managed node groups). Most users should use `manage_aws_auth_configmap` | `bool` | `false` | no | +| [create](#input\_create) | Controls if resources should be created (affects nearly all resources) | `bool` | `true` | no | | [create\_cloudwatch\_log\_group](#input\_create\_cloudwatch\_log\_group) | Determines whether a log group is created by this module for the cluster logs. If not, AWS will automatically create one if logging is enabled | `bool` | `true` | no | | [create\_cluster\_primary\_security\_group\_tags](#input\_create\_cluster\_primary\_security\_group\_tags) | Indicates whether or not to tag the cluster's primary security group. This security group is created by the EKS service, not the module, and therefore tagging is handled after cluster creation | `bool` | `true` | no | | [create\_cluster\_security\_group](#input\_create\_cluster\_security\_group) | Determines if a security group is created for the cluster. 
Note: the EKS service creates a primary security group for the cluster by default | `bool` | `true` | no | @@ -333,8 +293,10 @@ We are grateful to the community for contributing bugfixes and improvements! Ple | [dataplane\_wait\_duration](#input\_dataplane\_wait\_duration) | Duration to wait after the EKS cluster has become active before creating the dataplane components (EKS managed nodegroup(s), self-managed nodegroup(s), Fargate profile(s)) | `string` | `"30s"` | no | | [eks\_managed\_node\_group\_defaults](#input\_eks\_managed\_node\_group\_defaults) | Map of EKS managed node group default configurations | `any` | `{}` | no | | [eks\_managed\_node\_groups](#input\_eks\_managed\_node\_groups) | Map of EKS managed node group definitions to create | `any` | `{}` | no | +| [enable\_cluster\_creator\_admin\_permissions](#input\_enable\_cluster\_creator\_admin\_permissions) | Indicates whether or not to add the cluster creator (the identity used by Terraform) as an administrator via access entry | `bool` | `false` | no | +| [enable\_efa\_support](#input\_enable\_efa\_support) | Determines whether to enable Elastic Fabric Adapter (EFA) support | `bool` | `false` | no | | [enable\_irsa](#input\_enable\_irsa) | Determines whether to create an OpenID Connect Provider for EKS to enable IRSA | `bool` | `true` | no | -| [enable\_kms\_key\_rotation](#input\_enable\_kms\_key\_rotation) | Specifies whether key rotation is enabled. 
Defaults to `true` | `bool` | `true` | no | +| [enable\_kms\_key\_rotation](#input\_enable\_kms\_key\_rotation) | Specifies whether key rotation is enabled | `bool` | `true` | no | | [fargate\_profile\_defaults](#input\_fargate\_profile\_defaults) | Map of Fargate Profile default configurations | `any` | `{}` | no | | [fargate\_profiles](#input\_fargate\_profiles) | Map of Fargate Profile definitions to create | `any` | `{}` | no | | [iam\_role\_additional\_policies](#input\_iam\_role\_additional\_policies) | Additional policies to be added to the IAM role | `map(string)` | `{}` | no | @@ -345,17 +307,17 @@ We are grateful to the community for contributing bugfixes and improvements! Ple | [iam\_role\_permissions\_boundary](#input\_iam\_role\_permissions\_boundary) | ARN of the policy that is used to set the permissions boundary for the IAM role | `string` | `null` | no | | [iam\_role\_tags](#input\_iam\_role\_tags) | A map of additional tags to add to the IAM role created | `map(string)` | `{}` | no | | [iam\_role\_use\_name\_prefix](#input\_iam\_role\_use\_name\_prefix) | Determines whether the IAM role name (`iam_role_name`) is used as a prefix | `bool` | `true` | no | +| [include\_oidc\_root\_ca\_thumbprint](#input\_include\_oidc\_root\_ca\_thumbprint) | Determines whether to include the root CA thumbprint in the OpenID Connect (OIDC) identity provider's server certificate(s) | `bool` | `true` | no | | [kms\_key\_administrators](#input\_kms\_key\_administrators) | A list of IAM ARNs for [key administrators](https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#key-policy-default-allow-administrators). If no value is provided, the current caller identity is used to ensure at least one key admin is available | `list(string)` | `[]` | no | | [kms\_key\_aliases](#input\_kms\_key\_aliases) | A list of aliases to create. 
Note - due to the use of `toset()`, values must be static strings and not computed values | `list(string)` | `[]` | no | | [kms\_key\_deletion\_window\_in\_days](#input\_kms\_key\_deletion\_window\_in\_days) | The waiting period, specified in number of days. After the waiting period ends, AWS KMS deletes the KMS key. If you specify a value, it must be between `7` and `30`, inclusive. If you do not specify a value, it defaults to `30` | `number` | `null` | no | | [kms\_key\_description](#input\_kms\_key\_description) | The description of the key as viewed in AWS console | `string` | `null` | no | -| [kms\_key\_enable\_default\_policy](#input\_kms\_key\_enable\_default\_policy) | Specifies whether to enable the default key policy. Defaults to `false` | `bool` | `false` | no | +| [kms\_key\_enable\_default\_policy](#input\_kms\_key\_enable\_default\_policy) | Specifies whether to enable the default key policy | `bool` | `true` | no | | [kms\_key\_override\_policy\_documents](#input\_kms\_key\_override\_policy\_documents) | List of IAM policy documents that are merged together into the exported document. In merging, statements with non-blank `sid`s will override statements with the same `sid` | `list(string)` | `[]` | no | | [kms\_key\_owners](#input\_kms\_key\_owners) | A list of IAM ARNs for those who will have full key permissions (`kms:*`) | `list(string)` | `[]` | no | | [kms\_key\_service\_users](#input\_kms\_key\_service\_users) | A list of IAM ARNs for [key service users](https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#key-policy-service-integration) | `list(string)` | `[]` | no | | [kms\_key\_source\_policy\_documents](#input\_kms\_key\_source\_policy\_documents) | List of IAM policy documents that are merged together into the exported document. 
Statements must have unique `sid`s | `list(string)` | `[]` | no | | [kms\_key\_users](#input\_kms\_key\_users) | A list of IAM ARNs for [key users](https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#key-policy-default-allow-users) | `list(string)` | `[]` | no | -| [manage\_aws\_auth\_configmap](#input\_manage\_aws\_auth\_configmap) | Determines whether to manage the aws-auth configmap | `bool` | `false` | no | | [node\_security\_group\_additional\_rules](#input\_node\_security\_group\_additional\_rules) | List of additional security group rules to add to the node security group created. Set `source_cluster_security_group = true` inside rules to set the `cluster_security_group` as source | `any` | `{}` | no | | [node\_security\_group\_description](#input\_node\_security\_group\_description) | Description of the node security group created | `string` | `"EKS node shared security group"` | no | | [node\_security\_group\_enable\_recommended\_rules](#input\_node\_security\_group\_enable\_recommended\_rules) | Determines whether to enable recommended security group rules for the node security group created. This includes node-to-node TCP ingress on ephemeral ports and allows all egress traffic | `bool` | `true` | no | @@ -377,7 +339,8 @@ We are grateful to the community for contributing bugfixes and improvements! 
Ple | Name | Description | |------|-------------| -| [aws\_auth\_configmap\_yaml](#output\_aws\_auth\_configmap\_yaml) | [DEPRECATED - use `var.manage_aws_auth_configmap`] Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles | +| [access\_entries](#output\_access\_entries) | Map of access entries created and their attributes | +| [access\_policy\_associations](#output\_access\_policy\_associations) | Map of eks cluster access policy associations created and their attributes | | [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created | | [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created | | [cluster\_addons](#output\_cluster\_addons) | Map of attribute maps for all EKS cluster addons enabled | @@ -389,12 +352,14 @@ We are grateful to the community for contributing bugfixes and improvements! Ple | [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role | | [cluster\_id](#output\_cluster\_id) | The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts | | [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled | +| [cluster\_ip\_family](#output\_cluster\_ip\_family) | The IP family used by the cluster (e.g. `ipv4` or `ipv6`) | | [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster | | [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider | | [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster | | [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. 
Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console | | [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group | | [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | ID of the cluster security group | +| [cluster\_service\_cidr](#output\_cluster\_service\_cidr) | The CIDR block where Kubernetes pod and service IP addresses are assigned from | | [cluster\_status](#output\_cluster\_status) | Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED` | | [cluster\_tls\_certificate\_sha1\_fingerprint](#output\_cluster\_tls\_certificate\_sha1\_fingerprint) | The SHA1 fingerprint of the public key of the cluster's certificate | | [cluster\_version](#output\_cluster\_version) | The Kubernetes version for the cluster | @@ -414,7 +379,7 @@ We are grateful to the community for contributing bugfixes and improvements! Ple ## License -Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraform-aws-rds-aurora/tree/master/LICENSE) for full details. +Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/LICENSE) for full details. 
## Additional information for users from Russia and Belarus diff --git a/docs/README.md b/docs/README.md index 889b8481e4..144826b4cf 100644 --- a/docs/README.md +++ b/docs/README.md @@ -4,10 +4,10 @@ - [Frequently Asked Questions](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/faq.md) - [Compute Resources](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/compute_resources.md) -- [IRSA Integration](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/irsa_integration.md) - [User Data](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/user_data.md) - [Network Connectivity](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/network_connectivity.md) - Upgrade Guides - [Upgrade to v17.x](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/UPGRADE-17.0.md) - [Upgrade to v18.x](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/UPGRADE-18.0.md) - [Upgrade to v19.x](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/UPGRADE-19.0.md) + - [Upgrade to v20.x](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/UPGRADE-20.0.md) diff --git a/docs/UPGRADE-18.0.md b/docs/UPGRADE-18.0.md index 3bfb672c3f..3f25ca8e43 100644 --- a/docs/UPGRADE-18.0.md +++ b/docs/UPGRADE-18.0.md @@ -13,6 +13,14 @@ cluster_security_group_name = $CLUSTER_NAME cluster_security_group_description = "EKS cluster security group." ``` +This configuration assumes that [`create_iam_role`](https://github.com/terraform-aws-modules/terraform-aws-eks#input_create_iam_role) is set to `true`, which is the default value. 
+ +As the location of the Terraform state of the IAM role has been changed from 17.x to 18.x, you'll also have to move the state before running `terraform apply` by calling: + +``` +terraform state mv 'module.eks.aws_iam_role.cluster[0]' 'module.eks.aws_iam_role.this[0]' +``` + See more information [here](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1744#issuecomment-1027359982) ## List of backwards incompatible changes diff --git a/docs/UPGRADE-19.0.md b/docs/UPGRADE-19.0.md index 19bd5f3946..f626129be1 100644 --- a/docs/UPGRADE-19.0.md +++ b/docs/UPGRADE-19.0.md @@ -58,7 +58,7 @@ Please consult the `examples` directory for reference example configurations. If ### Variable and output changes 1. Removed variables: - + - `node_security_group_ntp_ipv4_cidr_block` - default security group settings have an egress rule for ALL to `0.0.0.0/0`/`::/0` - `node_security_group_ntp_ipv6_cidr_block` - default security group settings have an egress rule for ALL to `0.0.0.0/0`/`::/0` - Self-managed node groups: @@ -364,8 +364,12 @@ EKS managed node groups on `v18.x` by default create a security group that does # OIDC Identity provider cluster_identity_providers = { - sts = { - client_id = "sts.amazonaws.com" + cognito = { + client_id = "702vqsrjicklgb7c5b7b50i1gc" + issuer_url = "https://cognito-idp.us-west-2.amazonaws.com/us-west-2_re1u6bpRA" + username_claim = "email" + groups_claim = "cognito:groups" + groups_prefix = "gid:" } } diff --git a/docs/UPGRADE-20.0.md b/docs/UPGRADE-20.0.md new file mode 100644 index 0000000000..1f16712fda --- /dev/null +++ b/docs/UPGRADE-20.0.md @@ -0,0 +1,270 @@ +# Upgrade from v19.x to v20.x + +Please consult the `examples` directory for reference example configurations. If you find a bug, please open an issue with supporting configuration to reproduce. 
+
+## List of backwards incompatible changes
+
+- Minimum supported AWS provider version increased to `v5.34`
+- Minimum supported Terraform version increased to `v1.3` to support Terraform state `moved` blocks as well as other advanced features
+- The `resolve_conflicts` argument within the `cluster_addons` configuration has been replaced with `resolve_conflicts_on_create` and `resolve_conflicts_on_update` now that `resolve_conflicts` is deprecated
+- The default/fallback value for the `preserve` argument of `cluster_addons` is now set to `true`. This has shown to be useful for users deprovisioning clusters while avoiding the situation where the CNI is deleted too early and causes resources to be left orphaned resulting in conflicts.
+- The Karpenter sub-module's use of the `irsa` naming convention has been removed, along with an update to the Karpenter controller IAM policy to align with Karpenter's `v1beta1`/`v0.32` changes. Instead of referring to the role as `irsa` or `pod_identity`, it's simply an IAM role used by the Karpenter controller and there is support for use with either IRSA and/or Pod Identity (default) at this time
+- The `aws-auth` ConfigMap resources have been moved to a standalone sub-module. This removes the Kubernetes provider requirement from the main module and allows for the `aws-auth` ConfigMap to be managed independently of the main module. This sub-module will be removed entirely in the next major release.
+- Support for cluster access management has been added with the default authentication mode set as `API_AND_CONFIG_MAP`. This is a one way change if applied; if you wish to use `CONFIG_MAP`, you will need to set `authentication_mode = "CONFIG_MAP"` explicitly when upgrading.
+- Karpenter EventBridge rule key `spot_interrupt` updated to correct mis-spelling (was `spot_interupt`). 
This will cause the rule to be replaced
+
+### ⚠️ Upcoming Changes Planned in v21.0 ⚠️
+
+To give users advance notice and provide some future direction for this module, these are the following changes we will be looking to make in the next major release of this module:
+
+1. The `aws-auth` sub-module will be removed entirely from the project. Since this sub-module is captured in the v20.x releases, users can continue using it even after the module moves forward with the next major version. The long term strategy and direction is cluster access entry and to rely only on the AWS Terraform provider.
+2. The default value for `authentication_mode` will change to `API`. Aligning with point 1 above, this is a one way change, but users are free to specify the value of their choosing in place of this default (when the change is made). This module will proceed with an EKS API first strategy.
+3. The launch template and autoscaling group usage contained within the EKS managed nodegroup and self-managed nodegroup sub-modules might be replaced with the [`terraform-aws-autoscaling`](https://github.com/terraform-aws-modules/terraform-aws-autoscaling) module. At minimum, it makes sense to replace most of the functionality in the self-managed nodegroup module with this external module, but it's not yet clear if there is any benefit of using it in the EKS managed nodegroup sub-module. The interface that users interact with will stay the same, the changes will be internal to the implementation and we will do everything we can to keep the disruption to a minimum.
+4. The `platform` variable will be replaced and instead `ami_type` will become the standard across both self-managed nodegroup(s) and EKS managed nodegroup(s). As EKS expands its portfolio of supported operating systems, the `ami_type` is better suited to associate the correct user data format to the respective OS. 
The `platform` variable is a legacy artifact of self-managed nodegroups but not as descriptive as the `ami_type`, and therefore it will be removed in favor of `ami_type`. + +## Additional changes + +### Added + + - A module tag has been added to the cluster control plane + - Support for cluster access entries. The `bootstrap_cluster_creator_admin_permissions` setting on the control plane has been hardcoded to `false` since this operation is a one time operation only at cluster creation per the EKS API. Instead, users can enable/disable `enable_cluster_creator_admin_permissions` at any time to achieve the same functionality. This takes the identity that Terraform is using to make API calls and maps it into a cluster admin via an access entry. For users on existing clusters, you will need to remove the default cluster administrator that was created by EKS prior to the cluster access entry APIs - see the section [`Removing the default cluster administrator`](https://aws.amazon.com/blogs/containers/a-deep-dive-into-simplified-amazon-eks-access-management-controls/) for more details. + - Support for specifying the CloudWatch log group class (standard or infrequent access) + - Native support for Windows based managed nodegroups similar to AL2 and Bottlerocket + - Self-managed nodegroups now support `instance_maintenance_policy` and have added `max_healthy_percentage`, `scale_in_protected_instances`, and `standby_instances` arguments to the `instance_refresh.preferences` block + +### Modified + + - For `sts:AssumeRole` permissions by services, the use of dynamically looking up the DNS suffix has been replaced with the static value of `amazonaws.com`. This does not appear to change by partition and instead requires users to set this manually for non-commercial regions. 
+ - The default value for `kms_key_enable_default_policy` has changed from `false` to `true` to align with the default behavior of the `aws_kms_key` resource + - The Karpenter default value for `create_instance_profile` has changed from `true` to `false` to align with the changes in Karpenter v0.32 + - The Karpenter variable `create_instance_profile` default value has changed from `true` to `false`. Starting with Karpenter `v0.32.0`, Karpenter accepts an IAM role and creates the EC2 instance profile used by the nodes + +### Removed + + - The `complete` example has been removed due to its redundancy with the other examples + - References to the IRSA sub-module in the IAM repository have been removed. Once https://github.com/clowdhaus/terraform-aws-eks-pod-identity has been updated and moved into the organization, the documentation here will be updated to mention the new module. + +### Variable and output changes + +1. Removed variables: + + - `cluster_iam_role_dns_suffix` - replaced with a static string of `amazonaws.com` + - `manage_aws_auth_configmap` + - `create_aws_auth_configmap` + - `aws_auth_node_iam_role_arns_non_windows` + - `aws_auth_node_iam_role_arns_windows` + - `aws_auth_fargate_profile_pod_execution_role_arn` + - `aws_auth_roles` + - `aws_auth_users` + - `aws_auth_accounts` + + - Karpenter + - `irsa_tag_key` + - `irsa_tag_values` + - `irsa_subnet_account_id` + - `enable_karpenter_instance_profile_creation` + +2. 
Renamed variables: + + - Karpenter + - `create_irsa` -> `create_iam_role` + - `irsa_name` -> `iam_role_name` + - `irsa_use_name_prefix` -> `iam_role_name_prefix` + - `irsa_path` -> `iam_role_path` + - `irsa_description` -> `iam_role_description` + - `irsa_max_session_duration` -> `iam_role_max_session_duration` + - `irsa_permissions_boundary_arn` -> `iam_role_permissions_boundary_arn` + - `irsa_tags` -> `iam_role_tags` + - `policies` -> `iam_role_policies` + - `irsa_policy_name` -> `iam_policy_name` + - `irsa_ssm_parameter_arns` -> `ami_id_ssm_parameter_arns` + - `create_iam_role` -> `create_node_iam_role` + - `iam_role_additional_policies` -> `node_iam_role_additional_policies` + - `policies` -> `iam_role_policies` + - `iam_role_arn` -> `node_iam_role_arn` + - `iam_role_name` -> `node_iam_role_name` + - `iam_role_name_prefix` -> `node_iam_role_name_prefix` + - `iam_role_path` -> `node_iam_role_path` + - `iam_role_description` -> `node_iam_role_description` + - `iam_role_max_session_duration` -> `node_iam_role_max_session_duration` + - `iam_role_permissions_boundary_arn` -> `node_iam_role_permissions_boundary_arn` + - `iam_role_attach_cni_policy` -> `node_iam_role_attach_cni_policy` + - `iam_role_additional_policies` -> `node_iam_role_additional_policies` + - `iam_role_tags` -> `node_iam_role_tags` + +3. Added variables: + + - `create_access_entry` + - `enable_cluster_creator_admin_permissions` + - `authentication_mode` + - `access_entries` + - `cloudwatch_log_group_class` + + - Karpenter + - `iam_policy_name` + - `iam_policy_use_name_prefix` + - `iam_policy_description` + - `iam_policy_path` + - `enable_irsa` + - `create_access_entry` + - `access_entry_type` + + - Self-managed nodegroup + - `instance_maintenance_policy` + - `create_access_entry` + - `iam_role_arn` + +4. Removed outputs: + + - `aws_auth_configmap_yaml` + +5. 
Renamed outputs: + + - Karpenter + - `irsa_name` -> `iam_role_name` + - `irsa_arn` -> `iam_role_arn` + - `irsa_unique_id` -> `iam_role_unique_id` + - `role_name` -> `node_iam_role_name` + - `role_arn` -> `node_iam_role_arn` + - `role_unique_id` -> `node_iam_role_unique_id` + +6. Added outputs: + + - `access_entries` + + - Karpenter + - `node_access_entry_arn` + + - Self-managed nodegroup + - `access_entry_arn` + +## Upgrade Migrations + +### Diff of Before (v19.21) vs After (v20.0) + +```diff + module "eks" { + source = "terraform-aws-modules/eks/aws" +- version = "~> 19.21" ++ version = "~> 20.0" + +# If you want to maintain the current default behavior of v19.x ++ kms_key_enable_default_policy = false + +- manage_aws_auth_configmap = true + +- aws_auth_roles = [ +- { +- rolearn = "arn:aws:iam::66666666666:role/role1" +- username = "role1" +- groups = ["custom-role-group"] +- }, +- ] + +- aws_auth_users = [ +- { +- userarn = "arn:aws:iam::66666666666:user/user1" +- username = "user1" +- groups = ["custom-users-group"] +- }, +- ] +} + ++ module "eks" { ++ source = "terraform-aws-modules/eks/aws//modules/aws-auth" ++ version = "~> 20.0" + ++ manage_aws_auth_configmap = true + ++ aws_auth_roles = [ ++ { ++ rolearn = "arn:aws:iam::66666666666:role/role1" ++ username = "role1" ++ groups = ["custom-role-group"] ++ }, ++ ] + ++ aws_auth_users = [ ++ { ++ userarn = "arn:aws:iam::66666666666:user/user1" ++ username = "user1" ++ groups = ["custom-users-group"] ++ }, ++ ] ++ } +``` + +### Karpenter Diff of Before (v19.21) vs After (v20.0) + +```diff + module "eks" { + source = "terraform-aws-modules/eks/aws//modules/karpenter" +- version = "~> 19.21" ++ version = "~> 20.0" + +# If you wish to maintain the current default behavior of v19.x ++ enable_irsa = true ++ create_instance_profile = true + +# To avoid any resource re-creation ++ iam_role_name = "KarpenterIRSA-${module.eks.cluster_name}" ++ iam_role_description = "Karpenter IAM role for service account" ++ 
iam_policy_name = "KarpenterIRSA-${module.eks.cluster_name}" ++ iam_policy_description = "Karpenter IAM role for service account" +} +``` + +## Terraform State Moves + +#### ⚠️ Authentication Mode Changes ⚠️ + +Changing the `authentication_mode` is a one-way decision. See [announcement blog](https://aws.amazon.com/blogs/containers/a-deep-dive-into-simplified-amazon-eks-access-management-controls/) for further details: + +> Switching authentication modes on an existing cluster is a one-way operation. You can switch from CONFIG_MAP to API_AND_CONFIG_MAP. You can then switch from API_AND_CONFIG_MAP to API. You cannot revert these operations in the opposite direction. Meaning you cannot switch back to CONFIG_MAP or API_AND_CONFIG_MAP from API. And you cannot switch back to CONFIG_MAP from API_AND_CONFIG_MAP. + +> [!IMPORTANT] +> If migrating to cluster access entries and you will NOT have any entries that remain in the `aws-auth` ConfigMap, you do not need to remove the configmap from the statefile. You can simply follow the migration guide and once access entries have been created, you can let Terraform remove/delete the `aws-auth` ConfigMap. +> +> If you WILL have entries that remain in the `aws-auth` ConfigMap, then you will need to remove the ConfigMap resources from the statefile to avoid any disruptions. When you add the new `aws-auth` sub-module and apply the changes, the sub-module will upsert the ConfigMap on the cluster. Provided the necessary entries are defined in that sub-module's definition, it will "re-adopt" the ConfigMap under Terraform's control. 
+ +### authentication_mode = "CONFIG_MAP" + +If using `authentication_mode = "CONFIG_MAP"`, before making any changes, you will first need to remove the configmap from the statefile to avoid any disruptions: + +```sh +terraform state rm 'module.eks.kubernetes_config_map_v1_data.aws_auth[0]' +terraform state rm 'module.eks.kubernetes_config_map.aws_auth[0]' # include if Terraform created the original configmap +``` + +Once the configmap has been removed from the statefile, you can add the new `aws-auth` sub-module and copy the relevant definitions from the EKS module over to the new `aws-auth` sub-module definition (see before after diff above). + +> [!CAUTION] +> You will need to add entries to the `aws-auth` sub-module for any IAM roles used by nodegroups and/or Fargate profiles - the module no longer handles this in the background on behalf of users. +> +> When you apply the changes with the new sub-module, the configmap in the cluster will get updated with the contents provided in the sub-module definition, so please be sure all of the necessary entries are added before applying the changes. + +### authentication_mode = "API_AND_CONFIG_MAP" + +When using `authentication_mode = "API_AND_CONFIG_MAP"` and there are entries that will remain in the configmap (entries that cannot be replaced by cluster access entry), you will first need to update the `authentication_mode` on the cluster to `"API_AND_CONFIG_MAP"`. To help make this upgrade process easier, a copy of the changes defined in the [`v20.0.0`](https://github.com/terraform-aws-modules/terraform-aws-eks/pull/2858) PR have been captured [here](https://github.com/clowdhaus/terraform-aws-eks-v20-migrate) but with the `aws-auth` components still provided in the module. This means you get the equivalent of the `v20.0.0` module, but it still includes support for the `aws-auth` configmap. 
You can follow the provided README on that interim migration module for the order of execution and return here once the `authentication_mode` has been updated to `"API_AND_CONFIG_MAP"`. Note - EKS automatically adds access entries for the roles used by EKS managed nodegroups and Fargate profiles; users do not need to do anything additional for these roles. + +Once the `authentication_mode` has been updated, next you will need to remove the configmap from the statefile to avoid any disruptions: + +> [!NOTE] +> This is only required if there are entries that will remain in the `aws-auth` ConfigMap after migrating. Otherwise, you can skip this step and let Terraform destroy the ConfigMap. + +```sh +terraform state rm 'module.eks.kubernetes_config_map_v1_data.aws_auth[0]' +terraform state rm 'module.eks.kubernetes_config_map.aws_auth[0]' # include if Terraform created the original configmap +``` + +#### ℹ️ Terraform 1.7+ users + +If you are using Terraform `v1.7+`, you can utilize the [`remove`](https://developer.hashicorp.com/terraform/language/resources/syntax#removing-resources) to facilitate both the removal of the configmap through code. You can create a fork/clone of the provided [migration module](https://github.com/clowdhaus/terraform-aws-eks-migrate-v19-to-v20) and add the `remove` blocks and apply those changes before proceeding. We do not want to force users onto the bleeding edge with this module, so we have not included `remove` support at this time. + +Once the configmap has been removed from the statefile, you can add the new `aws-auth` sub-module and copy the relevant definitions from the EKS module over to the new `aws-auth` sub-module definition (see before after diff above). When you apply the changes with the new sub-module, the configmap in the cluster will get updated with the contents provided in the sub-module definition, so please be sure all of the necessary entries are added before applying the changes. 
In the before/example above - the configmap would remove any entries for roles used by nodegroups and/or Fargate Profiles, but maintain the custom entries for users and roles passed into the module definition. + +### authentication_mode = "API" + +In order to switch to `API` only using cluster access entry, you first need to update the `authentication_mode` on the cluster to `API_AND_CONFIG_MAP` without modifying the `aws-auth` configmap. To help make this upgrade process easier, a copy of the changes defined in the [`v20.0.0`](https://github.com/terraform-aws-modules/terraform-aws-eks/pull/2858) PR have been captured [here](https://github.com/clowdhaus/terraform-aws-eks-v20-migrate) but with the `aws-auth` components still provided in the module. This means you get the equivalent of the `v20.0.0` module, but it still includes support for the `aws-auth` configmap. You can follow the provided README on that interim migration module for the order of execution and return here once the `authentication_mode` has been updated to `"API_AND_CONFIG_MAP"`. Note - EKS automatically adds access entries for the roles used by EKS managed nodegroups and Fargate profiles; users do not need to do anything additional for these roles. + +Once the `authentication_mode` has been updated, you can update the `authentication_mode` on the cluster to `API` and remove the `aws-auth` configmap components. 
diff --git a/docs/compute_resources.md b/docs/compute_resources.md index 29fa2efb0f..721c29c1ee 100644 --- a/docs/compute_resources.md +++ b/docs/compute_resources.md @@ -31,7 +31,6 @@ Refer to the [EKS Managed Node Group documentation](https://docs.aws.amazon.com/ use_custom_launch_template = false ami_type = "BOTTLEROCKET_x86_64" - platform = "bottlerocket" } } ``` @@ -42,7 +41,6 @@ Refer to the [EKS Managed Node Group documentation](https://docs.aws.amazon.com/ eks_managed_node_groups = { bottlerocket_prepend_userdata = { ami_type = "BOTTLEROCKET_x86_64" - platform = "bottlerocket" bootstrap_extra_args = <<-EOT # extra args added @@ -84,7 +82,7 @@ Refer to the [EKS Managed Node Group documentation](https://docs.aws.amazon.com/ eks_managed_node_groups = { bottlerocket_custom_ami = { ami_id = "ami-0ff61e0bcfc81dc94" - platform = "bottlerocket" + ami_type = "BOTTLEROCKET_x86_64" # use module user data template to bootstrap enable_bootstrap_user_data = true @@ -123,15 +121,15 @@ Refer to the [Self Managed Node Group documentation](https://docs.aws.amazon.com } ``` -2. To use Bottlerocket, specify the `platform` as `bottlerocket` and supply a Bottlerocket OS AMI: +2. 
To use Bottlerocket, specify the `ami_type` as one of the respective `"BOTTLEROCKET_*" types` and supply a Bottlerocket OS AMI: ```hcl cluster_version = "1.27" self_managed_node_groups = { bottlerocket = { - platform = "bottlerocket" ami_id = data.aws_ami.bottlerocket_ami.id + ami_type = "BOTTLEROCKET_x86_64" } } ``` @@ -177,7 +175,6 @@ For example, the following creates 4 AWS EKS Managed Node Groups: # This overrides the OS used bottlerocket = { ami_type = "BOTTLEROCKET_x86_64" - platform = "bottlerocket" } } ``` diff --git a/docs/faq.md b/docs/faq.md index 215cdb2086..2abd6695c2 100644 --- a/docs/faq.md +++ b/docs/faq.md @@ -4,8 +4,9 @@ - [I received an error: `expect exactly one securityGroup tagged with kubernetes.io/cluster/ ...`](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/faq.md#i-received-an-error-expect-exactly-one-securitygroup-tagged-with-kubernetesioclustername-) - [Why are nodes not being registered?](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/faq.md#why-are-nodes-not-being-registered) - [Why are there no changes when a node group's `desired_size` is modified?](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/faq.md#why-are-there-no-changes-when-a-node-groups-desired_size-is-modified) -- [How can I deploy Windows based nodes?](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/faq.md#how-can-i-deploy-windows-based-nodes) - [How do I access compute resource attributes?](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/faq.md#how-do-i-access-compute-resource-attributes) +- [What add-ons are available?](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/faq.md#what-add-ons-are-available) +- [What configuration values are available for an add-on?](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/faq.md#what-configuration-values-are-available-for-an-add-on) ### 
Setting `disk_size` or `remote_access` does not make any changes @@ -24,27 +25,7 @@ By default, EKS creates a cluster primary security group that is created outside attach_cluster_primary_security_group = true # default is false ``` -2. If you want to use the cluster primary security group, you can disable the tag passed to the node security group by overriding the tag expected value like: - -```hcl - attach_cluster_primary_security_group = true # default is false - - node_security_group_tags = { - "kubernetes.io/cluster/" = null # or any other value other than "owned" - } -``` - -3. By overriding the tag expected value on the cluster primary security group like: - -```hcl - attach_cluster_primary_security_group = true # default is false - - cluster_tags = { - "kubernetes.io/cluster/" = null # or any other value other than "owned" - } -``` - -4. By not attaching the cluster primary security group. The cluster primary security group has quite broad access and the module has instead provided a security group with the minimum amount of access to launch an empty EKS cluster successfully and users are encouraged to open up access when necessary to support their workload. +2. By not attaching the cluster primary security group. The cluster primary security group has quite broad access and the module has instead provided a security group with the minimum amount of access to launch an empty EKS cluster successfully and users are encouraged to open up access when necessary to support their workload. ```hcl attach_cluster_primary_security_group = false # this is the default for the module @@ -52,6 +33,8 @@ By default, EKS creates a cluster primary security group that is created outside In theory, if you are attaching the cluster primary security group, you shouldn't need to use the shared node security group created by the module. However, this is left up to users to decide for their requirements and use case. 
+If you choose to use [Custom Networking](https://docs.aws.amazon.com/eks/latest/userguide/cni-custom-network.html), make sure to only attach the security groups matching your choice above in your ENIConfig resources. This will ensure you avoid redundant tags. + ### Why are nodes not being registered? Nodes not being able to register with the EKS control plane is generally due to networking mis-configurations. @@ -75,14 +58,6 @@ If you require a public endpoint, setting up both (public and private) and restr The module is configured to ignore this value. Unfortunately, Terraform does not support variables within the `lifecycle` block. The setting is ignored to allow autoscaling via controllers such as cluster autoscaler or Karpenter to work properly and without interference by Terraform. Changing the desired count must be handled outside of Terraform once the node group is created. -### How can I deploy Windows based nodes? - -To enable Windows support for your EKS cluster, you will need to apply some configuration manually. See the [Enabling Windows Support (Windows/MacOS/Linux)](https://docs.aws.amazon.com/eks/latest/userguide/windows-support.html#enable-windows-support). - -In addition, Windows based nodes require an additional cluster RBAC role (`eks:kube-proxy-windows`). - -Note: Windows based node support is limited to a default user data template that is provided due to the lack of Windows support and manual steps required to provision Windows based EKS nodes. - ### How do I access compute resource attributes? Examples of accessing the attributes of the compute resource(s) created by the root module are shown below. Note - the assumption is that your cluster module definition is named `eks` as in `module "eks" { ... 
}`: @@ -91,7 +66,7 @@ Examples of accessing the attributes of the compute resource(s) created by the r ```hcl eks_managed_role_arns = [for group in module.eks_managed_node_group : group.iam_role_arn] -```` +``` - Self Managed Node Group attributes @@ -104,3 +79,214 @@ self_managed_role_arns = [for group in module.self_managed_node_group : group.ia ```hcl fargate_profile_pod_execution_role_arns = [for group in module.fargate_profile : group.fargate_profile_pod_execution_role_arn] ``` + +### What add-ons are available? + +The available EKS add-ons can be [found here](https://docs.aws.amazon.com/eks/latest/userguide/eks-add-ons.html). You can also retrieve the available addons from the API using: + +```sh +aws eks describe-addon-versions --query 'addons[*].addonName' +``` + +### What configuration values are available for an add-on? + +You can retrieve the configuration value schema for a given addon using the following command: + +```sh +aws eks describe-addon-configuration --addon-name --addon-version --query 'configurationSchema' --output text | jq +``` + +For example: + +```sh +aws eks describe-addon-configuration --addon-name coredns --addon-version v1.11.1-eksbuild.8 --query 'configurationSchema' --output text | jq +``` + +Returns (at the time of writing): + +```json +{ + "$ref": "#/definitions/Coredns", + "$schema": "http://json-schema.org/draft-06/schema#", + "definitions": { + "Coredns": { + "additionalProperties": false, + "properties": { + "affinity": { + "default": { + "affinity": { + "nodeAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": { + "nodeSelectorTerms": [ + { + "matchExpressions": [ + { + "key": "kubernetes.io/os", + "operator": "In", + "values": [ + "linux" + ] + }, + { + "key": "kubernetes.io/arch", + "operator": "In", + "values": [ + "amd64", + "arm64" + ] + } + ] + } + ] + } + }, + "podAntiAffinity": { + "preferredDuringSchedulingIgnoredDuringExecution": [ + { + "podAffinityTerm": { + "labelSelector": { + "matchExpressions": [ 
+ { + "key": "k8s-app", + "operator": "In", + "values": [ + "kube-dns" + ] + } + ] + }, + "topologyKey": "kubernetes.io/hostname" + }, + "weight": 100 + } + ] + } + } + }, + "description": "Affinity of the coredns pods", + "type": [ + "object", + "null" + ] + }, + "computeType": { + "type": "string" + }, + "corefile": { + "description": "Entire corefile contents to use with installation", + "type": "string" + }, + "nodeSelector": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "podAnnotations": { + "properties": {}, + "title": "The podAnnotations Schema", + "type": "object" + }, + "podDisruptionBudget": { + "description": "podDisruptionBudget configurations", + "enabled": { + "default": true, + "description": "the option to enable managed PDB", + "type": "boolean" + }, + "maxUnavailable": { + "anyOf": [ + { + "pattern": ".*%$", + "type": "string" + }, + { + "type": "integer" + } + ], + "default": 1, + "description": "minAvailable value for managed PDB, can be either string or integer; if it's string, should end with %" + }, + "minAvailable": { + "anyOf": [ + { + "pattern": ".*%$", + "type": "string" + }, + { + "type": "integer" + } + ], + "description": "maxUnavailable value for managed PDB, can be either string or integer; if it's string, should end with %" + }, + "type": "object" + }, + "podLabels": { + "properties": {}, + "title": "The podLabels Schema", + "type": "object" + }, + "replicaCount": { + "type": "integer" + }, + "resources": { + "$ref": "#/definitions/Resources" + }, + "tolerations": { + "default": [ + { + "key": "CriticalAddonsOnly", + "operator": "Exists" + }, + { + "effect": "NoSchedule", + "key": "node-role.kubernetes.io/control-plane" + } + ], + "description": "Tolerations of the coredns pod", + "items": { + "type": "object" + }, + "type": "array" + }, + "topologySpreadConstraints": { + "description": "The coredns pod topology spread constraints", + "type": "array" + } + }, + "title": "Coredns", + "type": "object" 
+ }, + "Limits": { + "additionalProperties": false, + "properties": { + "cpu": { + "type": "string" + }, + "memory": { + "type": "string" + } + }, + "title": "Limits", + "type": "object" + }, + "Resources": { + "additionalProperties": false, + "properties": { + "limits": { + "$ref": "#/definitions/Limits" + }, + "requests": { + "$ref": "#/definitions/Limits" + } + }, + "title": "Resources", + "type": "object" + } + } +} +``` + +> [!NOTE] +> The available configuration values will vary between add-on versions, +> typically more configuration values will be added in later versions as functionality is enabled by EKS. diff --git a/docs/irsa_integration.md b/docs/irsa_integration.md deleted file mode 100644 index cc6a549500..0000000000 --- a/docs/irsa_integration.md +++ /dev/null @@ -1,84 +0,0 @@ - -### IRSA Integration - -An [IAM role for service accounts](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) module has been created to work in conjunction with this module. The [`iam-role-for-service-accounts`](https://github.com/terraform-aws-modules/terraform-aws-iam/tree/master/modules/iam-role-for-service-accounts-eks) module has a set of pre-defined IAM policies for common addons. Check [`policy.tf`](https://github.com/terraform-aws-modules/terraform-aws-iam/blob/master/modules/iam-role-for-service-accounts-eks/policies.tf) for a list of the policies currently supported. 
One example of this integration is shown below, and more can be found in the [`iam-role-for-service-accounts`](https://github.com/terraform-aws-modules/terraform-aws-iam/blob/master/examples/iam-role-for-service-accounts-eks/main.tf) example directory: - -```hcl -module "eks" { - source = "terraform-aws-modules/eks/aws" - - cluster_name = "example" - cluster_version = "1.27" - - cluster_addons = { - vpc-cni = { - resolve_conflicts = "OVERWRITE" - service_account_role_arn = module.vpc_cni_irsa.iam_role_arn - } - } - - vpc_id = "vpc-1234556abcdef" - subnet_ids = ["subnet-abcde012", "subnet-bcde012a", "subnet-fghi345a"] - - eks_managed_node_group_defaults = { - # We are using the IRSA created below for permissions - # However, we have to provision a new cluster with the policy attached FIRST - # before we can disable. Without this initial policy, - # the VPC CNI fails to assign IPs and nodes cannot join the new cluster - iam_role_attach_cni_policy = true - } - - eks_managed_node_groups = { - default = {} - } - - tags = { - Environment = "dev" - Terraform = "true" - } -} - -module "vpc_cni_irsa" { - source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" - - role_name = "vpc_cni" - attach_vpc_cni_policy = true - vpc_cni_enable_ipv4 = true - - oidc_providers = { - main = { - provider_arn = module.eks.oidc_provider_arn - namespace_service_accounts = ["kube-system:aws-node"] - } - } - - tags = { - Environment = "dev" - Terraform = "true" - } -} - -module "karpenter_irsa" { - source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" - - role_name = "karpenter_controller" - attach_karpenter_controller_policy = true - - karpenter_controller_cluster_id = module.eks.cluster_id - karpenter_controller_node_iam_role_arns = [ - module.eks.eks_managed_node_groups["default"].iam_role_arn - ] - - oidc_providers = { - main = { - provider_arn = module.eks.oidc_provider_arn - namespace_service_accounts = ["karpenter:karpenter"] - } - 
} - - tags = { - Environment = "dev" - Terraform = "true" - } -} -``` diff --git a/docs/user_data.md b/docs/user_data.md index 7b236944ff..1ddc5d1aa7 100644 --- a/docs/user_data.md +++ b/docs/user_data.md @@ -7,11 +7,12 @@ Users can see the various methods of using and providing user data through the [ - AWS EKS Managed Node Groups - By default, any supplied user data is pre-pended to the user data supplied by the EKS Managed Node Group service - If users supply an `ami_id`, the service no longers supplies user data to bootstrap nodes; users can enable `enable_bootstrap_user_data` and use the module provided user data template, or provide their own user data template - - `bottlerocket` platform user data must be in TOML format + - AMI types of `BOTTLEROCKET_*`, user data must be in TOML format + - AMI types of `WINDOWS_*`, user data must be in powershell/PS1 script format - Self Managed Node Groups - - `linux` platform (default) -> the user data template (bash/shell script) provided by the module is used as the default; users are able to provide their own user data template - - `bottlerocket` platform -> the user data template (TOML file) provided by the module is used as the default; users are able to provide their own user data template - - `windows` platform -> the user data template (powershell/PS1 script) provided by the module is used as the default; users are able to provide their own user data template + - `AL2_x86_64` AMI type (default) -> the user data template (bash/shell script) provided by the module is used as the default; users are able to provide their own user data template + - `BOTTLEROCKET_*` AMI types -> the user data template (TOML file) provided by the module is used as the default; users are able to provide their own user data template + - `WINDOWS_*` AMI types -> the user data template (powershell/PS1 script) provided by the module is used as the default; users are able to provide their own user data template The templates provided by the 
module can be found under the [templates directory](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/templates) @@ -36,7 +37,7 @@ When using an EKS managed node group, users have 2 primary routes for interactin bootstrap_extra_args = "..." post_bootstrap_user_data = "..." ``` - - If the AMI is **NOT** an AWS EKS Optimized AMI derivative, or if users wish to have more control over the user data that is supplied to the node when launched, users have the ability to supply their own user data template that will be rendered instead of the module supplied template. Note - only the variables that are supplied to the `templatefile()` for the respective platform/OS are available for use in the supplied template, otherwise users will need to pre-render/pre-populate the template before supplying the final template to the module for rendering as user data. + - If the AMI is **NOT** an AWS EKS Optimized AMI derivative, or if users wish to have more control over the user data that is supplied to the node when launched, users have the ability to supply their own user data template that will be rendered instead of the module supplied template. Note - only the variables that are supplied to the `templatefile()` for the respective AMI type are available for use in the supplied template, otherwise users will need to pre-render/pre-populate the template before supplying the final template to the module for rendering as user data. - Users can use the following variables to facilitate this process: ```hcl user_data_template_path = "./your/user_data.sh" # user supplied bootstrap user data template @@ -45,12 +46,12 @@ When using an EKS managed node group, users have 2 primary routes for interactin post_bootstrap_user_data = "..." ``` -| ℹ️ When using bottlerocket as the desired platform, since the user data for bottlerocket is TOML, all configurations are merged in the one file supplied as user data. 
Therefore, `pre_bootstrap_user_data` and `post_bootstrap_user_data` are not valid since the bottlerocket OS handles when various settings are applied. If you wish to supply additional configuration settings when using bottlerocket, supply them via the `bootstrap_extra_args` variable. For the linux platform, `bootstrap_extra_args` are settings that will be supplied to the [AWS EKS Optimized AMI bootstrap script](https://github.com/awslabs/amazon-eks-ami/blob/master/files/bootstrap.sh#L14) such as kubelet extra args, etc. See the [bottlerocket GitHub repository documentation](https://github.com/bottlerocket-os/bottlerocket#description-of-settings) for more details on what settings can be supplied via the `bootstrap_extra_args` variable. | +| ℹ️ When using bottlerocket, the supplied user data (TOML format) is merged in with the values supplied by EKS. Therefore, `pre_bootstrap_user_data` and `post_bootstrap_user_data` are not valid since the bottlerocket OS handles when various settings are applied. If you wish to supply additional configuration settings when using bottlerocket, supply them via the `bootstrap_extra_args` variable. For the `AL2_*` AMI types, `bootstrap_extra_args` are settings that will be supplied to the [AWS EKS Optimized AMI bootstrap script](https://github.com/awslabs/amazon-eks-ami/blob/master/files/bootstrap.sh#L14) such as kubelet extra args, etc. See the [bottlerocket GitHub repository documentation](https://github.com/bottlerocket-os/bottlerocket#description-of-settings) for more details on what settings can be supplied via the `bootstrap_extra_args` variable. 
| | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ### Self Managed Node Group -Self managed node groups require users to provide the necessary bootstrap user data. Users can elect to use the user data template provided by the module for their platform/OS or provide their own user data template for rendering by the module. +Self managed node groups require users to provide the necessary bootstrap user data. Users can elect to use the user data template provided by the module for their respective AMI type or provide their own user data template for rendering by the module. - If the AMI used is a derivative of the [AWS EKS Optimized AMI ](https://github.com/awslabs/amazon-eks-ami), users can opt in to using a template provided by the module that provides the minimum necessary configuration to bootstrap the node when launched: - Users can use the following variables to facilitate this process: @@ -60,7 +61,7 @@ Self managed node groups require users to provide the necessary bootstrap user d bootstrap_extra_args = "..." post_bootstrap_user_data = "..." 
``` - - If the AMI is **NOT** an AWS EKS Optimized AMI derivative, or if users wish to have more control over the user data that is supplied to the node when launched, users have the ability to supply their own user data template that will be rendered instead of the module supplied template. Note - only the variables that are supplied to the `templatefile()` for the respective platform/OS are available for use in the supplied template, otherwise users will need to pre-render/pre-populate the template before supplying the final template to the module for rendering as user data. + - If the AMI is **NOT** an AWS EKS Optimized AMI derivative, or if users wish to have more control over the user data that is supplied to the node when launched, users have the ability to supply their own user data template that will be rendered instead of the module supplied template. Note - only the variables that are supplied to the `templatefile()` for the respective AMI type are available for use in the supplied template, otherwise users will need to pre-render/pre-populate the template before supplying the final template to the module for rendering as user data. 
- Users can use the following variables to facilitate this process: ```hcl user_data_template_path = "./your/user_data.sh" # user supplied bootstrap user data template diff --git a/examples/complete/README.md b/examples/complete/README.md deleted file mode 100644 index c03547b5e2..0000000000 --- a/examples/complete/README.md +++ /dev/null @@ -1,107 +0,0 @@ -# Complete AWS EKS Cluster - -Configuration in this directory creates an AWS EKS cluster with a broad mix of various features and settings provided by this module: - -- AWS EKS cluster -- Disabled EKS cluster -- Self managed node group -- Externally attached self managed node group -- Disabled self managed node group -- EKS managed node group -- Externally attached EKS managed node group -- Disabled self managed node group -- Fargate profile -- Externally attached Fargate profile -- Disabled Fargate profile -- Cluster addons: CoreDNS, Kube-Proxy, and VPC-CNI -- IAM roles for service accounts - -## Usage - -To run this example you need to execute: - -```bash -$ terraform init -$ terraform plan -$ terraform apply -``` - -Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources. - - -## Requirements - -| Name | Version | -|------|---------| -| [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | >= 4.47 | -| [kubernetes](#requirement\_kubernetes) | >= 2.10 | - -## Providers - -| Name | Version | -|------|---------| -| [aws](#provider\_aws) | >= 4.47 | - -## Modules - -| Name | Source | Version | -|------|--------|---------| -| [disabled\_eks](#module\_disabled\_eks) | ../.. 
| n/a | -| [disabled\_eks\_managed\_node\_group](#module\_disabled\_eks\_managed\_node\_group) | ../../modules/eks-managed-node-group | n/a | -| [disabled\_fargate\_profile](#module\_disabled\_fargate\_profile) | ../../modules/fargate-profile | n/a | -| [disabled\_self\_managed\_node\_group](#module\_disabled\_self\_managed\_node\_group) | ../../modules/self-managed-node-group | n/a | -| [eks](#module\_eks) | ../.. | n/a | -| [eks\_managed\_node\_group](#module\_eks\_managed\_node\_group) | ../../modules/eks-managed-node-group | n/a | -| [fargate\_profile](#module\_fargate\_profile) | ../../modules/fargate-profile | n/a | -| [kms](#module\_kms) | terraform-aws-modules/kms/aws | ~> 1.5 | -| [self\_managed\_node\_group](#module\_self\_managed\_node\_group) | ../../modules/self-managed-node-group | n/a | -| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 4.0 | - -## Resources - -| Name | Type | -|------|------| -| [aws_iam_policy.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | -| [aws_security_group.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | -| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | -| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | - -## Inputs - -No inputs. 
- -## Outputs - -| Name | Description | -|------|-------------| -| [aws\_auth\_configmap\_yaml](#output\_aws\_auth\_configmap\_yaml) | Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles | -| [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created | -| [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created | -| [cluster\_addons](#output\_cluster\_addons) | Map of attribute maps for all EKS cluster addons enabled | -| [cluster\_arn](#output\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster | -| [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster | -| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for your Kubernetes API server | -| [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster | -| [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster | -| [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role | -| [cluster\_id](#output\_cluster\_id) | The ID of the EKS cluster. 
Note: currently a value is returned only for local EKS clusters created on Outposts | -| [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled | -| [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster | -| [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider | -| [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster | -| [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group | -| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console | -| [cluster\_status](#output\_cluster\_status) | Status of the EKS cluster. 
One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED` | -| [cluster\_tls\_certificate\_sha1\_fingerprint](#output\_cluster\_tls\_certificate\_sha1\_fingerprint) | The SHA1 fingerprint of the public key of the cluster's certificate | -| [eks\_managed\_node\_groups](#output\_eks\_managed\_node\_groups) | Map of attribute maps for all EKS managed node groups created | -| [eks\_managed\_node\_groups\_autoscaling\_group\_names](#output\_eks\_managed\_node\_groups\_autoscaling\_group\_names) | List of the autoscaling group names created by EKS managed node groups | -| [fargate\_profiles](#output\_fargate\_profiles) | Map of attribute maps for all EKS Fargate Profiles created | -| [kms\_key\_arn](#output\_kms\_key\_arn) | The Amazon Resource Name (ARN) of the key | -| [kms\_key\_id](#output\_kms\_key\_id) | The globally unique identifier for the key | -| [kms\_key\_policy](#output\_kms\_key\_policy) | The IAM resource policy set on the key | -| [oidc\_provider](#output\_oidc\_provider) | The OpenID Connect identity provider (issuer URL without leading `https://`) | -| [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true` | -| [self\_managed\_node\_groups](#output\_self\_managed\_node\_groups) | Map of attribute maps for all self managed node groups created | -| [self\_managed\_node\_groups\_autoscaling\_group\_names](#output\_self\_managed\_node\_groups\_autoscaling\_group\_names) | List of the autoscaling group names created by self-managed node groups | - diff --git a/examples/complete/main.tf b/examples/complete/main.tf deleted file mode 100644 index e946906821..0000000000 --- a/examples/complete/main.tf +++ /dev/null @@ -1,469 +0,0 @@ -provider "aws" { - region = local.region -} - -provider "kubernetes" { - host = module.eks.cluster_endpoint - cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) - - exec { - api_version = "client.authentication.k8s.io/v1beta1" - command = "aws" - # This 
requires the awscli to be installed locally where Terraform is executed - args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] - } -} - -data "aws_availability_zones" "available" {} -data "aws_caller_identity" "current" {} - -locals { - name = "ex-${replace(basename(path.cwd), "_", "-")}" - region = "eu-west-1" - - vpc_cidr = "10.0.0.0/16" - azs = slice(data.aws_availability_zones.available.names, 0, 3) - - tags = { - Example = local.name - GithubRepo = "terraform-aws-eks" - GithubOrg = "terraform-aws-modules" - } -} - -################################################################################ -# EKS Module -################################################################################ - -module "eks" { - source = "../.." - - cluster_name = local.name - cluster_endpoint_public_access = true - - cluster_addons = { - coredns = { - preserve = true - most_recent = true - - timeouts = { - create = "25m" - delete = "10m" - } - } - kube-proxy = { - most_recent = true - } - vpc-cni = { - most_recent = true - } - } - - # External encryption key - create_kms_key = false - cluster_encryption_config = { - resources = ["secrets"] - provider_key_arn = module.kms.key_arn - } - - iam_role_additional_policies = { - additional = aws_iam_policy.additional.arn - } - - vpc_id = module.vpc.vpc_id - subnet_ids = module.vpc.private_subnets - control_plane_subnet_ids = module.vpc.intra_subnets - - # Extend cluster security group rules - cluster_security_group_additional_rules = { - ingress_nodes_ephemeral_ports_tcp = { - description = "Nodes on ephemeral ports" - protocol = "tcp" - from_port = 1025 - to_port = 65535 - type = "ingress" - source_node_security_group = true - } - # Test: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/2319 - ingress_source_security_group_id = { - description = "Ingress from another computed security group" - protocol = "tcp" - from_port = 22 - to_port = 22 - type = "ingress" - source_security_group_id = 
aws_security_group.additional.id - } - } - - # Extend node-to-node security group rules - node_security_group_additional_rules = { - ingress_self_all = { - description = "Node to node all ports/protocols" - protocol = "-1" - from_port = 0 - to_port = 0 - type = "ingress" - self = true - } - # Test: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/2319 - ingress_source_security_group_id = { - description = "Ingress from another computed security group" - protocol = "tcp" - from_port = 22 - to_port = 22 - type = "ingress" - source_security_group_id = aws_security_group.additional.id - } - } - - # Self Managed Node Group(s) - self_managed_node_group_defaults = { - vpc_security_group_ids = [aws_security_group.additional.id] - iam_role_additional_policies = { - additional = aws_iam_policy.additional.arn - } - - instance_refresh = { - strategy = "Rolling" - preferences = { - min_healthy_percentage = 66 - } - } - } - - self_managed_node_groups = { - spot = { - instance_type = "m5.large" - instance_market_options = { - market_type = "spot" - } - - pre_bootstrap_user_data = <<-EOT - echo "foo" - export FOO=bar - EOT - - bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'" - - post_bootstrap_user_data = <<-EOT - cd /tmp - sudo yum install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm - sudo systemctl enable amazon-ssm-agent - sudo systemctl start amazon-ssm-agent - EOT - } - } - - # EKS Managed Node Group(s) - eks_managed_node_group_defaults = { - ami_type = "AL2_x86_64" - instance_types = ["m6i.large", "m5.large", "m5n.large", "m5zn.large"] - - attach_cluster_primary_security_group = true - vpc_security_group_ids = [aws_security_group.additional.id] - iam_role_additional_policies = { - additional = aws_iam_policy.additional.arn - } - } - - eks_managed_node_groups = { - blue = {} - green = { - min_size = 1 - max_size = 10 - desired_size = 1 - - instance_types = 
["t3.large"] - capacity_type = "SPOT" - labels = { - Environment = "test" - GithubRepo = "terraform-aws-eks" - GithubOrg = "terraform-aws-modules" - } - - taints = { - dedicated = { - key = "dedicated" - value = "gpuGroup" - effect = "NO_SCHEDULE" - } - } - - update_config = { - max_unavailable_percentage = 33 # or set `max_unavailable` - } - - tags = { - ExtraTag = "example" - } - } - } - - # Fargate Profile(s) - fargate_profiles = { - default = { - name = "default" - selectors = [ - { - namespace = "kube-system" - labels = { - k8s-app = "kube-dns" - } - }, - { - namespace = "default" - } - ] - - tags = { - Owner = "test" - } - - timeouts = { - create = "20m" - delete = "20m" - } - } - } - - # Create a new cluster where both an identity provider and Fargate profile is created - # will result in conflicts since only one can take place at a time - # # OIDC Identity provider - # cluster_identity_providers = { - # sts = { - # client_id = "sts.amazonaws.com" - # } - # } - - # aws-auth configmap - manage_aws_auth_configmap = true - - aws_auth_node_iam_role_arns_non_windows = [ - module.eks_managed_node_group.iam_role_arn, - module.self_managed_node_group.iam_role_arn, - ] - aws_auth_fargate_profile_pod_execution_role_arns = [ - module.fargate_profile.fargate_profile_pod_execution_role_arn - ] - - aws_auth_roles = [ - { - rolearn = module.eks_managed_node_group.iam_role_arn - username = "system:node:{{EC2PrivateDNSName}}" - groups = [ - "system:bootstrappers", - "system:nodes", - ] - }, - { - rolearn = module.self_managed_node_group.iam_role_arn - username = "system:node:{{EC2PrivateDNSName}}" - groups = [ - "system:bootstrappers", - "system:nodes", - ] - }, - { - rolearn = module.fargate_profile.fargate_profile_pod_execution_role_arn - username = "system:node:{{SessionName}}" - groups = [ - "system:bootstrappers", - "system:nodes", - "system:node-proxier", - ] - } - ] - - aws_auth_users = [ - { - userarn = "arn:aws:iam::66666666666:user/user1" - username = "user1" - 
groups = ["system:masters"] - }, - { - userarn = "arn:aws:iam::66666666666:user/user2" - username = "user2" - groups = ["system:masters"] - }, - ] - - aws_auth_accounts = [ - "777777777777", - "888888888888", - ] - - tags = local.tags -} - -################################################################################ -# Sub-Module Usage on Existing/Separate Cluster -################################################################################ - -module "eks_managed_node_group" { - source = "../../modules/eks-managed-node-group" - - name = "separate-eks-mng" - cluster_name = module.eks.cluster_name - cluster_version = module.eks.cluster_version - - subnet_ids = module.vpc.private_subnets - cluster_primary_security_group_id = module.eks.cluster_primary_security_group_id - vpc_security_group_ids = [ - module.eks.cluster_security_group_id, - ] - - ami_type = "BOTTLEROCKET_x86_64" - platform = "bottlerocket" - - # this will get added to what AWS provides - bootstrap_extra_args = <<-EOT - # extra args added - [settings.kernel] - lockdown = "integrity" - - [settings.kubernetes.node-labels] - "label1" = "foo" - "label2" = "bar" - EOT - - tags = merge(local.tags, { Separate = "eks-managed-node-group" }) -} - -module "self_managed_node_group" { - source = "../../modules/self-managed-node-group" - - name = "separate-self-mng" - cluster_name = module.eks.cluster_name - cluster_version = module.eks.cluster_version - cluster_endpoint = module.eks.cluster_endpoint - cluster_auth_base64 = module.eks.cluster_certificate_authority_data - - instance_type = "m5.large" - - subnet_ids = module.vpc.private_subnets - vpc_security_group_ids = [ - module.eks.cluster_primary_security_group_id, - module.eks.cluster_security_group_id, - ] - - tags = merge(local.tags, { Separate = "self-managed-node-group" }) -} - -module "fargate_profile" { - source = "../../modules/fargate-profile" - - name = "separate-fargate-profile" - cluster_name = module.eks.cluster_name - - subnet_ids = 
module.vpc.private_subnets - selectors = [{ - namespace = "kube-system" - }] - - tags = merge(local.tags, { Separate = "fargate-profile" }) -} - -################################################################################ -# Disabled creation -################################################################################ - -module "disabled_eks" { - source = "../.." - - create = false -} - -module "disabled_fargate_profile" { - source = "../../modules/fargate-profile" - - create = false -} - -module "disabled_eks_managed_node_group" { - source = "../../modules/eks-managed-node-group" - - create = false -} - -module "disabled_self_managed_node_group" { - source = "../../modules/self-managed-node-group" - - create = false -} - -################################################################################ -# Supporting resources -################################################################################ - -module "vpc" { - source = "terraform-aws-modules/vpc/aws" - version = "~> 4.0" - - name = local.name - cidr = local.vpc_cidr - - azs = local.azs - private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 4, k)] - public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 48)] - intra_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 52)] - - enable_nat_gateway = true - single_nat_gateway = true - - public_subnet_tags = { - "kubernetes.io/role/elb" = 1 - } - - private_subnet_tags = { - "kubernetes.io/role/internal-elb" = 1 - } - - tags = local.tags -} - -resource "aws_security_group" "additional" { - name_prefix = "${local.name}-additional" - vpc_id = module.vpc.vpc_id - - ingress { - from_port = 22 - to_port = 22 - protocol = "tcp" - cidr_blocks = [ - "10.0.0.0/8", - "172.16.0.0/12", - "192.168.0.0/16", - ] - } - - tags = merge(local.tags, { Name = "${local.name}-additional" }) -} - -resource "aws_iam_policy" "additional" { - name = "${local.name}-additional" - - policy = jsonencode({ - Version = 
"2012-10-17" - Statement = [ - { - Action = [ - "ec2:Describe*", - ] - Effect = "Allow" - Resource = "*" - }, - ] - }) -} - -module "kms" { - source = "terraform-aws-modules/kms/aws" - version = "~> 1.5" - - aliases = ["eks/${local.name}"] - description = "${local.name} cluster encryption key" - enable_default_policy = true - key_owners = [data.aws_caller_identity.current.arn] - - tags = local.tags -} diff --git a/examples/complete/outputs.tf b/examples/complete/outputs.tf deleted file mode 100644 index c1020f3333..0000000000 --- a/examples/complete/outputs.tf +++ /dev/null @@ -1,192 +0,0 @@ -################################################################################ -# Cluster -################################################################################ - -output "cluster_arn" { - description = "The Amazon Resource Name (ARN) of the cluster" - value = module.eks.cluster_arn -} - -output "cluster_certificate_authority_data" { - description = "Base64 encoded certificate data required to communicate with the cluster" - value = module.eks.cluster_certificate_authority_data -} - -output "cluster_endpoint" { - description = "Endpoint for your Kubernetes API server" - value = module.eks.cluster_endpoint -} - -output "cluster_id" { - description = "The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts" - value = module.eks.cluster_id -} - -output "cluster_name" { - description = "The name of the EKS cluster" - value = module.eks.cluster_name -} - -output "cluster_oidc_issuer_url" { - description = "The URL on the EKS cluster for the OpenID Connect identity provider" - value = module.eks.cluster_oidc_issuer_url -} - -output "cluster_platform_version" { - description = "Platform version for the cluster" - value = module.eks.cluster_platform_version -} - -output "cluster_status" { - description = "Status of the EKS cluster. 
One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED`" - value = module.eks.cluster_status -} - -output "cluster_security_group_id" { - description = "Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console" - value = module.eks.cluster_security_group_id -} - -################################################################################ -# KMS Key -################################################################################ - -output "kms_key_arn" { - description = "The Amazon Resource Name (ARN) of the key" - value = module.eks.kms_key_arn -} - -output "kms_key_id" { - description = "The globally unique identifier for the key" - value = module.eks.kms_key_id -} - -output "kms_key_policy" { - description = "The IAM resource policy set on the key" - value = module.eks.kms_key_policy -} - -################################################################################ -# Security Group -################################################################################ - -output "cluster_security_group_arn" { - description = "Amazon Resource Name (ARN) of the cluster security group" - value = module.eks.cluster_security_group_arn -} - -################################################################################ -# IRSA -################################################################################ - -output "oidc_provider" { - description = "The OpenID Connect identity provider (issuer URL without leading `https://`)" - value = module.eks.oidc_provider -} - -output "oidc_provider_arn" { - description = "The ARN of the OIDC Provider if `enable_irsa = true`" - value = module.eks.oidc_provider_arn -} - -output "cluster_tls_certificate_sha1_fingerprint" { - description = "The SHA1 fingerprint of the public key of the cluster's certificate" - value = module.eks.cluster_tls_certificate_sha1_fingerprint 
-} - -################################################################################ -# IAM Role -################################################################################ - -output "cluster_iam_role_name" { - description = "IAM role name of the EKS cluster" - value = module.eks.cluster_iam_role_name -} - -output "cluster_iam_role_arn" { - description = "IAM role ARN of the EKS cluster" - value = module.eks.cluster_iam_role_arn -} - -output "cluster_iam_role_unique_id" { - description = "Stable and unique string identifying the IAM role" - value = module.eks.cluster_iam_role_unique_id -} - -################################################################################ -# EKS Addons -################################################################################ - -output "cluster_addons" { - description = "Map of attribute maps for all EKS cluster addons enabled" - value = module.eks.cluster_addons -} - -################################################################################ -# EKS Identity Provider -################################################################################ - -output "cluster_identity_providers" { - description = "Map of attribute maps for all EKS identity providers enabled" - value = module.eks.cluster_identity_providers -} - -################################################################################ -# CloudWatch Log Group -################################################################################ - -output "cloudwatch_log_group_name" { - description = "Name of cloudwatch log group created" - value = module.eks.cloudwatch_log_group_name -} - -output "cloudwatch_log_group_arn" { - description = "Arn of cloudwatch log group created" - value = module.eks.cloudwatch_log_group_arn -} - -################################################################################ -# Fargate Profile -################################################################################ - -output "fargate_profiles" { - description = 
"Map of attribute maps for all EKS Fargate Profiles created" - value = module.eks.fargate_profiles -} - -################################################################################ -# EKS Managed Node Group -################################################################################ - -output "eks_managed_node_groups" { - description = "Map of attribute maps for all EKS managed node groups created" - value = module.eks.eks_managed_node_groups -} - -output "eks_managed_node_groups_autoscaling_group_names" { - description = "List of the autoscaling group names created by EKS managed node groups" - value = module.eks.eks_managed_node_groups_autoscaling_group_names -} - -################################################################################ -# Self Managed Node Group -################################################################################ - -output "self_managed_node_groups" { - description = "Map of attribute maps for all self managed node groups created" - value = module.eks.self_managed_node_groups -} - -output "self_managed_node_groups_autoscaling_group_names" { - description = "List of the autoscaling group names created by self-managed node groups" - value = module.eks.self_managed_node_groups_autoscaling_group_names -} - -################################################################################ -# Additional -################################################################################ - -output "aws_auth_configmap_yaml" { - description = "Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles" - value = module.eks.aws_auth_configmap_yaml -} diff --git a/examples/complete/versions.tf b/examples/complete/versions.tf deleted file mode 100644 index aeb892f359..0000000000 --- a/examples/complete/versions.tf +++ /dev/null @@ -1,14 +0,0 @@ -terraform { - required_version = ">= 1.0" - - required_providers { - aws = { - source = "hashicorp/aws" - version = ">= 4.47" - } - 
kubernetes = { - source = "hashicorp/kubernetes" - version = ">= 2.10" - } - } -} diff --git a/examples/eks_managed_node_group/README.md b/examples/eks_managed_node_group/README.md index 090273396c..f055d49f87 100644 --- a/examples/eks_managed_node_group/README.md +++ b/examples/eks_managed_node_group/README.md @@ -29,25 +29,26 @@ Note that this example may create resources which cost money. Run `terraform des | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | >= 4.47 | -| [kubernetes](#requirement\_kubernetes) | >= 2.10 | +| [terraform](#requirement\_terraform) | >= 1.3.2 | +| [aws](#requirement\_aws) | >= 5.40 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 4.47 | +| [aws](#provider\_aws) | >= 5.40 | ## Modules | Name | Source | Version | |------|--------|---------| -| [ebs\_kms\_key](#module\_ebs\_kms\_key) | terraform-aws-modules/kms/aws | ~> 1.5 | +| [disabled\_eks](#module\_disabled\_eks) | ../.. | n/a | +| [disabled\_eks\_managed\_node\_group](#module\_disabled\_eks\_managed\_node\_group) | ../../modules/eks-managed-node-group | n/a | +| [ebs\_kms\_key](#module\_ebs\_kms\_key) | terraform-aws-modules/kms/aws | ~> 2.1 | | [eks](#module\_eks) | ../.. | n/a | +| [eks\_managed\_node\_group](#module\_eks\_managed\_node\_group) | ../../modules/eks-managed-node-group | n/a | | [key\_pair](#module\_key\_pair) | terraform-aws-modules/key-pair/aws | ~> 2.0 | -| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 4.0 | -| [vpc\_cni\_irsa](#module\_vpc\_cni\_irsa) | terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks | ~> 5.0 | +| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 5.0 | ## Resources @@ -55,6 +56,7 @@ Note that this example may create resources which cost money. 
Run `terraform des |------|------| | [aws_autoscaling_group_tag.cluster_autoscaler_label_tags](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/autoscaling_group_tag) | resource | | [aws_iam_policy.node_additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | +| [aws_iam_role.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | | [aws_security_group.remote_access](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | | [aws_ami.eks_default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source | | [aws_ami.eks_default_arm](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source | @@ -70,7 +72,7 @@ No inputs. | Name | Description | |------|-------------| -| [aws\_auth\_configmap\_yaml](#output\_aws\_auth\_configmap\_yaml) | Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles | +| [access\_entries](#output\_access\_entries) | Map of access entries created and their attributes | | [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created | | [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created | | [cluster\_addons](#output\_cluster\_addons) | Map of attribute maps for all EKS cluster addons enabled | @@ -82,12 +84,14 @@ No inputs. | [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role | | [cluster\_id](#output\_cluster\_id) | The ID of the EKS cluster. 
Note: currently a value is returned only for local EKS clusters created on Outposts | | [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled | +| [cluster\_ip\_family](#output\_cluster\_ip\_family) | The IP family used by the cluster (e.g. `ipv4` or `ipv6`) | | [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster | | [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider | | [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster | | [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console | | [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group | | [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | ID of the cluster security group | +| [cluster\_service\_cidr](#output\_cluster\_service\_cidr) | The CIDR block where Kubernetes pod and service IP addresses are assigned from | | [cluster\_status](#output\_cluster\_status) | Status of the EKS cluster. 
One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED` | | [cluster\_tls\_certificate\_sha1\_fingerprint](#output\_cluster\_tls\_certificate\_sha1\_fingerprint) | The SHA1 fingerprint of the public key of the cluster's certificate | | [eks\_managed\_node\_groups](#output\_eks\_managed\_node\_groups) | Map of attribute maps for all EKS managed node groups created | diff --git a/examples/eks_managed_node_group/main.tf b/examples/eks_managed_node_group/main.tf index 1fb1b8b723..2f1202f5c5 100644 --- a/examples/eks_managed_node_group/main.tf +++ b/examples/eks_managed_node_group/main.tf @@ -2,24 +2,12 @@ provider "aws" { region = local.region } -provider "kubernetes" { - host = module.eks.cluster_endpoint - cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) - - exec { - api_version = "client.authentication.k8s.io/v1beta1" - command = "aws" - # This requires the awscli to be installed locally where Terraform is executed - args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] - } -} - data "aws_caller_identity" "current" {} data "aws_availability_zones" "available" {} locals { name = "ex-${replace(basename(path.cwd), "_", "-")}" - cluster_version = "1.27" + cluster_version = "1.29" region = "eu-west-1" vpc_cidr = "10.0.0.0/16" @@ -44,16 +32,15 @@ module "eks" { cluster_endpoint_public_access = true # IPV6 - cluster_ip_family = "ipv6" - - # We are using the IRSA created below for permissions - # However, we have to deploy with the policy attached FIRST (when creating a fresh cluster) - # and then turn this off after the cluster/node group is created. 
Without this initial policy, - # the VPC CNI fails to assign IPs and nodes cannot join the cluster - # See https://github.com/aws/containers-roadmap/issues/1666 for more context - # TODO - remove this policy once AWS releases a managed version similar to AmazonEKS_CNI_Policy (IPv4) + cluster_ip_family = "ipv6" create_cni_ipv6_iam_policy = true + enable_cluster_creator_admin_permissions = true + + # Enable EFA support by adding necessary security group rules + # to the shared node security group + enable_efa_support = true + cluster_addons = { coredns = { most_recent = true @@ -62,9 +49,8 @@ module "eks" { most_recent = true } vpc-cni = { - most_recent = true - before_compute = true - service_account_role_arn = module.vpc_cni_irsa.iam_role_arn + most_recent = true + before_compute = true configuration_values = jsonencode({ env = { # Reference docs https://docs.aws.amazon.com/eks/latest/userguide/cni-increase-ip-addresses.html @@ -79,18 +65,9 @@ module "eks" { subnet_ids = module.vpc.private_subnets control_plane_subnet_ids = module.vpc.intra_subnets - manage_aws_auth_configmap = true - eks_managed_node_group_defaults = { ami_type = "AL2_x86_64" instance_types = ["m6i.large", "m5.large", "m5n.large", "m5zn.large"] - - # We are using the IRSA created below for permissions - # However, we have to deploy with the policy attached FIRST (when creating a fresh cluster) - # and then turn this off after the cluster/node group is created. 
Without this initial policy, - # the VPC CNI fails to assign IPs and nodes cannot join the cluster - # See https://github.com/aws/containers-roadmap/issues/1666 for more context - iam_role_attach_cni_policy = true } eks_managed_node_groups = { @@ -109,6 +86,31 @@ module "eks" { } } + # AL2023 node group utilizing new user data format which utilizes nodeadm + # to join nodes to the cluster (instead of /etc/eks/bootstrap.sh) + al2023_nodeadm = { + ami_type = "AL2023_x86_64_STANDARD" + + use_latest_ami_release_version = true + + cloudinit_pre_nodeadm = [ + { + content_type = "application/node.eks.aws" + content = <<-EOT + --- + apiVersion: node.eks.aws/v1alpha1 + kind: NodeConfig + spec: + kubelet: + config: + shutdownGracePeriod: 30s + featureGates: + DisableKubeletCloudCredentialProviders: true + EOT + } + ] + } + # Default node group - as provided by AWS EKS using Bottlerocket bottlerocket_default = { # By default, the module creates a launch template to ensure tags are propagated to instances, etc., @@ -116,13 +118,13 @@ module "eks" { use_custom_launch_template = false ami_type = "BOTTLEROCKET_x86_64" - platform = "bottlerocket" } # Adds to the AWS provided user data bottlerocket_add = { ami_type = "BOTTLEROCKET_x86_64" - platform = "bottlerocket" + + use_latest_ami_release_version = true # This will get added to what AWS provides bootstrap_extra_args = <<-EOT @@ -136,7 +138,7 @@ module "eks" { bottlerocket_custom = { # Current bottlerocket AMI ami_id = data.aws_ami.eks_default_bottlerocket.image_id - platform = "bottlerocket" + ami_type = "BOTTLEROCKET_x86_64" # Use module user data template to bootstrap enable_bootstrap_user_data = true @@ -264,43 +266,134 @@ module "eks" { additional = aws_iam_policy.node_additional.arn } - schedules = { - scale-up = { - min_size = 2 - max_size = "-1" # Retains current max size - desired_size = 2 - start_time = "2023-03-05T00:00:00Z" - end_time = "2024-03-05T00:00:00Z" - timezone = "Etc/GMT+0" - recurrence = "0 0 * * *" - }, - 
scale-down = { - min_size = 0 - max_size = "-1" # Retains current max size - desired_size = 0 - start_time = "2023-03-05T12:00:00Z" - end_time = "2024-03-05T12:00:00Z" - timezone = "Etc/GMT+0" - recurrence = "0 12 * * *" - } + launch_template_tags = { + # enable discovery of autoscaling groups by cluster-autoscaler + "k8s.io/cluster-autoscaler/enabled" : true, + "k8s.io/cluster-autoscaler/${local.name}" : "owned", } tags = { ExtraTag = "EKS managed node group complete example" } } + + efa = { + # Disabling automatic creation due to instance type/quota availability + # Can be enabled when appropriate for testing/validation + create = false + + ami_type = "AL2_x86_64_GPU" + instance_types = ["trn1n.32xlarge"] + + enable_efa_support = true + pre_bootstrap_user_data = <<-EOT + # Mount NVME instance store volumes since they are typically + # available on instances that support EFA + setup-local-disks raid0 + EOT + + min_size = 2 + max_size = 2 + desired_size = 2 + } + } + + access_entries = { + # One access entry with a policy associated + ex-single = { + kubernetes_groups = [] + principal_arn = aws_iam_role.this["single"].arn + + policy_associations = { + single = { + policy_arn = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSViewPolicy" + access_scope = { + namespaces = ["default"] + type = "namespace" + } + } + } + } + + # Example of adding multiple policies to a single access entry + ex-multiple = { + kubernetes_groups = [] + principal_arn = aws_iam_role.this["multiple"].arn + + policy_associations = { + ex-one = { + policy_arn = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSEditPolicy" + access_scope = { + namespaces = ["default"] + type = "namespace" + } + } + ex-two = { + policy_arn = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSViewPolicy" + access_scope = { + type = "cluster" + } + } + } + } } tags = local.tags } +module "disabled_eks" { + source = "../.." 
+ + create = false +} + +################################################################################ +# Sub-Module Usage on Existing/Separate Cluster +################################################################################ + +module "eks_managed_node_group" { + source = "../../modules/eks-managed-node-group" + + name = "separate-eks-mng" + cluster_name = module.eks.cluster_name + cluster_ip_family = module.eks.cluster_ip_family + cluster_service_cidr = module.eks.cluster_service_cidr + + subnet_ids = module.vpc.private_subnets + cluster_primary_security_group_id = module.eks.cluster_primary_security_group_id + vpc_security_group_ids = [ + module.eks.node_security_group_id, + ] + + ami_type = "BOTTLEROCKET_x86_64" + + # this will get added to what AWS provides + bootstrap_extra_args = <<-EOT + # extra args added + [settings.kernel] + lockdown = "integrity" + + [settings.kubernetes.node-labels] + "label1" = "foo" + "label2" = "bar" + EOT + + tags = merge(local.tags, { Separate = "eks-managed-node-group" }) +} + +module "disabled_eks_managed_node_group" { + source = "../../modules/eks-managed-node-group" + + create = false +} + ################################################################################ # Supporting Resources ################################################################################ module "vpc" { source = "terraform-aws-modules/vpc/aws" - version = "~> 4.0" + version = "~> 5.0" name = local.name cidr = local.vpc_cidr @@ -333,27 +426,9 @@ module "vpc" { tags = local.tags } -module "vpc_cni_irsa" { - source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" - version = "~> 5.0" - - role_name_prefix = "VPC-CNI-IRSA" - attach_vpc_cni_policy = true - vpc_cni_enable_ipv6 = true - - oidc_providers = { - main = { - provider_arn = module.eks.oidc_provider_arn - namespace_service_accounts = ["kube-system:aws-node"] - } - } - - tags = local.tags -} - module "ebs_kms_key" { source = 
"terraform-aws-modules/kms/aws" - version = "~> 1.5" + version = "~> 2.1" description = "Customer managed key to encrypt EKS managed node group volumes" @@ -459,51 +534,25 @@ data "aws_ami" "eks_default_bottlerocket" { } } -################################################################################ -# Tags for the ASG to support cluster-autoscaler scale up from 0 -################################################################################ - -locals { - - # We need to lookup K8s taint effect from the AWS API value - taint_effects = { - NO_SCHEDULE = "NoSchedule" - NO_EXECUTE = "NoExecute" - PREFER_NO_SCHEDULE = "PreferNoSchedule" - } - - cluster_autoscaler_label_tags = merge([ - for name, group in module.eks.eks_managed_node_groups : { - for label_name, label_value in coalesce(group.node_group_labels, {}) : "${name}|label|${label_name}" => { - autoscaling_group = group.node_group_autoscaling_group_names[0], - key = "k8s.io/cluster-autoscaler/node-template/label/${label_name}", - value = label_value, - } - } - ]...) - - cluster_autoscaler_taint_tags = merge([ - for name, group in module.eks.eks_managed_node_groups : { - for taint in coalesce(group.node_group_taints, []) : "${name}|taint|${taint.key}" => { - autoscaling_group = group.node_group_autoscaling_group_names[0], - key = "k8s.io/cluster-autoscaler/node-template/taint/${taint.key}" - value = "${taint.value}:${local.taint_effects[taint.effect]}" - } - } - ]...) 
- - cluster_autoscaler_asg_tags = merge(local.cluster_autoscaler_label_tags, local.cluster_autoscaler_taint_tags) -} - -resource "aws_autoscaling_group_tag" "cluster_autoscaler_label_tags" { - for_each = local.cluster_autoscaler_asg_tags +resource "aws_iam_role" "this" { + for_each = toset(["single", "multiple"]) - autoscaling_group_name = each.value.autoscaling_group + name = "ex-${each.key}" - tag { - key = each.value.key - value = each.value.value + # Just using for this example + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Action = "sts:AssumeRole" + Effect = "Allow" + Sid = "Example" + Principal = { + Service = "ec2.amazonaws.com" + } + }, + ] + }) - propagate_at_launch = false - } + tags = local.tags } diff --git a/examples/eks_managed_node_group/outputs.tf b/examples/eks_managed_node_group/outputs.tf index 43334ecc0a..24183fd207 100644 --- a/examples/eks_managed_node_group/outputs.tf +++ b/examples/eks_managed_node_group/outputs.tf @@ -47,6 +47,25 @@ output "cluster_primary_security_group_id" { value = module.eks.cluster_primary_security_group_id } +output "cluster_service_cidr" { + description = "The CIDR block where Kubernetes pod and service IP addresses are assigned from" + value = module.eks.cluster_service_cidr +} + +output "cluster_ip_family" { + description = "The IP family used by the cluster (e.g. 
`ipv4` or `ipv6`)" + value = module.eks.cluster_ip_family +} + +################################################################################ +# Access Entry +################################################################################ + +output "access_entries" { + description = "Map of access entries created and their attributes" + value = module.eks.access_entries +} + ################################################################################ # KMS Key ################################################################################ @@ -200,12 +219,3 @@ output "self_managed_node_groups_autoscaling_group_names" { description = "List of the autoscaling group names created by self-managed node groups" value = module.eks.self_managed_node_groups_autoscaling_group_names } - -################################################################################ -# Additional -################################################################################ - -output "aws_auth_configmap_yaml" { - description = "Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles" - value = module.eks.aws_auth_configmap_yaml -} diff --git a/examples/eks_managed_node_group/versions.tf b/examples/eks_managed_node_group/versions.tf index aeb892f359..6f83215f50 100644 --- a/examples/eks_managed_node_group/versions.tf +++ b/examples/eks_managed_node_group/versions.tf @@ -1,14 +1,10 @@ terraform { - required_version = ">= 1.0" + required_version = ">= 1.3.2" required_providers { aws = { source = "hashicorp/aws" - version = ">= 4.47" - } - kubernetes = { - source = "hashicorp/kubernetes" - version = ">= 2.10" + version = ">= 5.40" } } } diff --git a/examples/fargate_profile/README.md b/examples/fargate_profile/README.md index 6ca254df27..604fd3a8ca 100644 --- a/examples/fargate_profile/README.md +++ b/examples/fargate_profile/README.md @@ -19,23 +19,23 @@ Note that this example may create resources which cost money. 
Run `terraform des | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | >= 4.47 | -| [helm](#requirement\_helm) | >= 2.7 | -| [null](#requirement\_null) | >= 3.0 | +| [terraform](#requirement\_terraform) | >= 1.3.2 | +| [aws](#requirement\_aws) | >= 5.40 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 4.47 | +| [aws](#provider\_aws) | >= 5.40 | ## Modules | Name | Source | Version | |------|--------|---------| +| [disabled\_fargate\_profile](#module\_disabled\_fargate\_profile) | ../../modules/fargate-profile | n/a | | [eks](#module\_eks) | ../.. | n/a | -| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 4.0 | +| [fargate\_profile](#module\_fargate\_profile) | ../../modules/fargate-profile | n/a | +| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 5.0 | ## Resources @@ -52,7 +52,7 @@ No inputs. | Name | Description | |------|-------------| -| [aws\_auth\_configmap\_yaml](#output\_aws\_auth\_configmap\_yaml) | Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles | +| [access\_entries](#output\_access\_entries) | Map of access entries created and their attributes | | [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created | | [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created | | [cluster\_addons](#output\_cluster\_addons) | Map of attribute maps for all EKS cluster addons enabled | @@ -64,12 +64,14 @@ No inputs. | [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role | | [cluster\_id](#output\_cluster\_id) | The ID of the EKS cluster. 
Note: currently a value is returned only for local EKS clusters created on Outposts | | [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled | +| [cluster\_ip\_family](#output\_cluster\_ip\_family) | The IP family used by the cluster (e.g. `ipv4` or `ipv6`) | | [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster | | [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider | | [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster | | [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console | | [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group | | [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | ID of the cluster security group | +| [cluster\_service\_cidr](#output\_cluster\_service\_cidr) | The CIDR block where Kubernetes pod and service IP addresses are assigned from | | [cluster\_status](#output\_cluster\_status) | Status of the EKS cluster. 
One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED` | | [cluster\_tls\_certificate\_sha1\_fingerprint](#output\_cluster\_tls\_certificate\_sha1\_fingerprint) | The SHA1 fingerprint of the public key of the cluster's certificate | | [eks\_managed\_node\_groups](#output\_eks\_managed\_node\_groups) | Map of attribute maps for all EKS managed node groups created | diff --git a/examples/fargate_profile/main.tf b/examples/fargate_profile/main.tf index 750a2c6edd..16fe82c1bc 100644 --- a/examples/fargate_profile/main.tf +++ b/examples/fargate_profile/main.tf @@ -6,7 +6,7 @@ data "aws_availability_zones" "available" {} locals { name = "ex-${replace(basename(path.cwd), "_", "-")}" - cluster_version = "1.27" + cluster_version = "1.29" region = "eu-west-1" vpc_cidr = "10.0.0.0/16" @@ -35,7 +35,7 @@ module "eks" { vpc-cni = {} coredns = { configuration_values = jsonencode({ - computeType = "Fargate" + computeType = "fargate" }) } } @@ -70,41 +70,56 @@ module "eks" { Application = "app-wildcard" } } - } - }, - { for i in range(3) : - "kube-system-${element(split("-", local.azs[i]), 2)}" => { - selectors = [ - { namespace = "kube-system" } - ] - # We want to create a profile per AZ for high availability - subnet_ids = [element(module.vpc.private_subnets, i)] - } + ] + + # Using specific subnets instead of the subnets supplied for the cluster itself + subnet_ids = [module.vpc.private_subnets[1]] - timeouts = { - create = "20m" - delete = "20m" + tags = { + Owner = "secondary" } } - - kube_system = { - name = "kube-system" + kube-system = { selectors = [ { namespace = "kube-system" } ] } - ) + } tags = local.tags } +################################################################################ +# Sub-Module Usage on Existing/Separate Cluster +################################################################################ + +module "fargate_profile" { + source = "../../modules/fargate-profile" + + name = "separate-fargate-profile" + cluster_name = module.eks.cluster_name + + 
subnet_ids = module.vpc.private_subnets + selectors = [{ + namespace = "kube-system" + }] + + tags = merge(local.tags, { Separate = "fargate-profile" }) +} + +module "disabled_fargate_profile" { + source = "../../modules/fargate-profile" + + create = false +} + ################################################################################ # Supporting Resources ################################################################################ module "vpc" { source = "terraform-aws-modules/vpc/aws" - version = "~> 4.0" + version = "~> 5.0" name = local.name cidr = local.vpc_cidr diff --git a/examples/fargate_profile/outputs.tf b/examples/fargate_profile/outputs.tf index 43334ecc0a..24183fd207 100644 --- a/examples/fargate_profile/outputs.tf +++ b/examples/fargate_profile/outputs.tf @@ -47,6 +47,25 @@ output "cluster_primary_security_group_id" { value = module.eks.cluster_primary_security_group_id } +output "cluster_service_cidr" { + description = "The CIDR block where Kubernetes pod and service IP addresses are assigned from" + value = module.eks.cluster_service_cidr +} + +output "cluster_ip_family" { + description = "The IP family used by the cluster (e.g. 
`ipv4` or `ipv6`)" + value = module.eks.cluster_ip_family +} + +################################################################################ +# Access Entry +################################################################################ + +output "access_entries" { + description = "Map of access entries created and their attributes" + value = module.eks.access_entries +} + ################################################################################ # KMS Key ################################################################################ @@ -200,12 +219,3 @@ output "self_managed_node_groups_autoscaling_group_names" { description = "List of the autoscaling group names created by self-managed node groups" value = module.eks.self_managed_node_groups_autoscaling_group_names } - -################################################################################ -# Additional -################################################################################ - -output "aws_auth_configmap_yaml" { - description = "Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles" - value = module.eks.aws_auth_configmap_yaml -} diff --git a/examples/fargate_profile/versions.tf b/examples/fargate_profile/versions.tf index 17db7d8988..6f83215f50 100644 --- a/examples/fargate_profile/versions.tf +++ b/examples/fargate_profile/versions.tf @@ -1,18 +1,10 @@ terraform { - required_version = ">= 1.0" + required_version = ">= 1.3.2" required_providers { aws = { source = "hashicorp/aws" - version = ">= 4.47" - } - helm = { - source = "hashicorp/helm" - version = ">= 2.7" - } - null = { - source = "hashicorp/null" - version = ">= 3.0" + version = ">= 5.40" } } } diff --git a/examples/karpenter/README.md b/examples/karpenter/README.md index d336b10b9d..def4eb9f21 100644 --- a/examples/karpenter/README.md +++ b/examples/karpenter/README.md @@ -1,6 +1,6 @@ # Karpenter Example -Configuration in this directory creates an AWS EKS cluster 
with [Karpenter](https://karpenter.sh/) provisioned for managing compute resource scaling. In the example provided, Karpenter is running on EKS Fargate yet Karpenter is providing compute in the form of EC2 instances. +Configuration in this directory creates an AWS EKS cluster with [Karpenter](https://karpenter.sh/) provisioned for managing compute resource scaling. In the example provided, Karpenter is provisioned on top of an EKS Managed Node Group. ## Usage @@ -22,10 +22,47 @@ aws eks --region eu-west-1 update-kubeconfig --name ex-karpenter kubectl scale deployment inflate --replicas 5 # You can watch Karpenter's controller logs with -kubectl logs -f -n karpenter -l app.kubernetes.io/name=karpenter -c controller +kubectl logs -f -n kube-system -l app.kubernetes.io/name=karpenter -c controller ``` -You should see a new node named `karpenter.sh/provisioner-name/default` eventually come up in the console; this was provisioned by Karpenter in response to the scaled deployment above. +Validate if the Amazon EKS Addons Pods are running in the Managed Node Group and the `inflate` application Pods are running on Karpenter provisioned Nodes. 
+ +```bash +kubectl get nodes -L karpenter.sh/registered +``` + +```text +NAME STATUS ROLES AGE VERSION REGISTERED +ip-10-0-16-155.eu-west-1.compute.internal Ready 100s v1.29.3-eks-ae9a62a true +ip-10-0-3-23.eu-west-1.compute.internal Ready 6m1s v1.29.3-eks-ae9a62a +ip-10-0-41-2.eu-west-1.compute.internal Ready 6m3s v1.29.3-eks-ae9a62a +``` + +```sh +kubectl get pods -A -o custom-columns=NAME:.metadata.name,NODE:.spec.nodeName +``` + +```text +NAME NODE +inflate-75d744d4c6-nqwz8 ip-10-0-16-155.eu-west-1.compute.internal +inflate-75d744d4c6-nrqnn ip-10-0-16-155.eu-west-1.compute.internal +inflate-75d744d4c6-sp4dx ip-10-0-16-155.eu-west-1.compute.internal +inflate-75d744d4c6-xqzd9 ip-10-0-16-155.eu-west-1.compute.internal +inflate-75d744d4c6-xr6p5 ip-10-0-16-155.eu-west-1.compute.internal +aws-node-mnn7r ip-10-0-3-23.eu-west-1.compute.internal +aws-node-rkmvm ip-10-0-16-155.eu-west-1.compute.internal +aws-node-s4slh ip-10-0-41-2.eu-west-1.compute.internal +coredns-68bd859788-7rcfq ip-10-0-3-23.eu-west-1.compute.internal +coredns-68bd859788-l78hw ip-10-0-41-2.eu-west-1.compute.internal +eks-pod-identity-agent-gbx8l ip-10-0-41-2.eu-west-1.compute.internal +eks-pod-identity-agent-s7vt7 ip-10-0-16-155.eu-west-1.compute.internal +eks-pod-identity-agent-xwgqw ip-10-0-3-23.eu-west-1.compute.internal +karpenter-79f59bdfdc-9q5ff ip-10-0-41-2.eu-west-1.compute.internal +karpenter-79f59bdfdc-cxvhr ip-10-0-3-23.eu-west-1.compute.internal +kube-proxy-7crbl ip-10-0-41-2.eu-west-1.compute.internal +kube-proxy-jtzds ip-10-0-16-155.eu-west-1.compute.internal +kube-proxy-sm42c ip-10-0-3-23.eu-west-1.compute.internal +``` ### Tear Down & Clean-Up @@ -51,21 +88,19 @@ Note that this example may create resources which cost money. 
Run `terraform des | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | >= 4.47 | +| [terraform](#requirement\_terraform) | >= 1.3.2 | +| [aws](#requirement\_aws) | >= 5.40 | | [helm](#requirement\_helm) | >= 2.7 | -| [kubectl](#requirement\_kubectl) | >= 1.14 | -| [kubernetes](#requirement\_kubernetes) | >= 2.10 | -| [null](#requirement\_null) | >= 3.0 | +| [kubectl](#requirement\_kubectl) | >= 2.0 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 4.47 | -| [aws.virginia](#provider\_aws.virginia) | >= 4.47 | +| [aws](#provider\_aws) | >= 5.40 | +| [aws.virginia](#provider\_aws.virginia) | >= 5.40 | | [helm](#provider\_helm) | >= 2.7 | -| [kubectl](#provider\_kubectl) | >= 1.14 | +| [kubectl](#provider\_kubectl) | >= 2.0 | ## Modules @@ -73,16 +108,17 @@ Note that this example may create resources which cost money. Run `terraform des |------|--------|---------| | [eks](#module\_eks) | ../.. 
| n/a | | [karpenter](#module\_karpenter) | ../../modules/karpenter | n/a | -| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 4.0 | +| [karpenter\_disabled](#module\_karpenter\_disabled) | ../../modules/karpenter | n/a | +| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 5.0 | ## Resources | Name | Type | |------|------| | [helm_release.karpenter](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource | -| [kubectl_manifest.karpenter_example_deployment](https://registry.terraform.io/providers/gavinbunney/kubectl/latest/docs/resources/manifest) | resource | -| [kubectl_manifest.karpenter_node_template](https://registry.terraform.io/providers/gavinbunney/kubectl/latest/docs/resources/manifest) | resource | -| [kubectl_manifest.karpenter_provisioner](https://registry.terraform.io/providers/gavinbunney/kubectl/latest/docs/resources/manifest) | resource | +| [kubectl_manifest.karpenter_example_deployment](https://registry.terraform.io/providers/alekc/kubectl/latest/docs/resources/manifest) | resource | +| [kubectl_manifest.karpenter_node_class](https://registry.terraform.io/providers/alekc/kubectl/latest/docs/resources/manifest) | resource | +| [kubectl_manifest.karpenter_node_pool](https://registry.terraform.io/providers/alekc/kubectl/latest/docs/resources/manifest) | resource | | [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | | [aws_ecrpublic_authorization_token.token](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ecrpublic_authorization_token) | data source | @@ -94,7 +130,7 @@ No inputs. 
| Name | Description | |------|-------------| -| [aws\_auth\_configmap\_yaml](#output\_aws\_auth\_configmap\_yaml) | Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles | +| [access\_entries](#output\_access\_entries) | Map of access entries created and their attributes | | [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created | | [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created | | [cluster\_addons](#output\_cluster\_addons) | Map of attribute maps for all EKS cluster addons enabled | @@ -106,31 +142,33 @@ No inputs. | [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role | | [cluster\_id](#output\_cluster\_id) | The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts | | [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled | +| [cluster\_ip\_family](#output\_cluster\_ip\_family) | The IP family used by the cluster (e.g. `ipv4` or `ipv6`) | | [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster | | [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider | | [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster | | [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. 
Referred to as 'Cluster security group' in the EKS console | | [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group | | [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | ID of the cluster security group | +| [cluster\_service\_cidr](#output\_cluster\_service\_cidr) | The CIDR block where Kubernetes pod and service IP addresses are assigned from | | [cluster\_status](#output\_cluster\_status) | Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED` | | [cluster\_tls\_certificate\_sha1\_fingerprint](#output\_cluster\_tls\_certificate\_sha1\_fingerprint) | The SHA1 fingerprint of the public key of the cluster's certificate | | [eks\_managed\_node\_groups](#output\_eks\_managed\_node\_groups) | Map of attribute maps for all EKS managed node groups created | | [eks\_managed\_node\_groups\_autoscaling\_group\_names](#output\_eks\_managed\_node\_groups\_autoscaling\_group\_names) | List of the autoscaling group names created by EKS managed node groups | | [fargate\_profiles](#output\_fargate\_profiles) | Map of attribute maps for all EKS Fargate Profiles created | | [karpenter\_event\_rules](#output\_karpenter\_event\_rules) | Map of the event rules created and their attributes | +| [karpenter\_iam\_role\_arn](#output\_karpenter\_iam\_role\_arn) | The Amazon Resource Name (ARN) specifying the controller IAM role | +| [karpenter\_iam\_role\_name](#output\_karpenter\_iam\_role\_name) | The name of the controller IAM role | +| [karpenter\_iam\_role\_unique\_id](#output\_karpenter\_iam\_role\_unique\_id) | Stable and unique string identifying the controller IAM role | | [karpenter\_instance\_profile\_arn](#output\_karpenter\_instance\_profile\_arn) | ARN assigned by AWS to the instance profile | | [karpenter\_instance\_profile\_id](#output\_karpenter\_instance\_profile\_id) | Instance profile's ID | | 
[karpenter\_instance\_profile\_name](#output\_karpenter\_instance\_profile\_name) | Name of the instance profile | | [karpenter\_instance\_profile\_unique](#output\_karpenter\_instance\_profile\_unique) | Stable and unique string identifying the IAM instance profile | -| [karpenter\_irsa\_arn](#output\_karpenter\_irsa\_arn) | The Amazon Resource Name (ARN) specifying the IAM role for service accounts | -| [karpenter\_irsa\_name](#output\_karpenter\_irsa\_name) | The name of the IAM role for service accounts | -| [karpenter\_irsa\_unique\_id](#output\_karpenter\_irsa\_unique\_id) | Stable and unique string identifying the IAM role for service accounts | +| [karpenter\_node\_iam\_role\_arn](#output\_karpenter\_node\_iam\_role\_arn) | The Amazon Resource Name (ARN) specifying the IAM role | +| [karpenter\_node\_iam\_role\_name](#output\_karpenter\_node\_iam\_role\_name) | The name of the IAM role | +| [karpenter\_node\_iam\_role\_unique\_id](#output\_karpenter\_node\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role | | [karpenter\_queue\_arn](#output\_karpenter\_queue\_arn) | The ARN of the SQS queue | | [karpenter\_queue\_name](#output\_karpenter\_queue\_name) | The name of the created Amazon SQS queue | | [karpenter\_queue\_url](#output\_karpenter\_queue\_url) | The URL for the created Amazon SQS queue | -| [karpenter\_role\_arn](#output\_karpenter\_role\_arn) | The Amazon Resource Name (ARN) specifying the IAM role | -| [karpenter\_role\_name](#output\_karpenter\_role\_name) | The name of the IAM role | -| [karpenter\_role\_unique\_id](#output\_karpenter\_role\_unique\_id) | Stable and unique string identifying the IAM role | | [node\_security\_group\_arn](#output\_node\_security\_group\_arn) | Amazon Resource Name (ARN) of the node shared security group | | [node\_security\_group\_id](#output\_node\_security\_group\_id) | ID of the node shared security group | | [oidc\_provider](#output\_oidc\_provider) | The OpenID Connect identity 
provider (issuer URL without leading `https://`) | diff --git a/examples/karpenter/main.tf b/examples/karpenter/main.tf index 86fc0a4f3b..b3db54453e 100644 --- a/examples/karpenter/main.tf +++ b/examples/karpenter/main.tf @@ -7,18 +7,6 @@ provider "aws" { alias = "virginia" } -provider "kubernetes" { - host = module.eks.cluster_endpoint - cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) - - exec { - api_version = "client.authentication.k8s.io/v1beta1" - command = "aws" - # This requires the awscli to be installed locally where Terraform is executed - args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] - } -} - provider "helm" { kubernetes { host = module.eks.cluster_endpoint @@ -53,9 +41,8 @@ data "aws_ecrpublic_authorization_token" "token" { } locals { - name = "ex-${replace(basename(path.cwd), "_", "-")}" - cluster_version = "1.27" - region = "eu-west-1" + name = "ex-${basename(path.cwd)}" + region = "eu-west-1" vpc_cidr = "10.0.0.0/16" azs = slice(data.aws_availability_zones.available.names, 0, 3) @@ -74,67 +61,42 @@ locals { module "eks" { source = "../.." - cluster_name = local.name - cluster_version = local.cluster_version - cluster_endpoint_public_access = true + cluster_name = local.name + cluster_version = "1.29" + + # Gives Terraform identity admin access to cluster which will + # allow deploying resources (Karpenter) into the cluster + enable_cluster_creator_admin_permissions = true + cluster_endpoint_public_access = true cluster_addons = { - kube-proxy = {} - vpc-cni = {} - coredns = { - configuration_values = jsonencode({ - computeType = "Fargate" - # Ensure that we fully utilize the minimum amount of resources that are supplied by - # Fargate https://docs.aws.amazon.com/eks/latest/userguide/fargate-pod-configuration.html - # Fargate adds 256 MB to each pod's memory reservation for the required Kubernetes - # components (kubelet, kube-proxy, and containerd). 
Fargate rounds up to the following - # compute configuration that most closely matches the sum of vCPU and memory requests in - # order to ensure pods always have the resources that they need to run. - resources = { - limits = { - cpu = "0.25" - # We are targeting the smallest Task size of 512Mb, so we subtract 256Mb from the - # request/limit to ensure we can fit within that task - memory = "256M" - } - requests = { - cpu = "0.25" - # We are targeting the smallest Task size of 512Mb, so we subtract 256Mb from the - # request/limit to ensure we can fit within that task - memory = "256M" - } - } - }) - } + coredns = {} + eks-pod-identity-agent = {} + kube-proxy = {} + vpc-cni = {} } vpc_id = module.vpc.vpc_id subnet_ids = module.vpc.private_subnets control_plane_subnet_ids = module.vpc.intra_subnets - manage_aws_auth_configmap = true - aws_auth_roles = [ - # We need to add in the Karpenter node IAM role for nodes launched by Karpenter - { - rolearn = module.karpenter.role_arn - username = "system:node:{{EC2PrivateDNSName}}" - groups = [ - "system:bootstrappers", - "system:nodes", - ] - }, - ] - - fargate_profiles = { + eks_managed_node_groups = { karpenter = { - selectors = [ - { namespace = "karpenter" } - ] - } - kube-system = { - selectors = [ - { namespace = "kube-system" } - ] + instance_types = ["m5.large"] + + min_size = 2 + max_size = 3 + desired_size = 2 + + taints = { + # This Taint aims to keep just EKS Addons and Karpenter running on this MNG + # The pods that do not tolerate this taint should run on nodes created by Karpenter + addons = { + key = "CriticalAddonsOnly" + value = "true" + effect = "NO_SCHEDULE" + }, + } } } @@ -153,70 +115,69 @@ module "eks" { module "karpenter" { source = "../../modules/karpenter" - cluster_name = module.eks.cluster_name - irsa_oidc_provider_arn = module.eks.oidc_provider_arn + cluster_name = module.eks.cluster_name + + enable_pod_identity = true + create_pod_identity_association = true - policies = { + # Used to attach 
additional IAM policies to the Karpenter node IAM role + node_iam_role_additional_policies = { AmazonSSMManagedInstanceCore = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore" } tags = local.tags } -resource "helm_release" "karpenter" { - namespace = "karpenter" - create_namespace = true +module "karpenter_disabled" { + source = "../../modules/karpenter" + + create = false +} + +################################################################################ +# Karpenter Helm chart & manifests +# Not required; just to demonstrate functionality of the sub-module +################################################################################ +resource "helm_release" "karpenter" { + namespace = "kube-system" name = "karpenter" repository = "oci://public.ecr.aws/karpenter" repository_username = data.aws_ecrpublic_authorization_token.token.user_name repository_password = data.aws_ecrpublic_authorization_token.token.password chart = "karpenter" - version = "v0.19.3" - - set { - name = "settings.aws.clusterName" - value = module.eks.cluster_name - } - - set { - name = "settings.aws.clusterEndpoint" - value = module.eks.cluster_endpoint - } - - set { - name = "serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn" - value = module.karpenter.irsa_arn - } - - set { - name = "settings.aws.defaultInstanceProfile" - value = module.karpenter.instance_profile_name - } - - set { - name = "settings.aws.interruptionQueueName" - value = module.karpenter.queue_name - } + version = "0.36.1" + wait = false + + values = [ + <<-EOT + serviceAccount: + name: ${module.karpenter.service_account} + settings: + clusterName: ${module.eks.cluster_name} + clusterEndpoint: ${module.eks.cluster_endpoint} + interruptionQueue: ${module.karpenter.queue_name} + EOT + ] } -resource "kubectl_manifest" "karpenter_provisioner" { +resource "kubectl_manifest" "karpenter_node_class" { yaml_body = <<-YAML - apiVersion: karpenter.sh/v1alpha5 - kind: Provisioner + apiVersion: karpenter.k8s.aws/v1beta1 
+ kind: EC2NodeClass metadata: name: default spec: - requirements: - - key: karpenter.sh/capacity-type - operator: In - values: ["spot"] - limits: - resources: - cpu: 1000 - providerRef: - name: default - ttlSecondsAfterEmpty: 30 + amiFamily: AL2 + role: ${module.karpenter.node_iam_role_name} + subnetSelectorTerms: + - tags: + karpenter.sh/discovery: ${module.eks.cluster_name} + securityGroupSelectorTerms: + - tags: + karpenter.sh/discovery: ${module.eks.cluster_name} + tags: + karpenter.sh/discovery: ${module.eks.cluster_name} YAML depends_on = [ @@ -224,23 +185,39 @@ resource "kubectl_manifest" "karpenter_provisioner" { ] } -resource "kubectl_manifest" "karpenter_node_template" { +resource "kubectl_manifest" "karpenter_node_pool" { yaml_body = <<-YAML - apiVersion: karpenter.k8s.aws/v1alpha1 - kind: AWSNodeTemplate + apiVersion: karpenter.sh/v1beta1 + kind: NodePool metadata: name: default spec: - subnetSelector: - karpenter.sh/discovery: ${module.eks.cluster_name} - securityGroupSelector: - karpenter.sh/discovery: ${module.eks.cluster_name} - tags: - karpenter.sh/discovery: ${module.eks.cluster_name} + template: + spec: + nodeClassRef: + name: default + requirements: + - key: "karpenter.k8s.aws/instance-category" + operator: In + values: ["c", "m", "r"] + - key: "karpenter.k8s.aws/instance-cpu" + operator: In + values: ["4", "8", "16", "32"] + - key: "karpenter.k8s.aws/instance-hypervisor" + operator: In + values: ["nitro"] + - key: "karpenter.k8s.aws/instance-generation" + operator: Gt + values: ["2"] + limits: + cpu: 1000 + disruption: + consolidationPolicy: WhenEmpty + consolidateAfter: 30s YAML depends_on = [ - helm_release.karpenter + kubectl_manifest.karpenter_node_class ] } @@ -282,7 +259,7 @@ resource "kubectl_manifest" "karpenter_example_deployment" { module "vpc" { source = "terraform-aws-modules/vpc/aws" - version = "~> 4.0" + version = "~> 5.0" name = local.name cidr = local.vpc_cidr diff --git a/examples/karpenter/outputs.tf 
b/examples/karpenter/outputs.tf index f0ad50bd6a..de0e2e6a28 100644 --- a/examples/karpenter/outputs.tf +++ b/examples/karpenter/outputs.tf @@ -47,6 +47,25 @@ output "cluster_primary_security_group_id" { value = module.eks.cluster_primary_security_group_id } +output "cluster_service_cidr" { + description = "The CIDR block where Kubernetes pod and service IP addresses are assigned from" + value = module.eks.cluster_service_cidr +} + +output "cluster_ip_family" { + description = "The IP family used by the cluster (e.g. `ipv4` or `ipv6`)" + value = module.eks.cluster_ip_family +} + +################################################################################ +# Access Entry +################################################################################ + +output "access_entries" { + description = "Map of access entries created and their attributes" + value = module.eks.access_entries +} + ################################################################################ # Security Group ################################################################################ @@ -183,31 +202,22 @@ output "self_managed_node_groups_autoscaling_group_names" { } ################################################################################ -# Additional -################################################################################ - -output "aws_auth_configmap_yaml" { - description = "Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles" - value = module.eks.aws_auth_configmap_yaml -} - -################################################################################ -# IAM Role for Service Account (IRSA) +# Karpenter controller IAM Role ################################################################################ -output "karpenter_irsa_name" { - description = "The name of the IAM role for service accounts" - value = module.karpenter.irsa_name +output "karpenter_iam_role_name" { + description = "The name of the 
controller IAM role" + value = module.karpenter.iam_role_name } -output "karpenter_irsa_arn" { - description = "The Amazon Resource Name (ARN) specifying the IAM role for service accounts" - value = module.karpenter.irsa_arn +output "karpenter_iam_role_arn" { + description = "The Amazon Resource Name (ARN) specifying the controller IAM role" + value = module.karpenter.iam_role_arn } -output "karpenter_irsa_unique_id" { - description = "Stable and unique string identifying the IAM role for service accounts" - value = module.karpenter.irsa_unique_id +output "karpenter_iam_role_unique_id" { + description = "Stable and unique string identifying the controller IAM role" + value = module.karpenter.iam_role_unique_id } ################################################################################ @@ -242,19 +252,19 @@ output "karpenter_event_rules" { # Node IAM Role ################################################################################ -output "karpenter_role_name" { +output "karpenter_node_iam_role_name" { description = "The name of the IAM role" - value = module.karpenter.role_name + value = module.karpenter.node_iam_role_name } -output "karpenter_role_arn" { +output "karpenter_node_iam_role_arn" { description = "The Amazon Resource Name (ARN) specifying the IAM role" - value = module.karpenter.role_arn + value = module.karpenter.node_iam_role_arn } -output "karpenter_role_unique_id" { +output "karpenter_node_iam_role_unique_id" { description = "Stable and unique string identifying the IAM role" - value = module.karpenter.role_unique_id + value = module.karpenter.node_iam_role_unique_id } ################################################################################ diff --git a/examples/karpenter/versions.tf b/examples/karpenter/versions.tf index cab7b21121..a43b64f4c4 100644 --- a/examples/karpenter/versions.tf +++ b/examples/karpenter/versions.tf @@ -1,26 +1,18 @@ terraform { - required_version = ">= 1.0" + required_version = ">= 1.3.2" 
required_providers { aws = { source = "hashicorp/aws" - version = ">= 4.47" - } - kubernetes = { - source = "hashicorp/kubernetes" - version = ">= 2.10" + version = ">= 5.40" } helm = { source = "hashicorp/helm" version = ">= 2.7" } kubectl = { - source = "gavinbunney/kubectl" - version = ">= 1.14" - } - null = { - source = "hashicorp/null" - version = ">= 3.0" + source = "alekc/kubectl" + version = ">= 2.0" } } } diff --git a/examples/outposts/README.md b/examples/outposts/README.md index 33684940ac..a454fd2c90 100644 --- a/examples/outposts/README.md +++ b/examples/outposts/README.md @@ -36,21 +36,25 @@ $ terraform apply Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources. +```bash +terraform destroy +``` + ## Requirements | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | >= 4.47 | -| [kubernetes](#requirement\_kubernetes) | >= 2.10 | +| [terraform](#requirement\_terraform) | >= 1.3.2 | +| [aws](#requirement\_aws) | >= 5.40 | +| [kubernetes](#requirement\_kubernetes) | >= 2.20 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 4.47 | -| [kubernetes](#provider\_kubernetes) | >= 2.10 | +| [aws](#provider\_aws) | >= 5.40 | +| [kubernetes](#provider\_kubernetes) | >= 2.20 | ## Modules @@ -80,7 +84,7 @@ Note that this example may create resources which cost money. 
Run `terraform des | Name | Description | |------|-------------| -| [aws\_auth\_configmap\_yaml](#output\_aws\_auth\_configmap\_yaml) | Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles | +| [access\_entries](#output\_access\_entries) | Map of access entries created and their attributes | | [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created | | [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created | | [cluster\_addons](#output\_cluster\_addons) | Map of attribute maps for all EKS cluster addons enabled | @@ -92,12 +96,14 @@ Note that this example may create resources which cost money. Run `terraform des | [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role | | [cluster\_id](#output\_cluster\_id) | The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts | | [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled | +| [cluster\_ip\_family](#output\_cluster\_ip\_family) | The IP family used by the cluster (e.g. `ipv4` or `ipv6`) | | [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster | | [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider | | [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster | | [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. 
Referred to as 'Cluster security group' in the EKS console | | [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group | | [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | ID of the cluster security group | +| [cluster\_service\_cidr](#output\_cluster\_service\_cidr) | The CIDR block where Kubernetes pod and service IP addresses are assigned from | | [cluster\_status](#output\_cluster\_status) | Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED` | | [cluster\_tls\_certificate\_sha1\_fingerprint](#output\_cluster\_tls\_certificate\_sha1\_fingerprint) | The SHA1 fingerprint of the public key of the cluster's certificate | | [eks\_managed\_node\_groups](#output\_eks\_managed\_node\_groups) | Map of attribute maps for all EKS managed node groups created | diff --git a/examples/outposts/main.tf b/examples/outposts/main.tf index 1964520ab8..7b4068e0e1 100644 --- a/examples/outposts/main.tf +++ b/examples/outposts/main.tf @@ -2,21 +2,9 @@ provider "aws" { region = var.region } -provider "kubernetes" { - host = module.eks.cluster_endpoint - cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) - - exec { - api_version = "client.authentication.k8s.io/v1beta1" - command = "aws" - # Note: `cluster_id` is used with Outposts for auth - args = ["eks", "get-token", "--cluster-id", module.eks.cluster_id, "--region", var.region] - } -} - locals { name = "ex-${basename(path.cwd)}" - cluster_version = "1.27" # Required by EKS on Outposts + cluster_version = "1.29" outpost_arn = element(tolist(data.aws_outposts_outposts.this.arns), 0) instance_type = element(tolist(data.aws_outposts_outpost_instance_types.this.instance_types), 0) @@ -41,6 +29,10 @@ module "eks" { cluster_endpoint_public_access = false # Not available on Outpost cluster_endpoint_private_access = true + # Gives Terraform identity admin access to cluster which will + # 
allow deploying resources (EBS storage class) into the cluster + enable_cluster_creator_admin_permissions = true + vpc_id = data.aws_vpc.this.id subnet_ids = data.aws_subnets.this.ids @@ -49,9 +41,6 @@ module "eks" { outpost_arns = [local.outpost_arn] } - # Local clusters will automatically add the node group IAM role to the aws-auth configmap - manage_aws_auth_configmap = true - # Extend cluster security group rules cluster_security_group_additional_rules = { ingress_vpc_https = { diff --git a/examples/outposts/outputs.tf b/examples/outposts/outputs.tf index 43334ecc0a..24183fd207 100644 --- a/examples/outposts/outputs.tf +++ b/examples/outposts/outputs.tf @@ -47,6 +47,25 @@ output "cluster_primary_security_group_id" { value = module.eks.cluster_primary_security_group_id } +output "cluster_service_cidr" { + description = "The CIDR block where Kubernetes pod and service IP addresses are assigned from" + value = module.eks.cluster_service_cidr +} + +output "cluster_ip_family" { + description = "The IP family used by the cluster (e.g. 
`ipv4` or `ipv6`)" + value = module.eks.cluster_ip_family +} + +################################################################################ +# Access Entry +################################################################################ + +output "access_entries" { + description = "Map of access entries created and their attributes" + value = module.eks.access_entries +} + ################################################################################ # KMS Key ################################################################################ @@ -200,12 +219,3 @@ output "self_managed_node_groups_autoscaling_group_names" { description = "List of the autoscaling group names created by self-managed node groups" value = module.eks.self_managed_node_groups_autoscaling_group_names } - -################################################################################ -# Additional -################################################################################ - -output "aws_auth_configmap_yaml" { - description = "Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles" - value = module.eks.aws_auth_configmap_yaml -} diff --git a/examples/outposts/prerequisites/main.tf b/examples/outposts/prerequisites/main.tf index 014418121d..66ab2a4e29 100644 --- a/examples/outposts/prerequisites/main.tf +++ b/examples/outposts/prerequisites/main.tf @@ -23,7 +23,7 @@ locals { module "ssm_bastion_ec2" { source = "terraform-aws-modules/ec2-instance/aws" - version = "~> 4.2" + version = "~> 5.5" name = "${local.name}-bastion" @@ -56,7 +56,7 @@ module "ssm_bastion_ec2" { rm terraform_${local.terraform_version}_linux_amd64.zip 2> /dev/null # Install kubectl - curl -LO https://dl.k8s.io/release/v1.27.0/bin/linux/amd64/kubectl + curl -LO https://dl.k8s.io/release/v1.29.0/bin/linux/amd64/kubectl install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl # Remove default awscli which is v1 - we want latest v2 @@ -80,7 +80,7 @@ 
module "ssm_bastion_ec2" { module "bastion_security_group" { source = "terraform-aws-modules/security-group/aws" - version = "~> 4.0" + version = "~> 5.0" name = "${local.name}-bastion" description = "Security group to allow provisioning ${local.name} EKS local cluster on Outposts" diff --git a/examples/outposts/prerequisites/versions.tf b/examples/outposts/prerequisites/versions.tf index 5f058b4c11..6f83215f50 100644 --- a/examples/outposts/prerequisites/versions.tf +++ b/examples/outposts/prerequisites/versions.tf @@ -1,10 +1,10 @@ terraform { - required_version = ">= 1.0" + required_version = ">= 1.3.2" required_providers { aws = { source = "hashicorp/aws" - version = ">= 4.34" + version = ">= 5.40" } } } diff --git a/examples/outposts/versions.tf b/examples/outposts/versions.tf index aeb892f359..2ac7910678 100644 --- a/examples/outposts/versions.tf +++ b/examples/outposts/versions.tf @@ -1,14 +1,14 @@ terraform { - required_version = ">= 1.0" + required_version = ">= 1.3.2" required_providers { aws = { source = "hashicorp/aws" - version = ">= 4.47" + version = ">= 5.40" } kubernetes = { source = "hashicorp/kubernetes" - version = ">= 2.10" + version = ">= 2.20" } } } diff --git a/examples/self_managed_node_group/README.md b/examples/self_managed_node_group/README.md index c5ddbc325c..f6f063f693 100644 --- a/examples/self_managed_node_group/README.md +++ b/examples/self_managed_node_group/README.md @@ -25,24 +25,25 @@ Note that this example may create resources which cost money. 
Run `terraform des | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | >= 4.47 | -| [kubernetes](#requirement\_kubernetes) | >= 2.10 | +| [terraform](#requirement\_terraform) | >= 1.3.2 | +| [aws](#requirement\_aws) | >= 5.40 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 4.47 | +| [aws](#provider\_aws) | >= 5.40 | ## Modules | Name | Source | Version | |------|--------|---------| -| [ebs\_kms\_key](#module\_ebs\_kms\_key) | terraform-aws-modules/kms/aws | ~> 1.5 | +| [disabled\_self\_managed\_node\_group](#module\_disabled\_self\_managed\_node\_group) | ../../modules/self-managed-node-group | n/a | +| [ebs\_kms\_key](#module\_ebs\_kms\_key) | terraform-aws-modules/kms/aws | ~> 2.0 | | [eks](#module\_eks) | ../.. | n/a | | [key\_pair](#module\_key\_pair) | terraform-aws-modules/key-pair/aws | ~> 2.0 | -| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 4.0 | +| [kms](#module\_kms) | terraform-aws-modules/kms/aws | ~> 2.1 | +| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 5.0 | ## Resources @@ -62,7 +63,7 @@ No inputs. | Name | Description | |------|-------------| -| [aws\_auth\_configmap\_yaml](#output\_aws\_auth\_configmap\_yaml) | Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles | +| [access\_entries](#output\_access\_entries) | Map of access entries created and their attributes | | [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created | | [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created | | [cluster\_addons](#output\_cluster\_addons) | Map of attribute maps for all EKS cluster addons enabled | @@ -74,12 +75,14 @@ No inputs. 
| [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role | | [cluster\_id](#output\_cluster\_id) | The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts | | [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled | +| [cluster\_ip\_family](#output\_cluster\_ip\_family) | The IP family used by the cluster (e.g. `ipv4` or `ipv6`) | | [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster | | [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider | | [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster | | [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console | | [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group | | [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | ID of the cluster security group | +| [cluster\_service\_cidr](#output\_cluster\_service\_cidr) | The CIDR block where Kubernetes pod and service IP addresses are assigned from | | [cluster\_status](#output\_cluster\_status) | Status of the EKS cluster. 
One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED` | | [cluster\_tls\_certificate\_sha1\_fingerprint](#output\_cluster\_tls\_certificate\_sha1\_fingerprint) | The SHA1 fingerprint of the public key of the cluster's certificate | | [eks\_managed\_node\_groups](#output\_eks\_managed\_node\_groups) | Map of attribute maps for all EKS managed node groups created | diff --git a/examples/self_managed_node_group/main.tf b/examples/self_managed_node_group/main.tf index 87be519086..1d0e80b2fd 100644 --- a/examples/self_managed_node_group/main.tf +++ b/examples/self_managed_node_group/main.tf @@ -2,24 +2,12 @@ provider "aws" { region = local.region } -provider "kubernetes" { - host = module.eks.cluster_endpoint - cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) - - exec { - api_version = "client.authentication.k8s.io/v1beta1" - command = "aws" - # This requires the awscli to be installed locally where Terraform is executed - args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] - } -} - data "aws_caller_identity" "current" {} data "aws_availability_zones" "available" {} locals { name = "ex-${replace(basename(path.cwd), "_", "-")}" - cluster_version = "1.27" + cluster_version = "1.29" region = "eu-west-1" vpc_cidr = "10.0.0.0/16" @@ -43,6 +31,12 @@ module "eks" { cluster_version = local.cluster_version cluster_endpoint_public_access = true + enable_cluster_creator_admin_permissions = true + + # Enable EFA support by adding necessary security group rules + # to the shared node security group + enable_efa_support = true + cluster_addons = { coredns = { most_recent = true @@ -59,9 +53,12 @@ module "eks" { subnet_ids = module.vpc.private_subnets control_plane_subnet_ids = module.vpc.intra_subnets - # Self managed node groups will not automatically create the aws-auth configmap so we need to - create_aws_auth_configmap = true - manage_aws_auth_configmap = true + # External encryption key + create_kms_key = false + 
cluster_encryption_config = { + resources = ["secrets"] + provider_key_arn = module.kms.key_arn + } self_managed_node_group_defaults = { # enable discovery of autoscaling groups by cluster-autoscaler @@ -75,11 +72,34 @@ module "eks" { # Default node group - as provisioned by the module defaults default_node_group = {} + # AL2023 node group utilizing new user data format which utilizes nodeadm + # to join nodes to the cluster (instead of /etc/eks/bootstrap.sh) + al2023_nodeadm = { + ami_type = "AL2023_x86_64_STANDARD" + + cloudinit_pre_nodeadm = [ + { + content_type = "application/node.eks.aws" + content = <<-EOT + --- + apiVersion: node.eks.aws/v1alpha1 + kind: NodeConfig + spec: + kubelet: + config: + shutdownGracePeriod: 30s + featureGates: + DisableKubeletCloudCredentialProviders: true + EOT + } + ] + } + # Bottlerocket node group bottlerocket = { name = "bottlerocket-self-mng" - platform = "bottlerocket" + ami_type = "BOTTLEROCKET_x86_64" ami_id = data.aws_ami.eks_default_bottlerocket.id instance_type = "m5.large" desired_size = 2 @@ -141,36 +161,6 @@ module "eks" { } } - efa = { - min_size = 1 - max_size = 2 - desired_size = 1 - - # aws ec2 describe-instance-types --region eu-west-1 --filters Name=network-info.efa-supported,Values=true --query "InstanceTypes[*].[InstanceType]" --output text | sort - instance_type = "c5n.9xlarge" - - post_bootstrap_user_data = <<-EOT - # Install EFA - curl -O https://efa-installer.amazonaws.com/aws-efa-installer-latest.tar.gz - tar -xf aws-efa-installer-latest.tar.gz && cd aws-efa-installer - ./efa_installer.sh -y --minimal - fi_info -p efa -t FI_EP_RDM - - # Disable ptrace - sysctl -w kernel.yama.ptrace_scope=0 - EOT - - network_interfaces = [ - { - description = "EFA interface example" - delete_on_termination = true - device_index = 0 - associate_public_ip_address = false - interface_type = "efa" - } - ] - } - # Complete complete = { name = "complete-self-mng" @@ -216,6 +206,58 @@ module "eks" { } } + instance_attributes = { 
+ name = "instance-attributes" + + min_size = 1 + max_size = 2 + desired_size = 1 + + bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'" + + instance_type = null + + # launch template configuration + instance_requirements = { + cpu_manufacturers = ["intel"] + instance_generations = ["current", "previous"] + spot_max_price_percentage_over_lowest_price = 100 + + vcpu_count = { + min = 1 + } + + allowed_instance_types = ["t*", "m*"] + } + + use_mixed_instances_policy = true + mixed_instances_policy = { + instances_distribution = { + on_demand_base_capacity = 0 + on_demand_percentage_above_base_capacity = 0 + on_demand_allocation_strategy = "lowest-price" + spot_allocation_strategy = "price-capacity-optimized" + } + + # ASG configuration + override = [ + { + instance_requirements = { + cpu_manufacturers = ["intel"] + instance_generations = ["current", "previous"] + spot_max_price_percentage_over_lowest_price = 100 + + vcpu_count = { + min = 1 + } + + allowed_instance_types = ["t*", "m*"] + } + } + ] + } + } + metadata_options = { http_endpoint = "enabled" http_tokens = "required" @@ -235,28 +277,51 @@ module "eks" { additional = aws_iam_policy.additional.arn } - timeouts = { - create = "80m" - update = "80m" - delete = "80m" - } - tags = { ExtraTag = "Self managed node group complete example" } } + + efa = { + # Disabling automatic creation due to instance type/quota availability + # Can be enabled when appropriate for testing/validation + create = false + + ami_type = "AL2_x86_64_GPU" + instance_type = "trn1n.32xlarge" + + enable_efa_support = true + pre_bootstrap_user_data = <<-EOT + # Mount NVME instance store volumes since they are typically + # available on instances that support EFA + setup-local-disks raid0 + EOT + + min_size = 2 + max_size = 2 + desired_size = 2 + } } tags = local.tags } +module "disabled_self_managed_node_group" { + source = "../../modules/self-managed-node-group" + + create = false + + # Hard 
requirement + cluster_service_cidr = "" +} + ################################################################################ # Supporting Resources ################################################################################ module "vpc" { source = "terraform-aws-modules/vpc/aws" - version = "~> 4.0" + version = "~> 5.0" name = local.name cidr = local.vpc_cidr @@ -312,7 +377,7 @@ module "key_pair" { module "ebs_kms_key" { source = "terraform-aws-modules/kms/aws" - version = "~> 1.5" + version = "~> 2.0" description = "Customer managed key to encrypt EKS managed node group volumes" @@ -334,6 +399,18 @@ module "ebs_kms_key" { tags = local.tags } +module "kms" { + source = "terraform-aws-modules/kms/aws" + version = "~> 2.1" + + aliases = ["eks/${local.name}"] + description = "${local.name} cluster encryption key" + enable_default_policy = true + key_owners = [data.aws_caller_identity.current.arn] + + tags = local.tags +} + resource "aws_iam_policy" "additional" { name = "${local.name}-additional" description = "Example usage of node additional policy" diff --git a/examples/self_managed_node_group/outputs.tf b/examples/self_managed_node_group/outputs.tf index 43334ecc0a..24183fd207 100644 --- a/examples/self_managed_node_group/outputs.tf +++ b/examples/self_managed_node_group/outputs.tf @@ -47,6 +47,25 @@ output "cluster_primary_security_group_id" { value = module.eks.cluster_primary_security_group_id } +output "cluster_service_cidr" { + description = "The CIDR block where Kubernetes pod and service IP addresses are assigned from" + value = module.eks.cluster_service_cidr +} + +output "cluster_ip_family" { + description = "The IP family used by the cluster (e.g. 
`ipv4` or `ipv6`)" + value = module.eks.cluster_ip_family +} + +################################################################################ +# Access Entry +################################################################################ + +output "access_entries" { + description = "Map of access entries created and their attributes" + value = module.eks.access_entries +} + ################################################################################ # KMS Key ################################################################################ @@ -200,12 +219,3 @@ output "self_managed_node_groups_autoscaling_group_names" { description = "List of the autoscaling group names created by self-managed node groups" value = module.eks.self_managed_node_groups_autoscaling_group_names } - -################################################################################ -# Additional -################################################################################ - -output "aws_auth_configmap_yaml" { - description = "Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles" - value = module.eks.aws_auth_configmap_yaml -} diff --git a/examples/self_managed_node_group/versions.tf b/examples/self_managed_node_group/versions.tf index aeb892f359..6f83215f50 100644 --- a/examples/self_managed_node_group/versions.tf +++ b/examples/self_managed_node_group/versions.tf @@ -1,14 +1,10 @@ terraform { - required_version = ">= 1.0" + required_version = ">= 1.3.2" required_providers { aws = { source = "hashicorp/aws" - version = ">= 4.47" - } - kubernetes = { - source = "hashicorp/kubernetes" - version = ">= 2.10" + version = ">= 5.40" } } } diff --git a/examples/user_data/README.md b/examples/user_data/README.md index cea7dce755..de9b419bb4 100644 --- a/examples/user_data/README.md +++ b/examples/user_data/README.md @@ -17,37 +17,85 @@ $ terraform apply | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) 
| >= 1.0 | +| [terraform](#requirement\_terraform) | >= 1.3.2 | +| [local](#requirement\_local) | >= 2.4 | ## Providers -No providers. +| Name | Version | +|------|---------| +| [local](#provider\_local) | >= 2.4 | ## Modules | Name | Source | Version | |------|--------|---------| +| [eks\_mng\_al2023\_additional](#module\_eks\_mng\_al2023\_additional) | ../../modules/_user_data | n/a | +| [eks\_mng\_al2023\_custom\_ami](#module\_eks\_mng\_al2023\_custom\_ami) | ../../modules/_user_data | n/a | +| [eks\_mng\_al2023\_custom\_template](#module\_eks\_mng\_al2023\_custom\_template) | ../../modules/_user_data | n/a | +| [eks\_mng\_al2023\_no\_op](#module\_eks\_mng\_al2023\_no\_op) | ../../modules/_user_data | n/a | +| [eks\_mng\_al2\_additional](#module\_eks\_mng\_al2\_additional) | ../../modules/_user_data | n/a | +| [eks\_mng\_al2\_custom\_ami](#module\_eks\_mng\_al2\_custom\_ami) | ../../modules/_user_data | n/a | +| [eks\_mng\_al2\_custom\_ami\_ipv6](#module\_eks\_mng\_al2\_custom\_ami\_ipv6) | ../../modules/_user_data | n/a | +| [eks\_mng\_al2\_custom\_template](#module\_eks\_mng\_al2\_custom\_template) | ../../modules/_user_data | n/a | +| [eks\_mng\_al2\_disabled](#module\_eks\_mng\_al2\_disabled) | ../../modules/_user_data | n/a | +| [eks\_mng\_al2\_no\_op](#module\_eks\_mng\_al2\_no\_op) | ../../modules/_user_data | n/a | | [eks\_mng\_bottlerocket\_additional](#module\_eks\_mng\_bottlerocket\_additional) | ../../modules/_user_data | n/a | | [eks\_mng\_bottlerocket\_custom\_ami](#module\_eks\_mng\_bottlerocket\_custom\_ami) | ../../modules/_user_data | n/a | | [eks\_mng\_bottlerocket\_custom\_template](#module\_eks\_mng\_bottlerocket\_custom\_template) | ../../modules/_user_data | n/a | | [eks\_mng\_bottlerocket\_no\_op](#module\_eks\_mng\_bottlerocket\_no\_op) | ../../modules/_user_data | n/a | -| [eks\_mng\_linux\_additional](#module\_eks\_mng\_linux\_additional) | ../../modules/_user_data | n/a | -| 
[eks\_mng\_linux\_custom\_ami](#module\_eks\_mng\_linux\_custom\_ami) | ../../modules/_user_data | n/a | -| [eks\_mng\_linux\_custom\_template](#module\_eks\_mng\_linux\_custom\_template) | ../../modules/_user_data | n/a | -| [eks\_mng\_linux\_no\_op](#module\_eks\_mng\_linux\_no\_op) | ../../modules/_user_data | n/a | +| [eks\_mng\_windows\_additional](#module\_eks\_mng\_windows\_additional) | ../../modules/_user_data | n/a | +| [eks\_mng\_windows\_custom\_ami](#module\_eks\_mng\_windows\_custom\_ami) | ../../modules/_user_data | n/a | +| [eks\_mng\_windows\_custom\_template](#module\_eks\_mng\_windows\_custom\_template) | ../../modules/_user_data | n/a | +| [eks\_mng\_windows\_no\_op](#module\_eks\_mng\_windows\_no\_op) | ../../modules/_user_data | n/a | +| [self\_mng\_al2023\_bootstrap](#module\_self\_mng\_al2023\_bootstrap) | ../../modules/_user_data | n/a | +| [self\_mng\_al2023\_custom\_template](#module\_self\_mng\_al2023\_custom\_template) | ../../modules/_user_data | n/a | +| [self\_mng\_al2023\_no\_op](#module\_self\_mng\_al2023\_no\_op) | ../../modules/_user_data | n/a | +| [self\_mng\_al2\_bootstrap](#module\_self\_mng\_al2\_bootstrap) | ../../modules/_user_data | n/a | +| [self\_mng\_al2\_bootstrap\_ipv6](#module\_self\_mng\_al2\_bootstrap\_ipv6) | ../../modules/_user_data | n/a | +| [self\_mng\_al2\_custom\_template](#module\_self\_mng\_al2\_custom\_template) | ../../modules/_user_data | n/a | +| [self\_mng\_al2\_no\_op](#module\_self\_mng\_al2\_no\_op) | ../../modules/_user_data | n/a | | [self\_mng\_bottlerocket\_bootstrap](#module\_self\_mng\_bottlerocket\_bootstrap) | ../../modules/_user_data | n/a | | [self\_mng\_bottlerocket\_custom\_template](#module\_self\_mng\_bottlerocket\_custom\_template) | ../../modules/_user_data | n/a | | [self\_mng\_bottlerocket\_no\_op](#module\_self\_mng\_bottlerocket\_no\_op) | ../../modules/_user_data | n/a | -| [self\_mng\_linux\_bootstrap](#module\_self\_mng\_linux\_bootstrap) | ../../modules/_user_data | n/a | 
-| [self\_mng\_linux\_custom\_template](#module\_self\_mng\_linux\_custom\_template) | ../../modules/_user_data | n/a | -| [self\_mng\_linux\_no\_op](#module\_self\_mng\_linux\_no\_op) | ../../modules/_user_data | n/a | | [self\_mng\_windows\_bootstrap](#module\_self\_mng\_windows\_bootstrap) | ../../modules/_user_data | n/a | | [self\_mng\_windows\_custom\_template](#module\_self\_mng\_windows\_custom\_template) | ../../modules/_user_data | n/a | | [self\_mng\_windows\_no\_op](#module\_self\_mng\_windows\_no\_op) | ../../modules/_user_data | n/a | ## Resources -No resources. +| Name | Type | +|------|------| +| [local_file.eks_mng_al2023_additional](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | +| [local_file.eks_mng_al2023_custom_ami](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | +| [local_file.eks_mng_al2023_custom_template](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | +| [local_file.eks_mng_al2023_no_op](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | +| [local_file.eks_mng_al2_additional](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | +| [local_file.eks_mng_al2_custom_ami](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | +| [local_file.eks_mng_al2_custom_ami_ipv6](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | +| [local_file.eks_mng_al2_custom_template](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | +| [local_file.eks_mng_al2_no_op](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | +| [local_file.eks_mng_bottlerocket_additional](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | 
resource | +| [local_file.eks_mng_bottlerocket_custom_ami](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | +| [local_file.eks_mng_bottlerocket_custom_template](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | +| [local_file.eks_mng_bottlerocket_no_op](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | +| [local_file.eks_mng_windows_additional](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | +| [local_file.eks_mng_windows_custom_ami](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | +| [local_file.eks_mng_windows_custom_template](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | +| [local_file.eks_mng_windows_no_op](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | +| [local_file.self_mng_al2023_bootstrap](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | +| [local_file.self_mng_al2023_custom_template](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | +| [local_file.self_mng_al2023_no_op](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | +| [local_file.self_mng_al2_bootstrap](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | +| [local_file.self_mng_al2_bootstrap_ipv6](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | +| [local_file.self_mng_al2_custom_template](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | +| [local_file.self_mng_al2_no_op](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | +| 
[local_file.self_mng_bottlerocket_bootstrap](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | +| [local_file.self_mng_bottlerocket_custom_template](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | +| [local_file.self_mng_bottlerocket_no_op](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | +| [local_file.self_mng_windows_bootstrap](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | +| [local_file.self_mng_windows_custom_template](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | +| [local_file.self_mng_windows_no_op](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | ## Inputs @@ -55,23 +103,5 @@ No inputs. ## Outputs -| Name | Description | -|------|-------------| -| [eks\_mng\_bottlerocket\_additional](#output\_eks\_mng\_bottlerocket\_additional) | Base64 decoded user data rendered for the provided inputs | -| [eks\_mng\_bottlerocket\_custom\_ami](#output\_eks\_mng\_bottlerocket\_custom\_ami) | Base64 decoded user data rendered for the provided inputs | -| [eks\_mng\_bottlerocket\_custom\_template](#output\_eks\_mng\_bottlerocket\_custom\_template) | Base64 decoded user data rendered for the provided inputs | -| [eks\_mng\_bottlerocket\_no\_op](#output\_eks\_mng\_bottlerocket\_no\_op) | Base64 decoded user data rendered for the provided inputs | -| [eks\_mng\_linux\_additional](#output\_eks\_mng\_linux\_additional) | Base64 decoded user data rendered for the provided inputs | -| [eks\_mng\_linux\_custom\_ami](#output\_eks\_mng\_linux\_custom\_ami) | Base64 decoded user data rendered for the provided inputs | -| [eks\_mng\_linux\_custom\_template](#output\_eks\_mng\_linux\_custom\_template) | Base64 decoded user data rendered for the provided inputs | -| 
[eks\_mng\_linux\_no\_op](#output\_eks\_mng\_linux\_no\_op) | Base64 decoded user data rendered for the provided inputs | -| [self\_mng\_bottlerocket\_bootstrap](#output\_self\_mng\_bottlerocket\_bootstrap) | Base64 decoded user data rendered for the provided inputs | -| [self\_mng\_bottlerocket\_custom\_template](#output\_self\_mng\_bottlerocket\_custom\_template) | Base64 decoded user data rendered for the provided inputs | -| [self\_mng\_bottlerocket\_no\_op](#output\_self\_mng\_bottlerocket\_no\_op) | Base64 decoded user data rendered for the provided inputs | -| [self\_mng\_linux\_bootstrap](#output\_self\_mng\_linux\_bootstrap) | Base64 decoded user data rendered for the provided inputs | -| [self\_mng\_linux\_custom\_template](#output\_self\_mng\_linux\_custom\_template) | Base64 decoded user data rendered for the provided inputs | -| [self\_mng\_linux\_no\_op](#output\_self\_mng\_linux\_no\_op) | Base64 decoded user data rendered for the provided inputs | -| [self\_mng\_windows\_bootstrap](#output\_self\_mng\_windows\_bootstrap) | Base64 decoded user data rendered for the provided inputs | -| [self\_mng\_windows\_custom\_template](#output\_self\_mng\_windows\_custom\_template) | Base64 decoded user data rendered for the provided inputs | -| [self\_mng\_windows\_no\_op](#output\_self\_mng\_windows\_no\_op) | Base64 decoded user data rendered for the provided inputs | +No outputs. 
diff --git a/examples/user_data/main.tf b/examples/user_data/main.tf index d7d513190e..9a55b3cf77 100644 --- a/examples/user_data/main.tf +++ b/examples/user_data/main.tf @@ -4,32 +4,45 @@ locals { cluster_endpoint = "https://012345678903AB2BAE5D1E0BFE0E2B50.gr7.us-east-1.eks.amazonaws.com" cluster_auth_base64 = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKbXFqQ1VqNGdGR2w3ZW5PeWthWnZ2RjROOTVOUEZCM2o0cGhVZUsrWGFtN2ZSQnZya0d6OGxKZmZEZWF2b2plTwpQK2xOZFlqdHZncmxCUEpYdHZIZmFzTzYxVzdIZmdWQ2EvamdRM2w3RmkvL1dpQmxFOG9oWUZkdWpjc0s1SXM2CnNkbk5KTTNYUWN2TysrSitkV09NT2ZlNzlsSWdncmdQLzgvRU9CYkw3eUY1aU1hS3lsb1RHL1V3TlhPUWt3ZUcKblBNcjdiUmdkQ1NCZTlXYXowOGdGRmlxV2FOditsTDhsODBTdFZLcWVNVlUxbjQyejVwOVpQRTd4T2l6L0xTNQpYV2lXWkVkT3pMN0xBWGVCS2gzdkhnczFxMkI2d1BKZnZnS1NzWllQRGFpZTloT1NNOUJkNFNPY3JrZTRYSVBOCkVvcXVhMlYrUDRlTWJEQzhMUkVWRDdCdVZDdWdMTldWOTBoL3VJUy9WU2VOcEdUOGVScE5DakszSjc2aFlsWm8KWjNGRG5QWUY0MWpWTHhiOXF0U1ROdEp6amYwWXBEYnFWci9xZzNmQWlxbVorMzd3YWM1eHlqMDZ4cmlaRUgzZgpUM002d2lCUEVHYVlGeWN5TmNYTk5aYW9DWDJVL0N1d2JsUHAKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==" cluster_service_ipv4_cidr = "172.16.0.0/16" + cluster_service_ipv6_cidr = "fdd3:7636:68bc::/108" + cluster_service_cidr = "192.168.0.0/16" } ################################################################################ -# User Data Module +# EKS managed node group - AL2 ################################################################################ -# EKS managed node group - linux -module "eks_mng_linux_no_op" { +module "eks_mng_al2_disabled" { source = "../../modules/_user_data" + + create = false +} + +module "eks_mng_al2_no_op" { + source = "../../modules/_user_data" + + # Hard requirement + cluster_service_cidr = local.cluster_service_cidr } -module "eks_mng_linux_additional" { +module "eks_mng_al2_additional" { source = "../../modules/_user_data" + # Hard requirement + cluster_service_cidr = local.cluster_service_cidr + pre_bootstrap_user_data 
= <<-EOT export USE_MAX_PODS=false EOT } -module "eks_mng_linux_custom_ami" { +module "eks_mng_al2_custom_ami" { source = "../../modules/_user_data" - cluster_name = local.name - cluster_endpoint = local.cluster_endpoint - cluster_auth_base64 = local.cluster_auth_base64 - cluster_service_ipv4_cidr = local.cluster_service_ipv4_cidr + cluster_name = local.name + cluster_endpoint = local.cluster_endpoint + cluster_auth_base64 = local.cluster_auth_base64 + cluster_service_cidr = local.cluster_service_ipv4_cidr enable_bootstrap_user_data = true @@ -44,13 +57,35 @@ module "eks_mng_linux_custom_ami" { EOT } +module "eks_mng_al2_custom_ami_ipv6" { + source = "../../modules/_user_data" + + cluster_name = local.name + cluster_endpoint = local.cluster_endpoint + cluster_auth_base64 = local.cluster_auth_base64 + cluster_ip_family = "ipv6" + cluster_service_cidr = local.cluster_service_ipv6_cidr + + enable_bootstrap_user_data = true -module "eks_mng_linux_custom_template" { + pre_bootstrap_user_data = <<-EOT + export FOO=bar + EOT + + bootstrap_extra_args = "--kubelet-extra-args '--instance-type t3a.large'" + + post_bootstrap_user_data = <<-EOT + echo "All done" + EOT +} + +module "eks_mng_al2_custom_template" { source = "../../modules/_user_data" - cluster_name = local.name - cluster_endpoint = local.cluster_endpoint - cluster_auth_base64 = local.cluster_auth_base64 + cluster_name = local.name + cluster_endpoint = local.cluster_endpoint + cluster_auth_base64 = local.cluster_auth_base64 + cluster_service_cidr = local.cluster_service_ipv4_cidr user_data_template_path = "${path.module}/templates/linux_custom.tpl" @@ -66,17 +101,132 @@ module "eks_mng_linux_custom_template" { EOT } -# EKS managed node group - bottlerocket +################################################################################ +# EKS managed node group - AL2023 +################################################################################ + +module "eks_mng_al2023_no_op" { + source = 
"../../modules/_user_data" + + ami_type = "AL2023_x86_64_STANDARD" + + # Hard requirement + cluster_service_cidr = local.cluster_service_cidr +} + +module "eks_mng_al2023_additional" { + source = "../../modules/_user_data" + + ami_type = "AL2023_x86_64_STANDARD" + + # Hard requirement + cluster_service_cidr = local.cluster_service_cidr + + cloudinit_pre_nodeadm = [{ + content = <<-EOT + --- + apiVersion: node.eks.aws/v1alpha1 + kind: NodeConfig + spec: + kubelet: + config: + shutdownGracePeriod: 30s + featureGates: + DisableKubeletCloudCredentialProviders: true + EOT + content_type = "application/node.eks.aws" + }] +} + +module "eks_mng_al2023_custom_ami" { + source = "../../modules/_user_data" + + ami_type = "AL2023_x86_64_STANDARD" + + cluster_name = local.name + cluster_endpoint = local.cluster_endpoint + cluster_auth_base64 = local.cluster_auth_base64 + cluster_service_cidr = local.cluster_service_cidr + + enable_bootstrap_user_data = true + + cloudinit_pre_nodeadm = [{ + content = <<-EOT + --- + apiVersion: node.eks.aws/v1alpha1 + kind: NodeConfig + spec: + kubelet: + config: + shutdownGracePeriod: 30s + featureGates: + DisableKubeletCloudCredentialProviders: true + EOT + content_type = "application/node.eks.aws" + }] + + cloudinit_post_nodeadm = [{ + content = <<-EOT + echo "All done" + EOT + content_type = "text/x-shellscript; charset=\"us-ascii\"" + }] +} + +module "eks_mng_al2023_custom_template" { + source = "../../modules/_user_data" + + ami_type = "AL2023_x86_64_STANDARD" + + cluster_name = local.name + cluster_endpoint = local.cluster_endpoint + cluster_auth_base64 = local.cluster_auth_base64 + cluster_service_cidr = local.cluster_service_cidr + + enable_bootstrap_user_data = true + user_data_template_path = "${path.module}/templates/al2023_custom.tpl" + + cloudinit_pre_nodeadm = [{ + content = <<-EOT + --- + apiVersion: node.eks.aws/v1alpha1 + kind: NodeConfig + spec: + kubelet: + config: + shutdownGracePeriod: 30s + featureGates: + 
DisableKubeletCloudCredentialProviders: true + EOT + content_type = "application/node.eks.aws" + }] + + cloudinit_post_nodeadm = [{ + content = <<-EOT + echo "All done" + EOT + content_type = "text/x-shellscript; charset=\"us-ascii\"" + }] +} + +################################################################################ +# EKS managed node group - Bottlerocket +################################################################################ + module "eks_mng_bottlerocket_no_op" { source = "../../modules/_user_data" - platform = "bottlerocket" + ami_type = "BOTTLEROCKET_x86_64" + + # Hard requirement + cluster_service_cidr = local.cluster_service_cidr } module "eks_mng_bottlerocket_additional" { source = "../../modules/_user_data" - platform = "bottlerocket" + ami_type = "BOTTLEROCKET_x86_64" + cluster_service_cidr = local.cluster_service_cidr bootstrap_extra_args = <<-EOT # extra args added @@ -88,11 +238,15 @@ module "eks_mng_bottlerocket_additional" { module "eks_mng_bottlerocket_custom_ami" { source = "../../modules/_user_data" - platform = "bottlerocket" + ami_type = "BOTTLEROCKET_x86_64" - cluster_name = local.name - cluster_endpoint = local.cluster_endpoint - cluster_auth_base64 = local.cluster_auth_base64 + cluster_name = local.name + cluster_endpoint = local.cluster_endpoint + cluster_auth_base64 = local.cluster_auth_base64 + cluster_service_cidr = local.cluster_service_cidr + additional_cluster_dns_ips = [ + "169.254.20.10" + ] enable_bootstrap_user_data = true @@ -106,11 +260,13 @@ module "eks_mng_bottlerocket_custom_ami" { module "eks_mng_bottlerocket_custom_template" { source = "../../modules/_user_data" - platform = "bottlerocket" + ami_type = "BOTTLEROCKET_x86_64" cluster_name = local.name cluster_endpoint = local.cluster_endpoint cluster_auth_base64 = local.cluster_auth_base64 + # Hard requirement + cluster_service_cidr = local.cluster_service_cidr user_data_template_path = "${path.module}/templates/bottlerocket_custom.tpl" @@ -121,23 +277,107 
@@ module "eks_mng_bottlerocket_custom_template" { EOT } -# Self managed node group - linux -module "self_mng_linux_no_op" { +################################################################################ +# EKS managed node group - Windows +################################################################################ + +module "eks_mng_windows_no_op" { source = "../../modules/_user_data" - is_eks_managed_node_group = false + ami_type = "WINDOWS_CORE_2022_x86_64" + + # Hard requirement + cluster_service_cidr = local.cluster_service_cidr } -module "self_mng_linux_bootstrap" { +module "eks_mng_windows_additional" { source = "../../modules/_user_data" + ami_type = "WINDOWS_CORE_2022_x86_64" + + # Hard requirement + cluster_service_cidr = local.cluster_service_cidr + + pre_bootstrap_user_data = <<-EOT + [string]$Something = 'IDoNotKnowAnyPowerShell ¯\_(ツ)_/¯' + EOT +} + +module "eks_mng_windows_custom_ami" { + source = "../../modules/_user_data" + + ami_type = "WINDOWS_CORE_2022_x86_64" + + cluster_name = local.name + cluster_endpoint = local.cluster_endpoint + cluster_auth_base64 = local.cluster_auth_base64 + # Hard requirement + cluster_service_cidr = local.cluster_service_cidr + enable_bootstrap_user_data = true - is_eks_managed_node_group = false + + pre_bootstrap_user_data = <<-EOT + [string]$Something = 'IDoNotKnowAnyPowerShell ¯\_(ツ)_/¯' + EOT + # I don't know if this is the right way on Windows, but its just a string check here anyways + bootstrap_extra_args = "-KubeletExtraArgs --node-labels=node.kubernetes.io/lifecycle=spot" + + post_bootstrap_user_data = <<-EOT + [string]$Something = 'IStillDoNotKnowAnyPowerShell ¯\_(ツ)_/¯' + EOT +} + +module "eks_mng_windows_custom_template" { + source = "../../modules/_user_data" + + ami_type = "WINDOWS_CORE_2022_x86_64" cluster_name = local.name cluster_endpoint = local.cluster_endpoint cluster_auth_base64 = local.cluster_auth_base64 + # Hard requirement + cluster_service_cidr = local.cluster_service_cidr + + 
enable_bootstrap_user_data = true + + user_data_template_path = "${path.module}/templates/windows_custom.tpl" + + pre_bootstrap_user_data = <<-EOT + [string]$Something = 'IDoNotKnowAnyPowerShell ¯\_(ツ)_/¯' + EOT + # I don't know if this is the right way on Windows, but its just a string check here anyways + bootstrap_extra_args = "-KubeletExtraArgs --node-labels=node.kubernetes.io/lifecycle=spot" + + post_bootstrap_user_data = <<-EOT + [string]$Something = 'IStillDoNotKnowAnyPowerShell ¯\_(ツ)_/¯' + EOT +} + +################################################################################ +# Self-managed node group - AL2 +################################################################################ + +module "self_mng_al2_no_op" { + source = "../../modules/_user_data" + + is_eks_managed_node_group = false + + # Hard requirement + cluster_service_cidr = local.cluster_service_cidr +} + +module "self_mng_al2_bootstrap" { + source = "../../modules/_user_data" + + enable_bootstrap_user_data = true + is_eks_managed_node_group = false + + cluster_name = local.name + cluster_endpoint = local.cluster_endpoint + cluster_auth_base64 = local.cluster_auth_base64 + cluster_service_cidr = local.cluster_service_ipv4_cidr + pre_bootstrap_user_data = <<-EOT echo "foo" export FOO=bar @@ -150,15 +390,40 @@ module "self_mng_linux_bootstrap" { EOT } -module "self_mng_linux_custom_template" { +module "self_mng_al2_bootstrap_ipv6" { source = "../../modules/_user_data" enable_bootstrap_user_data = true is_eks_managed_node_group = false - cluster_name = local.name - cluster_endpoint = local.cluster_endpoint - cluster_auth_base64 = local.cluster_auth_base64 + cluster_name = local.name + cluster_endpoint = local.cluster_endpoint + cluster_auth_base64 = local.cluster_auth_base64 + cluster_ip_family = "ipv6" + cluster_service_cidr = local.cluster_service_ipv6_cidr + + pre_bootstrap_user_data = <<-EOT + echo "foo" + export FOO=bar + EOT + + bootstrap_extra_args = "--kubelet-extra-args 
'--node-labels=node.kubernetes.io/lifecycle=spot'" + + post_bootstrap_user_data = <<-EOT + echo "All done" + EOT +} + +module "self_mng_al2_custom_template" { + source = "../../modules/_user_data" + + enable_bootstrap_user_data = true + is_eks_managed_node_group = false + + cluster_name = local.name + cluster_endpoint = local.cluster_endpoint + cluster_auth_base64 = local.cluster_auth_base64 + cluster_service_cidr = local.cluster_service_ipv4_cidr user_data_template_path = "${path.module}/templates/linux_custom.tpl" @@ -174,19 +439,114 @@ module "self_mng_linux_custom_template" { EOT } -# Self managed node group - bottlerocket +################################################################################ +# Self-managed node group - AL2023 +################################################################################ + +module "self_mng_al2023_no_op" { + source = "../../modules/_user_data" + + ami_type = "AL2023_x86_64_STANDARD" + + is_eks_managed_node_group = false + + # Hard requirement + cluster_service_cidr = local.cluster_service_cidr +} + +module "self_mng_al2023_bootstrap" { + source = "../../modules/_user_data" + + ami_type = "AL2023_x86_64_STANDARD" + + enable_bootstrap_user_data = true + is_eks_managed_node_group = false + + cluster_name = local.name + cluster_endpoint = local.cluster_endpoint + cluster_auth_base64 = local.cluster_auth_base64 + cluster_service_cidr = local.cluster_service_cidr + + cloudinit_pre_nodeadm = [{ + content = <<-EOT + --- + apiVersion: node.eks.aws/v1alpha1 + kind: NodeConfig + spec: + kubelet: + config: + shutdownGracePeriod: 30s + featureGates: + DisableKubeletCloudCredentialProviders: true + EOT + content_type = "application/node.eks.aws" + }] + + cloudinit_post_nodeadm = [{ + content = <<-EOT + echo "All done" + EOT + content_type = "text/x-shellscript; charset=\"us-ascii\"" + }] +} + +module "self_mng_al2023_custom_template" { + source = "../../modules/_user_data" + + ami_type = "AL2023_x86_64_STANDARD" + + 
enable_bootstrap_user_data = true + is_eks_managed_node_group = false + + cluster_name = local.name + cluster_endpoint = local.cluster_endpoint + cluster_auth_base64 = local.cluster_auth_base64 + cluster_service_cidr = local.cluster_service_cidr + + user_data_template_path = "${path.module}/templates/al2023_custom.tpl" + + cloudinit_pre_nodeadm = [{ + content = <<-EOT + --- + apiVersion: node.eks.aws/v1alpha1 + kind: NodeConfig + spec: + kubelet: + config: + shutdownGracePeriod: 30s + featureGates: + DisableKubeletCloudCredentialProviders: true + EOT + content_type = "application/node.eks.aws" + }] + + cloudinit_post_nodeadm = [{ + content = <<-EOT + echo "All done" + EOT + content_type = "text/x-shellscript; charset=\"us-ascii\"" + }] +} + +################################################################################ +# Self-managed node group - Bottlerocket +################################################################################ + module "self_mng_bottlerocket_no_op" { source = "../../modules/_user_data" - platform = "bottlerocket" + ami_type = "BOTTLEROCKET_x86_64" is_eks_managed_node_group = false + + # Hard requirement + cluster_service_cidr = local.cluster_service_cidr } module "self_mng_bottlerocket_bootstrap" { source = "../../modules/_user_data" - platform = "bottlerocket" + ami_type = "BOTTLEROCKET_x86_64" enable_bootstrap_user_data = true is_eks_managed_node_group = false @@ -195,6 +555,9 @@ module "self_mng_bottlerocket_bootstrap" { cluster_endpoint = local.cluster_endpoint cluster_auth_base64 = local.cluster_auth_base64 + # Hard requirement + cluster_service_cidr = local.cluster_service_cidr + bootstrap_extra_args = <<-EOT # extra args added [settings.kernel] @@ -205,7 +568,7 @@ module "self_mng_bottlerocket_bootstrap" { module "self_mng_bottlerocket_custom_template" { source = "../../modules/_user_data" - platform = "bottlerocket" + ami_type = "BOTTLEROCKET_x86_64" enable_bootstrap_user_data = true is_eks_managed_node_group = false @@ 
-214,6 +577,9 @@ module "self_mng_bottlerocket_custom_template" { cluster_endpoint = local.cluster_endpoint cluster_auth_base64 = local.cluster_auth_base64 + # Hard requirement + cluster_service_cidr = local.cluster_service_cidr + user_data_template_path = "${path.module}/templates/bottlerocket_custom.tpl" bootstrap_extra_args = <<-EOT @@ -223,19 +589,25 @@ module "self_mng_bottlerocket_custom_template" { EOT } -# Self managed node group - windows +################################################################################ +# Self-managed node group - Windows +################################################################################ + module "self_mng_windows_no_op" { source = "../../modules/_user_data" - platform = "windows" + ami_type = "WINDOWS_CORE_2022_x86_64" is_eks_managed_node_group = false + + # Hard requirement + cluster_service_cidr = local.cluster_service_cidr } module "self_mng_windows_bootstrap" { source = "../../modules/_user_data" - platform = "windows" + ami_type = "WINDOWS_CORE_2022_x86_64" enable_bootstrap_user_data = true is_eks_managed_node_group = false @@ -244,10 +616,13 @@ module "self_mng_windows_bootstrap" { cluster_endpoint = local.cluster_endpoint cluster_auth_base64 = local.cluster_auth_base64 + # Hard requirement + cluster_service_cidr = local.cluster_service_cidr + pre_bootstrap_user_data = <<-EOT [string]$Something = 'IDoNotKnowAnyPowerShell ¯\_(ツ)_/¯' EOT - # I don't know if this is the right way on WindowsOS, but its just a string check here anyways + # I don't know if this is the right way on Windows, but its just a string check here anyways bootstrap_extra_args = "-KubeletExtraArgs --node-labels=node.kubernetes.io/lifecycle=spot" post_bootstrap_user_data = <<-EOT @@ -258,7 +633,7 @@ module "self_mng_windows_bootstrap" { module "self_mng_windows_custom_template" { source = "../../modules/_user_data" - platform = "windows" + ami_type = "WINDOWS_CORE_2022_x86_64" enable_bootstrap_user_data = true 
is_eks_managed_node_group = false @@ -267,12 +642,15 @@ module "self_mng_windows_custom_template" { cluster_endpoint = local.cluster_endpoint cluster_auth_base64 = local.cluster_auth_base64 + # Hard requirement + cluster_service_cidr = local.cluster_service_cidr + user_data_template_path = "${path.module}/templates/windows_custom.tpl" pre_bootstrap_user_data = <<-EOT [string]$Something = 'IDoNotKnowAnyPowerShell ¯\_(ツ)_/¯' EOT - # I don't know if this is the right way on WindowsOS, but its just a string check here anyways + # I don't know if this is the right way on Windows, but its just a string check here anyways bootstrap_extra_args = "-KubeletExtraArgs --node-labels=node.kubernetes.io/lifecycle=spot" post_bootstrap_user_data = <<-EOT diff --git a/examples/user_data/outputs.tf b/examples/user_data/outputs.tf index dd2c3407e1..c40f6632d0 100644 --- a/examples/user_data/outputs.tf +++ b/examples/user_data/outputs.tf @@ -1,89 +1,189 @@ -# EKS managed node group - linux -output "eks_mng_linux_no_op" { - description = "Base64 decoded user data rendered for the provided inputs" - value = base64decode(module.eks_mng_linux_no_op.user_data) +################################################################################ +# We are writing to local file so that we can better track diffs across changes +# +# Its harder to verify changes and diffs when we use the standard `output` +# route, writing to file makes this easier and better highlights changes +# to avoid unintended disruptions +################################################################################ + +################################################################################ +# EKS managed node group - AL2 +################################################################################ + +resource "local_file" "eks_mng_al2_no_op" { + content = base64decode(module.eks_mng_al2_no_op.user_data) + filename = "${path.module}/rendered/al2/eks-mng-no-op.sh" +} + +resource "local_file" 
"eks_mng_al2_additional" { + content = base64decode(module.eks_mng_al2_additional.user_data) + filename = "${path.module}/rendered/al2/eks-mng-additional.txt" +} + +resource "local_file" "eks_mng_al2_custom_ami" { + content = base64decode(module.eks_mng_al2_custom_ami.user_data) + filename = "${path.module}/rendered/al2/eks-mng-custom-ami.sh" +} + +resource "local_file" "eks_mng_al2_custom_ami_ipv6" { + content = base64decode(module.eks_mng_al2_custom_ami_ipv6.user_data) + filename = "${path.module}/rendered/al2/eks-mng-custom-ami-ipv6.sh" +} + +resource "local_file" "eks_mng_al2_custom_template" { + content = base64decode(module.eks_mng_al2_custom_template.user_data) + filename = "${path.module}/rendered/al2/eks-mng-custom-template.sh" } -output "eks_mng_linux_additional" { - description = "Base64 decoded user data rendered for the provided inputs" - value = base64decode(module.eks_mng_linux_additional.user_data) +################################################################################ +# EKS managed node group - AL2023 +################################################################################ + +resource "local_file" "eks_mng_al2023_no_op" { + content = base64decode(module.eks_mng_al2023_no_op.user_data) + filename = "${path.module}/rendered/al2023/eks-mng-no-op.txt" } -output "eks_mng_linux_custom_ami" { - description = "Base64 decoded user data rendered for the provided inputs" - value = base64decode(module.eks_mng_linux_custom_ami.user_data) +resource "local_file" "eks_mng_al2023_additional" { + content = base64decode(module.eks_mng_al2023_additional.user_data) + filename = "${path.module}/rendered/al2023/eks-mng-additional.txt" } -output "eks_mng_linux_custom_template" { - description = "Base64 decoded user data rendered for the provided inputs" - value = base64decode(module.eks_mng_linux_custom_template.user_data) +resource "local_file" "eks_mng_al2023_custom_ami" { + content = base64decode(module.eks_mng_al2023_custom_ami.user_data) + 
filename = "${path.module}/rendered/al2023/eks-mng-custom-ami.txt" } -# EKS managed node group - bottlerocket -output "eks_mng_bottlerocket_no_op" { - description = "Base64 decoded user data rendered for the provided inputs" - value = base64decode(module.eks_mng_bottlerocket_no_op.user_data) +resource "local_file" "eks_mng_al2023_custom_template" { + content = base64decode(module.eks_mng_al2023_custom_template.user_data) + filename = "${path.module}/rendered/al2023/eks-mng-custom-template.txt" } -output "eks_mng_bottlerocket_additional" { - description = "Base64 decoded user data rendered for the provided inputs" - value = base64decode(module.eks_mng_bottlerocket_additional.user_data) +################################################################################ +# EKS managed node group - Bottlerocket +################################################################################ + +resource "local_file" "eks_mng_bottlerocket_no_op" { + content = base64decode(module.eks_mng_bottlerocket_no_op.user_data) + filename = "${path.module}/rendered/bottlerocket/eks-mng-no-op.toml" } -output "eks_mng_bottlerocket_custom_ami" { - description = "Base64 decoded user data rendered for the provided inputs" - value = base64decode(module.eks_mng_bottlerocket_custom_ami.user_data) +resource "local_file" "eks_mng_bottlerocket_additional" { + content = base64decode(module.eks_mng_bottlerocket_additional.user_data) + filename = "${path.module}/rendered/bottlerocket/eks-mng-additional.toml" } -output "eks_mng_bottlerocket_custom_template" { - description = "Base64 decoded user data rendered for the provided inputs" - value = base64decode(module.eks_mng_bottlerocket_custom_template.user_data) +resource "local_file" "eks_mng_bottlerocket_custom_ami" { + content = base64decode(module.eks_mng_bottlerocket_custom_ami.user_data) + filename = "${path.module}/rendered/bottlerocket/eks-mng-custom-ami.toml" } -# Self managed node group - linux -output "self_mng_linux_no_op" { - description 
= "Base64 decoded user data rendered for the provided inputs" - value = base64decode(module.self_mng_linux_no_op.user_data) +resource "local_file" "eks_mng_bottlerocket_custom_template" { + content = base64decode(module.eks_mng_bottlerocket_custom_template.user_data) + filename = "${path.module}/rendered/bottlerocket/eks-mng-custom-template.toml" } -output "self_mng_linux_bootstrap" { - description = "Base64 decoded user data rendered for the provided inputs" - value = base64decode(module.self_mng_linux_bootstrap.user_data) +################################################################################ +# EKS managed node group - Windows +################################################################################ + +resource "local_file" "eks_mng_windows_no_op" { + content = base64decode(module.eks_mng_windows_no_op.user_data) + filename = "${path.module}/rendered/windows/eks-mng-no-op.ps1" } -output "self_mng_linux_custom_template" { - description = "Base64 decoded user data rendered for the provided inputs" - value = base64decode(module.self_mng_linux_custom_template.user_data) +resource "local_file" "eks_mng_windows_additional" { + content = base64decode(module.eks_mng_windows_additional.user_data) + filename = "${path.module}/rendered/windows/eks-mng-additional.ps1" } -# Self managed node group - bottlerocket -output "self_mng_bottlerocket_no_op" { - description = "Base64 decoded user data rendered for the provided inputs" - value = base64decode(module.self_mng_bottlerocket_no_op.user_data) +resource "local_file" "eks_mng_windows_custom_ami" { + content = base64decode(module.eks_mng_windows_custom_ami.user_data) + filename = "${path.module}/rendered/windows/eks-mng-custom-ami.ps1" +} + +resource "local_file" "eks_mng_windows_custom_template" { + content = base64decode(module.eks_mng_windows_custom_template.user_data) + filename = "${path.module}/rendered/windows/eks-mng-custom-template.ps1" +} + 
+################################################################################ +# Self-managed node group - AL2 +################################################################################ + +resource "local_file" "self_mng_al2_no_op" { + content = base64decode(module.self_mng_al2_no_op.user_data) + filename = "${path.module}/rendered/al2/self-mng-no-op.sh" } -output "self_mng_bottlerocket_bootstrap" { - description = "Base64 decoded user data rendered for the provided inputs" - value = base64decode(module.self_mng_bottlerocket_bootstrap.user_data) +resource "local_file" "self_mng_al2_bootstrap" { + content = base64decode(module.self_mng_al2_bootstrap.user_data) + filename = "${path.module}/rendered/al2/self-mng-bootstrap.sh" } -output "self_mng_bottlerocket_custom_template" { - description = "Base64 decoded user data rendered for the provided inputs" - value = base64decode(module.self_mng_bottlerocket_custom_template.user_data) +resource "local_file" "self_mng_al2_bootstrap_ipv6" { + content = base64decode(module.self_mng_al2_bootstrap_ipv6.user_data) + filename = "${path.module}/rendered/al2/self-mng-bootstrap-ipv6.sh" } -# Self managed node group - windows -output "self_mng_windows_no_op" { - description = "Base64 decoded user data rendered for the provided inputs" - value = base64decode(module.self_mng_windows_no_op.user_data) +resource "local_file" "self_mng_al2_custom_template" { + content = base64decode(module.self_mng_al2_custom_template.user_data) + filename = "${path.module}/rendered/al2/self-mng-custom-template.sh" +} + +################################################################################ +# Self-managed node group - AL2023 +################################################################################ + +resource "local_file" "self_mng_al2023_no_op" { + content = base64decode(module.self_mng_al2023_no_op.user_data) + filename = "${path.module}/rendered/al2023/self-mng-no-op.txt" +} + +resource "local_file" 
"self_mng_al2023_bootstrap" { + content = base64decode(module.self_mng_al2023_bootstrap.user_data) + filename = "${path.module}/rendered/al2023/self-mng-bootstrap.txt" +} + +resource "local_file" "self_mng_al2023_custom_template" { + content = base64decode(module.self_mng_al2023_custom_template.user_data) + filename = "${path.module}/rendered/al2023/self-mng-custom-template.txt" +} + +################################################################################ +# Self-managed node group - Bottlerocket +################################################################################ + +resource "local_file" "self_mng_bottlerocket_no_op" { + content = base64decode(module.self_mng_bottlerocket_no_op.user_data) + filename = "${path.module}/rendered/bottlerocket/self-mng-no-op.toml" +} + +resource "local_file" "self_mng_bottlerocket_bootstrap" { + content = base64decode(module.self_mng_bottlerocket_bootstrap.user_data) + filename = "${path.module}/rendered/bottlerocket/self-mng-bootstrap.toml" +} + +resource "local_file" "self_mng_bottlerocket_custom_template" { + content = base64decode(module.self_mng_bottlerocket_custom_template.user_data) + filename = "${path.module}/rendered/bottlerocket/self-mng-custom-template.toml" +} + +################################################################################ +# Self-managed node group - Windows +################################################################################ + +resource "local_file" "self_mng_windows_no_op" { + content = base64decode(module.self_mng_windows_no_op.user_data) + filename = "${path.module}/rendered/windows/self-mng-no-op.ps1" } -output "self_mng_windows_bootstrap" { - description = "Base64 decoded user data rendered for the provided inputs" - value = base64decode(module.self_mng_windows_bootstrap.user_data) +resource "local_file" "self_mng_windows_bootstrap" { + content = base64decode(module.self_mng_windows_bootstrap.user_data) + filename = 
"${path.module}/rendered/windows/self-mng-bootstrap.ps1" } -output "self_mng_windows_custom_template" { - description = "Base64 decoded user data rendered for the provided inputs" - value = base64decode(module.self_mng_windows_custom_template.user_data) +resource "local_file" "self_mng_windows_custom_template" { + content = base64decode(module.self_mng_windows_custom_template.user_data) + filename = "${path.module}/rendered/windows/self-mng-custom-template.ps1" } diff --git a/examples/user_data/rendered/al2/eks-mng-additional.txt b/examples/user_data/rendered/al2/eks-mng-additional.txt new file mode 100755 index 0000000000..151f0cba7a --- /dev/null +++ b/examples/user_data/rendered/al2/eks-mng-additional.txt @@ -0,0 +1,11 @@ +Content-Type: multipart/mixed; boundary="//" +MIME-Version: 1.0 + +--// +Content-Transfer-Encoding: 7bit +Content-Type: text/x-shellscript +Mime-Version: 1.0 + +export USE_MAX_PODS=false + +--//-- diff --git a/examples/user_data/rendered/al2/eks-mng-custom-ami-ipv6.sh b/examples/user_data/rendered/al2/eks-mng-custom-ami-ipv6.sh new file mode 100755 index 0000000000..fceb7e3571 --- /dev/null +++ b/examples/user_data/rendered/al2/eks-mng-custom-ami-ipv6.sh @@ -0,0 +1,8 @@ +#!/bin/bash +set -e +export FOO=bar 
+B64_CLUSTER_CA=LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKbXFqQ1VqNGdGR2w3ZW5PeWthWnZ2RjROOTVOUEZCM2o0cGhVZUsrWGFtN2ZSQnZya0d6OGxKZmZEZWF2b2plTwpQK2xOZFlqdHZncmxCUEpYdHZIZmFzTzYxVzdIZmdWQ2EvamdRM2w3RmkvL1dpQmxFOG9oWUZkdWpjc0s1SXM2CnNkbk5KTTNYUWN2TysrSitkV09NT2ZlNzlsSWdncmdQLzgvRU9CYkw3eUY1aU1hS3lsb1RHL1V3TlhPUWt3ZUcKblBNcjdiUmdkQ1NCZTlXYXowOGdGRmlxV2FOditsTDhsODBTdFZLcWVNVlUxbjQyejVwOVpQRTd4T2l6L0xTNQpYV2lXWkVkT3pMN0xBWGVCS2gzdkhnczFxMkI2d1BKZnZnS1NzWllQRGFpZTloT1NNOUJkNFNPY3JrZTRYSVBOCkVvcXVhMlYrUDRlTWJEQzhMUkVWRDdCdVZDdWdMTldWOTBoL3VJUy9WU2VOcEdUOGVScE5DakszSjc2aFlsWm8KWjNGRG5QWUY0MWpWTHhiOXF0U1ROdEp6amYwWXBEYnFWci9xZzNmQWlxbVorMzd3YWM1eHlqMDZ4cmlaRUgzZgpUM002d2lCUEVHYVlGeWN5TmNYTk5aYW9DWDJVL0N1d2JsUHAKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ== +API_SERVER_URL=https://012345678903AB2BAE5D1E0BFE0E2B50.gr7.us-east-1.eks.amazonaws.com +/etc/eks/bootstrap.sh ex-user-data --kubelet-extra-args '--instance-type t3a.large' --b64-cluster-ca $B64_CLUSTER_CA --apiserver-endpoint $API_SERVER_URL \ + --ip-family ipv6 --service-ipv6-cidr fdd3:7636:68bc::/108 +echo "All done" diff --git a/examples/user_data/rendered/al2/eks-mng-custom-ami.sh b/examples/user_data/rendered/al2/eks-mng-custom-ami.sh new file mode 100755 index 0000000000..c7d92a7ce4 --- /dev/null +++ b/examples/user_data/rendered/al2/eks-mng-custom-ami.sh @@ -0,0 +1,8 @@ +#!/bin/bash +set -e +export FOO=bar 
+B64_CLUSTER_CA=LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKbXFqQ1VqNGdGR2w3ZW5PeWthWnZ2RjROOTVOUEZCM2o0cGhVZUsrWGFtN2ZSQnZya0d6OGxKZmZEZWF2b2plTwpQK2xOZFlqdHZncmxCUEpYdHZIZmFzTzYxVzdIZmdWQ2EvamdRM2w3RmkvL1dpQmxFOG9oWUZkdWpjc0s1SXM2CnNkbk5KTTNYUWN2TysrSitkV09NT2ZlNzlsSWdncmdQLzgvRU9CYkw3eUY1aU1hS3lsb1RHL1V3TlhPUWt3ZUcKblBNcjdiUmdkQ1NCZTlXYXowOGdGRmlxV2FOditsTDhsODBTdFZLcWVNVlUxbjQyejVwOVpQRTd4T2l6L0xTNQpYV2lXWkVkT3pMN0xBWGVCS2gzdkhnczFxMkI2d1BKZnZnS1NzWllQRGFpZTloT1NNOUJkNFNPY3JrZTRYSVBOCkVvcXVhMlYrUDRlTWJEQzhMUkVWRDdCdVZDdWdMTldWOTBoL3VJUy9WU2VOcEdUOGVScE5DakszSjc2aFlsWm8KWjNGRG5QWUY0MWpWTHhiOXF0U1ROdEp6amYwWXBEYnFWci9xZzNmQWlxbVorMzd3YWM1eHlqMDZ4cmlaRUgzZgpUM002d2lCUEVHYVlGeWN5TmNYTk5aYW9DWDJVL0N1d2JsUHAKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ== +API_SERVER_URL=https://012345678903AB2BAE5D1E0BFE0E2B50.gr7.us-east-1.eks.amazonaws.com +/etc/eks/bootstrap.sh ex-user-data --kubelet-extra-args '--instance-type t3a.large' --b64-cluster-ca $B64_CLUSTER_CA --apiserver-endpoint $API_SERVER_URL \ + --ip-family ipv4 --service-ipv4-cidr 172.16.0.0/16 +echo "All done" diff --git a/examples/user_data/rendered/al2/eks-mng-custom-template.sh b/examples/user_data/rendered/al2/eks-mng-custom-template.sh new file mode 100755 index 0000000000..e18460fa1d --- /dev/null +++ b/examples/user_data/rendered/al2/eks-mng-custom-template.sh @@ -0,0 +1,12 @@ +#!/bin/bash +set -ex + +echo "foo" +export FOO=bar + +# Custom user data template provided for rendering 
+B64_CLUSTER_CA=LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKbXFqQ1VqNGdGR2w3ZW5PeWthWnZ2RjROOTVOUEZCM2o0cGhVZUsrWGFtN2ZSQnZya0d6OGxKZmZEZWF2b2plTwpQK2xOZFlqdHZncmxCUEpYdHZIZmFzTzYxVzdIZmdWQ2EvamdRM2w3RmkvL1dpQmxFOG9oWUZkdWpjc0s1SXM2CnNkbk5KTTNYUWN2TysrSitkV09NT2ZlNzlsSWdncmdQLzgvRU9CYkw3eUY1aU1hS3lsb1RHL1V3TlhPUWt3ZUcKblBNcjdiUmdkQ1NCZTlXYXowOGdGRmlxV2FOditsTDhsODBTdFZLcWVNVlUxbjQyejVwOVpQRTd4T2l6L0xTNQpYV2lXWkVkT3pMN0xBWGVCS2gzdkhnczFxMkI2d1BKZnZnS1NzWllQRGFpZTloT1NNOUJkNFNPY3JrZTRYSVBOCkVvcXVhMlYrUDRlTWJEQzhMUkVWRDdCdVZDdWdMTldWOTBoL3VJUy9WU2VOcEdUOGVScE5DakszSjc2aFlsWm8KWjNGRG5QWUY0MWpWTHhiOXF0U1ROdEp6amYwWXBEYnFWci9xZzNmQWlxbVorMzd3YWM1eHlqMDZ4cmlaRUgzZgpUM002d2lCUEVHYVlGeWN5TmNYTk5aYW9DWDJVL0N1d2JsUHAKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ== +API_SERVER_URL=https://012345678903AB2BAE5D1E0BFE0E2B50.gr7.us-east-1.eks.amazonaws.com +/etc/eks/bootstrap.sh ex-user-data --kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot' --b64-cluster-ca $B64_CLUSTER_CA --apiserver-endpoint $API_SERVER_URL \ + --ip-family ipv4 --service-ipv4-cidr 172.16.0.0/16 +echo "All done" diff --git a/examples/complete/variables.tf b/examples/user_data/rendered/al2/eks-mng-no-op.sh old mode 100644 new mode 100755 similarity index 100% rename from examples/complete/variables.tf rename to examples/user_data/rendered/al2/eks-mng-no-op.sh diff --git a/examples/user_data/rendered/al2/self-mng-bootstrap-ipv6.sh b/examples/user_data/rendered/al2/self-mng-bootstrap-ipv6.sh new file mode 100755 index 0000000000..b6fd557a13 --- /dev/null +++ b/examples/user_data/rendered/al2/self-mng-bootstrap-ipv6.sh @@ -0,0 +1,9 @@ +#!/bin/bash +set -e +echo "foo" +export FOO=bar 
+B64_CLUSTER_CA=LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKbXFqQ1VqNGdGR2w3ZW5PeWthWnZ2RjROOTVOUEZCM2o0cGhVZUsrWGFtN2ZSQnZya0d6OGxKZmZEZWF2b2plTwpQK2xOZFlqdHZncmxCUEpYdHZIZmFzTzYxVzdIZmdWQ2EvamdRM2w3RmkvL1dpQmxFOG9oWUZkdWpjc0s1SXM2CnNkbk5KTTNYUWN2TysrSitkV09NT2ZlNzlsSWdncmdQLzgvRU9CYkw3eUY1aU1hS3lsb1RHL1V3TlhPUWt3ZUcKblBNcjdiUmdkQ1NCZTlXYXowOGdGRmlxV2FOditsTDhsODBTdFZLcWVNVlUxbjQyejVwOVpQRTd4T2l6L0xTNQpYV2lXWkVkT3pMN0xBWGVCS2gzdkhnczFxMkI2d1BKZnZnS1NzWllQRGFpZTloT1NNOUJkNFNPY3JrZTRYSVBOCkVvcXVhMlYrUDRlTWJEQzhMUkVWRDdCdVZDdWdMTldWOTBoL3VJUy9WU2VOcEdUOGVScE5DakszSjc2aFlsWm8KWjNGRG5QWUY0MWpWTHhiOXF0U1ROdEp6amYwWXBEYnFWci9xZzNmQWlxbVorMzd3YWM1eHlqMDZ4cmlaRUgzZgpUM002d2lCUEVHYVlGeWN5TmNYTk5aYW9DWDJVL0N1d2JsUHAKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ== +API_SERVER_URL=https://012345678903AB2BAE5D1E0BFE0E2B50.gr7.us-east-1.eks.amazonaws.com +/etc/eks/bootstrap.sh ex-user-data --kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot' --b64-cluster-ca $B64_CLUSTER_CA --apiserver-endpoint $API_SERVER_URL \ + --ip-family ipv6 --service-ipv6-cidr fdd3:7636:68bc::/108 +echo "All done" diff --git a/examples/user_data/rendered/al2/self-mng-bootstrap.sh b/examples/user_data/rendered/al2/self-mng-bootstrap.sh new file mode 100755 index 0000000000..7fcd81973e --- /dev/null +++ b/examples/user_data/rendered/al2/self-mng-bootstrap.sh @@ -0,0 +1,9 @@ +#!/bin/bash +set -e +echo "foo" +export FOO=bar 
+B64_CLUSTER_CA=LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKbXFqQ1VqNGdGR2w3ZW5PeWthWnZ2RjROOTVOUEZCM2o0cGhVZUsrWGFtN2ZSQnZya0d6OGxKZmZEZWF2b2plTwpQK2xOZFlqdHZncmxCUEpYdHZIZmFzTzYxVzdIZmdWQ2EvamdRM2w3RmkvL1dpQmxFOG9oWUZkdWpjc0s1SXM2CnNkbk5KTTNYUWN2TysrSitkV09NT2ZlNzlsSWdncmdQLzgvRU9CYkw3eUY1aU1hS3lsb1RHL1V3TlhPUWt3ZUcKblBNcjdiUmdkQ1NCZTlXYXowOGdGRmlxV2FOditsTDhsODBTdFZLcWVNVlUxbjQyejVwOVpQRTd4T2l6L0xTNQpYV2lXWkVkT3pMN0xBWGVCS2gzdkhnczFxMkI2d1BKZnZnS1NzWllQRGFpZTloT1NNOUJkNFNPY3JrZTRYSVBOCkVvcXVhMlYrUDRlTWJEQzhMUkVWRDdCdVZDdWdMTldWOTBoL3VJUy9WU2VOcEdUOGVScE5DakszSjc2aFlsWm8KWjNGRG5QWUY0MWpWTHhiOXF0U1ROdEp6amYwWXBEYnFWci9xZzNmQWlxbVorMzd3YWM1eHlqMDZ4cmlaRUgzZgpUM002d2lCUEVHYVlGeWN5TmNYTk5aYW9DWDJVL0N1d2JsUHAKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ== +API_SERVER_URL=https://012345678903AB2BAE5D1E0BFE0E2B50.gr7.us-east-1.eks.amazonaws.com +/etc/eks/bootstrap.sh ex-user-data --kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot' --b64-cluster-ca $B64_CLUSTER_CA --apiserver-endpoint $API_SERVER_URL \ + --ip-family ipv4 --service-ipv4-cidr 172.16.0.0/16 +echo "All done" diff --git a/examples/user_data/rendered/al2/self-mng-custom-template.sh b/examples/user_data/rendered/al2/self-mng-custom-template.sh new file mode 100755 index 0000000000..e18460fa1d --- /dev/null +++ b/examples/user_data/rendered/al2/self-mng-custom-template.sh @@ -0,0 +1,12 @@ +#!/bin/bash +set -ex + +echo "foo" +export FOO=bar + +# Custom user data template provided for rendering 
+B64_CLUSTER_CA=LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKbXFqQ1VqNGdGR2w3ZW5PeWthWnZ2RjROOTVOUEZCM2o0cGhVZUsrWGFtN2ZSQnZya0d6OGxKZmZEZWF2b2plTwpQK2xOZFlqdHZncmxCUEpYdHZIZmFzTzYxVzdIZmdWQ2EvamdRM2w3RmkvL1dpQmxFOG9oWUZkdWpjc0s1SXM2CnNkbk5KTTNYUWN2TysrSitkV09NT2ZlNzlsSWdncmdQLzgvRU9CYkw3eUY1aU1hS3lsb1RHL1V3TlhPUWt3ZUcKblBNcjdiUmdkQ1NCZTlXYXowOGdGRmlxV2FOditsTDhsODBTdFZLcWVNVlUxbjQyejVwOVpQRTd4T2l6L0xTNQpYV2lXWkVkT3pMN0xBWGVCS2gzdkhnczFxMkI2d1BKZnZnS1NzWllQRGFpZTloT1NNOUJkNFNPY3JrZTRYSVBOCkVvcXVhMlYrUDRlTWJEQzhMUkVWRDdCdVZDdWdMTldWOTBoL3VJUy9WU2VOcEdUOGVScE5DakszSjc2aFlsWm8KWjNGRG5QWUY0MWpWTHhiOXF0U1ROdEp6amYwWXBEYnFWci9xZzNmQWlxbVorMzd3YWM1eHlqMDZ4cmlaRUgzZgpUM002d2lCUEVHYVlGeWN5TmNYTk5aYW9DWDJVL0N1d2JsUHAKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ== +API_SERVER_URL=https://012345678903AB2BAE5D1E0BFE0E2B50.gr7.us-east-1.eks.amazonaws.com +/etc/eks/bootstrap.sh ex-user-data --kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot' --b64-cluster-ca $B64_CLUSTER_CA --apiserver-endpoint $API_SERVER_URL \ + --ip-family ipv4 --service-ipv4-cidr 172.16.0.0/16 +echo "All done" diff --git a/examples/user_data/rendered/al2/self-mng-no-op.sh b/examples/user_data/rendered/al2/self-mng-no-op.sh new file mode 100755 index 0000000000..e69de29bb2 diff --git a/examples/user_data/rendered/al2023/eks-mng-additional.txt b/examples/user_data/rendered/al2023/eks-mng-additional.txt new file mode 100755 index 0000000000..fe3c75c898 --- /dev/null +++ b/examples/user_data/rendered/al2023/eks-mng-additional.txt @@ -0,0 +1,19 @@ +Content-Type: multipart/mixed; boundary="MIMEBOUNDARY" +MIME-Version: 1.0 + +--MIMEBOUNDARY +Content-Transfer-Encoding: 7bit +Content-Type: application/node.eks.aws +Mime-Version: 1.0 + +--- +apiVersion: node.eks.aws/v1alpha1 +kind: NodeConfig +spec: + kubelet: + config: + shutdownGracePeriod: 30s + featureGates: + DisableKubeletCloudCredentialProviders: true + +--MIMEBOUNDARY-- diff 
--git a/examples/user_data/rendered/al2023/eks-mng-custom-ami.txt b/examples/user_data/rendered/al2023/eks-mng-custom-ami.txt new file mode 100755 index 0000000000..46362c2030 --- /dev/null +++ b/examples/user_data/rendered/al2023/eks-mng-custom-ami.txt @@ -0,0 +1,41 @@ +Content-Type: multipart/mixed; boundary="MIMEBOUNDARY" +MIME-Version: 1.0 + +--MIMEBOUNDARY +Content-Transfer-Encoding: 7bit +Content-Type: application/node.eks.aws +Mime-Version: 1.0 + +--- +apiVersion: node.eks.aws/v1alpha1 +kind: NodeConfig +spec: + kubelet: + config: + shutdownGracePeriod: 30s + featureGates: + DisableKubeletCloudCredentialProviders: true + +--MIMEBOUNDARY +Content-Transfer-Encoding: 7bit +Content-Type: application/node.eks.aws +Mime-Version: 1.0 + +--- +apiVersion: node.eks.aws/v1alpha1 +kind: NodeConfig +spec: + cluster: + name: ex-user-data + apiServerEndpoint: https://012345678903AB2BAE5D1E0BFE0E2B50.gr7.us-east-1.eks.amazonaws.com + certificateAuthority: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKbXFqQ1VqNGdGR2w3ZW5PeWthWnZ2RjROOTVOUEZCM2o0cGhVZUsrWGFtN2ZSQnZya0d6OGxKZmZEZWF2b2plTwpQK2xOZFlqdHZncmxCUEpYdHZIZmFzTzYxVzdIZmdWQ2EvamdRM2w3RmkvL1dpQmxFOG9oWUZkdWpjc0s1SXM2CnNkbk5KTTNYUWN2TysrSitkV09NT2ZlNzlsSWdncmdQLzgvRU9CYkw3eUY1aU1hS3lsb1RHL1V3TlhPUWt3ZUcKblBNcjdiUmdkQ1NCZTlXYXowOGdGRmlxV2FOditsTDhsODBTdFZLcWVNVlUxbjQyejVwOVpQRTd4T2l6L0xTNQpYV2lXWkVkT3pMN0xBWGVCS2gzdkhnczFxMkI2d1BKZnZnS1NzWllQRGFpZTloT1NNOUJkNFNPY3JrZTRYSVBOCkVvcXVhMlYrUDRlTWJEQzhMUkVWRDdCdVZDdWdMTldWOTBoL3VJUy9WU2VOcEdUOGVScE5DakszSjc2aFlsWm8KWjNGRG5QWUY0MWpWTHhiOXF0U1ROdEp6amYwWXBEYnFWci9xZzNmQWlxbVorMzd3YWM1eHlqMDZ4cmlaRUgzZgpUM002d2lCUEVHYVlGeWN5TmNYTk5aYW9DWDJVL0N1d2JsUHAKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ== + cidr: 192.168.0.0/16 + +--MIMEBOUNDARY +Content-Transfer-Encoding: 7bit +Content-Type: text/x-shellscript; charset="us-ascii" +Mime-Version: 1.0 + +echo "All done" + +--MIMEBOUNDARY-- diff --git 
a/examples/user_data/rendered/al2023/eks-mng-custom-template.txt b/examples/user_data/rendered/al2023/eks-mng-custom-template.txt new file mode 100755 index 0000000000..a97e188c83 --- /dev/null +++ b/examples/user_data/rendered/al2023/eks-mng-custom-template.txt @@ -0,0 +1,45 @@ +Content-Type: multipart/mixed; boundary="MIMEBOUNDARY" +MIME-Version: 1.0 + +--MIMEBOUNDARY +Content-Transfer-Encoding: 7bit +Content-Type: application/node.eks.aws +Mime-Version: 1.0 + +--- +apiVersion: node.eks.aws/v1alpha1 +kind: NodeConfig +spec: + kubelet: + config: + shutdownGracePeriod: 30s + featureGates: + DisableKubeletCloudCredentialProviders: true + +--MIMEBOUNDARY +Content-Transfer-Encoding: 7bit +Content-Type: application/node.eks.aws +Mime-Version: 1.0 + +--- +apiVersion: node.eks.aws/v1alpha1 +kind: NodeConfig +spec: + cluster: + name: ex-user-data + apiServerEndpoint: https://012345678903AB2BAE5D1E0BFE0E2B50.gr7.us-east-1.eks.amazonaws.com + certificateAuthority: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKbXFqQ1VqNGdGR2w3ZW5PeWthWnZ2RjROOTVOUEZCM2o0cGhVZUsrWGFtN2ZSQnZya0d6OGxKZmZEZWF2b2plTwpQK2xOZFlqdHZncmxCUEpYdHZIZmFzTzYxVzdIZmdWQ2EvamdRM2w3RmkvL1dpQmxFOG9oWUZkdWpjc0s1SXM2CnNkbk5KTTNYUWN2TysrSitkV09NT2ZlNzlsSWdncmdQLzgvRU9CYkw3eUY1aU1hS3lsb1RHL1V3TlhPUWt3ZUcKblBNcjdiUmdkQ1NCZTlXYXowOGdGRmlxV2FOditsTDhsODBTdFZLcWVNVlUxbjQyejVwOVpQRTd4T2l6L0xTNQpYV2lXWkVkT3pMN0xBWGVCS2gzdkhnczFxMkI2d1BKZnZnS1NzWllQRGFpZTloT1NNOUJkNFNPY3JrZTRYSVBOCkVvcXVhMlYrUDRlTWJEQzhMUkVWRDdCdVZDdWdMTldWOTBoL3VJUy9WU2VOcEdUOGVScE5DakszSjc2aFlsWm8KWjNGRG5QWUY0MWpWTHhiOXF0U1ROdEp6amYwWXBEYnFWci9xZzNmQWlxbVorMzd3YWM1eHlqMDZ4cmlaRUgzZgpUM002d2lCUEVHYVlGeWN5TmNYTk5aYW9DWDJVL0N1d2JsUHAKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ== + cidr: 192.168.0.0/16 + containerd: + config: | + [plugins."io.containerd.grpc.v1.cri".containerd] + discard_unpacked_layers = false + +--MIMEBOUNDARY +Content-Transfer-Encoding: 7bit +Content-Type: 
text/x-shellscript; charset="us-ascii" +Mime-Version: 1.0 + +echo "All done" + +--MIMEBOUNDARY-- diff --git a/examples/user_data/rendered/al2023/eks-mng-no-op.txt b/examples/user_data/rendered/al2023/eks-mng-no-op.txt new file mode 100755 index 0000000000..e69de29bb2 diff --git a/examples/user_data/rendered/al2023/self-mng-bootstrap.txt b/examples/user_data/rendered/al2023/self-mng-bootstrap.txt new file mode 100755 index 0000000000..46362c2030 --- /dev/null +++ b/examples/user_data/rendered/al2023/self-mng-bootstrap.txt @@ -0,0 +1,41 @@ +Content-Type: multipart/mixed; boundary="MIMEBOUNDARY" +MIME-Version: 1.0 + +--MIMEBOUNDARY +Content-Transfer-Encoding: 7bit +Content-Type: application/node.eks.aws +Mime-Version: 1.0 + +--- +apiVersion: node.eks.aws/v1alpha1 +kind: NodeConfig +spec: + kubelet: + config: + shutdownGracePeriod: 30s + featureGates: + DisableKubeletCloudCredentialProviders: true + +--MIMEBOUNDARY +Content-Transfer-Encoding: 7bit +Content-Type: application/node.eks.aws +Mime-Version: 1.0 + +--- +apiVersion: node.eks.aws/v1alpha1 +kind: NodeConfig +spec: + cluster: + name: ex-user-data + apiServerEndpoint: https://012345678903AB2BAE5D1E0BFE0E2B50.gr7.us-east-1.eks.amazonaws.com + certificateAuthority: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKbXFqQ1VqNGdGR2w3ZW5PeWthWnZ2RjROOTVOUEZCM2o0cGhVZUsrWGFtN2ZSQnZya0d6OGxKZmZEZWF2b2plTwpQK2xOZFlqdHZncmxCUEpYdHZIZmFzTzYxVzdIZmdWQ2EvamdRM2w3RmkvL1dpQmxFOG9oWUZkdWpjc0s1SXM2CnNkbk5KTTNYUWN2TysrSitkV09NT2ZlNzlsSWdncmdQLzgvRU9CYkw3eUY1aU1hS3lsb1RHL1V3TlhPUWt3ZUcKblBNcjdiUmdkQ1NCZTlXYXowOGdGRmlxV2FOditsTDhsODBTdFZLcWVNVlUxbjQyejVwOVpQRTd4T2l6L0xTNQpYV2lXWkVkT3pMN0xBWGVCS2gzdkhnczFxMkI2d1BKZnZnS1NzWllQRGFpZTloT1NNOUJkNFNPY3JrZTRYSVBOCkVvcXVhMlYrUDRlTWJEQzhMUkVWRDdCdVZDdWdMTldWOTBoL3VJUy9WU2VOcEdUOGVScE5DakszSjc2aFlsWm8KWjNGRG5QWUY0MWpWTHhiOXF0U1ROdEp6amYwWXBEYnFWci9xZzNmQWlxbVorMzd3YWM1eHlqMDZ4cmlaRUgzZgpUM002d2lCUEVHYVlGeWN5TmNYTk5aYW9DWDJVL0N1d2JsUHAKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ== + cidr: 192.168.0.0/16 + +--MIMEBOUNDARY +Content-Transfer-Encoding: 7bit +Content-Type: text/x-shellscript; charset="us-ascii" +Mime-Version: 1.0 + +echo "All done" + +--MIMEBOUNDARY-- diff --git a/examples/user_data/rendered/al2023/self-mng-custom-template.txt b/examples/user_data/rendered/al2023/self-mng-custom-template.txt new file mode 100755 index 0000000000..a97e188c83 --- /dev/null +++ b/examples/user_data/rendered/al2023/self-mng-custom-template.txt @@ -0,0 +1,45 @@ +Content-Type: multipart/mixed; boundary="MIMEBOUNDARY" +MIME-Version: 1.0 + +--MIMEBOUNDARY +Content-Transfer-Encoding: 7bit +Content-Type: application/node.eks.aws +Mime-Version: 1.0 + +--- +apiVersion: node.eks.aws/v1alpha1 +kind: NodeConfig +spec: + kubelet: + config: + shutdownGracePeriod: 30s + featureGates: + DisableKubeletCloudCredentialProviders: true + +--MIMEBOUNDARY +Content-Transfer-Encoding: 7bit +Content-Type: application/node.eks.aws +Mime-Version: 1.0 + +--- +apiVersion: node.eks.aws/v1alpha1 +kind: NodeConfig +spec: + cluster: + name: ex-user-data + apiServerEndpoint: https://012345678903AB2BAE5D1E0BFE0E2B50.gr7.us-east-1.eks.amazonaws.com + certificateAuthority: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKbXFqQ1VqNGdGR2w3ZW5PeWthWnZ2RjROOTVOUEZCM2o0cGhVZUsrWGFtN2ZSQnZya0d6OGxKZmZEZWF2b2plTwpQK2xOZFlqdHZncmxCUEpYdHZIZmFzTzYxVzdIZmdWQ2EvamdRM2w3RmkvL1dpQmxFOG9oWUZkdWpjc0s1SXM2CnNkbk5KTTNYUWN2TysrSitkV09NT2ZlNzlsSWdncmdQLzgvRU9CYkw3eUY1aU1hS3lsb1RHL1V3TlhPUWt3ZUcKblBNcjdiUmdkQ1NCZTlXYXowOGdGRmlxV2FOditsTDhsODBTdFZLcWVNVlUxbjQyejVwOVpQRTd4T2l6L0xTNQpYV2lXWkVkT3pMN0xBWGVCS2gzdkhnczFxMkI2d1BKZnZnS1NzWllQRGFpZTloT1NNOUJkNFNPY3JrZTRYSVBOCkVvcXVhMlYrUDRlTWJEQzhMUkVWRDdCdVZDdWdMTldWOTBoL3VJUy9WU2VOcEdUOGVScE5DakszSjc2aFlsWm8KWjNGRG5QWUY0MWpWTHhiOXF0U1ROdEp6amYwWXBEYnFWci9xZzNmQWlxbVorMzd3YWM1eHlqMDZ4cmlaRUgzZgpUM002d2lCUEVHYVlGeWN5TmNYTk5aYW9DWDJVL0N1d2JsUHAKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ== + cidr: 192.168.0.0/16 + containerd: + config: | + [plugins."io.containerd.grpc.v1.cri".containerd] + discard_unpacked_layers = false + +--MIMEBOUNDARY +Content-Transfer-Encoding: 7bit +Content-Type: text/x-shellscript; charset="us-ascii" +Mime-Version: 1.0 + +echo "All done" + +--MIMEBOUNDARY-- diff --git a/examples/user_data/rendered/al2023/self-mng-no-op.txt b/examples/user_data/rendered/al2023/self-mng-no-op.txt new file mode 100755 index 0000000000..e69de29bb2 diff --git a/examples/user_data/rendered/bottlerocket/eks-mng-additional.toml b/examples/user_data/rendered/bottlerocket/eks-mng-additional.toml new file mode 100755 index 0000000000..7ed4affaf6 --- /dev/null +++ b/examples/user_data/rendered/bottlerocket/eks-mng-additional.toml @@ -0,0 +1,3 @@ +# extra args added +[settings.kernel] +lockdown = "integrity" diff --git a/examples/user_data/rendered/bottlerocket/eks-mng-custom-ami.toml b/examples/user_data/rendered/bottlerocket/eks-mng-custom-ami.toml new file mode 100755 index 0000000000..38b0c46a0b --- /dev/null +++ b/examples/user_data/rendered/bottlerocket/eks-mng-custom-ami.toml @@ -0,0 +1,8 @@ +[settings.kubernetes] +"cluster-name" = "ex-user-data" 
+"api-server" = "https://012345678903AB2BAE5D1E0BFE0E2B50.gr7.us-east-1.eks.amazonaws.com" +"cluster-certificate" = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKbXFqQ1VqNGdGR2w3ZW5PeWthWnZ2RjROOTVOUEZCM2o0cGhVZUsrWGFtN2ZSQnZya0d6OGxKZmZEZWF2b2plTwpQK2xOZFlqdHZncmxCUEpYdHZIZmFzTzYxVzdIZmdWQ2EvamdRM2w3RmkvL1dpQmxFOG9oWUZkdWpjc0s1SXM2CnNkbk5KTTNYUWN2TysrSitkV09NT2ZlNzlsSWdncmdQLzgvRU9CYkw3eUY1aU1hS3lsb1RHL1V3TlhPUWt3ZUcKblBNcjdiUmdkQ1NCZTlXYXowOGdGRmlxV2FOditsTDhsODBTdFZLcWVNVlUxbjQyejVwOVpQRTd4T2l6L0xTNQpYV2lXWkVkT3pMN0xBWGVCS2gzdkhnczFxMkI2d1BKZnZnS1NzWllQRGFpZTloT1NNOUJkNFNPY3JrZTRYSVBOCkVvcXVhMlYrUDRlTWJEQzhMUkVWRDdCdVZDdWdMTldWOTBoL3VJUy9WU2VOcEdUOGVScE5DakszSjc2aFlsWm8KWjNGRG5QWUY0MWpWTHhiOXF0U1ROdEp6amYwWXBEYnFWci9xZzNmQWlxbVorMzd3YWM1eHlqMDZ4cmlaRUgzZgpUM002d2lCUEVHYVlGeWN5TmNYTk5aYW9DWDJVL0N1d2JsUHAKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==" +"cluster-dns-ip" = ["192.168.0.10", "169.254.20.10"] +# extra args added +[settings.kernel] +lockdown = "integrity" diff --git a/examples/user_data/rendered/bottlerocket/eks-mng-custom-template.toml b/examples/user_data/rendered/bottlerocket/eks-mng-custom-template.toml new file mode 100755 index 0000000000..c5c6774cfc --- /dev/null +++ b/examples/user_data/rendered/bottlerocket/eks-mng-custom-template.toml @@ -0,0 +1,9 @@ +# Custom user data template provided for rendering +[settings.kubernetes] +"cluster-name" = "ex-user-data" +"api-server" = "https://012345678903AB2BAE5D1E0BFE0E2B50.gr7.us-east-1.eks.amazonaws.com" +"cluster-certificate" = 
"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKbXFqQ1VqNGdGR2w3ZW5PeWthWnZ2RjROOTVOUEZCM2o0cGhVZUsrWGFtN2ZSQnZya0d6OGxKZmZEZWF2b2plTwpQK2xOZFlqdHZncmxCUEpYdHZIZmFzTzYxVzdIZmdWQ2EvamdRM2w3RmkvL1dpQmxFOG9oWUZkdWpjc0s1SXM2CnNkbk5KTTNYUWN2TysrSitkV09NT2ZlNzlsSWdncmdQLzgvRU9CYkw3eUY1aU1hS3lsb1RHL1V3TlhPUWt3ZUcKblBNcjdiUmdkQ1NCZTlXYXowOGdGRmlxV2FOditsTDhsODBTdFZLcWVNVlUxbjQyejVwOVpQRTd4T2l6L0xTNQpYV2lXWkVkT3pMN0xBWGVCS2gzdkhnczFxMkI2d1BKZnZnS1NzWllQRGFpZTloT1NNOUJkNFNPY3JrZTRYSVBOCkVvcXVhMlYrUDRlTWJEQzhMUkVWRDdCdVZDdWdMTldWOTBoL3VJUy9WU2VOcEdUOGVScE5DakszSjc2aFlsWm8KWjNGRG5QWUY0MWpWTHhiOXF0U1ROdEp6amYwWXBEYnFWci9xZzNmQWlxbVorMzd3YWM1eHlqMDZ4cmlaRUgzZgpUM002d2lCUEVHYVlGeWN5TmNYTk5aYW9DWDJVL0N1d2JsUHAKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==" + +# extra args added +[settings.kernel] +lockdown = "integrity" diff --git a/examples/user_data/rendered/bottlerocket/eks-mng-no-op.toml b/examples/user_data/rendered/bottlerocket/eks-mng-no-op.toml new file mode 100755 index 0000000000..e69de29bb2 diff --git a/examples/user_data/rendered/bottlerocket/self-mng-bootstrap.toml b/examples/user_data/rendered/bottlerocket/self-mng-bootstrap.toml new file mode 100755 index 0000000000..76f8b82dcd --- /dev/null +++ b/examples/user_data/rendered/bottlerocket/self-mng-bootstrap.toml @@ -0,0 +1,8 @@ +[settings.kubernetes] +"cluster-name" = "ex-user-data" +"api-server" = "https://012345678903AB2BAE5D1E0BFE0E2B50.gr7.us-east-1.eks.amazonaws.com" +"cluster-certificate" = 
"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKbXFqQ1VqNGdGR2w3ZW5PeWthWnZ2RjROOTVOUEZCM2o0cGhVZUsrWGFtN2ZSQnZya0d6OGxKZmZEZWF2b2plTwpQK2xOZFlqdHZncmxCUEpYdHZIZmFzTzYxVzdIZmdWQ2EvamdRM2w3RmkvL1dpQmxFOG9oWUZkdWpjc0s1SXM2CnNkbk5KTTNYUWN2TysrSitkV09NT2ZlNzlsSWdncmdQLzgvRU9CYkw3eUY1aU1hS3lsb1RHL1V3TlhPUWt3ZUcKblBNcjdiUmdkQ1NCZTlXYXowOGdGRmlxV2FOditsTDhsODBTdFZLcWVNVlUxbjQyejVwOVpQRTd4T2l6L0xTNQpYV2lXWkVkT3pMN0xBWGVCS2gzdkhnczFxMkI2d1BKZnZnS1NzWllQRGFpZTloT1NNOUJkNFNPY3JrZTRYSVBOCkVvcXVhMlYrUDRlTWJEQzhMUkVWRDdCdVZDdWdMTldWOTBoL3VJUy9WU2VOcEdUOGVScE5DakszSjc2aFlsWm8KWjNGRG5QWUY0MWpWTHhiOXF0U1ROdEp6amYwWXBEYnFWci9xZzNmQWlxbVorMzd3YWM1eHlqMDZ4cmlaRUgzZgpUM002d2lCUEVHYVlGeWN5TmNYTk5aYW9DWDJVL0N1d2JsUHAKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==" +"cluster-dns-ip" = ["192.168.0.10"] +# extra args added +[settings.kernel] +lockdown = "integrity" diff --git a/examples/user_data/rendered/bottlerocket/self-mng-custom-template.toml b/examples/user_data/rendered/bottlerocket/self-mng-custom-template.toml new file mode 100755 index 0000000000..c5c6774cfc --- /dev/null +++ b/examples/user_data/rendered/bottlerocket/self-mng-custom-template.toml @@ -0,0 +1,9 @@ +# Custom user data template provided for rendering +[settings.kubernetes] +"cluster-name" = "ex-user-data" +"api-server" = "https://012345678903AB2BAE5D1E0BFE0E2B50.gr7.us-east-1.eks.amazonaws.com" +"cluster-certificate" = 
"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKbXFqQ1VqNGdGR2w3ZW5PeWthWnZ2RjROOTVOUEZCM2o0cGhVZUsrWGFtN2ZSQnZya0d6OGxKZmZEZWF2b2plTwpQK2xOZFlqdHZncmxCUEpYdHZIZmFzTzYxVzdIZmdWQ2EvamdRM2w3RmkvL1dpQmxFOG9oWUZkdWpjc0s1SXM2CnNkbk5KTTNYUWN2TysrSitkV09NT2ZlNzlsSWdncmdQLzgvRU9CYkw3eUY1aU1hS3lsb1RHL1V3TlhPUWt3ZUcKblBNcjdiUmdkQ1NCZTlXYXowOGdGRmlxV2FOditsTDhsODBTdFZLcWVNVlUxbjQyejVwOVpQRTd4T2l6L0xTNQpYV2lXWkVkT3pMN0xBWGVCS2gzdkhnczFxMkI2d1BKZnZnS1NzWllQRGFpZTloT1NNOUJkNFNPY3JrZTRYSVBOCkVvcXVhMlYrUDRlTWJEQzhMUkVWRDdCdVZDdWdMTldWOTBoL3VJUy9WU2VOcEdUOGVScE5DakszSjc2aFlsWm8KWjNGRG5QWUY0MWpWTHhiOXF0U1ROdEp6amYwWXBEYnFWci9xZzNmQWlxbVorMzd3YWM1eHlqMDZ4cmlaRUgzZgpUM002d2lCUEVHYVlGeWN5TmNYTk5aYW9DWDJVL0N1d2JsUHAKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==" + +# extra args added +[settings.kernel] +lockdown = "integrity" diff --git a/examples/user_data/rendered/bottlerocket/self-mng-no-op.toml b/examples/user_data/rendered/bottlerocket/self-mng-no-op.toml new file mode 100755 index 0000000000..e69de29bb2 diff --git a/examples/user_data/rendered/windows/eks-mng-additional.ps1 b/examples/user_data/rendered/windows/eks-mng-additional.ps1 new file mode 100755 index 0000000000..0debfcf9ad --- /dev/null +++ b/examples/user_data/rendered/windows/eks-mng-additional.ps1 @@ -0,0 +1 @@ +[string]$Something = 'IDoNotKnowAnyPowerShell ¯\_(ツ)_/¯' diff --git a/examples/user_data/rendered/windows/eks-mng-custom-ami.ps1 b/examples/user_data/rendered/windows/eks-mng-custom-ami.ps1 new file mode 100755 index 0000000000..182195b707 --- /dev/null +++ b/examples/user_data/rendered/windows/eks-mng-custom-ami.ps1 @@ -0,0 +1,9 @@ + +[string]$Something = 'IDoNotKnowAnyPowerShell ¯\_(ツ)_/¯' +[string]$EKSBinDir = "$env:ProgramFiles\Amazon\EKS" +[string]$EKSBootstrapScriptName = 'Start-EKSBootstrap.ps1' +[string]$EKSBootstrapScriptFile = "$EKSBinDir\$EKSBootstrapScriptName" +& $EKSBootstrapScriptFile -EKSClusterName ex-user-data 
-APIServerEndpoint https://012345678903AB2BAE5D1E0BFE0E2B50.gr7.us-east-1.eks.amazonaws.com -Base64ClusterCA LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKbXFqQ1VqNGdGR2w3ZW5PeWthWnZ2RjROOTVOUEZCM2o0cGhVZUsrWGFtN2ZSQnZya0d6OGxKZmZEZWF2b2plTwpQK2xOZFlqdHZncmxCUEpYdHZIZmFzTzYxVzdIZmdWQ2EvamdRM2w3RmkvL1dpQmxFOG9oWUZkdWpjc0s1SXM2CnNkbk5KTTNYUWN2TysrSitkV09NT2ZlNzlsSWdncmdQLzgvRU9CYkw3eUY1aU1hS3lsb1RHL1V3TlhPUWt3ZUcKblBNcjdiUmdkQ1NCZTlXYXowOGdGRmlxV2FOditsTDhsODBTdFZLcWVNVlUxbjQyejVwOVpQRTd4T2l6L0xTNQpYV2lXWkVkT3pMN0xBWGVCS2gzdkhnczFxMkI2d1BKZnZnS1NzWllQRGFpZTloT1NNOUJkNFNPY3JrZTRYSVBOCkVvcXVhMlYrUDRlTWJEQzhMUkVWRDdCdVZDdWdMTldWOTBoL3VJUy9WU2VOcEdUOGVScE5DakszSjc2aFlsWm8KWjNGRG5QWUY0MWpWTHhiOXF0U1ROdEp6amYwWXBEYnFWci9xZzNmQWlxbVorMzd3YWM1eHlqMDZ4cmlaRUgzZgpUM002d2lCUEVHYVlGeWN5TmNYTk5aYW9DWDJVL0N1d2JsUHAKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ== -KubeletExtraArgs --node-labels=node.kubernetes.io/lifecycle=spot 3>&1 4>&1 5>&1 6>&1 +$LastError = if ($?) 
{ 0 } else { $Error[0].Exception.HResult } +[string]$Something = 'IStillDoNotKnowAnyPowerShell ¯\_(ツ)_/¯' + diff --git a/examples/user_data/rendered/windows/eks-mng-custom-template.ps1 b/examples/user_data/rendered/windows/eks-mng-custom-template.ps1 new file mode 100755 index 0000000000..aa4008c7e5 --- /dev/null +++ b/examples/user_data/rendered/windows/eks-mng-custom-template.ps1 @@ -0,0 +1,10 @@ +# Custom user data template provided for rendering + +[string]$Something = 'IDoNotKnowAnyPowerShell ¯\_(ツ)_/¯' +[string]$EKSBinDir = "$env:ProgramFiles\Amazon\EKS" +[string]$EKSBootstrapScriptName = 'Start-EKSBootstrap.ps1' +[string]$EKSBootstrapScriptFile = "$EKSBinDir\$EKSBootstrapScriptName" +& $EKSBootstrapScriptFile -EKSClusterName ex-user-data -APIServerEndpoint https://012345678903AB2BAE5D1E0BFE0E2B50.gr7.us-east-1.eks.amazonaws.com -Base64ClusterCA LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKbXFqQ1VqNGdGR2w3ZW5PeWthWnZ2RjROOTVOUEZCM2o0cGhVZUsrWGFtN2ZSQnZya0d6OGxKZmZEZWF2b2plTwpQK2xOZFlqdHZncmxCUEpYdHZIZmFzTzYxVzdIZmdWQ2EvamdRM2w3RmkvL1dpQmxFOG9oWUZkdWpjc0s1SXM2CnNkbk5KTTNYUWN2TysrSitkV09NT2ZlNzlsSWdncmdQLzgvRU9CYkw3eUY1aU1hS3lsb1RHL1V3TlhPUWt3ZUcKblBNcjdiUmdkQ1NCZTlXYXowOGdGRmlxV2FOditsTDhsODBTdFZLcWVNVlUxbjQyejVwOVpQRTd4T2l6L0xTNQpYV2lXWkVkT3pMN0xBWGVCS2gzdkhnczFxMkI2d1BKZnZnS1NzWllQRGFpZTloT1NNOUJkNFNPY3JrZTRYSVBOCkVvcXVhMlYrUDRlTWJEQzhMUkVWRDdCdVZDdWdMTldWOTBoL3VJUy9WU2VOcEdUOGVScE5DakszSjc2aFlsWm8KWjNGRG5QWUY0MWpWTHhiOXF0U1ROdEp6amYwWXBEYnFWci9xZzNmQWlxbVorMzd3YWM1eHlqMDZ4cmlaRUgzZgpUM002d2lCUEVHYVlGeWN5TmNYTk5aYW9DWDJVL0N1d2JsUHAKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ== -KubeletExtraArgs --node-labels=node.kubernetes.io/lifecycle=spot 3>&1 4>&1 5>&1 6>&1 +$LastError = if ($?) 
{ 0 } else { $Error[0].Exception.HResult } +[string]$Something = 'IStillDoNotKnowAnyPowerShell ¯\_(ツ)_/¯' + diff --git a/examples/user_data/rendered/windows/eks-mng-no-op.ps1 b/examples/user_data/rendered/windows/eks-mng-no-op.ps1 new file mode 100755 index 0000000000..e69de29bb2 diff --git a/examples/user_data/rendered/windows/self-mng-bootstrap.ps1 b/examples/user_data/rendered/windows/self-mng-bootstrap.ps1 new file mode 100755 index 0000000000..182195b707 --- /dev/null +++ b/examples/user_data/rendered/windows/self-mng-bootstrap.ps1 @@ -0,0 +1,9 @@ + +[string]$Something = 'IDoNotKnowAnyPowerShell ¯\_(ツ)_/¯' +[string]$EKSBinDir = "$env:ProgramFiles\Amazon\EKS" +[string]$EKSBootstrapScriptName = 'Start-EKSBootstrap.ps1' +[string]$EKSBootstrapScriptFile = "$EKSBinDir\$EKSBootstrapScriptName" +& $EKSBootstrapScriptFile -EKSClusterName ex-user-data -APIServerEndpoint https://012345678903AB2BAE5D1E0BFE0E2B50.gr7.us-east-1.eks.amazonaws.com -Base64ClusterCA LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKbXFqQ1VqNGdGR2w3ZW5PeWthWnZ2RjROOTVOUEZCM2o0cGhVZUsrWGFtN2ZSQnZya0d6OGxKZmZEZWF2b2plTwpQK2xOZFlqdHZncmxCUEpYdHZIZmFzTzYxVzdIZmdWQ2EvamdRM2w3RmkvL1dpQmxFOG9oWUZkdWpjc0s1SXM2CnNkbk5KTTNYUWN2TysrSitkV09NT2ZlNzlsSWdncmdQLzgvRU9CYkw3eUY1aU1hS3lsb1RHL1V3TlhPUWt3ZUcKblBNcjdiUmdkQ1NCZTlXYXowOGdGRmlxV2FOditsTDhsODBTdFZLcWVNVlUxbjQyejVwOVpQRTd4T2l6L0xTNQpYV2lXWkVkT3pMN0xBWGVCS2gzdkhnczFxMkI2d1BKZnZnS1NzWllQRGFpZTloT1NNOUJkNFNPY3JrZTRYSVBOCkVvcXVhMlYrUDRlTWJEQzhMUkVWRDdCdVZDdWdMTldWOTBoL3VJUy9WU2VOcEdUOGVScE5DakszSjc2aFlsWm8KWjNGRG5QWUY0MWpWTHhiOXF0U1ROdEp6amYwWXBEYnFWci9xZzNmQWlxbVorMzd3YWM1eHlqMDZ4cmlaRUgzZgpUM002d2lCUEVHYVlGeWN5TmNYTk5aYW9DWDJVL0N1d2JsUHAKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ== -KubeletExtraArgs --node-labels=node.kubernetes.io/lifecycle=spot 3>&1 4>&1 5>&1 6>&1 +$LastError = if ($?) 
{ 0 } else { $Error[0].Exception.HResult } +[string]$Something = 'IStillDoNotKnowAnyPowerShell ¯\_(ツ)_/¯' + diff --git a/examples/user_data/rendered/windows/self-mng-custom-template.ps1 b/examples/user_data/rendered/windows/self-mng-custom-template.ps1 new file mode 100755 index 0000000000..aa4008c7e5 --- /dev/null +++ b/examples/user_data/rendered/windows/self-mng-custom-template.ps1 @@ -0,0 +1,10 @@ +# Custom user data template provided for rendering + +[string]$Something = 'IDoNotKnowAnyPowerShell ¯\_(ツ)_/¯' +[string]$EKSBinDir = "$env:ProgramFiles\Amazon\EKS" +[string]$EKSBootstrapScriptName = 'Start-EKSBootstrap.ps1' +[string]$EKSBootstrapScriptFile = "$EKSBinDir\$EKSBootstrapScriptName" +& $EKSBootstrapScriptFile -EKSClusterName ex-user-data -APIServerEndpoint https://012345678903AB2BAE5D1E0BFE0E2B50.gr7.us-east-1.eks.amazonaws.com -Base64ClusterCA LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKbXFqQ1VqNGdGR2w3ZW5PeWthWnZ2RjROOTVOUEZCM2o0cGhVZUsrWGFtN2ZSQnZya0d6OGxKZmZEZWF2b2plTwpQK2xOZFlqdHZncmxCUEpYdHZIZmFzTzYxVzdIZmdWQ2EvamdRM2w3RmkvL1dpQmxFOG9oWUZkdWpjc0s1SXM2CnNkbk5KTTNYUWN2TysrSitkV09NT2ZlNzlsSWdncmdQLzgvRU9CYkw3eUY1aU1hS3lsb1RHL1V3TlhPUWt3ZUcKblBNcjdiUmdkQ1NCZTlXYXowOGdGRmlxV2FOditsTDhsODBTdFZLcWVNVlUxbjQyejVwOVpQRTd4T2l6L0xTNQpYV2lXWkVkT3pMN0xBWGVCS2gzdkhnczFxMkI2d1BKZnZnS1NzWllQRGFpZTloT1NNOUJkNFNPY3JrZTRYSVBOCkVvcXVhMlYrUDRlTWJEQzhMUkVWRDdCdVZDdWdMTldWOTBoL3VJUy9WU2VOcEdUOGVScE5DakszSjc2aFlsWm8KWjNGRG5QWUY0MWpWTHhiOXF0U1ROdEp6amYwWXBEYnFWci9xZzNmQWlxbVorMzd3YWM1eHlqMDZ4cmlaRUgzZgpUM002d2lCUEVHYVlGeWN5TmNYTk5aYW9DWDJVL0N1d2JsUHAKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ== -KubeletExtraArgs --node-labels=node.kubernetes.io/lifecycle=spot 3>&1 4>&1 5>&1 6>&1 +$LastError = if ($?) 
{ 0 } else { $Error[0].Exception.HResult } +[string]$Something = 'IStillDoNotKnowAnyPowerShell ¯\_(ツ)_/¯' + diff --git a/examples/user_data/rendered/windows/self-mng-no-op.ps1 b/examples/user_data/rendered/windows/self-mng-no-op.ps1 new file mode 100755 index 0000000000..e69de29bb2 diff --git a/examples/user_data/templates/al2023_custom.tpl b/examples/user_data/templates/al2023_custom.tpl new file mode 100644 index 0000000000..34c566c154 --- /dev/null +++ b/examples/user_data/templates/al2023_custom.tpl @@ -0,0 +1,15 @@ +%{ if enable_bootstrap_user_data ~} +--- +apiVersion: node.eks.aws/v1alpha1 +kind: NodeConfig +spec: + cluster: + name: ${cluster_name} + apiServerEndpoint: ${cluster_endpoint} + certificateAuthority: ${cluster_auth_base64} + cidr: ${cluster_service_cidr} + containerd: + config: | + [plugins."io.containerd.grpc.v1.cri".containerd] + discard_unpacked_layers = false +%{ endif ~} diff --git a/examples/user_data/templates/linux_custom.tpl b/examples/user_data/templates/linux_custom.tpl index bfe21f117a..b3cb73a2ab 100644 --- a/examples/user_data/templates/linux_custom.tpl +++ b/examples/user_data/templates/linux_custom.tpl @@ -6,5 +6,6 @@ ${pre_bootstrap_user_data ~} # Custom user data template provided for rendering B64_CLUSTER_CA=${cluster_auth_base64} API_SERVER_URL=${cluster_endpoint} -/etc/eks/bootstrap.sh ${cluster_name} ${bootstrap_extra_args} --b64-cluster-ca $B64_CLUSTER_CA --apiserver-endpoint $API_SERVER_URL +/etc/eks/bootstrap.sh ${cluster_name} ${bootstrap_extra_args} --b64-cluster-ca $B64_CLUSTER_CA --apiserver-endpoint $API_SERVER_URL \ + --ip-family ${cluster_ip_family} --service-${cluster_ip_family}-cidr ${cluster_service_cidr} ${post_bootstrap_user_data ~} diff --git a/examples/user_data/versions.tf b/examples/user_data/versions.tf index 7117131f4c..31969d6c02 100644 --- a/examples/user_data/versions.tf +++ b/examples/user_data/versions.tf @@ -1,3 +1,10 @@ terraform { - required_version = ">= 1.0" + required_version = ">= 1.3.2" + + 
required_providers { + local = { + source = "hashicorp/local" + version = ">= 2.4" + } + } } diff --git a/main.tf b/main.tf index 52d36aa201..4cb1200327 100644 --- a/main.tf +++ b/main.tf @@ -12,6 +12,8 @@ data "aws_iam_session_context" "current" { locals { create = var.create && var.putin_khuylo + partition = data.aws_partition.current.partition + cluster_role = try(aws_iam_role.this[0].arn, var.iam_role_arn) create_outposts_local_cluster = length(var.outpost_config) > 0 @@ -30,6 +32,17 @@ resource "aws_eks_cluster" "this" { version = var.cluster_version enabled_cluster_log_types = var.cluster_enabled_log_types + access_config { + authentication_mode = var.authentication_mode + + # See access entries below - this is a one time operation from the EKS API. + # Instead, we are hardcoding this to false and if users wish to achieve this + # same functionality, we will do that through an access entry which can be + # enabled or disabled at any time of their choosing using the variable + # var.enable_cluster_creator_admin_permissions + bootstrap_cluster_creator_admin_permissions = false + } + vpc_config { security_group_ids = compact(distinct(concat(var.cluster_additional_security_group_ids, [local.cluster_security_group_id]))) subnet_ids = coalescelist(var.control_plane_subnet_ids, var.subnet_ids) @@ -71,14 +84,15 @@ resource "aws_eks_cluster" "this" { } tags = merge( + { terraform-aws-modules = "eks" }, var.tags, var.cluster_tags, ) timeouts { - create = lookup(var.cluster_timeouts, "create", null) - update = lookup(var.cluster_timeouts, "update", null) - delete = lookup(var.cluster_timeouts, "delete", null) + create = try(var.cluster_timeouts.create, null) + update = try(var.cluster_timeouts.update, null) + delete = try(var.cluster_timeouts.delete, null) } depends_on = [ @@ -88,6 +102,12 @@ resource "aws_eks_cluster" "this" { aws_cloudwatch_log_group.this, aws_iam_policy.cni_ipv6_policy, ] + + lifecycle { + ignore_changes = [ + 
access_config[0].bootstrap_cluster_creator_admin_permissions + ] + } } resource "aws_ec2_tag" "cluster_primary_security_group" { @@ -109,20 +129,102 @@ resource "aws_cloudwatch_log_group" "this" { name = "/aws/eks/${var.cluster_name}/cluster" retention_in_days = var.cloudwatch_log_group_retention_in_days kms_key_id = var.cloudwatch_log_group_kms_key_id + log_group_class = var.cloudwatch_log_group_class tags = merge( var.tags, + var.cloudwatch_log_group_tags, { Name = "/aws/eks/${var.cluster_name}/cluster" } ) } +################################################################################ +# Access Entry +################################################################################ + +locals { + # This replaces the one time logic from the EKS API with something that can be + # better controlled by users through Terraform + bootstrap_cluster_creator_admin_permissions = { + cluster_creator = { + principal_arn = data.aws_iam_session_context.current.issuer_arn + type = "STANDARD" + + policy_associations = { + admin = { + policy_arn = "arn:${local.partition}:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy" + access_scope = { + type = "cluster" + } + } + } + } + } + + # Merge the bootstrap behavior with the entries that users provide + merged_access_entries = merge( + { for k, v in local.bootstrap_cluster_creator_admin_permissions : k => v if var.enable_cluster_creator_admin_permissions }, + var.access_entries, + ) + + # Flatten out entries and policy associations so users can specify the policy + # associations within a single entry + flattened_access_entries = flatten([ + for entry_key, entry_val in local.merged_access_entries : [ + for pol_key, pol_val in lookup(entry_val, "policy_associations", {}) : + merge( + { + principal_arn = entry_val.principal_arn + entry_key = entry_key + pol_key = pol_key + }, + { for k, v in { + association_policy_arn = pol_val.policy_arn + association_access_scope_type = pol_val.access_scope.type + 
association_access_scope_namespaces = lookup(pol_val.access_scope, "namespaces", []) + } : k => v if !contains(["EC2_LINUX", "EC2_WINDOWS", "FARGATE_LINUX"], lookup(entry_val, "type", "STANDARD")) }, + ) + ] + ]) +} + +resource "aws_eks_access_entry" "this" { + for_each = { for k, v in local.merged_access_entries : k => v if local.create } + + cluster_name = aws_eks_cluster.this[0].name + kubernetes_groups = try(each.value.kubernetes_groups, null) + principal_arn = each.value.principal_arn + type = try(each.value.type, "STANDARD") + user_name = try(each.value.user_name, null) + + tags = merge(var.tags, try(each.value.tags, {})) +} + +resource "aws_eks_access_policy_association" "this" { + for_each = { for k, v in local.flattened_access_entries : "${v.entry_key}_${v.pol_key}" => v if local.create } + + access_scope { + namespaces = try(each.value.association_access_scope_namespaces, []) + type = each.value.association_access_scope_type + } + + cluster_name = aws_eks_cluster.this[0].name + + policy_arn = each.value.association_policy_arn + principal_arn = each.value.principal_arn + + depends_on = [ + aws_eks_access_entry.this, + ] +} + ################################################################################ # KMS Key ################################################################################ module "kms" { source = "terraform-aws-modules/kms/aws" - version = "1.1.0" # Note - be mindful of Terraform/provider version compatibility between modules + version = "2.1.0" # Note - be mindful of Terraform/provider version compatibility between modules create = local.create && var.create_kms_key && local.enable_cluster_encryption_config # not valid on Outposts @@ -147,7 +249,10 @@ module "kms" { cluster = { name = "eks/${var.cluster_name}" } } - tags = var.tags + tags = merge( + { terraform-aws-modules = "eks" }, + var.tags, + ) } ################################################################################ @@ -220,19 +325,26 @@ resource 
"aws_security_group_rule" "cluster" { # Note - this is different from EKS identity provider ################################################################################ +locals { + # Not available on outposts + create_oidc_provider = local.create && var.enable_irsa && !local.create_outposts_local_cluster + + oidc_root_ca_thumbprint = local.create_oidc_provider && var.include_oidc_root_ca_thumbprint ? [data.tls_certificate.this[0].certificates[0].sha1_fingerprint] : [] +} + data "tls_certificate" "this" { # Not available on outposts - count = local.create && var.enable_irsa && !local.create_outposts_local_cluster ? 1 : 0 + count = local.create_oidc_provider && var.include_oidc_root_ca_thumbprint ? 1 : 0 url = aws_eks_cluster.this[0].identity[0].oidc[0].issuer } resource "aws_iam_openid_connect_provider" "oidc_provider" { # Not available on outposts - count = local.create && var.enable_irsa && !local.create_outposts_local_cluster ? 1 : 0 + count = local.create_oidc_provider ? 1 : 0 - client_id_list = distinct(compact(concat(["sts.${local.dns_suffix}"], var.openid_connect_audiences))) - thumbprint_list = concat(data.tls_certificate.this[0].certificates[*].sha1_fingerprint, var.custom_oidc_thumbprints) + client_id_list = distinct(compact(concat(["sts.amazonaws.com"], var.openid_connect_audiences))) + thumbprint_list = concat(local.oidc_root_ca_thumbprint, var.custom_oidc_thumbprints) url = aws_eks_cluster.this[0].identity[0].oidc[0].issuer tags = merge( @@ -248,13 +360,9 @@ resource "aws_iam_openid_connect_provider" "oidc_provider" { locals { create_iam_role = local.create && var.create_iam_role iam_role_name = coalesce(var.iam_role_name, "${var.cluster_name}-cluster") - iam_role_policy_prefix = "arn:${data.aws_partition.current.partition}:iam::aws:policy" + iam_role_policy_prefix = "arn:${local.partition}:iam::aws:policy" cluster_encryption_policy_name = coalesce(var.cluster_encryption_policy_name, "${local.iam_role_name}-ClusterEncryption") - - # TODO - hopefully 
this can be removed once the AWS endpoint is named properly in China - # https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1904 - dns_suffix = coalesce(var.cluster_iam_role_dns_suffix, data.aws_partition.current.dns_suffix) } data "aws_iam_policy_document" "assume_role_policy" { @@ -266,7 +374,7 @@ data "aws_iam_policy_document" "assume_role_policy" { principals { type = "Service" - identifiers = ["eks.${local.dns_suffix}"] + identifiers = ["eks.amazonaws.com"] } dynamic "principals" { @@ -275,7 +383,7 @@ data "aws_iam_policy_document" "assume_role_policy" { content { type = "Service" identifiers = [ - "ec2.${local.dns_suffix}", + "ec2.amazonaws.com", ] } } @@ -379,6 +487,14 @@ resource "aws_iam_policy" "cluster_encryption" { # EKS Addons ################################################################################ +data "aws_eks_addon_version" "this" { + for_each = { for k, v in var.cluster_addons : k => v if local.create && !local.create_outposts_local_cluster } + + addon_name = try(each.value.name, each.key) + kubernetes_version = coalesce(var.cluster_version, aws_eks_cluster.this[0].version) + most_recent = try(each.value.most_recent, null) +} + resource "aws_eks_addon" "this" { # Not supported on outposts for_each = { for k, v in var.cluster_addons : k => v if !try(v.before_compute, false) && local.create && !local.create_outposts_local_cluster } @@ -386,11 +502,12 @@ resource "aws_eks_addon" "this" { cluster_name = aws_eks_cluster.this[0].name addon_name = try(each.value.name, each.key) - addon_version = try(each.value.addon_version, data.aws_eks_addon_version.this[each.key].version) - configuration_values = try(each.value.configuration_values, null) - preserve = try(each.value.preserve, null) - resolve_conflicts = try(each.value.resolve_conflicts, "OVERWRITE") - service_account_role_arn = try(each.value.service_account_role_arn, null) + addon_version = coalesce(try(each.value.addon_version, null), 
data.aws_eks_addon_version.this[each.key].version) + configuration_values = try(each.value.configuration_values, null) + preserve = try(each.value.preserve, true) + resolve_conflicts_on_create = try(each.value.resolve_conflicts_on_create, "OVERWRITE") + resolve_conflicts_on_update = try(each.value.resolve_conflicts_on_update, "OVERWRITE") + service_account_role_arn = try(each.value.service_account_role_arn, null) timeouts { create = try(each.value.timeouts.create, var.cluster_addons_timeouts.create, null) @@ -404,7 +521,7 @@ resource "aws_eks_addon" "this" { module.self_managed_node_group, ] - tags = var.tags + tags = merge(var.tags, try(each.value.tags, {})) } resource "aws_eks_addon" "before_compute" { @@ -414,11 +531,12 @@ resource "aws_eks_addon" "before_compute" { cluster_name = aws_eks_cluster.this[0].name addon_name = try(each.value.name, each.key) - addon_version = try(each.value.addon_version, data.aws_eks_addon_version.this[each.key].version) - configuration_values = try(each.value.configuration_values, null) - preserve = try(each.value.preserve, null) - resolve_conflicts = try(each.value.resolve_conflicts, "OVERWRITE") - service_account_role_arn = try(each.value.service_account_role_arn, null) + addon_version = coalesce(try(each.value.addon_version, null), data.aws_eks_addon_version.this[each.key].version) + configuration_values = try(each.value.configuration_values, null) + preserve = try(each.value.preserve, true) + resolve_conflicts_on_create = try(each.value.resolve_conflicts_on_create, "OVERWRITE") + resolve_conflicts_on_update = try(each.value.resolve_conflicts_on_update, "OVERWRITE") + service_account_role_arn = try(each.value.service_account_role_arn, null) timeouts { create = try(each.value.timeouts.create, var.cluster_addons_timeouts.create, null) @@ -426,15 +544,7 @@ resource "aws_eks_addon" "before_compute" { delete = try(each.value.timeouts.delete, var.cluster_addons_timeouts.delete, null) } - tags = var.tags -} - -data 
"aws_eks_addon_version" "this" { - for_each = { for k, v in var.cluster_addons : k => v if local.create && !local.create_outposts_local_cluster } - - addon_name = try(each.value.name, each.key) - kubernetes_version = coalesce(var.cluster_version, aws_eks_cluster.this[0].version) - most_recent = try(each.value.most_recent, null) + tags = merge(var.tags, try(each.value.tags, {})) } ################################################################################ @@ -442,6 +552,14 @@ data "aws_eks_addon_version" "this" { # Note - this is different from IRSA ################################################################################ +locals { + # Maintain current behavior for <= 1.29, remove default for >= 1.30 + # `null` will return the latest Kubernetes version from the EKS API, which at time of writing is 1.30 + # https://github.com/kubernetes/kubernetes/pull/123561 + idpc_backwards_compat_version = contains(["1.21", "1.22", "1.23", "1.24", "1.25", "1.26", "1.27", "1.28", "1.29"], coalesce(var.cluster_version, "1.30")) + idpc_issuer_url = local.idpc_backwards_compat_version ? 
try(aws_eks_cluster.this[0].identity[0].oidc[0].issuer, null) : null +} + resource "aws_eks_identity_provider_config" "this" { for_each = { for k, v in var.cluster_identity_providers : k => v if local.create && !local.create_outposts_local_cluster } @@ -452,118 +570,12 @@ resource "aws_eks_identity_provider_config" "this" { groups_claim = lookup(each.value, "groups_claim", null) groups_prefix = lookup(each.value, "groups_prefix", null) identity_provider_config_name = try(each.value.identity_provider_config_name, each.key) - issuer_url = try(each.value.issuer_url, aws_eks_cluster.this[0].identity[0].oidc[0].issuer) - required_claims = lookup(each.value, "required_claims", null) - username_claim = lookup(each.value, "username_claim", null) - username_prefix = lookup(each.value, "username_prefix", null) - } - - tags = var.tags -} - -################################################################################ -# aws-auth configmap -################################################################################ - -locals { - node_iam_role_arns_non_windows = distinct( - compact( - concat( - [for group in module.eks_managed_node_group : group.iam_role_arn], - [for group in module.self_managed_node_group : group.iam_role_arn if group.platform != "windows"], - var.aws_auth_node_iam_role_arns_non_windows, - ) - ) - ) - - node_iam_role_arns_windows = distinct( - compact( - concat( - [for group in module.self_managed_node_group : group.iam_role_arn if group.platform == "windows"], - var.aws_auth_node_iam_role_arns_windows, - ) - ) - ) - - fargate_profile_pod_execution_role_arns = distinct( - compact( - concat( - [for group in module.fargate_profile : group.fargate_profile_pod_execution_role_arn], - var.aws_auth_fargate_profile_pod_execution_role_arns, - ) - ) - ) - - aws_auth_configmap_data = { - mapRoles = yamlencode(concat( - [for role_arn in local.node_iam_role_arns_non_windows : { - rolearn = role_arn - username = "system:node:{{EC2PrivateDNSName}}" - groups = [ - 
"system:bootstrappers", - "system:nodes", - ] - } - ], - [for role_arn in local.node_iam_role_arns_windows : { - rolearn = role_arn - username = "system:node:{{EC2PrivateDNSName}}" - groups = [ - "eks:kube-proxy-windows", - "system:bootstrappers", - "system:nodes", - ] - } - ], - # Fargate profile - [for role_arn in local.fargate_profile_pod_execution_role_arns : { - rolearn = role_arn - username = "system:node:{{SessionName}}" - groups = [ - "system:bootstrappers", - "system:nodes", - "system:node-proxier", - ] - } - ], - var.aws_auth_roles - )) - mapUsers = yamlencode(var.aws_auth_users) - mapAccounts = yamlencode(var.aws_auth_accounts) - } -} - -resource "kubernetes_config_map" "aws_auth" { - count = var.create && var.create_aws_auth_configmap ? 1 : 0 - - metadata { - name = "aws-auth" - namespace = "kube-system" + # TODO - make argument explicitly required on next breaking change + issuer_url = try(each.value.issuer_url, local.idpc_issuer_url) + required_claims = lookup(each.value, "required_claims", null) + username_claim = lookup(each.value, "username_claim", null) + username_prefix = lookup(each.value, "username_prefix", null) } - data = local.aws_auth_configmap_data - - lifecycle { - # We are ignoring the data here since we will manage it with the resource below - # This is only intended to be used in scenarios where the configmap does not exist - ignore_changes = [data, metadata[0].labels, metadata[0].annotations] - } -} - -resource "kubernetes_config_map_v1_data" "aws_auth" { - count = var.create && var.manage_aws_auth_configmap ? 
1 : 0 - - force = true - - metadata { - name = "aws-auth" - namespace = "kube-system" - } - - data = local.aws_auth_configmap_data - - depends_on = [ - # Required for instances where the configmap does not exist yet to avoid race condition - kubernetes_config_map.aws_auth, - ] + tags = merge(var.tags, try(each.value.tags, {})) } diff --git a/modules/_user_data/README.md b/modules/_user_data/README.md index 0853fd9e1a..e5207d9443 100644 --- a/modules/_user_data/README.md +++ b/modules/_user_data/README.md @@ -9,14 +9,16 @@ See [`examples/user_data/`](https://github.com/terraform-aws-modules/terraform-a | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0 | +| [terraform](#requirement\_terraform) | >= 1.3.2 | | [cloudinit](#requirement\_cloudinit) | >= 2.0 | +| [null](#requirement\_null) | >= 3.0 | ## Providers | Name | Version | |------|---------| | [cloudinit](#provider\_cloudinit) | >= 2.0 | +| [null](#provider\_null) | >= 3.0 | ## Modules @@ -26,28 +28,37 @@ No modules. | Name | Type | |------|------| +| [null_resource.validate_cluster_service_cidr](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [cloudinit_config.al2023_eks_managed_node_group](https://registry.terraform.io/providers/hashicorp/cloudinit/latest/docs/data-sources/config) | data source | | [cloudinit_config.linux_eks_managed_node_group](https://registry.terraform.io/providers/hashicorp/cloudinit/latest/docs/data-sources/config) | data source | ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| [bootstrap\_extra\_args](#input\_bootstrap\_extra\_args) | Additional arguments passed to the bootstrap script. 
When `platform` = `bottlerocket`; these are additional [settings](https://github.com/bottlerocket-os/bottlerocket#settings) that are provided to the Bottlerocket user data | `string` | `""` | no | +| [additional\_cluster\_dns\_ips](#input\_additional\_cluster\_dns\_ips) | Additional DNS IP addresses to use for the cluster. Only used when `ami_type` = `BOTTLEROCKET_*` | `list(string)` | `[]` | no | +| [ami\_type](#input\_ami\_type) | Type of Amazon Machine Image (AMI) associated with the EKS Node Group. See the [AWS documentation](https://docs.aws.amazon.com/eks/latest/APIReference/API_Nodegroup.html#AmazonEKS-Type-Nodegroup-amiType) for valid values | `string` | `null` | no | +| [bootstrap\_extra\_args](#input\_bootstrap\_extra\_args) | Additional arguments passed to the bootstrap script. When `ami_type` = `BOTTLEROCKET_*`; these are additional [settings](https://github.com/bottlerocket-os/bottlerocket#settings) that are provided to the Bottlerocket user data | `string` | `""` | no | +| [cloudinit\_post\_nodeadm](#input\_cloudinit\_post\_nodeadm) | Array of cloud-init document parts that are created after the nodeadm document part |
list(object({
content = string
content_type = optional(string)
filename = optional(string)
merge_type = optional(string)
}))
| `[]` | no | +| [cloudinit\_pre\_nodeadm](#input\_cloudinit\_pre\_nodeadm) | Array of cloud-init document parts that are created before the nodeadm document part |
list(object({
content = string
content_type = optional(string)
filename = optional(string)
merge_type = optional(string)
}))
| `[]` | no | | [cluster\_auth\_base64](#input\_cluster\_auth\_base64) | Base64 encoded CA of associated EKS cluster | `string` | `""` | no | | [cluster\_endpoint](#input\_cluster\_endpoint) | Endpoint of associated EKS cluster | `string` | `""` | no | +| [cluster\_ip\_family](#input\_cluster\_ip\_family) | The IP family used to assign Kubernetes pod and service addresses. Valid values are `ipv4` (default) and `ipv6` | `string` | `"ipv4"` | no | | [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster | `string` | `""` | no | -| [cluster\_service\_ipv4\_cidr](#input\_cluster\_service\_ipv4\_cidr) | The CIDR block to assign Kubernetes service IP addresses from. If you don't specify a block, Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks | `string` | `null` | no | +| [cluster\_service\_cidr](#input\_cluster\_service\_cidr) | The CIDR block (IPv4 or IPv6) used by the cluster to assign Kubernetes service IP addresses. This is derived from the cluster itself | `string` | `""` | no | +| [cluster\_service\_ipv4\_cidr](#input\_cluster\_service\_ipv4\_cidr) | [Deprecated] The CIDR block to assign Kubernetes service IP addresses from. If you don't specify a block, Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks | `string` | `null` | no | | [create](#input\_create) | Determines whether to create user-data or not | `bool` | `true` | no | | [enable\_bootstrap\_user\_data](#input\_enable\_bootstrap\_user\_data) | Determines whether the bootstrap configurations are populated within the user data template | `bool` | `false` | no | | [is\_eks\_managed\_node\_group](#input\_is\_eks\_managed\_node\_group) | Determines whether the user data is used on nodes in an EKS managed node group. 
Used to determine if user data will be appended or not | `bool` | `true` | no | -| [platform](#input\_platform) | Identifies if the OS platform is `bottlerocket`, `linux`, or `windows` based | `string` | `"linux"` | no | -| [post\_bootstrap\_user\_data](#input\_post\_bootstrap\_user\_data) | User data that is appended to the user data script after of the EKS bootstrap script. Not used when `platform` = `bottlerocket` | `string` | `""` | no | -| [pre\_bootstrap\_user\_data](#input\_pre\_bootstrap\_user\_data) | User data that is injected into the user data script ahead of the EKS bootstrap script. Not used when `platform` = `bottlerocket` | `string` | `""` | no | +| [platform](#input\_platform) | [DEPRECATED - use `ami_type` instead. Will be removed in `v21.0`] Identifies the OS platform as `bottlerocket`, `linux` (AL2), `al2023`, or `windows` | `string` | `"linux"` | no | +| [post\_bootstrap\_user\_data](#input\_post\_bootstrap\_user\_data) | User data that is appended to the user data script after of the EKS bootstrap script. Not used when `ami_type` = `BOTTLEROCKET_*` | `string` | `""` | no | +| [pre\_bootstrap\_user\_data](#input\_pre\_bootstrap\_user\_data) | User data that is injected into the user data script ahead of the EKS bootstrap script. 
Not used when `ami_type` = `BOTTLEROCKET_*` | `string` | `""` | no | | [user\_data\_template\_path](#input\_user\_data\_template\_path) | Path to a local, custom user data template file to use when rendering user data | `string` | `""` | no | ## Outputs | Name | Description | |------|-------------| +| [platform](#output\_platform) | [DEPRECATED - Will be removed in `v21.0`] Identifies the OS platform as `bottlerocket`, `linux` (AL2), `al2023, or `windows | | [user\_data](#output\_user\_data) | Base64 encoded user data rendered for the provided inputs | diff --git a/modules/_user_data/main.tf b/modules/_user_data/main.tf index 8ace10539d..b695ba6cca 100644 --- a/modules/_user_data/main.tf +++ b/modules/_user_data/main.tf @@ -1,70 +1,99 @@ +# The `cluster_service_cidr` is required when `create == true` +# This is a hacky way to make that logic work, otherwise Terraform always wants a value +# and supplying any old value like `""` or `null` is not valid and will silently +# fail to join nodes to the cluster +resource "null_resource" "validate_cluster_service_cidr" { + lifecycle { + precondition { + # The length 6 is currently arbitrary, but it's a safe bet that the CIDR will be longer than that + # The main point is that a value needs to be provided when `create = true` + condition = var.create ? length(local.cluster_service_cidr) > 6 : true + error_message = "`cluster_service_cidr` is required when `create = true`." + } + } +} locals { - int_linux_default_user_data = var.create && var.platform == "linux" && (var.enable_bootstrap_user_data || var.user_data_template_path != "") ? 
base64encode(templatefile( - coalesce(var.user_data_template_path, "${path.module}/../../templates/linux_user_data.tpl"), + # Converts AMI type into user data type that represents the underlying format (bash, toml, PS1, nodeadm) + # TODO - platform will be removed in v21.0 and only `ami_type` will be valid + ami_type_to_user_data_type = { + AL2_x86_64 = "linux" + AL2_x86_64_GPU = "linux" + AL2_ARM_64 = "linux" + BOTTLEROCKET_ARM_64 = "bottlerocket" + BOTTLEROCKET_x86_64 = "bottlerocket" + BOTTLEROCKET_ARM_64_NVIDIA = "bottlerocket" + BOTTLEROCKET_x86_64_NVIDIA = "bottlerocket" + WINDOWS_CORE_2019_x86_64 = "windows" + WINDOWS_FULL_2019_x86_64 = "windows" + WINDOWS_CORE_2022_x86_64 = "windows" + WINDOWS_FULL_2022_x86_64 = "windows" + AL2023_x86_64_STANDARD = "al2023" + AL2023_ARM_64_STANDARD = "al2023" + } + # Try to use `ami_type` first, but fall back to current, default behavior + # TODO - will be removed in v21.0 + user_data_type = try(local.ami_type_to_user_data_type[var.ami_type], var.platform) + + template_path = { + al2023 = "${path.module}/../../templates/al2023_user_data.tpl" + bottlerocket = "${path.module}/../../templates/bottlerocket_user_data.tpl" + linux = "${path.module}/../../templates/linux_user_data.tpl" + windows = "${path.module}/../../templates/windows_user_data.tpl" + } + + cluster_service_cidr = try(coalesce(var.cluster_service_ipv4_cidr, var.cluster_service_cidr), "") + cluster_dns_ips = flatten(concat([try(cidrhost(local.cluster_service_cidr, 10), "")], var.additional_cluster_dns_ips)) + + user_data = base64encode(templatefile( + coalesce(var.user_data_template_path, local.template_path[local.user_data_type]), { # https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-custom-ami enable_bootstrap_user_data = var.enable_bootstrap_user_data + # Required to bootstrap node cluster_name = var.cluster_name cluster_endpoint = var.cluster_endpoint cluster_auth_base64 = var.cluster_auth_base64 + + cluster_service_cidr = 
local.cluster_service_cidr + cluster_ip_family = var.cluster_ip_family + + # Bottlerocket + cluster_dns_ips = "[${join(", ", formatlist("\"%s\"", local.cluster_dns_ips))}]" + # Optional - cluster_service_ipv4_cidr = var.cluster_service_ipv4_cidr != null ? var.cluster_service_ipv4_cidr : "" - bootstrap_extra_args = var.bootstrap_extra_args - pre_bootstrap_user_data = var.pre_bootstrap_user_data - post_bootstrap_user_data = var.post_bootstrap_user_data + bootstrap_extra_args = var.bootstrap_extra_args + pre_bootstrap_user_data = var.pre_bootstrap_user_data + post_bootstrap_user_data = var.post_bootstrap_user_data + } + )) + + user_data_type_to_rendered = { + al2023 = { + user_data = var.create ? try(data.cloudinit_config.al2023_eks_managed_node_group[0].rendered, local.user_data) : "" } - )) : "" - platform = { bottlerocket = { - user_data = var.create && var.platform == "bottlerocket" && (var.enable_bootstrap_user_data || var.user_data_template_path != "" || var.bootstrap_extra_args != "") ? base64encode(templatefile( - coalesce(var.user_data_template_path, "${path.module}/../../templates/bottlerocket_user_data.tpl"), - { - # https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-custom-ami - enable_bootstrap_user_data = var.enable_bootstrap_user_data - # Required to bootstrap node - cluster_name = var.cluster_name - cluster_endpoint = var.cluster_endpoint - cluster_auth_base64 = var.cluster_auth_base64 - # Optional - is appended if using EKS managed node group without custom AMI - # cluster_service_ipv4_cidr = var.cluster_service_ipv4_cidr # Bottlerocket pulls this automatically https://github.com/bottlerocket-os/bottlerocket/issues/1866 - bootstrap_extra_args = var.bootstrap_extra_args - } - )) : "" + user_data = var.create && local.user_data_type == "bottlerocket" && (var.enable_bootstrap_user_data || var.user_data_template_path != "" || var.bootstrap_extra_args != "") ? 
local.user_data : "" } linux = { - user_data = try(data.cloudinit_config.linux_eks_managed_node_group[0].rendered, local.int_linux_default_user_data) - + user_data = var.create ? try(data.cloudinit_config.linux_eks_managed_node_group[0].rendered, local.user_data) : "" } windows = { - user_data = var.create && var.platform == "windows" && var.enable_bootstrap_user_data ? base64encode(templatefile( - coalesce(var.user_data_template_path, "${path.module}/../../templates/windows_user_data.tpl"), - { - # Required to bootstrap node - cluster_name = var.cluster_name - cluster_endpoint = var.cluster_endpoint - cluster_auth_base64 = var.cluster_auth_base64 - # Optional - is appended if using EKS managed node group without custom AMI - # cluster_service_ipv4_cidr = var.cluster_service_ipv4_cidr # Not supported yet: https://github.com/awslabs/amazon-eks-ami/issues/805 - bootstrap_extra_args = var.bootstrap_extra_args - pre_bootstrap_user_data = var.pre_bootstrap_user_data - post_bootstrap_user_data = var.post_bootstrap_user_data - } - )) : "" + user_data = var.create && local.user_data_type == "windows" && (var.enable_bootstrap_user_data || var.user_data_template_path != "" || var.pre_bootstrap_user_data != "") ? local.user_data : "" } } } # https://github.com/aws/containers-roadmap/issues/596#issuecomment-675097667 -# An important note is that user data must in MIME multi-part archive format, +# Managed nodegroup data must in MIME multi-part archive format, # as by default, EKS will merge the bootstrapping command required for nodes to join the # cluster with your user data. If you use a custom AMI in your launch template, # this merging will NOT happen and you are responsible for nodes joining the cluster. 
# See docs for more details -> https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-user-data data "cloudinit_config" "linux_eks_managed_node_group" { - count = var.create && var.platform == "linux" && var.is_eks_managed_node_group && !var.enable_bootstrap_user_data && var.pre_bootstrap_user_data != "" && var.user_data_template_path == "" ? 1 : 0 + count = var.create && local.user_data_type == "linux" && var.is_eks_managed_node_group && !var.enable_bootstrap_user_data && var.pre_bootstrap_user_data != "" && var.user_data_template_path == "" ? 1 : 0 base64_encode = true gzip = false @@ -72,7 +101,44 @@ data "cloudinit_config" "linux_eks_managed_node_group" { # Prepend to existing user data supplied by AWS EKS part { - content_type = "text/x-shellscript" content = var.pre_bootstrap_user_data + content_type = "text/x-shellscript" + } +} + +# Scenarios: +# +# 1. Do nothing - provide nothing +# 2. Prepend stuff on EKS MNG (before EKS MNG adds its bit at the end) +# 3. Own all of the stuff on self-MNG or EKS MNG w/ custom AMI + +locals { + nodeadm_cloudinit = var.enable_bootstrap_user_data ? concat( + var.cloudinit_pre_nodeadm, + [{ + content_type = "application/node.eks.aws" + content = base64decode(local.user_data) + }], + var.cloudinit_post_nodeadm + ) : var.cloudinit_pre_nodeadm +} + +data "cloudinit_config" "al2023_eks_managed_node_group" { + count = var.create && local.user_data_type == "al2023" && length(local.nodeadm_cloudinit) > 0 ? 
1 : 0 + + base64_encode = true + gzip = false + boundary = "MIMEBOUNDARY" + + dynamic "part" { + # Using the index is fine in this context since any change in user data will be a replacement + for_each = { for i, v in local.nodeadm_cloudinit : i => v } + + content { + content = part.value.content + content_type = try(part.value.content_type, null) + filename = try(part.value.filename, null) + merge_type = try(part.value.merge_type, null) + } } } diff --git a/modules/_user_data/outputs.tf b/modules/_user_data/outputs.tf index 075801b233..7bebb3f218 100644 --- a/modules/_user_data/outputs.tf +++ b/modules/_user_data/outputs.tf @@ -1,4 +1,9 @@ output "user_data" { description = "Base64 encoded user data rendered for the provided inputs" - value = try(local.platform[var.platform].user_data, null) + value = try(local.user_data_type_to_rendered[local.user_data_type].user_data, null) +} + +output "platform" { + description = "[DEPRECATED - Will be removed in `v21.0`] Identifies the OS platform as `bottlerocket`, `linux` (AL2), `al2023, or `windows`" + value = local.user_data_type } diff --git a/modules/_user_data/variables.tf b/modules/_user_data/variables.tf index 232e1e883e..d5a1ef1bd3 100644 --- a/modules/_user_data/variables.tf +++ b/modules/_user_data/variables.tf @@ -5,11 +5,17 @@ variable "create" { } variable "platform" { - description = "Identifies if the OS platform is `bottlerocket`, `linux`, or `windows` based" + description = "[DEPRECATED - use `ami_type` instead. Will be removed in `v21.0`] Identifies the OS platform as `bottlerocket`, `linux` (AL2), `al2023`, or `windows`" type = string default = "linux" } +variable "ami_type" { + description = "Type of Amazon Machine Image (AMI) associated with the EKS Node Group. 
See the [AWS documentation](https://docs.aws.amazon.com/eks/latest/APIReference/API_Nodegroup.html#AmazonEKS-Type-Nodegroup-amiType) for valid values" + type = string + default = null +} + variable "enable_bootstrap_user_data" { description = "Determines whether the bootstrap configurations are populated within the user data template" type = bool @@ -40,26 +46,45 @@ variable "cluster_auth_base64" { default = "" } +variable "cluster_service_cidr" { + description = "The CIDR block (IPv4 or IPv6) used by the cluster to assign Kubernetes service IP addresses. This is derived from the cluster itself" + type = string + default = "" +} + +variable "cluster_ip_family" { + description = "The IP family used to assign Kubernetes pod and service addresses. Valid values are `ipv4` (default) and `ipv6`" + type = string + default = "ipv4" +} + +variable "additional_cluster_dns_ips" { + description = "Additional DNS IP addresses to use for the cluster. Only used when `ami_type` = `BOTTLEROCKET_*`" + type = list(string) + default = [] +} + +# TODO - remove at next breaking change variable "cluster_service_ipv4_cidr" { - description = "The CIDR block to assign Kubernetes service IP addresses from. If you don't specify a block, Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks" + description = "[Deprecated] The CIDR block to assign Kubernetes service IP addresses from. If you don't specify a block, Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks" type = string default = null } variable "pre_bootstrap_user_data" { - description = "User data that is injected into the user data script ahead of the EKS bootstrap script. Not used when `platform` = `bottlerocket`" + description = "User data that is injected into the user data script ahead of the EKS bootstrap script. 
Not used when `ami_type` = `BOTTLEROCKET_*`" type = string default = "" } variable "post_bootstrap_user_data" { - description = "User data that is appended to the user data script after of the EKS bootstrap script. Not used when `platform` = `bottlerocket`" + description = "User data that is appended to the user data script after of the EKS bootstrap script. Not used when `ami_type` = `BOTTLEROCKET_*`" type = string default = "" } variable "bootstrap_extra_args" { - description = "Additional arguments passed to the bootstrap script. When `platform` = `bottlerocket`; these are additional [settings](https://github.com/bottlerocket-os/bottlerocket#settings) that are provided to the Bottlerocket user data" + description = "Additional arguments passed to the bootstrap script. When `ami_type` = `BOTTLEROCKET_*`; these are additional [settings](https://github.com/bottlerocket-os/bottlerocket#settings) that are provided to the Bottlerocket user data" type = string default = "" } @@ -69,3 +94,25 @@ variable "user_data_template_path" { type = string default = "" } + +variable "cloudinit_pre_nodeadm" { + description = "Array of cloud-init document parts that are created before the nodeadm document part" + type = list(object({ + content = string + content_type = optional(string) + filename = optional(string) + merge_type = optional(string) + })) + default = [] +} + +variable "cloudinit_post_nodeadm" { + description = "Array of cloud-init document parts that are created after the nodeadm document part" + type = list(object({ + content = string + content_type = optional(string) + filename = optional(string) + merge_type = optional(string) + })) + default = [] +} diff --git a/modules/_user_data/versions.tf b/modules/_user_data/versions.tf index 2dbd12cdc0..9219addeda 100644 --- a/modules/_user_data/versions.tf +++ b/modules/_user_data/versions.tf @@ -1,10 +1,14 @@ terraform { - required_version = ">= 1.0" + required_version = ">= 1.3.2" required_providers { cloudinit = { source = 
"hashicorp/cloudinit" version = ">= 2.0" } + null = { + source = "hashicorp/null" + version = ">= 3.0" + } } } diff --git a/modules/aws-auth/README.md b/modules/aws-auth/README.md new file mode 100644 index 0000000000..5ba490b7f1 --- /dev/null +++ b/modules/aws-auth/README.md @@ -0,0 +1,81 @@ +# `aws-auth` Module + +Configuration in this directory creates/updates the `aws-auth` ConfigMap. + +```hcl +module "eks" { + source = "terraform-aws-modules/eks/aws//modules/aws-auth" + version = "~> 20.0" + + manage_aws_auth_configmap = true + + aws_auth_roles = [ + { + rolearn = "arn:aws:iam::66666666666:role/role1" + username = "role1" + groups = ["system:masters"] + }, + ] + + aws_auth_users = [ + { + userarn = "arn:aws:iam::66666666666:user/user1" + username = "user1" + groups = ["system:masters"] + }, + { + userarn = "arn:aws:iam::66666666666:user/user2" + username = "user2" + groups = ["system:masters"] + }, + ] + + aws_auth_accounts = [ + "777777777777", + "888888888888", + ] +} +``` + +## Usage + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.3.2 | +| [kubernetes](#requirement\_kubernetes) | >= 2.20 | + +## Providers + +| Name | Version | +|------|---------| +| [kubernetes](#provider\_kubernetes) | >= 2.20 | + +## Modules + +No modules. 
+ +## Resources + +| Name | Type | +|------|------| +| [kubernetes_config_map.aws_auth](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/config_map) | resource | +| [kubernetes_config_map_v1_data.aws_auth](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/config_map_v1_data) | resource | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [aws\_auth\_accounts](#input\_aws\_auth\_accounts) | List of account maps to add to the aws-auth configmap | `list(any)` | `[]` | no | +| [aws\_auth\_roles](#input\_aws\_auth\_roles) | List of role maps to add to the aws-auth configmap | `list(any)` | `[]` | no | +| [aws\_auth\_users](#input\_aws\_auth\_users) | List of user maps to add to the aws-auth configmap | `list(any)` | `[]` | no | +| [create](#input\_create) | Controls if resources should be created (affects all resources) | `bool` | `true` | no | +| [create\_aws\_auth\_configmap](#input\_create\_aws\_auth\_configmap) | Determines whether to create the aws-auth configmap. NOTE - this is only intended for scenarios where the configmap does not exist (i.e. - when using only self-managed node groups). Most users should use `manage_aws_auth_configmap` | `bool` | `false` | no | +| [manage\_aws\_auth\_configmap](#input\_manage\_aws\_auth\_configmap) | Determines whether to manage the aws-auth configmap | `bool` | `true` | no | + +## Outputs + +No outputs. 
+ diff --git a/modules/aws-auth/main.tf b/modules/aws-auth/main.tf new file mode 100644 index 0000000000..2f7e9694a7 --- /dev/null +++ b/modules/aws-auth/main.tf @@ -0,0 +1,47 @@ + +################################################################################ +# aws-auth configmap +################################################################################ + +locals { + aws_auth_configmap_data = { + mapRoles = yamlencode(var.aws_auth_roles) + mapUsers = yamlencode(var.aws_auth_users) + mapAccounts = yamlencode(var.aws_auth_accounts) + } +} + +resource "kubernetes_config_map" "aws_auth" { + count = var.create && var.create_aws_auth_configmap ? 1 : 0 + + metadata { + name = "aws-auth" + namespace = "kube-system" + } + + data = local.aws_auth_configmap_data + + lifecycle { + # We are ignoring the data here since we will manage it with the resource below + # This is only intended to be used in scenarios where the configmap does not exist + ignore_changes = [data, metadata[0].labels, metadata[0].annotations] + } +} + +resource "kubernetes_config_map_v1_data" "aws_auth" { + count = var.create && var.manage_aws_auth_configmap ? 
1 : 0 + + force = true + + metadata { + name = "aws-auth" + namespace = "kube-system" + } + + data = local.aws_auth_configmap_data + + depends_on = [ + # Required for instances where the configmap does not exist yet to avoid race condition + kubernetes_config_map.aws_auth, + ] +} diff --git a/modules/aws-auth/outputs.tf b/modules/aws-auth/outputs.tf new file mode 100644 index 0000000000..e69de29bb2 diff --git a/modules/aws-auth/variables.tf b/modules/aws-auth/variables.tf new file mode 100644 index 0000000000..3aaeb023e3 --- /dev/null +++ b/modules/aws-auth/variables.tf @@ -0,0 +1,39 @@ +variable "create" { + description = "Controls if resources should be created (affects all resources)" + type = bool + default = true +} + +################################################################################ +# aws-auth ConfigMap +################################################################################ + +variable "create_aws_auth_configmap" { + description = "Determines whether to create the aws-auth configmap. NOTE - this is only intended for scenarios where the configmap does not exist (i.e. - when using only self-managed node groups). 
Most users should use `manage_aws_auth_configmap`" + type = bool + default = false +} + +variable "manage_aws_auth_configmap" { + description = "Determines whether to manage the aws-auth configmap" + type = bool + default = true +} + +variable "aws_auth_roles" { + description = "List of role maps to add to the aws-auth configmap" + type = list(any) + default = [] +} + +variable "aws_auth_users" { + description = "List of user maps to add to the aws-auth configmap" + type = list(any) + default = [] +} + +variable "aws_auth_accounts" { + description = "List of account maps to add to the aws-auth configmap" + type = list(any) + default = [] +} diff --git a/modules/aws-auth/versions.tf b/modules/aws-auth/versions.tf new file mode 100644 index 0000000000..f330045476 --- /dev/null +++ b/modules/aws-auth/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.3.2" + + required_providers { + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.20" + } + } +} diff --git a/modules/eks-managed-node-group/README.md b/modules/eks-managed-node-group/README.md index bf3a35976a..9cc5680b84 100644 --- a/modules/eks-managed-node-group/README.md +++ b/modules/eks-managed-node-group/README.md @@ -63,14 +63,14 @@ module "eks_managed_node_group" { | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | >= 4.47 | +| [terraform](#requirement\_terraform) | >= 1.3.2 | +| [aws](#requirement\_aws) | >= 5.40 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 4.47 | +| [aws](#provider\_aws) | >= 5.40 | ## Modules @@ -88,32 +88,41 @@ module "eks_managed_node_group" { | [aws_iam_role_policy_attachment.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | | [aws_iam_role_policy_attachment.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | 
resource | | [aws_launch_template.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource | +| [aws_placement_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/placement_group) | resource | | [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | +| [aws_ec2_instance_type.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ec2_instance_type) | data source | +| [aws_ec2_instance_type_offerings.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ec2_instance_type_offerings) | data source | | [aws_iam_policy_document.assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | | [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source | +| [aws_ssm_parameter.ami](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | +| [aws_subnets.efa](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/subnets) | data source | ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| | [ami\_id](#input\_ami\_id) | The AMI from which to launch the instance. If not supplied, EKS will use its own default image | `string` | `""` | no | -| [ami\_release\_version](#input\_ami\_release\_version) | AMI version of the EKS Node Group. Defaults to latest version for Kubernetes version | `string` | `null` | no | -| [ami\_type](#input\_ami\_type) | Type of Amazon Machine Image (AMI) associated with the EKS Node Group. 
Valid values are `AL2_x86_64`, `AL2_x86_64_GPU`, `AL2_ARM_64`, `CUSTOM`, `BOTTLEROCKET_ARM_64`, `BOTTLEROCKET_x86_64` | `string` | `null` | no | +| [ami\_release\_version](#input\_ami\_release\_version) | The AMI version. Defaults to latest AMI release version for the given Kubernetes version and AMI type | `string` | `null` | no | +| [ami\_type](#input\_ami\_type) | Type of Amazon Machine Image (AMI) associated with the EKS Node Group. See the [AWS documentation](https://docs.aws.amazon.com/eks/latest/APIReference/API_Nodegroup.html#AmazonEKS-Type-Nodegroup-amiType) for valid values | `string` | `null` | no | | [block\_device\_mappings](#input\_block\_device\_mappings) | Specify volumes to attach to the instance besides the volumes specified by the AMI | `any` | `{}` | no | -| [bootstrap\_extra\_args](#input\_bootstrap\_extra\_args) | Additional arguments passed to the bootstrap script. When `platform` = `bottlerocket`; these are additional [settings](https://github.com/bottlerocket-os/bottlerocket#settings) that are provided to the Bottlerocket user data | `string` | `""` | no | +| [bootstrap\_extra\_args](#input\_bootstrap\_extra\_args) | Additional arguments passed to the bootstrap script. When `ami_type` = `BOTTLEROCKET_*`; these are additional [settings](https://github.com/bottlerocket-os/bottlerocket#settings) that are provided to the Bottlerocket user data | `string` | `""` | no | | [capacity\_reservation\_specification](#input\_capacity\_reservation\_specification) | Targeting for EC2 capacity reservations | `any` | `{}` | no | | [capacity\_type](#input\_capacity\_type) | Type of capacity associated with the EKS Node Group. Valid values: `ON_DEMAND`, `SPOT` | `string` | `"ON_DEMAND"` | no | +| [cloudinit\_post\_nodeadm](#input\_cloudinit\_post\_nodeadm) | Array of cloud-init document parts that are created after the nodeadm document part |
list(object({
content = string
content_type = optional(string)
filename = optional(string)
merge_type = optional(string)
}))
| `[]` | no | +| [cloudinit\_pre\_nodeadm](#input\_cloudinit\_pre\_nodeadm) | Array of cloud-init document parts that are created before the nodeadm document part |
list(object({
content = string
content_type = optional(string)
filename = optional(string)
merge_type = optional(string)
}))
| `[]` | no | | [cluster\_auth\_base64](#input\_cluster\_auth\_base64) | Base64 encoded CA of associated EKS cluster | `string` | `""` | no | | [cluster\_endpoint](#input\_cluster\_endpoint) | Endpoint of associated EKS cluster | `string` | `""` | no | -| [cluster\_ip\_family](#input\_cluster\_ip\_family) | The IP family used to assign Kubernetes pod and service addresses. Valid values are `ipv4` (default) and `ipv6` | `string` | `null` | no | +| [cluster\_ip\_family](#input\_cluster\_ip\_family) | The IP family used to assign Kubernetes pod and service addresses. Valid values are `ipv4` (default) and `ipv6` | `string` | `"ipv4"` | no | | [cluster\_name](#input\_cluster\_name) | Name of associated EKS cluster | `string` | `null` | no | | [cluster\_primary\_security\_group\_id](#input\_cluster\_primary\_security\_group\_id) | The ID of the EKS cluster primary security group to associate with the instance(s). This is the security group that is automatically created by the EKS service | `string` | `null` | no | -| [cluster\_service\_ipv4\_cidr](#input\_cluster\_service\_ipv4\_cidr) | The CIDR block to assign Kubernetes service IP addresses from. If you don't specify a block, Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks | `string` | `null` | no | +| [cluster\_service\_cidr](#input\_cluster\_service\_cidr) | The CIDR block (IPv4 or IPv6) used by the cluster to assign Kubernetes service IP addresses. This is derived from the cluster itself | `string` | `""` | no | +| [cluster\_service\_ipv4\_cidr](#input\_cluster\_service\_ipv4\_cidr) | [Deprecated] The CIDR block to assign Kubernetes service IP addresses from. If you don't specify a block, Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks | `string` | `null` | no | | [cluster\_version](#input\_cluster\_version) | Kubernetes version. 
Defaults to EKS Cluster Kubernetes version | `string` | `null` | no | | [cpu\_options](#input\_cpu\_options) | The CPU options for the instance | `map(string)` | `{}` | no | | [create](#input\_create) | Determines whether to create EKS managed node group or not | `bool` | `true` | no | | [create\_iam\_role](#input\_create\_iam\_role) | Determines whether an IAM role is created or to use an existing IAM role | `bool` | `true` | no | | [create\_launch\_template](#input\_create\_launch\_template) | Determines whether to create a launch template or not. If set to `false`, EKS will use its own default launch template | `bool` | `true` | no | +| [create\_placement\_group](#input\_create\_placement\_group) | Determines whether a placement group is created & used by the nodegroup | `bool` | `false` | no | | [create\_schedule](#input\_create\_schedule) | Determines whether to create autoscaling group schedule or not | `bool` | `true` | no | | [credit\_specification](#input\_credit\_specification) | Customize the credit specification of the instance | `map(string)` | `{}` | no | | [desired\_size](#input\_desired\_size) | Desired number of instances/nodes | `number` | `1` | no | @@ -123,6 +132,7 @@ module "eks_managed_node_group" { | [elastic\_gpu\_specifications](#input\_elastic\_gpu\_specifications) | The elastic GPU to attach to the instance | `any` | `{}` | no | | [elastic\_inference\_accelerator](#input\_elastic\_inference\_accelerator) | Configuration block containing an Elastic Inference Accelerator to attach to the instance | `map(string)` | `{}` | no | | [enable\_bootstrap\_user\_data](#input\_enable\_bootstrap\_user\_data) | Determines whether the bootstrap configurations are populated within the user data template. 
Only valid when using a custom AMI via `ami_id` | `bool` | `false` | no | +| [enable\_efa\_support](#input\_enable\_efa\_support) | Determines whether to enable Elastic Fabric Adapter (EFA) support | `bool` | `false` | no | | [enable\_monitoring](#input\_enable\_monitoring) | Enables/disables detailed monitoring | `bool` | `true` | no | | [enclave\_options](#input\_enclave\_options) | Enable Nitro Enclaves on launched instances | `map(string)` | `{}` | no | | [force\_update\_version](#input\_force\_update\_version) | Force version update if existing pods are unable to be drained due to a pod disruption budget issue | `bool` | `null` | no | @@ -155,9 +165,10 @@ module "eks_managed_node_group" { | [name](#input\_name) | Name of the EKS managed node group | `string` | `""` | no | | [network\_interfaces](#input\_network\_interfaces) | Customize network interfaces to be attached at instance boot time | `list(any)` | `[]` | no | | [placement](#input\_placement) | The placement of the instance | `map(string)` | `{}` | no | -| [platform](#input\_platform) | Identifies if the OS platform is `bottlerocket` or `linux` based; `windows` is not supported | `string` | `"linux"` | no | -| [post\_bootstrap\_user\_data](#input\_post\_bootstrap\_user\_data) | User data that is appended to the user data script after of the EKS bootstrap script. Not used when `platform` = `bottlerocket` | `string` | `""` | no | -| [pre\_bootstrap\_user\_data](#input\_pre\_bootstrap\_user\_data) | User data that is injected into the user data script ahead of the EKS bootstrap script. Not used when `platform` = `bottlerocket` | `string` | `""` | no | +| [placement\_group\_strategy](#input\_placement\_group\_strategy) | The placement group strategy | `string` | `"cluster"` | no | +| [platform](#input\_platform) | [DEPRECATED - use `ami_type` instead. 
Will be removed in `v21.0`] Identifies the OS platform as `bottlerocket`, `linux` (AL2), `al2023`, or `windows` | `string` | `"linux"` | no | +| [post\_bootstrap\_user\_data](#input\_post\_bootstrap\_user\_data) | User data that is appended to the user data script after of the EKS bootstrap script. Not used when `ami_type` = `BOTTLEROCKET_*` | `string` | `""` | no | +| [pre\_bootstrap\_user\_data](#input\_pre\_bootstrap\_user\_data) | User data that is injected into the user data script ahead of the EKS bootstrap script. Not used when `ami_type` = `BOTTLEROCKET_*` | `string` | `""` | no | | [private\_dns\_name\_options](#input\_private\_dns\_name\_options) | The options for the instance hostname. The default values are inherited from the subnet | `map(string)` | `{}` | no | | [ram\_disk\_id](#input\_ram\_disk\_id) | The ID of the ram disk | `string` | `null` | no | | [remote\_access](#input\_remote\_access) | Configuration block with remote access settings. Only valid when `use_custom_launch_template` = `false` | `any` | `{}` | no | @@ -170,6 +181,7 @@ module "eks_managed_node_group" { | [update\_config](#input\_update\_config) | Configuration block of settings for max unavailable resources during node group updates | `map(string)` |
{
"max_unavailable_percentage": 33
}
| no | | [update\_launch\_template\_default\_version](#input\_update\_launch\_template\_default\_version) | Whether to update the launch templates default version on each update. Conflicts with `launch_template_default_version` | `bool` | `true` | no | | [use\_custom\_launch\_template](#input\_use\_custom\_launch\_template) | Determines whether to use a custom launch template or not. If set to `false`, EKS will use its own default launch template | `bool` | `true` | no | +| [use\_latest\_ami\_release\_version](#input\_use\_latest\_ami\_release\_version) | Determines whether to use the latest AMI release version for the given `ami_type` (except for `CUSTOM`). Note: `ami_type` and `cluster_version` must be supplied in order to enable this feature | `bool` | `false` | no | | [use\_name\_prefix](#input\_use\_name\_prefix) | Determines whether to use `name` as is or create a unique name beginning with the `name` as the prefix | `bool` | `true` | no | | [user\_data\_template\_path](#input\_user\_data\_template\_path) | Path to a local, custom user data template file to use when rendering user data | `string` | `""` | no | | [vpc\_security\_group\_ids](#input\_vpc\_security\_group\_ids) | A list of security group IDs to associate | `list(string)` | `[]` | no | @@ -193,4 +205,5 @@ module "eks_managed_node_group" { | [node\_group\_resources](#output\_node\_group\_resources) | List of objects containing information about underlying resources | | [node\_group\_status](#output\_node\_group\_status) | Status of the EKS Node Group | | [node\_group\_taints](#output\_node\_group\_taints) | List of objects containing information about taints applied to the node group | +| [platform](#output\_platform) | [DEPRECATED - Will be removed in `v21.0`] Identifies the OS platform as `bottlerocket`, `linux` (AL2), `al2023`, or `windows` | diff --git a/modules/eks-managed-node-group/main.tf b/modules/eks-managed-node-group/main.tf index 8ca23782dc..82fe075204 100644 --- 
a/modules/eks-managed-node-group/main.tf +++ b/modules/eks-managed-node-group/main.tf @@ -10,18 +10,49 @@ module "user_data" { create = var.create platform = var.platform + ami_type = var.ami_type - cluster_name = var.cluster_name - cluster_endpoint = var.cluster_endpoint - cluster_auth_base64 = var.cluster_auth_base64 - - cluster_service_ipv4_cidr = var.cluster_service_ipv4_cidr + cluster_name = var.cluster_name + cluster_endpoint = var.cluster_endpoint + cluster_auth_base64 = var.cluster_auth_base64 + cluster_ip_family = var.cluster_ip_family + cluster_service_cidr = try(coalesce(var.cluster_service_cidr, var.cluster_service_ipv4_cidr), "") enable_bootstrap_user_data = var.enable_bootstrap_user_data pre_bootstrap_user_data = var.pre_bootstrap_user_data post_bootstrap_user_data = var.post_bootstrap_user_data bootstrap_extra_args = var.bootstrap_extra_args user_data_template_path = var.user_data_template_path + + cloudinit_pre_nodeadm = var.cloudinit_pre_nodeadm + cloudinit_post_nodeadm = var.cloudinit_post_nodeadm +} + +################################################################################ +# EFA Support +################################################################################ + +data "aws_ec2_instance_type" "this" { + count = var.create && var.enable_efa_support ? 1 : 0 + + instance_type = local.efa_instance_type +} + +locals { + efa_instance_type = try(element(var.instance_types, 0), "") + num_network_cards = try(data.aws_ec2_instance_type.this[0].maximum_network_cards, 0) + + efa_network_interfaces = [ + for i in range(local.num_network_cards) : { + associate_public_ip_address = false + delete_on_termination = true + device_index = i == 0 ? 0 : 1 + network_card_index = i + interface_type = "efa" + } + ] + + network_interfaces = var.enable_efa_support ? 
local.efa_network_interfaces : var.network_interfaces } ################################################################################ @@ -31,6 +62,8 @@ module "user_data" { locals { launch_template_name = coalesce(var.launch_template_name, var.name) security_group_ids = compact(concat([var.cluster_primary_security_group_id], var.vpc_security_group_ids)) + + placement = var.create && (var.enable_efa_support || var.create_placement_group) ? { group_name = aws_placement_group.this[0].name } : var.placement } resource "aws_launch_template" "this" { @@ -179,7 +212,7 @@ resource "aws_launch_template" "this" { for_each = length(var.license_specifications) > 0 ? var.license_specifications : {} content { - license_configuration_arn = license_specifications.value.license_configuration_arn + license_configuration_arn = license_specification.value.license_configuration_arn } } @@ -215,7 +248,8 @@ resource "aws_launch_template" "this" { name_prefix = var.launch_template_use_name_prefix ? "${local.launch_template_name}-" : null dynamic "network_interfaces" { - for_each = var.network_interfaces + for_each = local.network_interfaces + content { associate_carrier_ip_address = try(network_interfaces.value.associate_carrier_ip_address, null) associate_public_ip_address = try(network_interfaces.value.associate_public_ip_address, null) @@ -243,14 +277,14 @@ resource "aws_launch_template" "this" { } dynamic "placement" { - for_each = length(var.placement) > 0 ? [var.placement] : [] + for_each = length(local.placement) > 0 ? 
[local.placement] : [] content { affinity = try(placement.value.affinity, null) - availability_zone = try(placement.value.availability_zone, null) - group_name = try(placement.value.group_name, null) - host_id = try(placement.value.host_id, null) - host_resource_group_arn = try(placement.value.host_resource_group_arn, null) + availability_zone = lookup(placement.value, "availability_zone", null) + group_name = lookup(placement.value, "group_name", null) + host_id = lookup(placement.value, "host_id", null) + host_resource_group_arn = lookup(placement.value, "host_resource_group_arn", null) partition_number = try(placement.value.partition_number, null) spread_domain = try(placement.value.spread_domain, null) tenancy = try(placement.value.tenancy, null) @@ -280,9 +314,12 @@ resource "aws_launch_template" "this" { update_default_version = var.update_launch_template_default_version user_data = module.user_data.user_data - vpc_security_group_ids = length(var.network_interfaces) > 0 ? [] : local.security_group_ids + vpc_security_group_ids = length(local.network_interfaces) > 0 ? [] : local.security_group_ids - tags = var.tags + tags = merge( + var.tags, + var.launch_template_tags, + ) # Prevent premature access of policies by pods that # require permissions on create/destroy that depend on nodes @@ -295,6 +332,45 @@ resource "aws_launch_template" "this" { } } +################################################################################ +# AMI SSM Parameter +################################################################################ + +locals { + # Just to ensure templating doesn't fail when values are not provided + ssm_cluster_version = var.cluster_version != null ? var.cluster_version : "" + ssm_ami_type = var.ami_type != null ? 
var.ami_type : "" + + # Map the AMI type to the respective SSM param path + ssm_ami_type_to_ssm_param = { + AL2_x86_64 = "/aws/service/eks/optimized-ami/${local.ssm_cluster_version}/amazon-linux-2/recommended/release_version" + AL2_x86_64_GPU = "/aws/service/eks/optimized-ami/${local.ssm_cluster_version}/amazon-linux-2-gpu/recommended/release_version" + AL2_ARM_64 = "/aws/service/eks/optimized-ami/${local.ssm_cluster_version}/amazon-linux-2-arm64/recommended/release_version" + CUSTOM = "NONE" + BOTTLEROCKET_ARM_64 = "/aws/service/bottlerocket/aws-k8s-${local.ssm_cluster_version}/arm64/latest/image_version" + BOTTLEROCKET_x86_64 = "/aws/service/bottlerocket/aws-k8s-${local.ssm_cluster_version}/x86_64/latest/image_version" + BOTTLEROCKET_ARM_64_NVIDIA = "/aws/service/bottlerocket/aws-k8s-${local.ssm_cluster_version}-nvidia/arm64/latest/image_version" + BOTTLEROCKET_x86_64_NVIDIA = "/aws/service/bottlerocket/aws-k8s-${local.ssm_cluster_version}-nvidia/x86_64/latest/image_version" + WINDOWS_CORE_2019_x86_64 = "/aws/service/ami-windows-latest/Windows_Server-2019-English-Core-EKS_Optimized-${local.ssm_cluster_version}" + WINDOWS_FULL_2019_x86_64 = "/aws/service/ami-windows-latest/Windows_Server-2019-English-Full-EKS_Optimized-${local.ssm_cluster_version}" + WINDOWS_CORE_2022_x86_64 = "/aws/service/ami-windows-latest/Windows_Server-2022-English-Core-EKS_Optimized-${local.ssm_cluster_version}" + WINDOWS_FULL_2022_x86_64 = "/aws/service/ami-windows-latest/Windows_Server-2022-English-Full-EKS_Optimized-${local.ssm_cluster_version}" + AL2023_x86_64_STANDARD = "/aws/service/eks/optimized-ami/${local.ssm_cluster_version}/amazon-linux-2023/x86_64/standard/recommended/release_version" + AL2023_ARM_64_STANDARD = "/aws/service/eks/optimized-ami/${local.ssm_cluster_version}/amazon-linux-2023/arm64/standard/recommended/release_version" + } + + # The Windows SSM params currently do not have a release version, so we have to get the full output JSON blob and parse out the release 
version + windows_latest_ami_release_version = var.create && var.use_latest_ami_release_version && startswith(local.ssm_ami_type, "WINDOWS") ? nonsensitive(jsondecode(data.aws_ssm_parameter.ami[0].value)["release_version"]) : null + # Based on the steps above, try to get an AMI release version - if not, `null` is returned + latest_ami_release_version = startswith(local.ssm_ami_type, "WINDOWS") ? local.windows_latest_ami_release_version : try(nonsensitive(data.aws_ssm_parameter.ami[0].value), null) +} + +data "aws_ssm_parameter" "ami" { + count = var.create && var.use_latest_ami_release_version ? 1 : 0 + + name = local.ssm_ami_type_to_ssm_param[var.ami_type] +} + ################################################################################ # Node Group ################################################################################ @@ -311,7 +387,7 @@ resource "aws_eks_node_group" "this" { # Required cluster_name = var.cluster_name node_role_arn = var.create_iam_role ? aws_iam_role.this[0].arn : var.iam_role_arn - subnet_ids = var.subnet_ids + subnet_ids = var.enable_efa_support ? data.aws_subnets.efa[0].ids : var.subnet_ids scaling_config { min_size = var.min_size @@ -325,7 +401,7 @@ resource "aws_eks_node_group" "this" { # https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-custom-ami ami_type = var.ami_id != "" ? null : var.ami_type - release_version = var.ami_id != "" ? null : var.ami_release_version + release_version = var.ami_id != "" ? null : var.use_latest_ami_release_version ? local.latest_ami_release_version : var.ami_release_version version = var.ami_id != "" ? 
null : var.cluster_version capacity_type = var.capacity_type @@ -395,13 +471,21 @@ resource "aws_eks_node_group" "this" { ################################################################################ locals { - iam_role_name = coalesce(var.iam_role_name, var.name) + create_iam_role = var.create && var.create_iam_role + + iam_role_name = coalesce(var.iam_role_name, "${var.name}-eks-node-group") iam_role_policy_prefix = "arn:${data.aws_partition.current.partition}:iam::aws:policy" - cni_policy = var.cluster_ip_family == "ipv6" ? "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:policy/AmazonEKS_CNI_IPv6_Policy" : "${local.iam_role_policy_prefix}/AmazonEKS_CNI_Policy" + + ipv4_cni_policy = { for k, v in { + AmazonEKS_CNI_Policy = "${local.iam_role_policy_prefix}/AmazonEKS_CNI_Policy" + } : k => v if var.iam_role_attach_cni_policy && var.cluster_ip_family == "ipv4" } + ipv6_cni_policy = { for k, v in { + AmazonEKS_CNI_IPv6_Policy = "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:policy/AmazonEKS_CNI_IPv6_Policy" + } : k => v if var.iam_role_attach_cni_policy && var.cluster_ip_family == "ipv6" } } data "aws_iam_policy_document" "assume_role_policy" { - count = var.create && var.create_iam_role ? 1 : 0 + count = local.create_iam_role ? 1 : 0 statement { sid = "EKSNodeAssumeRole" @@ -409,13 +493,13 @@ data "aws_iam_policy_document" "assume_role_policy" { principals { type = "Service" - identifiers = ["ec2.${data.aws_partition.current.dns_suffix}"] + identifiers = ["ec2.amazonaws.com"] } } } resource "aws_iam_role" "this" { - count = var.create && var.create_iam_role ? 1 : 0 + count = local.create_iam_role ? 1 : 0 name = var.iam_role_use_name_prefix ? null : local.iam_role_name name_prefix = var.iam_role_use_name_prefix ? 
"${local.iam_role_name}-" : null @@ -431,23 +515,76 @@ resource "aws_iam_role" "this" { # Policies attached ref https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_node_group resource "aws_iam_role_policy_attachment" "this" { - for_each = { for k, v in toset(compact([ - "${local.iam_role_policy_prefix}/AmazonEKSWorkerNodePolicy", - "${local.iam_role_policy_prefix}/AmazonEC2ContainerRegistryReadOnly", - var.iam_role_attach_cni_policy ? local.cni_policy : "", - ])) : k => v if var.create && var.create_iam_role } + for_each = { for k, v in merge( + { + AmazonEKSWorkerNodePolicy = "${local.iam_role_policy_prefix}/AmazonEKSWorkerNodePolicy" + AmazonEC2ContainerRegistryReadOnly = "${local.iam_role_policy_prefix}/AmazonEC2ContainerRegistryReadOnly" + }, + local.ipv4_cni_policy, + local.ipv6_cni_policy + ) : k => v if local.create_iam_role } policy_arn = each.value role = aws_iam_role.this[0].name } resource "aws_iam_role_policy_attachment" "additional" { - for_each = { for k, v in var.iam_role_additional_policies : k => v if var.create && var.create_iam_role } + for_each = { for k, v in var.iam_role_additional_policies : k => v if local.create_iam_role } policy_arn = each.value role = aws_iam_role.this[0].name } +################################################################################ +# Placement Group +################################################################################ + +resource "aws_placement_group" "this" { + count = var.create && (var.enable_efa_support || var.create_placement_group) ? 1 : 0 + + name = "${var.cluster_name}-${var.name}" + strategy = var.placement_group_strategy + + tags = var.tags +} + +################################################################################ +# Instance AZ Lookup + +# Instances usually used in placement groups w/ EFA are only available in +# select availability zones. 
These data sources will cross reference the availability +# zones supported by the instance type with the subnets provided to ensure only +# AZs/subnets that are supported are used. +################################################################################ + +# Find the availability zones supported by the instance type +data "aws_ec2_instance_type_offerings" "this" { + count = var.create && var.enable_efa_support ? 1 : 0 + + filter { + name = "instance-type" + values = [local.efa_instance_type] + } + + location_type = "availability-zone-id" +} + +# Reverse the lookup to find one of the subnets provided based on the availability +# availability zone ID of the queried instance type (supported) +data "aws_subnets" "efa" { + count = var.create && var.enable_efa_support ? 1 : 0 + + filter { + name = "subnet-id" + values = var.subnet_ids + } + + filter { + name = "availability-zone-id" + values = data.aws_ec2_instance_type_offerings.this[0].locations + } +} + ################################################################################ # Autoscaling Group Schedule ################################################################################ diff --git a/modules/eks-managed-node-group/migrations.tf b/modules/eks-managed-node-group/migrations.tf new file mode 100644 index 0000000000..5d51a7208a --- /dev/null +++ b/modules/eks-managed-node-group/migrations.tf @@ -0,0 +1,20 @@ +################################################################################ +# Migrations: v20.7 -> v20.8 +################################################################################ + +# Node IAM role policy attachment +# Commercial partition only - `moved` does now allow multiple moves to same target +moved { + from = aws_iam_role_policy_attachment.this["arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"] + to = aws_iam_role_policy_attachment.this["AmazonEKSWorkerNodePolicy"] +} + +moved { + from = 
aws_iam_role_policy_attachment.this["arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"] + to = aws_iam_role_policy_attachment.this["AmazonEC2ContainerRegistryReadOnly"] +} + +moved { + from = aws_iam_role_policy_attachment.this["arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"] + to = aws_iam_role_policy_attachment.this["AmazonEKS_CNI_Policy"] +} diff --git a/modules/eks-managed-node-group/outputs.tf b/modules/eks-managed-node-group/outputs.tf index 012cd46f4e..8cab6e2c13 100644 --- a/modules/eks-managed-node-group/outputs.tf +++ b/modules/eks-managed-node-group/outputs.tf @@ -88,3 +88,12 @@ output "iam_role_unique_id" { description = "Stable and unique string identifying the IAM role" value = try(aws_iam_role.this[0].unique_id, null) } + +################################################################################ +# Additional +################################################################################ + +output "platform" { + description = "[DEPRECATED - Will be removed in `v21.0`] Identifies the OS platform as `bottlerocket`, `linux` (AL2), `al2023`, or `windows`" + value = module.user_data.platform +} diff --git a/modules/eks-managed-node-group/variables.tf b/modules/eks-managed-node-group/variables.tf index 197cd28c95..344b346ad1 100644 --- a/modules/eks-managed-node-group/variables.tf +++ b/modules/eks-managed-node-group/variables.tf @@ -11,7 +11,7 @@ variable "tags" { } variable "platform" { - description = "Identifies if the OS platform is `bottlerocket` or `linux` based; `windows` is not supported" + description = "[DEPRECATED - use `ami_type` instead. Will be removed in `v21.0`] Identifies the OS platform as `bottlerocket`, `linux` (AL2), `al2023`, or `windows`" type = string default = "linux" } @@ -44,26 +44,33 @@ variable "cluster_auth_base64" { default = "" } +variable "cluster_service_cidr" { + description = "The CIDR block (IPv4 or IPv6) used by the cluster to assign Kubernetes service IP addresses. 
This is derived from the cluster itself" + type = string + default = "" +} + +# TODO - remove at next breaking change variable "cluster_service_ipv4_cidr" { - description = "The CIDR block to assign Kubernetes service IP addresses from. If you don't specify a block, Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks" + description = "[Deprecated] The CIDR block to assign Kubernetes service IP addresses from. If you don't specify a block, Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks" type = string default = null } variable "pre_bootstrap_user_data" { - description = "User data that is injected into the user data script ahead of the EKS bootstrap script. Not used when `platform` = `bottlerocket`" + description = "User data that is injected into the user data script ahead of the EKS bootstrap script. Not used when `ami_type` = `BOTTLEROCKET_*`" type = string default = "" } variable "post_bootstrap_user_data" { - description = "User data that is appended to the user data script after of the EKS bootstrap script. Not used when `platform` = `bottlerocket`" + description = "User data that is appended to the user data script after of the EKS bootstrap script. Not used when `ami_type` = `BOTTLEROCKET_*`" type = string default = "" } variable "bootstrap_extra_args" { - description = "Additional arguments passed to the bootstrap script. When `platform` = `bottlerocket`; these are additional [settings](https://github.com/bottlerocket-os/bottlerocket#settings) that are provided to the Bottlerocket user data" + description = "Additional arguments passed to the bootstrap script. 
When `ami_type` = `BOTTLEROCKET_*`; these are additional [settings](https://github.com/bottlerocket-os/bottlerocket#settings) that are provided to the Bottlerocket user data" type = string default = "" } @@ -74,6 +81,28 @@ variable "user_data_template_path" { default = "" } +variable "cloudinit_pre_nodeadm" { + description = "Array of cloud-init document parts that are created before the nodeadm document part" + type = list(object({ + content = string + content_type = optional(string) + filename = optional(string) + merge_type = optional(string) + })) + default = [] +} + +variable "cloudinit_post_nodeadm" { + description = "Array of cloud-init document parts that are created after the nodeadm document part" + type = list(object({ + content = string + content_type = optional(string) + filename = optional(string) + merge_type = optional(string) + })) + default = [] +} + ################################################################################ # Launch template ################################################################################ @@ -250,6 +279,12 @@ variable "enable_monitoring" { default = true } +variable "enable_efa_support" { + description = "Determines whether to enable Elastic Fabric Adapter (EFA) support" + type = bool + default = false +} + variable "network_interfaces" { description = "Customize network interfaces to be attached at instance boot time" type = list(any) @@ -262,6 +297,18 @@ variable "placement" { default = {} } +variable "create_placement_group" { + description = "Determines whether a placement group is created & used by the nodegroup" + type = bool + default = false +} + +variable "placement_group_strategy" { + description = "The placement group strategy" + type = string + default = "cluster" +} + variable "private_dns_name_options" { description = "The options for the instance hostname. 
The default values are inherited from the subnet" type = map(string) @@ -321,17 +368,23 @@ variable "use_name_prefix" { } variable "ami_type" { - description = "Type of Amazon Machine Image (AMI) associated with the EKS Node Group. Valid values are `AL2_x86_64`, `AL2_x86_64_GPU`, `AL2_ARM_64`, `CUSTOM`, `BOTTLEROCKET_ARM_64`, `BOTTLEROCKET_x86_64`" + description = "Type of Amazon Machine Image (AMI) associated with the EKS Node Group. See the [AWS documentation](https://docs.aws.amazon.com/eks/latest/APIReference/API_Nodegroup.html#AmazonEKS-Type-Nodegroup-amiType) for valid values" type = string default = null } variable "ami_release_version" { - description = "AMI version of the EKS Node Group. Defaults to latest version for Kubernetes version" + description = "The AMI version. Defaults to latest AMI release version for the given Kubernetes version and AMI type" type = string default = null } +variable "use_latest_ami_release_version" { + description = "Determines whether to use the latest AMI release version for the given `ami_type` (except for `CUSTOM`). Note: `ami_type` and `cluster_version` must be supplied in order to enable this feature" + type = bool + default = false +} + variable "capacity_type" { description = "Type of capacity associated with the EKS Node Group. Valid values: `ON_DEMAND`, `SPOT`" type = string @@ -413,7 +466,7 @@ variable "create_iam_role" { variable "cluster_ip_family" { description = "The IP family used to assign Kubernetes pod and service addresses. 
Valid values are `ipv4` (default) and `ipv6`" type = string - default = null + default = "ipv4" } variable "iam_role_arn" { diff --git a/modules/eks-managed-node-group/versions.tf b/modules/eks-managed-node-group/versions.tf index 55eff62b09..6f83215f50 100644 --- a/modules/eks-managed-node-group/versions.tf +++ b/modules/eks-managed-node-group/versions.tf @@ -1,10 +1,10 @@ terraform { - required_version = ">= 1.0" + required_version = ">= 1.3.2" required_providers { aws = { source = "hashicorp/aws" - version = ">= 4.47" + version = ">= 5.40" } } } diff --git a/modules/fargate-profile/README.md b/modules/fargate-profile/README.md index cc0bab2a5b..072c2f2e33 100644 --- a/modules/fargate-profile/README.md +++ b/modules/fargate-profile/README.md @@ -28,14 +28,14 @@ module "fargate_profile" { | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | >= 4.47 | +| [terraform](#requirement\_terraform) | >= 1.3.2 | +| [aws](#requirement\_aws) | >= 5.40 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 4.47 | +| [aws](#provider\_aws) | >= 5.40 | ## Modules @@ -52,12 +52,13 @@ No modules. | [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | | [aws_iam_policy_document.assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | | [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source | +| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source | ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| [cluster\_ip\_family](#input\_cluster\_ip\_family) | The IP family used to assign Kubernetes pod and service addresses. 
Valid values are `ipv4` (default) and `ipv6` | `string` | `null` | no | +| [cluster\_ip\_family](#input\_cluster\_ip\_family) | The IP family used to assign Kubernetes pod and service addresses. Valid values are `ipv4` (default) and `ipv6` | `string` | `"ipv4"` | no | | [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster | `string` | `null` | no | | [create](#input\_create) | Determines whether to create Fargate profile or not | `bool` | `true` | no | | [create\_iam\_role](#input\_create\_iam\_role) | Determines whether an IAM role is created or to use an existing IAM role | `bool` | `true` | no | diff --git a/modules/fargate-profile/main.tf b/modules/fargate-profile/main.tf index de9dd2d754..1e2cf60024 100644 --- a/modules/fargate-profile/main.tf +++ b/modules/fargate-profile/main.tf @@ -1,10 +1,19 @@ data "aws_partition" "current" {} data "aws_caller_identity" "current" {} +data "aws_region" "current" {} locals { + create_iam_role = var.create && var.create_iam_role + iam_role_name = coalesce(var.iam_role_name, var.name, "fargate-profile") iam_role_policy_prefix = "arn:${data.aws_partition.current.partition}:iam::aws:policy" - cni_policy = var.cluster_ip_family == "ipv6" ? 
"arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:policy/AmazonEKS_CNI_IPv6_Policy" : "${local.iam_role_policy_prefix}/AmazonEKS_CNI_Policy" + + ipv4_cni_policy = { for k, v in { + AmazonEKS_CNI_Policy = "${local.iam_role_policy_prefix}/AmazonEKS_CNI_Policy" + } : k => v if var.iam_role_attach_cni_policy && var.cluster_ip_family == "ipv4" } + ipv6_cni_policy = { for k, v in { + AmazonEKS_CNI_IPv6_Policy = "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:policy/AmazonEKS_CNI_IPv6_Policy" + } : k => v if var.iam_role_attach_cni_policy && var.cluster_ip_family == "ipv6" } } ################################################################################ @@ -12,7 +21,7 @@ locals { ################################################################################ data "aws_iam_policy_document" "assume_role_policy" { - count = var.create && var.create_iam_role ? 1 : 0 + count = local.create_iam_role ? 1 : 0 statement { effect = "Allow" @@ -22,11 +31,20 @@ data "aws_iam_policy_document" "assume_role_policy" { type = "Service" identifiers = ["eks-fargate-pods.amazonaws.com"] } + + condition { + test = "ArnLike" + variable = "aws:SourceArn" + + values = [ + "arn:${data.aws_partition.current.partition}:eks:${data.aws_region.current.name}:${data.aws_caller_identity.current.account_id}:fargateprofile/${var.cluster_name}/*", + ] + } } } resource "aws_iam_role" "this" { - count = var.create && var.create_iam_role ? 1 : 0 + count = local.create_iam_role ? 1 : 0 name = var.iam_role_use_name_prefix ? null : local.iam_role_name name_prefix = var.iam_role_use_name_prefix ? "${local.iam_role_name}-" : null @@ -41,17 +59,20 @@ resource "aws_iam_role" "this" { } resource "aws_iam_role_policy_attachment" "this" { - for_each = { for k, v in toset(compact([ - "${local.iam_role_policy_prefix}/AmazonEKSFargatePodExecutionRolePolicy", - var.iam_role_attach_cni_policy ? 
local.cni_policy : "", - ])) : k => v if var.create && var.create_iam_role } + for_each = { for k, v in merge( + { + AmazonEKSFargatePodExecutionRolePolicy = "${local.iam_role_policy_prefix}/AmazonEKSFargatePodExecutionRolePolicy" + }, + local.ipv4_cni_policy, + local.ipv6_cni_policy + ) : k => v if local.create_iam_role } policy_arn = each.value role = aws_iam_role.this[0].name } resource "aws_iam_role_policy_attachment" "additional" { - for_each = { for k, v in var.iam_role_additional_policies : k => v if var.create && var.create_iam_role } + for_each = { for k, v in var.iam_role_additional_policies : k => v if local.create_iam_role } policy_arn = each.value role = aws_iam_role.this[0].name diff --git a/modules/fargate-profile/migrations.tf b/modules/fargate-profile/migrations.tf new file mode 100644 index 0000000000..02494f6893 --- /dev/null +++ b/modules/fargate-profile/migrations.tf @@ -0,0 +1,15 @@ +################################################################################ +# Migrations: v20.8 -> v20.9 +################################################################################ + +# Node IAM role policy attachment +# Commercial partition only - `moved` does not allow multiple moves to same target +moved { + from = aws_iam_role_policy_attachment.this["arn:aws:iam::aws:policy/AmazonEKSFargatePodExecutionRolePolicy"] + to = aws_iam_role_policy_attachment.this["AmazonEKSFargatePodExecutionRolePolicy"] +} + +moved { + from = aws_iam_role_policy_attachment.this["arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"] + to = aws_iam_role_policy_attachment.this["AmazonEKS_CNI_Policy"] +} diff --git a/modules/fargate-profile/variables.tf b/modules/fargate-profile/variables.tf index e22279dc6b..75816b0af8 100644 --- a/modules/fargate-profile/variables.tf +++ b/modules/fargate-profile/variables.tf @@ -23,7 +23,7 @@ variable "create_iam_role" { variable "cluster_ip_family" { description = "The IP family used to assign Kubernetes pod and service addresses. 
Valid values are `ipv4` (default) and `ipv6`" type = string - default = null + default = "ipv4" } variable "iam_role_arn" { diff --git a/modules/fargate-profile/versions.tf b/modules/fargate-profile/versions.tf index 55eff62b09..6f83215f50 100644 --- a/modules/fargate-profile/versions.tf +++ b/modules/fargate-profile/versions.tf @@ -1,10 +1,10 @@ terraform { - required_version = ">= 1.0" + required_version = ">= 1.3.2" required_providers { aws = { source = "hashicorp/aws" - version = ">= 4.47" + version = ">= 5.40" } } } diff --git a/modules/karpenter/README.md b/modules/karpenter/README.md index 8e9b6dce99..ec819ed256 100644 --- a/modules/karpenter/README.md +++ b/modules/karpenter/README.md @@ -7,30 +7,16 @@ Configuration in this directory creates the AWS resources required by Karpenter ### All Resources (Default) In the following example, the Karpenter module will create: -- An IAM role for service accounts (IRSA) with a narrowly scoped IAM policy for the Karpenter controller to utilize -- An IAM role and instance profile for the nodes created by Karpenter to utilize - - Note: This IAM role ARN will need to be added to the `aws-auth` configmap for nodes to join the cluster successfully -- An SQS queue and Eventbridge event rules for Karpenter to utilize for spot termination handling, capacity rebalancing, etc. - -This setup is great for running Karpenter on EKS Fargate: +- An IAM role for use with Pod Identity and a scoped IAM policy for the Karpenter controller +- A Pod Identity association to grant Karpenter controller access provided by the IAM Role +- A Node IAM role that Karpenter will use to create an Instance Profile for the nodes to receive IAM permissions +- An access entry for the Node IAM role to allow nodes to join the cluster +- SQS queue and EventBridge event rules for Karpenter to utilize for spot termination handling, capacity re-balancing, etc. 
```hcl module "eks" { - source = "terraform-aws-modules/eks" + source = "terraform-aws-modules/eks/aws" - # Shown just for connection between cluster and Karpenter sub-module below - manage_aws_auth_configmap = true - aws_auth_roles = [ - # We need to add in the Karpenter node IAM role for nodes launched by Karpenter - { - rolearn = module.karpenter.role_arn - username = "system:node:{{EC2PrivateDNSName}}" - groups = [ - "system:bootstrappers", - "system:nodes", - ] - }, - ] ... } @@ -39,8 +25,10 @@ module "karpenter" { cluster_name = module.eks.cluster_name - irsa_oidc_provider_arn = module.eks.oidc_provider_arn - irsa_namespace_service_accounts = ["karpenter:karpenter"] + # Attach additional IAM policies to the Karpenter node IAM role + node_iam_role_additional_policies = { + AmazonSSMManagedInstanceCore = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore" + } tags = { Environment = "dev" @@ -49,15 +37,13 @@ module "karpenter" { } ``` -### External Node IAM Role (Default) +### Re-Use Existing Node IAM Role In the following example, the Karpenter module will create: -- An IAM role for service accounts (IRSA) with a narrowly scoped IAM policy for the Karpenter controller to utilize -- An IAM instance profile for the nodes created by Karpenter to utilize - - Note: This setup will utilize the existing IAM role created by the EKS Managed Node group which means the role is already populated in the `aws-auth` configmap and no further updates are required. -- An SQS queue and Eventbridge event rules for Karpenter to utilize for spot termination handling, capacity rebalancing, etc. +- An IAM role for use with Pod Identity and a scoped IAM policy for the Karpenter controller +- SQS queue and EventBridge event rules for Karpenter to utilize for spot termination handling, capacity re-balancing, etc. 
-In this scenario, Karpenter would run atop the EKS Managed Node group and scale out nodes as needed from there: +In this scenario, Karpenter will re-use an existing Node IAM role from the EKS managed nodegroup which already has the necessary access entry permissions: ```hcl module "eks" { @@ -81,11 +67,11 @@ module "karpenter" { cluster_name = module.eks.cluster_name - irsa_oidc_provider_arn = module.eks.oidc_provider_arn - irsa_namespace_service_accounts = ["karpenter:karpenter"] + create_node_iam_role = false + node_iam_role_arn = module.eks.eks_managed_node_groups["initial"].iam_role_arn - create_iam_role = false - iam_role_arn = module.eks.eks_managed_node_groups["initial"].iam_role_arn + # Since the nodegroup role will already have an access entry + create_access_entry = false tags = { Environment = "dev" @@ -99,14 +85,14 @@ module "karpenter" { | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | >= 4.47 | +| [terraform](#requirement\_terraform) | >= 1.3.2 | +| [aws](#requirement\_aws) | >= 5.40 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 4.47 | +| [aws](#provider\_aws) | >= 5.40 | ## Modules @@ -118,65 +104,75 @@ No modules. 
|------|------| | [aws_cloudwatch_event_rule.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_rule) | resource | | [aws_cloudwatch_event_target.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_target) | resource | +| [aws_eks_access_entry.node](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_access_entry) | resource | +| [aws_eks_pod_identity_association.karpenter](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_pod_identity_association) | resource | | [aws_iam_instance_profile.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_instance_profile) | resource | -| [aws_iam_policy.irsa](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | -| [aws_iam_role.irsa](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | -| [aws_iam_role.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | -| [aws_iam_role_policy_attachment.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | -| [aws_iam_role_policy_attachment.irsa](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | -| [aws_iam_role_policy_attachment.irsa_additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | -| [aws_iam_role_policy_attachment.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | +| [aws_iam_policy.controller](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | +| 
[aws_iam_role.controller](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | +| [aws_iam_role.node](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | +| [aws_iam_role_policy_attachment.controller](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | +| [aws_iam_role_policy_attachment.controller_additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | +| [aws_iam_role_policy_attachment.node](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | +| [aws_iam_role_policy_attachment.node_additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | | [aws_sqs_queue.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/sqs_queue) | resource | | [aws_sqs_queue_policy.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/sqs_queue_policy) | resource | | [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | -| [aws_iam_policy_document.assume_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | -| [aws_iam_policy_document.irsa](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | -| [aws_iam_policy_document.irsa_assume_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_iam_policy_document.controller](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| 
[aws_iam_policy_document.controller_assume_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | +| [aws_iam_policy_document.node_assume_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | | [aws_iam_policy_document.queue](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | | [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source | +| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source | ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| [cluster\_ip\_family](#input\_cluster\_ip\_family) | The IP family used to assign Kubernetes pod and service addresses. Valid values are `ipv4` (default) and `ipv6` | `string` | `null` | no | +| [access\_entry\_type](#input\_access\_entry\_type) | Type of the access entry. `EC2_LINUX`, `FARGATE_LINUX`, or `EC2_WINDOWS`; defaults to `EC2_LINUX` | `string` | `"EC2_LINUX"` | no | +| [ami\_id\_ssm\_parameter\_arns](#input\_ami\_id\_ssm\_parameter\_arns) | List of SSM Parameter ARNs that Karpenter controller is allowed read access (for retrieving AMI IDs) | `list(string)` | `[]` | no | +| [cluster\_ip\_family](#input\_cluster\_ip\_family) | The IP family used to assign Kubernetes pod and service addresses. Valid values are `ipv4` (default) and `ipv6`. Note: If `ipv6` is specified, the `AmazonEKS_CNI_IPv6_Policy` must exist in the account. 
This policy is created by the EKS module with `create_cni_ipv6_iam_policy = true` | `string` | `"ipv4"` | no | | [cluster\_name](#input\_cluster\_name) | The name of the EKS cluster | `string` | `""` | no | -| [create](#input\_create) | Determines whether to create EKS managed node group or not | `bool` | `true` | no | -| [create\_iam\_role](#input\_create\_iam\_role) | Determines whether an IAM role is created or to use an existing IAM role | `bool` | `true` | no | -| [create\_instance\_profile](#input\_create\_instance\_profile) | Whether to create an IAM instance profile | `bool` | `true` | no | -| [create\_irsa](#input\_create\_irsa) | Determines whether an IAM role for service accounts is created | `bool` | `true` | no | +| [create](#input\_create) | Controls if resources should be created (affects nearly all resources) | `bool` | `true` | no | +| [create\_access\_entry](#input\_create\_access\_entry) | Determines whether an access entry is created for the IAM role used by the node IAM role | `bool` | `true` | no | +| [create\_iam\_role](#input\_create\_iam\_role) | Determines whether an IAM role is created | `bool` | `true` | no | +| [create\_instance\_profile](#input\_create\_instance\_profile) | Whether to create an IAM instance profile | `bool` | `false` | no | +| [create\_node\_iam\_role](#input\_create\_node\_iam\_role) | Determines whether an IAM role is created or to use an existing IAM role | `bool` | `true` | no | +| [create\_pod\_identity\_association](#input\_create\_pod\_identity\_association) | Determines whether to create pod identity association | `bool` | `false` | no | +| [enable\_irsa](#input\_enable\_irsa) | Determines whether to enable support for IAM role for service accounts | `bool` | `false` | no | +| [enable\_pod\_identity](#input\_enable\_pod\_identity) | Determines whether to enable support for EKS pod identity | `bool` | `true` | no | | [enable\_spot\_termination](#input\_enable\_spot\_termination) | Determines whether to enable 
native spot termination handling | `bool` | `true` | no | -| [iam\_role\_additional\_policies](#input\_iam\_role\_additional\_policies) | Additional policies to be added to the IAM role | `list(string)` | `[]` | no | -| [iam\_role\_arn](#input\_iam\_role\_arn) | Existing IAM role ARN for the IAM instance profile. Required if `create_iam_role` is set to `false` | `string` | `null` | no | -| [iam\_role\_attach\_cni\_policy](#input\_iam\_role\_attach\_cni\_policy) | Whether to attach the `AmazonEKS_CNI_Policy`/`AmazonEKS_CNI_IPv6_Policy` IAM policy to the IAM IAM role. WARNING: If set `false` the permissions must be assigned to the `aws-node` DaemonSet pods via another method or nodes will not be able to join the cluster | `bool` | `true` | no | -| [iam\_role\_description](#input\_iam\_role\_description) | Description of the role | `string` | `null` | no | +| [iam\_policy\_description](#input\_iam\_policy\_description) | IAM policy description | `string` | `"Karpenter controller IAM policy"` | no | +| [iam\_policy\_name](#input\_iam\_policy\_name) | Name of the IAM policy | `string` | `"KarpenterController"` | no | +| [iam\_policy\_path](#input\_iam\_policy\_path) | Path of the IAM policy | `string` | `"/"` | no | +| [iam\_policy\_use\_name\_prefix](#input\_iam\_policy\_use\_name\_prefix) | Determines whether the name of the IAM policy (`iam_policy_name`) is used as a prefix | `bool` | `true` | no | +| [iam\_role\_description](#input\_iam\_role\_description) | IAM role description | `string` | `"Karpenter controller IAM role"` | no | | [iam\_role\_max\_session\_duration](#input\_iam\_role\_max\_session\_duration) | Maximum API session duration in seconds between 3600 and 43200 | `number` | `null` | no | -| [iam\_role\_name](#input\_iam\_role\_name) | Name to use on IAM role created | `string` | `null` | no | -| [iam\_role\_path](#input\_iam\_role\_path) | IAM role path | `string` | `"/"` | no | -| 
[iam\_role\_permissions\_boundary](#input\_iam\_role\_permissions\_boundary) | ARN of the policy that is used to set the permissions boundary for the IAM role | `string` | `null` | no | -| [iam\_role\_tags](#input\_iam\_role\_tags) | A map of additional tags to add to the IAM role created | `map(string)` | `{}` | no | -| [iam\_role\_use\_name\_prefix](#input\_iam\_role\_use\_name\_prefix) | Determines whether the IAM role name (`iam_role_name`) is used as a prefix | `bool` | `true` | no | +| [iam\_role\_name](#input\_iam\_role\_name) | Name of the IAM role | `string` | `"KarpenterController"` | no | +| [iam\_role\_path](#input\_iam\_role\_path) | Path of the IAM role | `string` | `"/"` | no | +| [iam\_role\_permissions\_boundary\_arn](#input\_iam\_role\_permissions\_boundary\_arn) | Permissions boundary ARN to use for the IAM role | `string` | `null` | no | +| [iam\_role\_policies](#input\_iam\_role\_policies) | Policies to attach to the IAM role in `{'static_name' = 'policy_arn'}` format | `map(string)` | `{}` | no | +| [iam\_role\_tags](#input\_iam\_role\_tags) | A map of additional tags to add the the IAM role | `map(any)` | `{}` | no | +| [iam\_role\_use\_name\_prefix](#input\_iam\_role\_use\_name\_prefix) | Determines whether the name of the IAM role (`iam_role_name`) is used as a prefix | `bool` | `true` | no | | [irsa\_assume\_role\_condition\_test](#input\_irsa\_assume\_role\_condition\_test) | Name of the [IAM condition operator](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_condition_operators.html) to evaluate when assuming the role | `string` | `"StringEquals"` | no | -| [irsa\_description](#input\_irsa\_description) | IAM role for service accounts description | `string` | `"Karpenter IAM role for service account"` | no | -| [irsa\_max\_session\_duration](#input\_irsa\_max\_session\_duration) | Maximum API session duration in seconds between 3600 and 43200 | `number` | `null` | no | -| [irsa\_name](#input\_irsa\_name) | 
Name of IAM role for service accounts | `string` | `null` | no | | [irsa\_namespace\_service\_accounts](#input\_irsa\_namespace\_service\_accounts) | List of `namespace:serviceaccount`pairs to use in trust policy for IAM role for service accounts | `list(string)` |
[
"karpenter:karpenter"
]
| no | | [irsa\_oidc\_provider\_arn](#input\_irsa\_oidc\_provider\_arn) | OIDC provider arn used in trust policy for IAM role for service accounts | `string` | `""` | no | -| [irsa\_path](#input\_irsa\_path) | Path of IAM role for service accounts | `string` | `"/"` | no | -| [irsa\_permissions\_boundary\_arn](#input\_irsa\_permissions\_boundary\_arn) | Permissions boundary ARN to use for IAM role for service accounts | `string` | `null` | no | -| [irsa\_policy\_name](#input\_irsa\_policy\_name) | Name of IAM policy for service accounts | `string` | `null` | no | -| [irsa\_ssm\_parameter\_arns](#input\_irsa\_ssm\_parameter\_arns) | List of SSM Parameter ARNs that contain AMI IDs launched by Karpenter | `list(string)` |
[
"arn:aws:ssm:*:*:parameter/aws/service/*"
]
| no | -| [irsa\_subnet\_account\_id](#input\_irsa\_subnet\_account\_id) | Account ID of where the subnets Karpenter will utilize resides. Used when subnets are shared from another account | `string` | `""` | no | -| [irsa\_tag\_key](#input\_irsa\_tag\_key) | Tag key (`{key = value}`) applied to resources launched by Karpenter through the Karpenter provisioner | `string` | `"karpenter.sh/discovery"` | no | -| [irsa\_tag\_values](#input\_irsa\_tag\_values) | Tag values (`{key = value}`) applied to resources launched by Karpenter through the Karpenter provisioner. Defaults to cluster name when not set. | `list(string)` | `[]` | no | -| [irsa\_tags](#input\_irsa\_tags) | A map of additional tags to add the the IAM role for service accounts | `map(any)` | `{}` | no | -| [irsa\_use\_name\_prefix](#input\_irsa\_use\_name\_prefix) | Determines whether the IAM role for service accounts name (`irsa_name`) is used as a prefix | `bool` | `true` | no | -| [policies](#input\_policies) | Policies to attach to the IAM role in `{'static_name' = 'policy_arn'}` format | `map(string)` | `{}` | no | +| [namespace](#input\_namespace) | Namespace to associate with the Karpenter Pod Identity | `string` | `"kube-system"` | no | +| [node\_iam\_role\_additional\_policies](#input\_node\_iam\_role\_additional\_policies) | Additional policies to be added to the IAM role | `map(string)` | `{}` | no | +| [node\_iam\_role\_arn](#input\_node\_iam\_role\_arn) | Existing IAM role ARN for the IAM instance profile. Required if `create_iam_role` is set to `false` | `string` | `null` | no | +| [node\_iam\_role\_attach\_cni\_policy](#input\_node\_iam\_role\_attach\_cni\_policy) | Whether to attach the `AmazonEKS_CNI_Policy`/`AmazonEKS_CNI_IPv6_Policy` IAM policy to the IAM IAM role. 
WARNING: If set `false` the permissions must be assigned to the `aws-node` DaemonSet pods via another method or nodes will not be able to join the cluster | `bool` | `true` | no | +| [node\_iam\_role\_description](#input\_node\_iam\_role\_description) | Description of the role | `string` | `null` | no | +| [node\_iam\_role\_max\_session\_duration](#input\_node\_iam\_role\_max\_session\_duration) | Maximum API session duration in seconds between 3600 and 43200 | `number` | `null` | no | +| [node\_iam\_role\_name](#input\_node\_iam\_role\_name) | Name to use on IAM role created | `string` | `null` | no | +| [node\_iam\_role\_path](#input\_node\_iam\_role\_path) | IAM role path | `string` | `"/"` | no | +| [node\_iam\_role\_permissions\_boundary](#input\_node\_iam\_role\_permissions\_boundary) | ARN of the policy that is used to set the permissions boundary for the IAM role | `string` | `null` | no | +| [node\_iam\_role\_tags](#input\_node\_iam\_role\_tags) | A map of additional tags to add to the IAM role created | `map(string)` | `{}` | no | +| [node\_iam\_role\_use\_name\_prefix](#input\_node\_iam\_role\_use\_name\_prefix) | Determines whether the IAM role name (`iam_role_name`) is used as a prefix | `bool` | `true` | no | | [queue\_kms\_data\_key\_reuse\_period\_seconds](#input\_queue\_kms\_data\_key\_reuse\_period\_seconds) | The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again | `number` | `null` | no | | [queue\_kms\_master\_key\_id](#input\_queue\_kms\_master\_key\_id) | The ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK | `string` | `null` | no | | [queue\_managed\_sse\_enabled](#input\_queue\_managed\_sse\_enabled) | Boolean to enable server-side encryption (SSE) of message content with SQS-owned encryption keys | `bool` | `true` | no | | [queue\_name](#input\_queue\_name) | Name of the SQS queue | `string` | `null` | no | | 
[rule\_name\_prefix](#input\_rule\_name\_prefix) | Prefix used for all event bridge rules | `string` | `"Karpenter"` | no | +| [service\_account](#input\_service\_account) | Service account to associate with the Karpenter Pod Identity | `string` | `"karpenter"` | no | | [tags](#input\_tags) | A map of tags to add to all resources | `map(string)` | `{}` | no | ## Outputs @@ -184,17 +180,20 @@ No modules. | Name | Description | |------|-------------| | [event\_rules](#output\_event\_rules) | Map of the event rules created and their attributes | +| [iam\_role\_arn](#output\_iam\_role\_arn) | The Amazon Resource Name (ARN) specifying the controller IAM role | +| [iam\_role\_name](#output\_iam\_role\_name) | The name of the controller IAM role | +| [iam\_role\_unique\_id](#output\_iam\_role\_unique\_id) | Stable and unique string identifying the controller IAM role | | [instance\_profile\_arn](#output\_instance\_profile\_arn) | ARN assigned by AWS to the instance profile | | [instance\_profile\_id](#output\_instance\_profile\_id) | Instance profile's ID | | [instance\_profile\_name](#output\_instance\_profile\_name) | Name of the instance profile | | [instance\_profile\_unique](#output\_instance\_profile\_unique) | Stable and unique string identifying the IAM instance profile | -| [irsa\_arn](#output\_irsa\_arn) | The Amazon Resource Name (ARN) specifying the IAM role for service accounts | -| [irsa\_name](#output\_irsa\_name) | The name of the IAM role for service accounts | -| [irsa\_unique\_id](#output\_irsa\_unique\_id) | Stable and unique string identifying the IAM role for service accounts | +| [namespace](#output\_namespace) | Namespace associated with the Karpenter Pod Identity | +| [node\_access\_entry\_arn](#output\_node\_access\_entry\_arn) | Amazon Resource Name (ARN) of the node Access Entry | +| [node\_iam\_role\_arn](#output\_node\_iam\_role\_arn) | The Amazon Resource Name (ARN) specifying the node IAM role | +| 
[node\_iam\_role\_name](#output\_node\_iam\_role\_name) | The name of the node IAM role | +| [node\_iam\_role\_unique\_id](#output\_node\_iam\_role\_unique\_id) | Stable and unique string identifying the node IAM role | | [queue\_arn](#output\_queue\_arn) | The ARN of the SQS queue | | [queue\_name](#output\_queue\_name) | The name of the created Amazon SQS queue | | [queue\_url](#output\_queue\_url) | The URL for the created Amazon SQS queue | -| [role\_arn](#output\_role\_arn) | The Amazon Resource Name (ARN) specifying the IAM role | -| [role\_name](#output\_role\_name) | The name of the IAM role | -| [role\_unique\_id](#output\_role\_unique\_id) | Stable and unique string identifying the IAM role | +| [service\_account](#output\_service\_account) | Service Account associated with the Karpenter Pod Identity | diff --git a/modules/karpenter/main.tf b/modules/karpenter/main.tf index 19399ce99d..4b31eb5da7 100644 --- a/modules/karpenter/main.tf +++ b/modules/karpenter/main.tf @@ -1,192 +1,431 @@ +data "aws_region" "current" {} data "aws_partition" "current" {} data "aws_caller_identity" "current" {} locals { account_id = data.aws_caller_identity.current.account_id partition = data.aws_partition.current.partition - dns_suffix = data.aws_partition.current.dns_suffix + region = data.aws_region.current.name } ################################################################################ -# IAM Role for Service Account (IRSA) -# This is used by the Karpenter controller +# Karpenter controller IAM Role ################################################################################ locals { - create_irsa = var.create && var.create_irsa - irsa_name = coalesce(var.irsa_name, "KarpenterIRSA-${var.cluster_name}") - irsa_policy_name = coalesce(var.irsa_policy_name, local.irsa_name) - + create_iam_role = var.create && var.create_iam_role irsa_oidc_provider_url = replace(var.irsa_oidc_provider_arn, "/^(.*provider/)/", "") } -data "aws_iam_policy_document" "irsa_assume_role" { 
- count = local.create_irsa ? 1 : 0 +data "aws_iam_policy_document" "controller_assume_role" { + count = local.create_iam_role ? 1 : 0 - statement { - effect = "Allow" - actions = ["sts:AssumeRoleWithWebIdentity"] + # Pod Identity + dynamic "statement" { + for_each = var.enable_pod_identity ? [1] : [] - principals { - type = "Federated" - identifiers = [var.irsa_oidc_provider_arn] - } + content { + actions = [ + "sts:AssumeRole", + "sts:TagSession", + ] - condition { - test = var.irsa_assume_role_condition_test - variable = "${local.irsa_oidc_provider_url}:sub" - values = [for sa in var.irsa_namespace_service_accounts : "system:serviceaccount:${sa}"] + principals { + type = "Service" + identifiers = ["pods.eks.amazonaws.com"] + } } + } - # https://aws.amazon.com/premiumsupport/knowledge-center/eks-troubleshoot-oidc-and-irsa/?nc1=h_ls - condition { - test = var.irsa_assume_role_condition_test - variable = "${local.irsa_oidc_provider_url}:aud" - values = ["sts.amazonaws.com"] + # IAM Roles for Service Accounts (IRSA) + dynamic "statement" { + for_each = var.enable_irsa ? [1] : [] + + content { + actions = ["sts:AssumeRoleWithWebIdentity"] + + principals { + type = "Federated" + identifiers = [var.irsa_oidc_provider_arn] + } + + condition { + test = var.irsa_assume_role_condition_test + variable = "${local.irsa_oidc_provider_url}:sub" + values = [for sa in var.irsa_namespace_service_accounts : "system:serviceaccount:${sa}"] + } + + # https://aws.amazon.com/premiumsupport/knowledge-center/eks-troubleshoot-oidc-and-irsa/?nc1=h_ls + condition { + test = var.irsa_assume_role_condition_test + variable = "${local.irsa_oidc_provider_url}:aud" + values = ["sts.amazonaws.com"] + } } } } -resource "aws_iam_role" "irsa" { - count = local.create_irsa ? 1 : 0 +resource "aws_iam_role" "controller" { + count = local.create_iam_role ? 1 : 0 - name = var.irsa_use_name_prefix ? null : local.irsa_name - name_prefix = var.irsa_use_name_prefix ? 
"${local.irsa_name}-" : null - path = var.irsa_path - description = var.irsa_description + name = var.iam_role_use_name_prefix ? null : var.iam_role_name + name_prefix = var.iam_role_use_name_prefix ? "${var.iam_role_name}-" : null + path = var.iam_role_path + description = var.iam_role_description - assume_role_policy = data.aws_iam_policy_document.irsa_assume_role[0].json - max_session_duration = var.irsa_max_session_duration - permissions_boundary = var.irsa_permissions_boundary_arn + assume_role_policy = data.aws_iam_policy_document.controller_assume_role[0].json + max_session_duration = var.iam_role_max_session_duration + permissions_boundary = var.iam_role_permissions_boundary_arn force_detach_policies = true - tags = merge(var.tags, var.irsa_tags) + tags = merge(var.tags, var.iam_role_tags) } -locals { - irsa_tag_values = coalescelist(var.irsa_tag_values, [var.cluster_name]) -} +data "aws_iam_policy_document" "controller" { + count = local.create_iam_role ? 1 : 0 -data "aws_iam_policy_document" "irsa" { - count = local.create_irsa ? 
1 : 0 + statement { + sid = "AllowScopedEC2InstanceActions" + resources = [ + "arn:${local.partition}:ec2:*::image/*", + "arn:${local.partition}:ec2:*::snapshot/*", + "arn:${local.partition}:ec2:*:*:spot-instances-request/*", + "arn:${local.partition}:ec2:*:*:security-group/*", + "arn:${local.partition}:ec2:*:*:subnet/*", + "arn:${local.partition}:ec2:*:*:launch-template/*", + ] + + actions = [ + "ec2:RunInstances", + "ec2:CreateFleet" + ] + } statement { + sid = "AllowScopedEC2InstanceActionsWithTags" + resources = [ + "arn:${local.partition}:ec2:*:*:fleet/*", + "arn:${local.partition}:ec2:*:*:instance/*", + "arn:${local.partition}:ec2:*:*:volume/*", + "arn:${local.partition}:ec2:*:*:network-interface/*", + "arn:${local.partition}:ec2:*:*:launch-template/*", + "arn:${local.partition}:ec2:*:*:spot-instances-request/*", + ] actions = [ - "ec2:CreateLaunchTemplate", + "ec2:RunInstances", "ec2:CreateFleet", - "ec2:CreateTags", - "ec2:DescribeLaunchTemplates", - "ec2:DescribeImages", - "ec2:DescribeInstances", - "ec2:DescribeSecurityGroups", - "ec2:DescribeSubnets", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstanceTypeOfferings", - "ec2:DescribeAvailabilityZones", - "ec2:DescribeSpotPriceHistory", - "pricing:GetProducts", + "ec2:CreateLaunchTemplate" ] - resources = ["*"] + condition { + test = "StringEquals" + variable = "aws:RequestTag/kubernetes.io/cluster/${var.cluster_name}" + values = ["owned"] + } + + condition { + test = "StringLike" + variable = "aws:RequestTag/karpenter.sh/nodepool" + values = ["*"] + } } statement { - actions = [ - "ec2:TerminateInstances", - "ec2:DeleteLaunchTemplate", + sid = "AllowScopedResourceCreationTagging" + resources = [ + "arn:${local.partition}:ec2:*:*:fleet/*", + "arn:${local.partition}:ec2:*:*:instance/*", + "arn:${local.partition}:ec2:*:*:volume/*", + "arn:${local.partition}:ec2:*:*:network-interface/*", + "arn:${local.partition}:ec2:*:*:launch-template/*", + "arn:${local.partition}:ec2:*:*:spot-instances-request/*", ] + 
actions = ["ec2:CreateTags"] - resources = ["*"] + condition { + test = "StringEquals" + variable = "aws:RequestTag/kubernetes.io/cluster/${var.cluster_name}" + values = ["owned"] + } condition { test = "StringEquals" - variable = "ec2:ResourceTag/${var.irsa_tag_key}" - values = local.irsa_tag_values + variable = "ec2:CreateAction" + values = [ + "RunInstances", + "CreateFleet", + "CreateLaunchTemplate", + ] + } + + condition { + test = "StringLike" + variable = "aws:RequestTag/karpenter.sh/nodepool" + values = ["*"] } } statement { - actions = ["ec2:RunInstances"] - resources = [ - "arn:${local.partition}:ec2:*:${local.account_id}:launch-template/*", - ] + sid = "AllowScopedResourceTagging" + resources = ["arn:${local.partition}:ec2:*:*:instance/*"] + actions = ["ec2:CreateTags"] condition { test = "StringEquals" - variable = "ec2:ResourceTag/${var.irsa_tag_key}" - values = local.irsa_tag_values + variable = "aws:ResourceTag/kubernetes.io/cluster/${var.cluster_name}" + values = ["owned"] + } + + condition { + test = "StringLike" + variable = "aws:ResourceTag/karpenter.sh/nodepool" + values = ["*"] + } + + condition { + test = "ForAllValues:StringEquals" + variable = "aws:TagKeys" + values = [ + "karpenter.sh/nodeclaim", + "Name", + ] } } statement { - actions = ["ec2:RunInstances"] + sid = "AllowScopedDeletion" resources = [ - "arn:${local.partition}:ec2:*::image/*", - "arn:${local.partition}:ec2:*::snapshot/*", - "arn:${local.partition}:ec2:*:${local.account_id}:instance/*", - "arn:${local.partition}:ec2:*:${local.account_id}:spot-instances-request/*", - "arn:${local.partition}:ec2:*:${local.account_id}:security-group/*", - "arn:${local.partition}:ec2:*:${local.account_id}:volume/*", - "arn:${local.partition}:ec2:*:${local.account_id}:network-interface/*", - "arn:${local.partition}:ec2:*:${coalesce(var.irsa_subnet_account_id, local.account_id)}:subnet/*", + "arn:${local.partition}:ec2:*:*:instance/*", + "arn:${local.partition}:ec2:*:*:launch-template/*" ] + + 
actions = [ + "ec2:TerminateInstances", + "ec2:DeleteLaunchTemplate" + ] + + condition { + test = "StringEquals" + variable = "aws:ResourceTag/kubernetes.io/cluster/${var.cluster_name}" + values = ["owned"] + } + + condition { + test = "StringLike" + variable = "aws:ResourceTag/karpenter.sh/nodepool" + values = ["*"] + } } statement { - actions = ["ssm:GetParameter"] - resources = var.irsa_ssm_parameter_arns + sid = "AllowRegionalReadActions" + resources = ["*"] + actions = [ + "ec2:DescribeAvailabilityZones", + "ec2:DescribeImages", + "ec2:DescribeInstances", + "ec2:DescribeInstanceTypeOfferings", + "ec2:DescribeInstanceTypes", + "ec2:DescribeLaunchTemplates", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSpotPriceHistory", + "ec2:DescribeSubnets" + ] + + condition { + test = "StringEquals" + variable = "aws:RequestedRegion" + values = [local.region] + } } statement { - actions = ["eks:DescribeCluster"] - resources = ["arn:${local.partition}:eks:*:${local.account_id}:cluster/${var.cluster_name}"] + sid = "AllowSSMReadActions" + resources = coalescelist(var.ami_id_ssm_parameter_arns, ["arn:${local.partition}:ssm:${local.region}::parameter/aws/service/*"]) + actions = ["ssm:GetParameter"] } statement { - actions = ["iam:PassRole"] - resources = [var.create_iam_role ? aws_iam_role.this[0].arn : var.iam_role_arn] + sid = "AllowPricingReadActions" + resources = ["*"] + actions = ["pricing:GetProducts"] } dynamic "statement" { for_each = local.enable_spot_termination ? [1] : [] content { + sid = "AllowInterruptionQueueActions" + resources = [try(aws_sqs_queue.this[0].arn, null)] actions = [ "sqs:DeleteMessage", - "sqs:GetQueueUrl", "sqs:GetQueueAttributes", - "sqs:ReceiveMessage", + "sqs:GetQueueUrl", + "sqs:ReceiveMessage" ] - resources = [aws_sqs_queue.this[0].arn] } } + + statement { + sid = "AllowPassingInstanceRole" + resources = var.create_node_iam_role ? 
[aws_iam_role.node[0].arn] : [var.node_iam_role_arn] + actions = ["iam:PassRole"] + + condition { + test = "StringEquals" + variable = "iam:PassedToService" + values = ["ec2.amazonaws.com"] + } + } + + statement { + sid = "AllowScopedInstanceProfileCreationActions" + resources = ["*"] + actions = ["iam:CreateInstanceProfile"] + + condition { + test = "StringEquals" + variable = "aws:RequestTag/kubernetes.io/cluster/${var.cluster_name}" + values = ["owned"] + } + + condition { + test = "StringEquals" + variable = "aws:RequestTag/topology.kubernetes.io/region" + values = [local.region] + } + + condition { + test = "StringLike" + variable = "aws:RequestTag/karpenter.k8s.aws/ec2nodeclass" + values = ["*"] + } + } + + statement { + sid = "AllowScopedInstanceProfileTagActions" + resources = ["*"] + actions = ["iam:TagInstanceProfile"] + + condition { + test = "StringEquals" + variable = "aws:ResourceTag/kubernetes.io/cluster/${var.cluster_name}" + values = ["owned"] + } + + condition { + test = "StringEquals" + variable = "aws:ResourceTag/topology.kubernetes.io/region" + values = [local.region] + } + + condition { + test = "StringEquals" + variable = "aws:RequestTag/kubernetes.io/cluster/${var.cluster_name}" + values = ["owned"] + } + + condition { + test = "StringEquals" + variable = "aws:ResourceTag/topology.kubernetes.io/region" + values = [local.region] + } + + condition { + test = "StringLike" + variable = "aws:ResourceTag/karpenter.k8s.aws/ec2nodeclass" + values = ["*"] + } + + condition { + test = "StringLike" + variable = "aws:RequestTag/karpenter.k8s.aws/ec2nodeclass" + values = ["*"] + } + } + + statement { + sid = "AllowScopedInstanceProfileActions" + resources = ["*"] + actions = [ + "iam:AddRoleToInstanceProfile", + "iam:RemoveRoleFromInstanceProfile", + "iam:DeleteInstanceProfile" + ] + + condition { + test = "StringEquals" + variable = "aws:ResourceTag/kubernetes.io/cluster/${var.cluster_name}" + values = ["owned"] + } + + condition { + test = 
"StringEquals" + variable = "aws:ResourceTag/topology.kubernetes.io/region" + values = [local.region] + } + + condition { + test = "StringLike" + variable = "aws:ResourceTag/karpenter.k8s.aws/ec2nodeclass" + values = ["*"] + } + } + + statement { + sid = "AllowInstanceProfileReadActions" + resources = ["*"] + actions = ["iam:GetInstanceProfile"] + } + + statement { + sid = "AllowAPIServerEndpointDiscovery" + resources = ["arn:${local.partition}:eks:${local.region}:${local.account_id}:cluster/${var.cluster_name}"] + actions = ["eks:DescribeCluster"] + } } -resource "aws_iam_policy" "irsa" { - count = local.create_irsa ? 1 : 0 +resource "aws_iam_policy" "controller" { + count = local.create_iam_role ? 1 : 0 - name_prefix = "${local.irsa_policy_name}-" - path = var.irsa_path - description = var.irsa_description - policy = data.aws_iam_policy_document.irsa[0].json + name = var.iam_policy_use_name_prefix ? null : var.iam_policy_name + name_prefix = var.iam_policy_use_name_prefix ? "${var.iam_policy_name}-" : null + path = var.iam_policy_path + description = var.iam_policy_description + policy = data.aws_iam_policy_document.controller[0].json tags = var.tags } -resource "aws_iam_role_policy_attachment" "irsa" { - count = local.create_irsa ? 1 : 0 +resource "aws_iam_role_policy_attachment" "controller" { + count = local.create_iam_role ? 
1 : 0 - role = aws_iam_role.irsa[0].name - policy_arn = aws_iam_policy.irsa[0].arn + role = aws_iam_role.controller[0].name + policy_arn = aws_iam_policy.controller[0].arn } -resource "aws_iam_role_policy_attachment" "irsa_additional" { - for_each = { for k, v in var.policies : k => v if local.create_irsa } +resource "aws_iam_role_policy_attachment" "controller_additional" { + for_each = { for k, v in var.iam_role_policies : k => v if local.create_iam_role } - role = aws_iam_role.irsa[0].name + role = aws_iam_role.controller[0].name policy_arn = each.value } +################################################################################ +# Pod Identity Association +################################################################################ + +resource "aws_eks_pod_identity_association" "karpenter" { + count = local.create_iam_role && var.enable_pod_identity && var.create_pod_identity_association ? 1 : 0 + + cluster_name = var.cluster_name + namespace = var.namespace + service_account = var.service_account + role_arn = aws_iam_role.controller[0].arn + + tags = var.tags +} + ################################################################################ # Node Termination Queue ################################################################################ @@ -220,11 +459,10 @@ data "aws_iam_policy_document" "queue" { principals { type = "Service" identifiers = [ - "events.${local.dns_suffix}", - "sqs.${local.dns_suffix}", + "events.amazonaws.com", + "sqs.amazonaws.com", ] } - } } @@ -249,7 +487,7 @@ locals { detail-type = ["AWS Health Event"] } } - spot_interupt = { + spot_interrupt = { name = "SpotInterrupt" description = "Karpenter interrupt - EC2 spot instance interruption warning" event_pattern = { @@ -303,15 +541,21 @@ resource "aws_cloudwatch_event_target" "this" { ################################################################################ locals { - create_iam_role = var.create && var.create_iam_role + create_node_iam_role = var.create && 
var.create_node_iam_role + + node_iam_role_name = coalesce(var.node_iam_role_name, "Karpenter-${var.cluster_name}") + node_iam_role_policy_prefix = "arn:${local.partition}:iam::aws:policy" - iam_role_name = coalesce(var.iam_role_name, "Karpenter-${var.cluster_name}") - iam_role_policy_prefix = "arn:${local.partition}:iam::aws:policy" - cni_policy = var.cluster_ip_family == "ipv6" ? "${local.iam_role_policy_prefix}/AmazonEKS_CNI_IPv6_Policy" : "${local.iam_role_policy_prefix}/AmazonEKS_CNI_Policy" + ipv4_cni_policy = { for k, v in { + AmazonEKS_CNI_Policy = "${local.node_iam_role_policy_prefix}/AmazonEKS_CNI_Policy" + } : k => v if var.node_iam_role_attach_cni_policy && var.cluster_ip_family == "ipv4" } + ipv6_cni_policy = { for k, v in { + AmazonEKS_CNI_IPv6_Policy = "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:policy/AmazonEKS_CNI_IPv6_Policy" + } : k => v if var.node_iam_role_attach_cni_policy && var.cluster_ip_family == "ipv6" } } -data "aws_iam_policy_document" "assume_role" { - count = local.create_iam_role ? 1 : 0 +data "aws_iam_policy_document" "node_assume_role" { + count = local.create_node_iam_role ? 1 : 0 statement { sid = "EKSNodeAssumeRole" @@ -319,62 +563,207 @@ data "aws_iam_policy_document" "assume_role" { principals { type = "Service" - identifiers = ["ec2.${local.dns_suffix}"] + identifiers = ["ec2.amazonaws.com"] } } } -resource "aws_iam_role" "this" { - count = local.create_iam_role ? 1 : 0 +resource "aws_iam_role" "node" { + count = local.create_node_iam_role ? 1 : 0 - name = var.iam_role_use_name_prefix ? null : local.iam_role_name - name_prefix = var.iam_role_use_name_prefix ? "${local.iam_role_name}-" : null - path = var.iam_role_path - description = var.iam_role_description + name = var.node_iam_role_use_name_prefix ? null : local.node_iam_role_name + name_prefix = var.node_iam_role_use_name_prefix ? 
"${local.node_iam_role_name}-" : null + path = var.node_iam_role_path + description = var.node_iam_role_description - assume_role_policy = data.aws_iam_policy_document.assume_role[0].json - max_session_duration = var.iam_role_max_session_duration - permissions_boundary = var.iam_role_permissions_boundary + assume_role_policy = data.aws_iam_policy_document.node_assume_role[0].json + max_session_duration = var.node_iam_role_max_session_duration + permissions_boundary = var.node_iam_role_permissions_boundary force_detach_policies = true - tags = merge(var.tags, var.iam_role_tags) + tags = merge(var.tags, var.node_iam_role_tags) } # Policies attached ref https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_node_group -resource "aws_iam_role_policy_attachment" "this" { - for_each = { for k, v in toset(compact([ - "${local.iam_role_policy_prefix}/AmazonEKSWorkerNodePolicy", - "${local.iam_role_policy_prefix}/AmazonEC2ContainerRegistryReadOnly", - var.iam_role_attach_cni_policy ? 
local.cni_policy : "", - ])) : k => v if local.create_iam_role } +resource "aws_iam_role_policy_attachment" "node" { + for_each = { for k, v in merge( + { + AmazonEKSWorkerNodePolicy = "${local.node_iam_role_policy_prefix}/AmazonEKSWorkerNodePolicy" + AmazonEC2ContainerRegistryReadOnly = "${local.node_iam_role_policy_prefix}/AmazonEC2ContainerRegistryReadOnly" + }, + local.ipv4_cni_policy, + local.ipv6_cni_policy + ) : k => v if local.create_node_iam_role } policy_arn = each.value - role = aws_iam_role.this[0].name + role = aws_iam_role.node[0].name } -resource "aws_iam_role_policy_attachment" "additional" { - for_each = { for k, v in var.iam_role_additional_policies : k => v if local.create_iam_role } +resource "aws_iam_role_policy_attachment" "node_additional" { + for_each = { for k, v in var.node_iam_role_additional_policies : k => v if local.create_node_iam_role } policy_arn = each.value - role = aws_iam_role.this[0].name + role = aws_iam_role.node[0].name +} + +################################################################################ +# Access Entry +################################################################################ + +resource "aws_eks_access_entry" "node" { + count = var.create && var.create_access_entry ? 1 : 0 + + cluster_name = var.cluster_name + principal_arn = var.create_node_iam_role ? aws_iam_role.node[0].arn : var.node_iam_role_arn + type = var.access_entry_type + + tags = var.tags + + depends_on = [ + # If we try to add this too quickly, it fails. So .... 
we wait
+    aws_sqs_queue_policy.this,
+  ]
 }
 
 ################################################################################
 # Node IAM Instance Profile
 # This is used by the nodes launched by Karpenter
+# Starting with Karpenter 0.32 this is no longer required as Karpenter will
+# create the Instance Profile
 ################################################################################
 
 locals {
-  external_role_name = try(replace(var.iam_role_arn, "/^(.*role/)/", ""), null)
+  external_role_name = try(replace(var.node_iam_role_arn, "/^(.*role/)/", ""), null)
 }
 
 resource "aws_iam_instance_profile" "this" {
   count = var.create && var.create_instance_profile ? 1 : 0
 
-  name        = var.iam_role_use_name_prefix ? null : local.iam_role_name
-  name_prefix = var.iam_role_use_name_prefix ? "${local.iam_role_name}-" : null
-  path        = var.iam_role_path
-  role        = var.create_iam_role ? aws_iam_role.this[0].name : local.external_role_name
+  name        = var.node_iam_role_use_name_prefix ? null : local.node_iam_role_name
+  name_prefix = var.node_iam_role_use_name_prefix ? "${local.node_iam_role_name}-" : null
+  path        = var.node_iam_role_path
+  role        = var.create_node_iam_role ? aws_iam_role.node[0].name : local.external_role_name
+
+  tags = merge(var.tags, var.node_iam_role_tags)
+}
+
+################################################################################
+# Create a new IAM policy and attach it to the role for old Karpenter
+#
+################################################################################
+locals {
+
+  irsa_name        = coalesce(var.iam_role_name, "KarpenterIRSA-${var.cluster_name}")
+  irsa_policy_name = coalesce(var.iam_policy_name, local.irsa_name)
 
-  tags = merge(var.tags, var.iam_role_tags)
 }
+
+data "aws_iam_policy_document" "irsa1" {
+  count = var.enable_irsa ?
1 : 0 + + statement { + actions = [ + "ec2:CreateLaunchTemplate", + "ec2:CreateFleet", + "ec2:CreateTags", + "ec2:DescribeLaunchTemplates", + "ec2:DescribeImages", + "ec2:DescribeInstances", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeInstanceTypes", + "ec2:DescribeInstanceTypeOfferings", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeSpotPriceHistory", + "pricing:GetProducts", + ] + + resources = ["*"] + } + + statement { + actions = [ + "ec2:TerminateInstances", + "ec2:DeleteLaunchTemplate", + ] + + resources = ["*"] + + condition { + test = "StringEquals" + variable = "ec2:ResourceTag/karpenter.sh/discovery" + values = ["${var.cluster_name}"] + } + } + + statement { + actions = ["ec2:RunInstances"] + resources = [ + "arn:${local.partition}:ec2:*:${local.account_id}:launch-template/*", + ] + + condition { + test = "StringEquals" + variable = "ec2:ResourceTag/karpenter.sh/discovery" + values = ["${var.cluster_name}"] + } + } + + statement { + actions = ["ec2:RunInstances"] + resources = [ + "arn:${local.partition}:ec2:*::image/*", + "arn:${local.partition}:ec2:*::snapshot/*", + "arn:${local.partition}:ec2:*:${local.account_id}:instance/*", + "arn:${local.partition}:ec2:*:${local.account_id}:spot-instances-request/*", + "arn:${local.partition}:ec2:*:${local.account_id}:security-group/*", + "arn:${local.partition}:ec2:*:${local.account_id}:volume/*", + "arn:${local.partition}:ec2:*:${local.account_id}:network-interface/*", + "arn:${local.partition}:ec2:*:${local.account_id}:subnet/*", + ] + } + + statement { + actions = ["ssm:GetParameter"] + resources = ["arn:aws:ssm:*:*:parameter/aws/service/*"] + } + + statement { + actions = ["eks:DescribeCluster"] + resources = ["arn:${local.partition}:eks:*:${local.account_id}:cluster/${var.cluster_name}"] + } + + statement { + actions = ["iam:PassRole"] + resources = [var.enable_irsa ? 
aws_iam_role.node[0].arn : var.node_iam_role_arn] + } + + dynamic "statement" { + for_each = local.enable_spot_termination ? [1] : [] + + content { + actions = [ + "sqs:DeleteMessage", + "sqs:GetQueueUrl", + "sqs:GetQueueAttributes", + "sqs:ReceiveMessage", + ] + resources = [aws_sqs_queue.this[0].arn] + } + } +} + +resource "aws_iam_policy" "irsa1" { + count = var.enable_irsa ? 1 : 0 + + name_prefix = "${local.irsa_policy_name}-" + policy = data.aws_iam_policy_document.irsa1[0].json + + +} +resource "aws_iam_role_policy_attachment" "irsa1" { + count = var.enable_irsa ? 1 : 0 + + role = aws_iam_role.controller[0].name + policy_arn = aws_iam_policy.irsa1[0].arn +} \ No newline at end of file diff --git a/modules/karpenter/migrations.tf b/modules/karpenter/migrations.tf new file mode 100644 index 0000000000..b40040f330 --- /dev/null +++ b/modules/karpenter/migrations.tf @@ -0,0 +1,77 @@ +################################################################################ +# Migrations: v19.21 -> v20.0 +################################################################################ + +# Node IAM role +moved { + from = aws_iam_role.this + to = aws_iam_role.node +} + +moved { + from = aws_iam_policy.this + to = aws_iam_policy.node +} + +moved { + from = aws_iam_role_policy_attachment.this + to = aws_iam_role_policy_attachment.node +} + +moved { + from = aws_iam_role_policy_attachment.additional + to = aws_iam_role_policy_attachment.node_additional +} + +# Controller IAM role +moved { + from = aws_iam_role.irsa + to = aws_iam_role.controller +} + +moved { + from = aws_iam_policy.irsa + to = aws_iam_policy.controller +} + +moved { + from = aws_iam_role_policy_attachment.irsa + to = aws_iam_role_policy_attachment.controller +} + +moved { + from = aws_iam_role_policy_attachment.irsa_additional + to = aws_iam_role_policy_attachment.controller_additional +} + +# Spelling correction +moved { + from = aws_cloudwatch_event_target.this["spot_interupt"] + to = 
aws_cloudwatch_event_target.this["spot_interrupt"]
+}
+
+moved {
+  from = aws_cloudwatch_event_rule.this["spot_interupt"]
+  to   = aws_cloudwatch_event_rule.this["spot_interrupt"]
+}
+
+################################################################################
+# Migrations: v20.7 -> v20.8
+################################################################################
+
+# Node IAM role policy attachment
+# Commercial partition only - `moved` does not allow multiple moves to same target
+moved {
+  from = aws_iam_role_policy_attachment.node["arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"]
+  to   = aws_iam_role_policy_attachment.node["AmazonEKSWorkerNodePolicy"]
+}
+
+moved {
+  from = aws_iam_role_policy_attachment.node["arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"]
+  to   = aws_iam_role_policy_attachment.node["AmazonEC2ContainerRegistryReadOnly"]
+}
+
+moved {
+  from = aws_iam_role_policy_attachment.node["arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"]
+  to   = aws_iam_role_policy_attachment.node["AmazonEKS_CNI_Policy"]
+}
diff --git a/modules/karpenter/outputs.tf b/modules/karpenter/outputs.tf
index 947de39bfd..a71d47242d 100644
--- a/modules/karpenter/outputs.tf
+++ b/modules/karpenter/outputs.tf
@@ -1,20 +1,20 @@
 ################################################################################
-# IAM Role for Service Account (IRSA)
+# Karpenter controller IAM Role
 ################################################################################
 
-output "irsa_name" {
-  description = "The name of the IAM role for service accounts"
-  value       = try(aws_iam_role.irsa[0].name, null)
+output "iam_role_name" {
+  description = "The name of the controller IAM role"
+  value       = try(aws_iam_role.controller[0].name, null)
 }
 
-output "irsa_arn" {
-  description = "The Amazon Resource Name (ARN) specifying the IAM role for service accounts"
-  value       = try(aws_iam_role.irsa[0].arn, null)
+output "iam_role_arn" {
+  description = "The Amazon Resource Name (ARN) specifying
the controller IAM role" + value = try(aws_iam_role.controller[0].arn, null) } -output "irsa_unique_id" { - description = "Stable and unique string identifying the IAM role for service accounts" - value = try(aws_iam_role.irsa[0].unique_id, null) +output "iam_role_unique_id" { + description = "Stable and unique string identifying the controller IAM role" + value = try(aws_iam_role.controller[0].unique_id, null) } ################################################################################ @@ -49,19 +49,28 @@ output "event_rules" { # Node IAM Role ################################################################################ -output "role_name" { - description = "The name of the IAM role" - value = try(aws_iam_role.this[0].name, null) +output "node_iam_role_name" { + description = "The name of the node IAM role" + value = try(aws_iam_role.node[0].name, null) } -output "role_arn" { - description = "The Amazon Resource Name (ARN) specifying the IAM role" - value = try(aws_iam_role.this[0].arn, var.iam_role_arn) +output "node_iam_role_arn" { + description = "The Amazon Resource Name (ARN) specifying the node IAM role" + value = try(aws_iam_role.node[0].arn, var.node_iam_role_arn) } -output "role_unique_id" { - description = "Stable and unique string identifying the IAM role" - value = try(aws_iam_role.this[0].unique_id, null) +output "node_iam_role_unique_id" { + description = "Stable and unique string identifying the node IAM role" + value = try(aws_iam_role.node[0].unique_id, null) +} + +################################################################################ +# Access Entry +################################################################################ + +output "node_access_entry_arn" { + description = "Amazon Resource Name (ARN) of the node Access Entry" + value = try(aws_eks_access_entry.node[0].access_entry_arn, null) } ################################################################################ @@ -87,3 +96,17 @@ output 
"instance_profile_unique" { description = "Stable and unique string identifying the IAM instance profile" value = try(aws_iam_instance_profile.this[0].unique_id, null) } + +################################################################################ +# Pod Identity +################################################################################ + +output "namespace" { + description = "Namespace associated with the Karpenter Pod Identity" + value = var.namespace +} + +output "service_account" { + description = "Service Account associated with the Karpenter Pod Identity" + value = var.service_account +} diff --git a/modules/karpenter/variables.tf b/modules/karpenter/variables.tf index 95a5a1df93..8d4977a587 100644 --- a/modules/karpenter/variables.tf +++ b/modules/karpenter/variables.tf @@ -1,5 +1,5 @@ variable "create" { - description = "Determines whether to create EKS managed node group or not" + description = "Controls if resources should be created (affects nearly all resources)" type = bool default = true } @@ -17,92 +17,107 @@ variable "cluster_name" { } ################################################################################ -# IAM Role for Service Account (IRSA) +# Karpenter controller IAM Role ################################################################################ -variable "create_irsa" { - description = "Determines whether an IAM role for service accounts is created" +variable "create_iam_role" { + description = "Determines whether an IAM role is created" type = bool default = true } -variable "irsa_name" { - description = "Name of IAM role for service accounts" - type = string - default = null -} - -variable "irsa_policy_name" { - description = "Name of IAM policy for service accounts" +variable "iam_role_name" { + description = "Name of the IAM role" type = string - default = null + default = "KarpenterController" } -variable "irsa_use_name_prefix" { - description = "Determines whether the IAM role for service accounts name 
(`irsa_name`) is used as a prefix" +variable "iam_role_use_name_prefix" { + description = "Determines whether the name of the IAM role (`iam_role_name`) is used as a prefix" type = bool default = true } -variable "irsa_path" { - description = "Path of IAM role for service accounts" +variable "iam_role_path" { + description = "Path of the IAM role" type = string default = "/" } -variable "irsa_description" { - description = "IAM role for service accounts description" +variable "iam_role_description" { + description = "IAM role description" type = string - default = "Karpenter IAM role for service account" + default = "Karpenter controller IAM role" } -variable "irsa_max_session_duration" { +variable "iam_role_max_session_duration" { description = "Maximum API session duration in seconds between 3600 and 43200" type = number default = null } -variable "irsa_permissions_boundary_arn" { - description = "Permissions boundary ARN to use for IAM role for service accounts" +variable "iam_role_permissions_boundary_arn" { + description = "Permissions boundary ARN to use for the IAM role" type = string default = null } -variable "irsa_tags" { - description = "A map of additional tags to add the the IAM role for service accounts" +variable "iam_role_tags" { + description = "A map of additional tags to add to the IAM role" type = map(any) default = {} } -variable "policies" { - description = "Policies to attach to the IAM role in `{'static_name' = 'policy_arn'}` format" - type = map(string) - default = {} +variable "iam_policy_name" { + description = "Name of the IAM policy" + type = string + default = "KarpenterController" +} + +variable "iam_policy_use_name_prefix" { + description = "Determines whether the name of the IAM policy (`iam_policy_name`) is used as a prefix" + type = bool + default = true +} + +variable "iam_policy_path" { + description = "Path of the IAM policy" + type = string + default = "/" } -variable "irsa_tag_key" { - description = "Tag key (`{key =
value}`) applied to resources launched by Karpenter through the Karpenter provisioner" +variable "iam_policy_description" { + description = "IAM policy description" type = string - default = "karpenter.sh/discovery" + default = "Karpenter controller IAM policy" } -variable "irsa_tag_values" { - description = "Tag values (`{key = value}`) applied to resources launched by Karpenter through the Karpenter provisioner. Defaults to cluster name when not set." +variable "iam_role_policies" { + description = "Policies to attach to the IAM role in `{'static_name' = 'policy_arn'}` format" + type = map(string) + default = {} +} + +variable "ami_id_ssm_parameter_arns" { + description = "List of SSM Parameter ARNs that Karpenter controller is allowed read access (for retrieving AMI IDs)" type = list(string) default = [] } -variable "irsa_ssm_parameter_arns" { - description = "List of SSM Parameter ARNs that contain AMI IDs launched by Karpenter" - type = list(string) - # https://github.com/aws/karpenter/blob/ed9473a9863ca949b61b9846c8b9f33f35b86dbd/pkg/cloudprovider/aws/ami.go#L105-L123 - default = ["arn:aws:ssm:*:*:parameter/aws/service/*"] +variable "enable_pod_identity" { + description = "Determines whether to enable support for EKS pod identity" + type = bool + default = true } -variable "irsa_subnet_account_id" { - description = "Account ID of where the subnets Karpenter will utilize resides. 
Used when subnets are shared from another account" - type = string - default = "" +################################################################################ +# IAM Role for Service Account (IRSA) +################################################################################ + +variable "enable_irsa" { + description = "Determines whether to enable support for IAM role for service accounts" + type = bool + default = false } variable "irsa_oidc_provider_arn" { @@ -123,6 +138,28 @@ variable "irsa_assume_role_condition_test" { default = "StringEquals" } +################################################################################ +# Pod Identity Association +################################################################################ +# TODO - Change default to `true` at next breaking change +variable "create_pod_identity_association" { + description = "Determines whether to create pod identity association" + type = bool + default = false +} + +variable "namespace" { + description = "Namespace to associate with the Karpenter Pod Identity" + type = string + default = "kube-system" +} + +variable "service_account" { + description = "Service account to associate with the Karpenter Pod Identity" + type = string + default = "karpenter" +} + ################################################################################ # Node Termination Queue ################################################################################ @@ -158,81 +195,97 @@ variable "queue_kms_data_key_reuse_period_seconds" { } ################################################################################ -# Node IAM Role & Instance Profile +# Node IAM Role ################################################################################ -variable "create_iam_role" { +variable "create_node_iam_role" { description = "Determines whether an IAM role is created or to use an existing IAM role" type = bool default = true } variable "cluster_ip_family" { - description = "The IP family used 
to assign Kubernetes pod and service addresses. Valid values are `ipv4` (default) and `ipv6`" + description = "The IP family used to assign Kubernetes pod and service addresses. Valid values are `ipv4` (default) and `ipv6`. Note: If `ipv6` is specified, the `AmazonEKS_CNI_IPv6_Policy` must exist in the account. This policy is created by the EKS module with `create_cni_ipv6_iam_policy = true`" type = string - default = null + default = "ipv4" } -variable "iam_role_arn" { +variable "node_iam_role_arn" { description = "Existing IAM role ARN for the IAM instance profile. Required if `create_iam_role` is set to `false`" type = string default = null } -variable "iam_role_name" { +variable "node_iam_role_name" { description = "Name to use on IAM role created" type = string default = null } -variable "iam_role_use_name_prefix" { +variable "node_iam_role_use_name_prefix" { description = "Determines whether the IAM role name (`iam_role_name`) is used as a prefix" type = bool default = true } -variable "iam_role_path" { +variable "node_iam_role_path" { description = "IAM role path" type = string default = "/" } -variable "iam_role_description" { +variable "node_iam_role_description" { description = "Description of the role" type = string default = null } -variable "iam_role_max_session_duration" { +variable "node_iam_role_max_session_duration" { description = "Maximum API session duration in seconds between 3600 and 43200" type = number default = null } -variable "iam_role_permissions_boundary" { +variable "node_iam_role_permissions_boundary" { description = "ARN of the policy that is used to set the permissions boundary for the IAM role" type = string default = null } -variable "iam_role_attach_cni_policy" { +variable "node_iam_role_attach_cni_policy" { description = "Whether to attach the `AmazonEKS_CNI_Policy`/`AmazonEKS_CNI_IPv6_Policy` IAM policy to the IAM IAM role. 
WARNING: If set `false` the permissions must be assigned to the `aws-node` DaemonSet pods via another method or nodes will not be able to join the cluster" type = bool default = true } -variable "iam_role_additional_policies" { +variable "node_iam_role_additional_policies" { description = "Additional policies to be added to the IAM role" - type = list(string) - default = [] + type = map(string) + default = {} } -variable "iam_role_tags" { +variable "node_iam_role_tags" { description = "A map of additional tags to add to the IAM role created" type = map(string) default = {} } +################################################################################ +# Access Entry +################################################################################ + +variable "create_access_entry" { + description = "Determines whether an access entry is created for the IAM role used by the node IAM role" + type = bool + default = true +} + +variable "access_entry_type" { + description = "Type of the access entry. 
`EC2_LINUX`, `FARGATE_LINUX`, or `EC2_WINDOWS`; defaults to `EC2_LINUX`" + type = string + default = "EC2_LINUX" +} + ################################################################################ # Node IAM Instance Profile ################################################################################ @@ -240,7 +293,7 @@ variable "iam_role_tags" { variable "create_instance_profile" { description = "Whether to create an IAM instance profile" type = bool - default = true + default = false } ################################################################################ @@ -251,4 +304,4 @@ variable "rule_name_prefix" { description = "Prefix used for all event bridge rules" type = string default = "Karpenter" -} +} \ No newline at end of file diff --git a/modules/karpenter/versions.tf b/modules/karpenter/versions.tf index 55eff62b09..6f83215f50 100644 --- a/modules/karpenter/versions.tf +++ b/modules/karpenter/versions.tf @@ -1,10 +1,10 @@ terraform { - required_version = ">= 1.0" + required_version = ">= 1.3.2" required_providers { aws = { source = "hashicorp/aws" - version = ">= 4.47" + version = ">= 5.40" } } } diff --git a/modules/self-managed-node-group/README.md b/modules/self-managed-node-group/README.md index 8964144994..5fb9cd3212 100644 --- a/modules/self-managed-node-group/README.md +++ b/modules/self-managed-node-group/README.md @@ -42,14 +42,14 @@ module "self_managed_node_group" { | Name | Version | |------|---------| -| [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | >= 4.57 | +| [terraform](#requirement\_terraform) | >= 1.3.2 | +| [aws](#requirement\_aws) | >= 5.40 | ## Providers | Name | Version | |------|---------| -| [aws](#provider\_aws) | >= 4.57 | +| [aws](#provider\_aws) | >= 5.40 | ## Modules @@ -63,36 +63,47 @@ module "self_managed_node_group" { |------|------| | [aws_autoscaling_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/autoscaling_group) | resource | | 
[aws_autoscaling_schedule.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/autoscaling_schedule) | resource | +| [aws_eks_access_entry.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_access_entry) | resource | | [aws_iam_instance_profile.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_instance_profile) | resource | | [aws_iam_role.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | | [aws_iam_role_policy_attachment.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | | [aws_iam_role_policy_attachment.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | | [aws_launch_template.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource | -| [aws_ami.eks_default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source | +| [aws_placement_group.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/placement_group) | resource | | [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | +| [aws_ec2_instance_type.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ec2_instance_type) | data source | +| [aws_ec2_instance_type_offerings.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ec2_instance_type_offerings) | data source | | [aws_iam_policy_document.assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | | [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | 
data source | +| [aws_ssm_parameter.ami](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ssm_parameter) | data source | +| [aws_subnets.efa](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/subnets) | data source | ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| +| [additional\_cluster\_dns\_ips](#input\_additional\_cluster\_dns\_ips) | Additional DNS IP addresses to use for the cluster. Only used when `ami_type` = `BOTTLEROCKET_*` | `list(string)` | `[]` | no | | [ami\_id](#input\_ami\_id) | The AMI from which to launch the instance | `string` | `""` | no | +| [ami\_type](#input\_ami\_type) | Type of Amazon Machine Image (AMI) associated with the node group. See the [AWS documentation](https://docs.aws.amazon.com/eks/latest/APIReference/API_Nodegroup.html#AmazonEKS-Type-Nodegroup-amiType) for valid values | `string` | `"AL2_x86_64"` | no | | [autoscaling\_group\_tags](#input\_autoscaling\_group\_tags) | A map of additional tags to add to the autoscaling group created. Tags are applied to the autoscaling group only and are NOT propagated to instances | `map(string)` | `{}` | no | | [availability\_zones](#input\_availability\_zones) | A list of one or more availability zones for the group. Used for EC2-Classic and default subnets when not specified with `subnet_ids` argument. Conflicts with `subnet_ids` | `list(string)` | `null` | no | | [block\_device\_mappings](#input\_block\_device\_mappings) | Specify volumes to attach to the instance besides the volumes specified by the AMI | `any` | `{}` | no | -| [bootstrap\_extra\_args](#input\_bootstrap\_extra\_args) | Additional arguments passed to the bootstrap script. 
When `platform` = `bottlerocket`; these are additional [settings](https://github.com/bottlerocket-os/bottlerocket#settings) that are provided to the Bottlerocket user data | `string` | `""` | no | +| [bootstrap\_extra\_args](#input\_bootstrap\_extra\_args) | Additional arguments passed to the bootstrap script. When `ami_type` = `BOTTLEROCKET_*`; these are additional [settings](https://github.com/bottlerocket-os/bottlerocket#settings) that are provided to the Bottlerocket user data | `string` | `""` | no | | [capacity\_rebalance](#input\_capacity\_rebalance) | Indicates whether capacity rebalance is enabled | `bool` | `null` | no | | [capacity\_reservation\_specification](#input\_capacity\_reservation\_specification) | Targeting for EC2 capacity reservations | `any` | `{}` | no | +| [cloudinit\_post\_nodeadm](#input\_cloudinit\_post\_nodeadm) | Array of cloud-init document parts that are created after the nodeadm document part |
list(object({
content = string
content_type = optional(string)
filename = optional(string)
merge_type = optional(string)
}))
| `[]` | no | +| [cloudinit\_pre\_nodeadm](#input\_cloudinit\_pre\_nodeadm) | Array of cloud-init document parts that are created before the nodeadm document part |
list(object({
content = string
content_type = optional(string)
filename = optional(string)
merge_type = optional(string)
}))
| `[]` | no | | [cluster\_auth\_base64](#input\_cluster\_auth\_base64) | Base64 encoded CA of associated EKS cluster | `string` | `""` | no | | [cluster\_endpoint](#input\_cluster\_endpoint) | Endpoint of associated EKS cluster | `string` | `""` | no | -| [cluster\_ip\_family](#input\_cluster\_ip\_family) | The IP family used to assign Kubernetes pod and service addresses. Valid values are `ipv4` (default) and `ipv6` | `string` | `null` | no | +| [cluster\_ip\_family](#input\_cluster\_ip\_family) | The IP family used to assign Kubernetes pod and service addresses. Valid values are `ipv4` (default) and `ipv6` | `string` | `"ipv4"` | no | | [cluster\_name](#input\_cluster\_name) | Name of associated EKS cluster | `string` | `""` | no | | [cluster\_primary\_security\_group\_id](#input\_cluster\_primary\_security\_group\_id) | The ID of the EKS cluster primary security group to associate with the instance(s). This is the security group that is automatically created by the EKS service | `string` | `null` | no | +| [cluster\_service\_cidr](#input\_cluster\_service\_cidr) | The CIDR block (IPv4 or IPv6) used by the cluster to assign Kubernetes service IP addresses. 
This is derived from the cluster itself | `string` | `""` | no | | [cluster\_version](#input\_cluster\_version) | Kubernetes cluster version - used to lookup default AMI ID if one is not provided | `string` | `null` | no | | [context](#input\_context) | Reserved | `string` | `null` | no | | [cpu\_options](#input\_cpu\_options) | The CPU options for the instance | `map(string)` | `{}` | no | | [create](#input\_create) | Determines whether to create self managed node group or not | `bool` | `true` | no | +| [create\_access\_entry](#input\_create\_access\_entry) | Determines whether an access entry is created for the IAM role used by the nodegroup | `bool` | `true` | no | | [create\_autoscaling\_group](#input\_create\_autoscaling\_group) | Determines whether to create autoscaling group or not | `bool` | `true` | no | | [create\_iam\_instance\_profile](#input\_create\_iam\_instance\_profile) | Determines whether an IAM instance profile is created or to use an existing IAM instance profile | `bool` | `true` | no | | [create\_launch\_template](#input\_create\_launch\_template) | Determines whether to create launch template or not | `bool` | `true` | no | @@ -106,6 +117,7 @@ module "self_managed_node_group" { | [ebs\_optimized](#input\_ebs\_optimized) | If true, the launched EC2 instance will be EBS-optimized | `bool` | `null` | no | | [elastic\_gpu\_specifications](#input\_elastic\_gpu\_specifications) | The elastic GPU to attach to the instance | `any` | `{}` | no | | [elastic\_inference\_accelerator](#input\_elastic\_inference\_accelerator) | Configuration block containing an Elastic Inference Accelerator to attach to the instance | `map(string)` | `{}` | no | +| [enable\_efa\_support](#input\_enable\_efa\_support) | Determines whether to enable Elastic Fabric Adapter (EFA) support | `bool` | `false` | no | | [enable\_monitoring](#input\_enable\_monitoring) | Enables/disables detailed monitoring | `bool` | `true` | no | | [enabled\_metrics](#input\_enabled\_metrics) | 
A list of metrics to collect. The allowed values are `GroupDesiredCapacity`, `GroupInServiceCapacity`, `GroupPendingCapacity`, `GroupMinSize`, `GroupMaxSize`, `GroupInServiceInstances`, `GroupPendingInstances`, `GroupStandbyInstances`, `GroupStandbyCapacity`, `GroupTerminatingCapacity`, `GroupTerminatingInstances`, `GroupTotalCapacity`, `GroupTotalInstances` | `list(string)` | `[]` | no | | [enclave\_options](#input\_enclave\_options) | Enable Nitro Enclaves on launched instances | `map(string)` | `{}` | no | @@ -116,6 +128,7 @@ module "self_managed_node_group" { | [hibernation\_options](#input\_hibernation\_options) | The hibernation options for the instance | `map(string)` | `{}` | no | | [iam\_instance\_profile\_arn](#input\_iam\_instance\_profile\_arn) | Amazon Resource Name (ARN) of an existing IAM instance profile that provides permissions for the node group. Required if `create_iam_instance_profile` = `false` | `string` | `null` | no | | [iam\_role\_additional\_policies](#input\_iam\_role\_additional\_policies) | Additional policies to be added to the IAM role | `map(string)` | `{}` | no | +| [iam\_role\_arn](#input\_iam\_role\_arn) | ARN of the IAM role used by the instance profile. Required when `create_access_entry = true` and `create_iam_instance_profile = false` | `string` | `null` | no | | [iam\_role\_attach\_cni\_policy](#input\_iam\_role\_attach\_cni\_policy) | Whether to attach the `AmazonEKS_CNI_Policy`/`AmazonEKS_CNI_IPv6_Policy` IAM policy to the IAM IAM role. 
WARNING: If set `false` the permissions must be assigned to the `aws-node` DaemonSet pods via another method or nodes will not be able to join the cluster | `bool` | `true` | no | | [iam\_role\_description](#input\_iam\_role\_description) | Description of the role | `string` | `null` | no | | [iam\_role\_name](#input\_iam\_role\_name) | Name to use on IAM role created | `string` | `null` | no | @@ -125,6 +138,7 @@ module "self_managed_node_group" { | [iam\_role\_use\_name\_prefix](#input\_iam\_role\_use\_name\_prefix) | Determines whether cluster IAM role name (`iam_role_name`) is used as a prefix | `bool` | `true` | no | | [initial\_lifecycle\_hooks](#input\_initial\_lifecycle\_hooks) | One or more Lifecycle Hooks to attach to the Auto Scaling Group before instances are launched. The syntax is exactly the same as the separate `aws_autoscaling_lifecycle_hook` resource, without the `autoscaling_group_name` attribute. Please note that this will only work when creating a new Auto Scaling Group. For all other use-cases, please use `aws_autoscaling_lifecycle_hook` resource | `list(map(string))` | `[]` | no | | [instance\_initiated\_shutdown\_behavior](#input\_instance\_initiated\_shutdown\_behavior) | Shutdown behavior for the instance. Can be `stop` or `terminate`. (Default: `stop`) | `string` | `null` | no | +| [instance\_maintenance\_policy](#input\_instance\_maintenance\_policy) | If this block is configured, add a instance maintenance policy to the specified Auto Scaling group | `any` | `{}` | no | | [instance\_market\_options](#input\_instance\_market\_options) | The market (purchasing) option for the instance | `any` | `{}` | no | | [instance\_refresh](#input\_instance\_refresh) | If this block is configured, start an Instance Refresh when this Auto Scaling Group is updated | `any` |
{
"preferences": {
"min_healthy_percentage": 66
},
"strategy": "Rolling"
}
| no | | [instance\_requirements](#input\_instance\_requirements) | The attribute requirements for the type of instance. If present then `instance_type` cannot be present | `any` | `{}` | no | @@ -151,9 +165,9 @@ module "self_managed_node_group" { | [network\_interfaces](#input\_network\_interfaces) | Customize network interfaces to be attached at instance boot time | `list(any)` | `[]` | no | | [placement](#input\_placement) | The placement of the instance | `map(string)` | `{}` | no | | [placement\_group](#input\_placement\_group) | The name of the placement group into which you'll launch your instances, if any | `string` | `null` | no | -| [platform](#input\_platform) | Identifies if the OS platform is `bottlerocket`, `linux`, or `windows` based | `string` | `"linux"` | no | -| [post\_bootstrap\_user\_data](#input\_post\_bootstrap\_user\_data) | User data that is appended to the user data script after of the EKS bootstrap script. Not used when `platform` = `bottlerocket` | `string` | `""` | no | -| [pre\_bootstrap\_user\_data](#input\_pre\_bootstrap\_user\_data) | User data that is injected into the user data script ahead of the EKS bootstrap script. Not used when `platform` = `bottlerocket` | `string` | `""` | no | +| [platform](#input\_platform) | [DEPRECATED - must use `ami_type` instead. Will be removed in `v21.0`] | `string` | `null` | no | +| [post\_bootstrap\_user\_data](#input\_post\_bootstrap\_user\_data) | User data that is appended to the user data script after of the EKS bootstrap script. Not used when `ami_type` = `BOTTLEROCKET_*` | `string` | `""` | no | +| [pre\_bootstrap\_user\_data](#input\_pre\_bootstrap\_user\_data) | User data that is injected into the user data script ahead of the EKS bootstrap script. Not used when `ami_type` = `BOTTLEROCKET_*` | `string` | `""` | no | | [private\_dns\_name\_options](#input\_private\_dns\_name\_options) | The options for the instance hostname. 
The default values are inherited from the subnet | `map(string)` | `{}` | no | | [protect\_from\_scale\_in](#input\_protect\_from\_scale\_in) | Allows setting instance protection. The autoscaling group will not select instances with this setting for termination during scale in events. | `bool` | `false` | no | | [ram\_disk\_id](#input\_ram\_disk\_id) | The ID of the ram disk | `string` | `null` | no | @@ -178,6 +192,7 @@ module "self_managed_node_group" { | Name | Description | |------|-------------| +| [access\_entry\_arn](#output\_access\_entry\_arn) | Amazon Resource Name (ARN) of the Access Entry | | [autoscaling\_group\_arn](#output\_autoscaling\_group\_arn) | The ARN for this autoscaling group | | [autoscaling\_group\_availability\_zones](#output\_autoscaling\_group\_availability\_zones) | The availability zones of the autoscaling group | | [autoscaling\_group\_default\_cooldown](#output\_autoscaling\_group\_default\_cooldown) | Time between a scaling activity and the succeeding scaling activity | @@ -201,6 +216,6 @@ module "self_managed_node_group" { | [launch\_template\_id](#output\_launch\_template\_id) | The ID of the launch template | | [launch\_template\_latest\_version](#output\_launch\_template\_latest\_version) | The latest version of the launch template | | [launch\_template\_name](#output\_launch\_template\_name) | The name of the launch template | -| [platform](#output\_platform) | Identifies if the OS platform is `bottlerocket`, `linux`, or `windows` based | +| [platform](#output\_platform) | [DEPRECATED - Will be removed in `v21.0`] Identifies the OS platform as `bottlerocket`, `linux` (AL2), `al2023`, or `windows` | | [user\_data](#output\_user\_data) | Base64 encoded user data | diff --git a/modules/self-managed-node-group/main.tf b/modules/self-managed-node-group/main.tf index f46096470b..25b202dc39 100644 --- a/modules/self-managed-node-group/main.tf +++ b/modules/self-managed-node-group/main.tf @@ -1,16 +1,55 @@ data "aws_partition" 
"current" {} data "aws_caller_identity" "current" {} -data "aws_ami" "eks_default" { - count = var.create && var.create_launch_template ? 1 : 0 +################################################################################ +# AMI SSM Parameter +################################################################################ - filter { - name = "name" - values = ["amazon-eks-node-${var.cluster_version}-v*"] +locals { + # Just to ensure templating doesn't fail when values are not provided + ssm_cluster_version = var.cluster_version != null ? var.cluster_version : "" + + # TODO - Temporary stopgap for backwards compatibility until v21.0 + ami_type_to_user_data_type = { + AL2_x86_64 = "linux" + AL2_x86_64_GPU = "linux" + AL2_ARM_64 = "linux" + BOTTLEROCKET_ARM_64 = "bottlerocket" + BOTTLEROCKET_x86_64 = "bottlerocket" + BOTTLEROCKET_ARM_64_NVIDIA = "bottlerocket" + BOTTLEROCKET_x86_64_NVIDIA = "bottlerocket" + WINDOWS_CORE_2019_x86_64 = "windows" + WINDOWS_FULL_2019_x86_64 = "windows" + WINDOWS_CORE_2022_x86_64 = "windows" + WINDOWS_FULL_2022_x86_64 = "windows" + AL2023_x86_64_STANDARD = "al2023" + AL2023_ARM_64_STANDARD = "al2023" + } + + user_data_type = local.ami_type_to_user_data_type[var.ami_type] + + # Map the AMI type to the respective SSM param path + ami_type_to_ssm_param = { + AL2_x86_64 = "/aws/service/eks/optimized-ami/${local.ssm_cluster_version}/amazon-linux-2/recommended/image_id" + AL2_x86_64_GPU = "/aws/service/eks/optimized-ami/${local.ssm_cluster_version}/amazon-linux-2-gpu/recommended/image_id" + AL2_ARM_64 = "/aws/service/eks/optimized-ami/${local.ssm_cluster_version}/amazon-linux-2-arm64/recommended/image_id" + BOTTLEROCKET_ARM_64 = "/aws/service/bottlerocket/aws-k8s-${local.ssm_cluster_version}/arm64/latest/image_id" + BOTTLEROCKET_x86_64 = "/aws/service/bottlerocket/aws-k8s-${local.ssm_cluster_version}/x86_64/latest/image_id" + BOTTLEROCKET_ARM_64_NVIDIA = 
"/aws/service/bottlerocket/aws-k8s-${local.ssm_cluster_version}-nvidia/arm64/latest/image_id" + BOTTLEROCKET_x86_64_NVIDIA = "/aws/service/bottlerocket/aws-k8s-${local.ssm_cluster_version}-nvidia/x86_64/latest/image_id" + WINDOWS_CORE_2019_x86_64 = "/aws/service/ami-windows-latest/Windows_Server-2019-English-Core-EKS_Optimized-${local.ssm_cluster_version}/image_id" + WINDOWS_FULL_2019_x86_64 = "/aws/service/ami-windows-latest/Windows_Server-2019-English-Full-EKS_Optimized-${local.ssm_cluster_version}/image_id" + WINDOWS_CORE_2022_x86_64 = "/aws/service/ami-windows-latest/Windows_Server-2022-English-Core-EKS_Optimized-${local.ssm_cluster_version}/image_id" + WINDOWS_FULL_2022_x86_64 = "/aws/service/ami-windows-latest/Windows_Server-2022-English-Full-EKS_Optimized-${local.ssm_cluster_version}/image_id" + AL2023_x86_64_STANDARD = "/aws/service/eks/optimized-ami/${local.ssm_cluster_version}/amazon-linux-2023/x86_64/standard/recommended/image_id" + AL2023_ARM_64_STANDARD = "/aws/service/eks/optimized-ami/${local.ssm_cluster_version}/amazon-linux-2023/arm64/standard/recommended/image_id" } +} + +data "aws_ssm_parameter" "ami" { + count = var.create ?
1 : 0 - most_recent = true - owners = ["amazon"] + name = local.ami_type_to_ssm_param[var.ami_type] } ################################################################################ @@ -21,18 +60,54 @@ module "user_data" { source = "../_user_data" create = var.create - platform = var.platform + platform = local.user_data_type + ami_type = var.ami_type is_eks_managed_node_group = false - cluster_name = var.cluster_name - cluster_endpoint = var.cluster_endpoint - cluster_auth_base64 = var.cluster_auth_base64 + cluster_name = var.cluster_name + cluster_endpoint = var.cluster_endpoint + cluster_auth_base64 = var.cluster_auth_base64 + cluster_ip_family = var.cluster_ip_family + cluster_service_cidr = var.cluster_service_cidr + additional_cluster_dns_ips = var.additional_cluster_dns_ips enable_bootstrap_user_data = true pre_bootstrap_user_data = var.pre_bootstrap_user_data post_bootstrap_user_data = var.post_bootstrap_user_data bootstrap_extra_args = var.bootstrap_extra_args user_data_template_path = var.user_data_template_path + + cloudinit_pre_nodeadm = var.cloudinit_pre_nodeadm + cloudinit_post_nodeadm = var.cloudinit_post_nodeadm +} + +################################################################################ +# EFA Support +################################################################################ + +data "aws_ec2_instance_type" "this" { + count = local.enable_efa_support ? 1 : 0 + + instance_type = var.instance_type +} + +locals { + enable_efa_support = var.create && var.enable_efa_support && local.instance_type_provided + + instance_type_provided = var.instance_type != "" + num_network_cards = try(data.aws_ec2_instance_type.this[0].maximum_network_cards, 0) + + efa_network_interfaces = [ + for i in range(local.num_network_cards) : { + associate_public_ip_address = false + delete_on_termination = true + device_index = i == 0 ? 0 : 1 + network_card_index = i + interface_type = "efa" + } + ] + + network_interfaces = local.enable_efa_support ? 
local.efa_network_interfaces : var.network_interfaces } ################################################################################ @@ -42,6 +117,8 @@ module "user_data" { locals { launch_template_name = coalesce(var.launch_template_name, var.name) security_group_ids = compact(concat([var.cluster_primary_security_group_id], var.vpc_security_group_ids)) + + placement = local.enable_efa_support ? { group_name = aws_placement_group.this[0].name } : var.placement } resource "aws_launch_template" "this" { @@ -148,7 +225,7 @@ resource "aws_launch_template" "this" { arn = var.create_iam_instance_profile ? aws_iam_instance_profile.this[0].arn : var.iam_instance_profile_arn } - image_id = coalesce(var.ami_id, data.aws_ami.eks_default[0].image_id) + image_id = coalesce(var.ami_id, nonsensitive(data.aws_ssm_parameter.ami[0].value)) instance_initiated_shutdown_behavior = var.instance_initiated_shutdown_behavior dynamic "instance_market_options" { @@ -285,7 +362,7 @@ resource "aws_launch_template" "this" { for_each = length(var.license_specifications) > 0 ? var.license_specifications : {} content { - license_configuration_arn = license_specifications.value.license_configuration_arn + license_configuration_arn = license_specification.value.license_configuration_arn } } @@ -321,7 +398,8 @@ resource "aws_launch_template" "this" { name_prefix = var.launch_template_use_name_prefix ? "${local.launch_template_name}-" : null dynamic "network_interfaces" { - for_each = var.network_interfaces + for_each = local.network_interfaces + content { associate_carrier_ip_address = try(network_interfaces.value.associate_carrier_ip_address, null) associate_public_ip_address = try(network_interfaces.value.associate_public_ip_address, null) @@ -347,14 +425,14 @@ resource "aws_launch_template" "this" { } dynamic "placement" { - for_each = length(var.placement) > 0 ? [var.placement] : [] + for_each = length(local.placement) > 0 ? 
[local.placement] : [] content { affinity = try(placement.value.affinity, null) - availability_zone = try(placement.value.availability_zone, null) - group_name = try(placement.value.group_name, null) - host_id = try(placement.value.host_id, null) - host_resource_group_arn = try(placement.value.host_resource_group_arn, null) + availability_zone = lookup(placement.value, "availability_zone", null) + group_name = lookup(placement.value, "group_name", null) + host_id = lookup(placement.value, "host_id", null) + host_resource_group_arn = lookup(placement.value, "host_resource_group_arn", null) partition_number = try(placement.value.partition_number, null) spread_domain = try(placement.value.spread_domain, null) tenancy = try(placement.value.tenancy, null) @@ -384,7 +462,7 @@ resource "aws_launch_template" "this" { update_default_version = var.update_launch_template_default_version user_data = module.user_data.user_data - vpc_security_group_ids = length(var.network_interfaces) > 0 ? [] : local.security_group_ids + vpc_security_group_ids = length(local.network_interfaces) > 0 ? [] : local.security_group_ids tags = var.tags @@ -438,6 +516,15 @@ resource "aws_autoscaling_group" "this" { } } + dynamic "instance_maintenance_policy" { + for_each = length(var.instance_maintenance_policy) > 0 ? [var.instance_maintenance_policy] : [] + + content { + min_healthy_percentage = instance_maintenance_policy.value.min_healthy_percentage + max_healthy_percentage = instance_maintenance_policy.value.max_healthy_percentage + } + } + dynamic "instance_refresh" { for_each = length(var.instance_refresh) > 0 ? 
[var.instance_refresh] : [] @@ -446,11 +533,14 @@ resource "aws_autoscaling_group" "this" { for_each = try([instance_refresh.value.preferences], []) content { - checkpoint_delay = try(preferences.value.checkpoint_delay, null) - checkpoint_percentages = try(preferences.value.checkpoint_percentages, null) - instance_warmup = try(preferences.value.instance_warmup, null) - min_healthy_percentage = try(preferences.value.min_healthy_percentage, null) - skip_matching = try(preferences.value.skip_matching, null) + checkpoint_delay = try(preferences.value.checkpoint_delay, null) + checkpoint_percentages = try(preferences.value.checkpoint_percentages, null) + instance_warmup = try(preferences.value.instance_warmup, null) + max_healthy_percentage = try(preferences.value.max_healthy_percentage, null) + min_healthy_percentage = try(preferences.value.min_healthy_percentage, null) + scale_in_protected_instances = try(preferences.value.scale_in_protected_instances, null) + skip_matching = try(preferences.value.skip_matching, null) + standby_instances = try(preferences.value.standby_instances, null) } } @@ -527,8 +617,9 @@ resource "aws_autoscaling_group" "this" { } } - accelerator_types = try(instance_requirements.value.accelerator_types, []) - bare_metal = try(instance_requirements.value.bare_metal, null) + accelerator_types = try(instance_requirements.value.accelerator_types, []) + allowed_instance_types = try(instance_requirements.value.allowed_instance_types, null) + bare_metal = try(instance_requirements.value.bare_metal, null) dynamic "baseline_ebs_bandwidth_mbps" { for_each = try([instance_requirements.value.baseline_ebs_bandwidth_mbps], []) @@ -708,7 +799,7 @@ resource "aws_autoscaling_group" "this" { target_group_arns = var.target_group_arns termination_policies = var.termination_policies - vpc_zone_identifier = var.subnet_ids + vpc_zone_identifier = local.enable_efa_support ? 
data.aws_subnets.efa[0].ids : var.subnet_ids wait_for_capacity_timeout = var.wait_for_capacity_timeout wait_for_elb_capacity = var.wait_for_elb_capacity @@ -742,40 +833,26 @@ resource "aws_autoscaling_group" "this" { } } -################################################################################ -# Autoscaling group schedule -################################################################################ - -resource "aws_autoscaling_schedule" "this" { - for_each = { for k, v in var.schedules : k => v if var.create && var.create_schedule } - - scheduled_action_name = each.key - autoscaling_group_name = aws_autoscaling_group.this[0].name - - min_size = try(each.value.min_size, null) - max_size = try(each.value.max_size, null) - desired_capacity = try(each.value.desired_size, null) - start_time = try(each.value.start_time, null) - end_time = try(each.value.end_time, null) - time_zone = try(each.value.time_zone, null) - - # [Minute] [Hour] [Day_of_Month] [Month_of_Year] [Day_of_Week] - # Cron examples: https://crontab.guru/examples.html - recurrence = try(each.value.recurrence, null) -} - ################################################################################ # IAM Role ################################################################################ locals { - iam_role_name = coalesce(var.iam_role_name, var.name) + create_iam_instance_profile = var.create && var.create_iam_instance_profile + + iam_role_name = coalesce(var.iam_role_name, "${var.name}") iam_role_policy_prefix = "arn:${data.aws_partition.current.partition}:iam::aws:policy" - cni_policy = var.cluster_ip_family == "ipv6" ? 
"arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:policy/AmazonEKS_CNI_IPv6_Policy" : "${local.iam_role_policy_prefix}/AmazonEKS_CNI_Policy" + + ipv4_cni_policy = { for k, v in { + AmazonEKS_CNI_Policy = "${local.iam_role_policy_prefix}/AmazonEKS_CNI_Policy" + } : k => v if var.iam_role_attach_cni_policy && var.cluster_ip_family == "ipv4" } + ipv6_cni_policy = { for k, v in { + AmazonEKS_CNI_IPv6_Policy = "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:policy/AmazonEKS_CNI_IPv6_Policy" + } : k => v if var.iam_role_attach_cni_policy && var.cluster_ip_family == "ipv6" } } data "aws_iam_policy_document" "assume_role_policy" { - count = var.create && var.create_iam_instance_profile ? 1 : 0 + count = local.create_iam_instance_profile ? 1 : 0 statement { sid = "EKSNodeAssumeRole" @@ -783,13 +860,13 @@ data "aws_iam_policy_document" "assume_role_policy" { principals { type = "Service" - identifiers = ["ec2.${data.aws_partition.current.dns_suffix}"] + identifiers = ["ec2.amazonaws.com"] } } } resource "aws_iam_role" "this" { - count = var.create && var.create_iam_instance_profile ? 1 : 0 + count = local.create_iam_instance_profile ? 1 : 0 name = var.iam_role_use_name_prefix ? null : local.iam_role_name name_prefix = var.iam_role_use_name_prefix ? "${local.iam_role_name}-" : null @@ -803,26 +880,30 @@ resource "aws_iam_role" "this" { tags = merge(var.tags, var.iam_role_tags) } +# Policies attached ref https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_node_group resource "aws_iam_role_policy_attachment" "this" { - for_each = { for k, v in toset(compact([ - "${local.iam_role_policy_prefix}/AmazonEKSWorkerNodePolicy", - "${local.iam_role_policy_prefix}/AmazonEC2ContainerRegistryReadOnly", - var.iam_role_attach_cni_policy ? 
local.cni_policy : "", - ])) : k => v if var.create && var.create_iam_instance_profile } + for_each = { for k, v in merge( + { + AmazonEKSWorkerNodePolicy = "${local.iam_role_policy_prefix}/AmazonEKSWorkerNodePolicy" + AmazonEC2ContainerRegistryReadOnly = "${local.iam_role_policy_prefix}/AmazonEC2ContainerRegistryReadOnly" + }, + local.ipv4_cni_policy, + local.ipv6_cni_policy + ) : k => v if local.create_iam_instance_profile } policy_arn = each.value role = aws_iam_role.this[0].name } resource "aws_iam_role_policy_attachment" "additional" { - for_each = { for k, v in var.iam_role_additional_policies : k => v if var.create && var.create_iam_instance_profile } + for_each = { for k, v in var.iam_role_additional_policies : k => v if local.create_iam_instance_profile } policy_arn = each.value role = aws_iam_role.this[0].name } resource "aws_iam_instance_profile" "this" { - count = var.create && var.create_iam_instance_profile ? 1 : 0 + count = local.create_iam_instance_profile ? 1 : 0 role = aws_iam_role.this[0].name @@ -836,3 +917,89 @@ resource "aws_iam_instance_profile" "this" { create_before_destroy = true } } + +################################################################################ +# Placement Group +################################################################################ + +resource "aws_placement_group" "this" { + count = local.enable_efa_support ? 1 : 0 + + name = "${var.cluster_name}-${var.name}" + strategy = "cluster" + + tags = var.tags +} + +################################################################################ +# Instance AZ Lookup + +# Instances usually used in placement groups w/ EFA are only available in +# select availability zones. These data sources will cross reference the availability +# zones supported by the instance type with the subnets provided to ensure only +# AZs/subnets that are supported are used. 
+################################################################################ + +# Find the availability zones supported by the instance type +data "aws_ec2_instance_type_offerings" "this" { + count = local.enable_efa_support ? 1 : 0 + + filter { + name = "instance-type" + values = [var.instance_type] + } + + location_type = "availability-zone-id" +} + +# Reverse the lookup to find one of the subnets provided based on the +# availability zone ID of the queried instance type (supported) +data "aws_subnets" "efa" { + count = local.enable_efa_support ? 1 : 0 + + filter { + name = "subnet-id" + values = var.subnet_ids + } + + filter { + name = "availability-zone-id" + values = data.aws_ec2_instance_type_offerings.this[0].locations + } +} + +################################################################################ +# Access Entry +################################################################################ + +resource "aws_eks_access_entry" "this" { + count = var.create && var.create_access_entry ? 1 : 0 + + cluster_name = var.cluster_name + principal_arn = var.create_iam_instance_profile ? aws_iam_role.this[0].arn : var.iam_role_arn + type = local.user_data_type == "windows" ? 
"EC2_WINDOWS" : "EC2_LINUX" + + tags = var.tags +} + +################################################################################ +# Autoscaling group schedule +################################################################################ + +resource "aws_autoscaling_schedule" "this" { + for_each = { for k, v in var.schedules : k => v if var.create && var.create_schedule } + + scheduled_action_name = each.key + autoscaling_group_name = aws_autoscaling_group.this[0].name + + min_size = try(each.value.min_size, null) + max_size = try(each.value.max_size, null) + desired_capacity = try(each.value.desired_size, null) + start_time = try(each.value.start_time, null) + end_time = try(each.value.end_time, null) + time_zone = try(each.value.time_zone, null) + + # [Minute] [Hour] [Day_of_Month] [Month_of_Year] [Day_of_Week] + # Cron examples: https://crontab.guru/examples.html + recurrence = try(each.value.recurrence, null) +} diff --git a/modules/self-managed-node-group/migrations.tf b/modules/self-managed-node-group/migrations.tf new file mode 100644 index 0000000000..5d51a7208a --- /dev/null +++ b/modules/self-managed-node-group/migrations.tf @@ -0,0 +1,20 @@ +################################################################################ +# Migrations: v20.7 -> v20.8 +################################################################################ + +# Node IAM role policy attachment +# Commercial partition only - `moved` does not allow multiple moves to same target +moved { + from = aws_iam_role_policy_attachment.this["arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"] + to = aws_iam_role_policy_attachment.this["AmazonEKSWorkerNodePolicy"] +} + +moved { + from = aws_iam_role_policy_attachment.this["arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"] + to = aws_iam_role_policy_attachment.this["AmazonEC2ContainerRegistryReadOnly"] +} + +moved { + from = aws_iam_role_policy_attachment.this["arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"] + to = 
aws_iam_role_policy_attachment.this["AmazonEKS_CNI_Policy"] +} diff --git a/modules/self-managed-node-group/outputs.tf b/modules/self-managed-node-group/outputs.tf index 5c83497218..9607810ac3 100644 --- a/modules/self-managed-node-group/outputs.tf +++ b/modules/self-managed-node-group/outputs.tf @@ -81,15 +81,6 @@ output "autoscaling_group_vpc_zone_identifier" { value = try(aws_autoscaling_group.this[0].vpc_zone_identifier, null) } -################################################################################ -# Autoscaling Group Schedule -################################################################################ - -output "autoscaling_group_schedule_arns" { - description = "ARNs of autoscaling group schedules" - value = { for k, v in aws_autoscaling_schedule.this : k => v.arn } -} - ################################################################################ # IAM Role ################################################################################ @@ -128,13 +119,31 @@ output "iam_instance_profile_unique" { value = try(aws_iam_instance_profile.this[0].unique_id, null) } +################################################################################ +# Access Entry +################################################################################ + +output "access_entry_arn" { + description = "Amazon Resource Name (ARN) of the Access Entry" + value = try(aws_eks_access_entry.this[0].access_entry_arn, null) +} + +################################################################################ +# Autoscaling Group Schedule +################################################################################ + +output "autoscaling_group_schedule_arns" { + description = "ARNs of autoscaling group schedules" + value = { for k, v in aws_autoscaling_schedule.this : k => v.arn } +} + ################################################################################ # Additional ################################################################################ 
output "platform" { - description = "Identifies if the OS platform is `bottlerocket`, `linux`, or `windows` based" - value = var.platform + description = "[DEPRECATED - Will be removed in `v21.0`] Identifies the OS platform as `bottlerocket`, `linux` (AL2), `al2023`, or `windows`" + value = module.user_data.platform } output "image_id" { diff --git a/modules/self-managed-node-group/variables.tf b/modules/self-managed-node-group/variables.tf index 7e5d0cecb6..aea1269080 100644 --- a/modules/self-managed-node-group/variables.tf +++ b/modules/self-managed-node-group/variables.tf @@ -10,10 +10,16 @@ variable "tags" { default = {} } +# tflint-ignore: terraform_unused_declarations variable "platform" { - description = "Identifies if the OS platform is `bottlerocket`, `linux`, or `windows` based" + description = "[DEPRECATED - must use `ami_type` instead. Will be removed in `v21.0`]" type = string - default = "linux" + default = null + + validation { + condition = var.platform == null + error_message = "`platform` is no longer valid due to the number of OS choices. Please provide an [`ami_type`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-eks-nodegroup.html#cfn-eks-nodegroup-amitype) instead." + } } ################################################################################ @@ -38,20 +44,38 @@ variable "cluster_auth_base64" { default = "" } +variable "cluster_service_cidr" { + description = "The CIDR block (IPv4 or IPv6) used by the cluster to assign Kubernetes service IP addresses. This is derived from the cluster itself" + type = string + default = "" +} + +variable "cluster_ip_family" { + description = "The IP family used to assign Kubernetes pod and service addresses. Valid values are `ipv4` (default) and `ipv6`" + type = string + default = "ipv4" +} + +variable "additional_cluster_dns_ips" { + description = "Additional DNS IP addresses to use for the cluster. 
Only used when `ami_type` = `BOTTLEROCKET_*`" + type = list(string) + default = [] +} + variable "pre_bootstrap_user_data" { - description = "User data that is injected into the user data script ahead of the EKS bootstrap script. Not used when `platform` = `bottlerocket`" + description = "User data that is injected into the user data script ahead of the EKS bootstrap script. Not used when `ami_type` = `BOTTLEROCKET_*`" type = string default = "" } variable "post_bootstrap_user_data" { - description = "User data that is appended to the user data script after of the EKS bootstrap script. Not used when `platform` = `bottlerocket`" + description = "User data that is appended to the user data script after of the EKS bootstrap script. Not used when `ami_type` = `BOTTLEROCKET_*`" type = string default = "" } variable "bootstrap_extra_args" { - description = "Additional arguments passed to the bootstrap script. When `platform` = `bottlerocket`; these are additional [settings](https://github.com/bottlerocket-os/bottlerocket#settings) that are provided to the Bottlerocket user data" + description = "Additional arguments passed to the bootstrap script. 
When `ami_type` = `BOTTLEROCKET_*`; these are additional [settings](https://github.com/bottlerocket-os/bottlerocket#settings) that are provided to the Bottlerocket user data" type = string default = "" } @@ -62,6 +86,28 @@ variable "user_data_template_path" { default = "" } +variable "cloudinit_pre_nodeadm" { + description = "Array of cloud-init document parts that are created before the nodeadm document part" + type = list(object({ + content = string + content_type = optional(string) + filename = optional(string) + merge_type = optional(string) + })) + default = [] +} + +variable "cloudinit_post_nodeadm" { + description = "Array of cloud-init document parts that are created after the nodeadm document part" + type = list(object({ + content = string + content_type = optional(string) + filename = optional(string) + merge_type = optional(string) + })) + default = [] +} + ################################################################################ # Launch template ################################################################################ @@ -228,6 +274,12 @@ variable "ami_id" { default = "" } +variable "ami_type" { + description = "Type of Amazon Machine Image (AMI) associated with the node group. 
See the [AWS documentation](https://docs.aws.amazon.com/eks/latest/APIReference/API_Nodegroup.html#AmazonEKS-Type-Nodegroup-amiType) for valid values" + type = string + default = "AL2_x86_64" +} + variable "cluster_version" { description = "Kubernetes cluster version - used to lookup default AMI ID if one is not provided" type = string @@ -270,6 +322,12 @@ variable "enable_monitoring" { default = true } +variable "enable_efa_support" { + description = "Determines whether to enable Elastic Fabric Adapter (EFA) support" + type = bool + default = false +} + variable "metadata_options" { description = "Customize the metadata options for the instance" type = map(string) @@ -476,6 +534,12 @@ variable "initial_lifecycle_hooks" { default = [] } +variable "instance_maintenance_policy" { + description = "If this block is configured, add a instance maintenance policy to the specified Auto Scaling group" + type = any + default = {} +} + variable "instance_refresh" { description = "If this block is configured, start an Instance Refresh when this Auto Scaling Group is updated" type = any @@ -517,22 +581,6 @@ variable "autoscaling_group_tags" { default = {} } -################################################################################ -# Autoscaling group schedule -################################################################################ - -variable "create_schedule" { - description = "Determines whether to create autoscaling group schedule or not" - type = bool - default = true -} - -variable "schedules" { - description = "Map of autoscaling group schedule to create" - type = map(any) - default = {} -} - ################################################################################ # IAM Role ################################################################################ @@ -543,12 +591,6 @@ variable "create_iam_instance_profile" { default = true } -variable "cluster_ip_family" { - description = "The IP family used to assign Kubernetes pod and service addresses. 
Valid values are `ipv4` (default) and `ipv6`" - type = string - default = null -} - variable "iam_instance_profile_arn" { description = "Amazon Resource Name (ARN) of an existing IAM instance profile that provides permissions for the node group. Required if `create_iam_instance_profile` = `false`" type = string @@ -602,3 +644,35 @@ variable "iam_role_tags" { type = map(string) default = {} } + +################################################################################ +# Access Entry +################################################################################ + +variable "create_access_entry" { + description = "Determines whether an access entry is created for the IAM role used by the nodegroup" + type = bool + default = true +} + +variable "iam_role_arn" { + description = "ARN of the IAM role used by the instance profile. Required when `create_access_entry = true` and `create_iam_instance_profile = false`" + type = string + default = null +} + +################################################################################ +# Autoscaling group schedule +################################################################################ + +variable "create_schedule" { + description = "Determines whether to create autoscaling group schedule or not" + type = bool + default = true +} + +variable "schedules" { + description = "Map of autoscaling group schedule to create" + type = map(any) + default = {} +} diff --git a/modules/self-managed-node-group/versions.tf b/modules/self-managed-node-group/versions.tf index 01d187af62..6f83215f50 100644 --- a/modules/self-managed-node-group/versions.tf +++ b/modules/self-managed-node-group/versions.tf @@ -1,10 +1,10 @@ terraform { - required_version = ">= 1.0" + required_version = ">= 1.3.2" required_providers { aws = { source = "hashicorp/aws" - version = ">= 4.57" + version = ">= 5.40" } } } diff --git a/node_groups.tf b/node_groups.tf index db78861a81..7228931071 100644 --- a/node_groups.tf +++ b/node_groups.tf @@ 
-17,6 +17,8 @@ locals { min_healthy_percentage = 66 } } + + kubernetes_network_config = try(aws_eks_cluster.this[0].kubernetes_network_config[0], {}) } # This sleep resource is used to provide a timed gap between the cluster creation and the downstream dependencies @@ -30,9 +32,10 @@ resource "time_sleep" "this" { create_duration = var.dataplane_wait_duration triggers = { - cluster_name = aws_eks_cluster.this[0].name - cluster_endpoint = aws_eks_cluster.this[0].endpoint - cluster_version = aws_eks_cluster.this[0].version + cluster_name = aws_eks_cluster.this[0].name + cluster_endpoint = aws_eks_cluster.this[0].endpoint + cluster_version = aws_eks_cluster.this[0].version + cluster_service_cidr = var.cluster_ip_family == "ipv6" ? try(local.kubernetes_network_config.service_ipv6_cidr, "") : try(local.kubernetes_network_config.service_ipv4_cidr, "") cluster_certificate_authority_data = aws_eks_cluster.this[0].certificate_authority[0].data } @@ -40,7 +43,6 @@ resource "time_sleep" "this" { ################################################################################ # EKS IPV6 CNI Policy -# TODO - hopefully AWS releases a managed policy which can replace this # https://docs.aws.amazon.com/eks/latest/userguide/cni-iam-role.html#cni-iam-role-create-ipv6-policy ################################################################################ @@ -62,7 +64,7 @@ data "aws_iam_policy_document" "cni_ipv6_policy" { statement { sid = "CreateTags" actions = ["ec2:CreateTags"] - resources = ["arn:${data.aws_partition.current.partition}:ec2:*:*:network-interface/*"] + resources = ["arn:${local.partition}:ec2:*:*:network-interface/*"] } } @@ -180,6 +182,27 @@ locals { ipv6_cidr_blocks = var.cluster_ip_family == "ipv6" ? 
["::/0"] : null } } : k => v if var.node_security_group_enable_recommended_rules } + + efa_security_group_rules = { for k, v in + { + ingress_all_self_efa = { + description = "Node to node EFA" + protocol = "-1" + from_port = 0 + to_port = 0 + type = "ingress" + self = true + } + egress_all_self_efa = { + description = "Node to node EFA" + protocol = "-1" + from_port = 0 + to_port = 0 + type = "egress" + self = true + } + } : k => v if var.enable_efa_support + } } resource "aws_security_group" "node" { @@ -206,6 +229,7 @@ resource "aws_security_group" "node" { resource "aws_security_group_rule" "node" { for_each = { for k, v in merge( + local.efa_security_group_rules, local.node_security_group_rules, local.node_security_group_recommended_rules, var.node_security_group_additional_rules, @@ -274,9 +298,8 @@ module "eks_managed_node_group" { create = try(each.value.create, true) - cluster_name = time_sleep.this[0].triggers["cluster_name"] - cluster_version = try(each.value.cluster_version, var.eks_managed_node_group_defaults.cluster_version, time_sleep.this[0].triggers["cluster_version"]) - cluster_ip_family = var.cluster_ip_family + cluster_name = time_sleep.this[0].triggers["cluster_name"] + cluster_version = try(each.value.cluster_version, var.eks_managed_node_group_defaults.cluster_version, time_sleep.this[0].triggers["cluster_version"]) # EKS Managed Node Group name = try(each.value.name, each.key) @@ -288,9 +311,10 @@ module "eks_managed_node_group" { max_size = try(each.value.max_size, var.eks_managed_node_group_defaults.max_size, 3) desired_size = try(each.value.desired_size, var.eks_managed_node_group_defaults.desired_size, 1) - ami_id = try(each.value.ami_id, var.eks_managed_node_group_defaults.ami_id, "") - ami_type = try(each.value.ami_type, var.eks_managed_node_group_defaults.ami_type, null) - ami_release_version = try(each.value.ami_release_version, var.eks_managed_node_group_defaults.ami_release_version, null) + ami_id = try(each.value.ami_id, 
var.eks_managed_node_group_defaults.ami_id, "") + ami_type = try(each.value.ami_type, var.eks_managed_node_group_defaults.ami_type, null) + ami_release_version = try(each.value.ami_release_version, var.eks_managed_node_group_defaults.ami_release_version, null) + use_latest_ami_release_version = try(each.value.use_latest_ami_release_version, var.eks_managed_node_group_defaults.use_latest_ami_release_version, false) capacity_type = try(each.value.capacity_type, var.eks_managed_node_group_defaults.capacity_type, null) disk_size = try(each.value.disk_size, var.eks_managed_node_group_defaults.disk_size, null) @@ -308,11 +332,15 @@ module "eks_managed_node_group" { cluster_endpoint = try(time_sleep.this[0].triggers["cluster_endpoint"], "") cluster_auth_base64 = try(time_sleep.this[0].triggers["cluster_certificate_authority_data"], "") cluster_service_ipv4_cidr = var.cluster_service_ipv4_cidr + cluster_ip_family = var.cluster_ip_family + cluster_service_cidr = try(time_sleep.this[0].triggers["cluster_service_cidr"], "") enable_bootstrap_user_data = try(each.value.enable_bootstrap_user_data, var.eks_managed_node_group_defaults.enable_bootstrap_user_data, false) pre_bootstrap_user_data = try(each.value.pre_bootstrap_user_data, var.eks_managed_node_group_defaults.pre_bootstrap_user_data, "") post_bootstrap_user_data = try(each.value.post_bootstrap_user_data, var.eks_managed_node_group_defaults.post_bootstrap_user_data, "") bootstrap_extra_args = try(each.value.bootstrap_extra_args, var.eks_managed_node_group_defaults.bootstrap_extra_args, "") user_data_template_path = try(each.value.user_data_template_path, var.eks_managed_node_group_defaults.user_data_template_path, "") + cloudinit_pre_nodeadm = try(each.value.cloudinit_pre_nodeadm, var.eks_managed_node_group_defaults.cloudinit_pre_nodeadm, []) + cloudinit_post_nodeadm = try(each.value.cloudinit_post_nodeadm, var.eks_managed_node_group_defaults.cloudinit_post_nodeadm, []) # Launch Template create_launch_template = 
try(each.value.create_launch_template, var.eks_managed_node_group_defaults.create_launch_template, true) @@ -344,6 +372,9 @@ module "eks_managed_node_group" { license_specifications = try(each.value.license_specifications, var.eks_managed_node_group_defaults.license_specifications, {}) metadata_options = try(each.value.metadata_options, var.eks_managed_node_group_defaults.metadata_options, local.metadata_options) enable_monitoring = try(each.value.enable_monitoring, var.eks_managed_node_group_defaults.enable_monitoring, true) + enable_efa_support = try(each.value.enable_efa_support, var.eks_managed_node_group_defaults.enable_efa_support, false) + create_placement_group = try(each.value.create_placement_group, var.eks_managed_node_group_defaults.create_placement_group, false) + placement_group_strategy = try(each.value.placement_group_strategy, var.eks_managed_node_group_defaults.placement_group_strategy, "cluster") network_interfaces = try(each.value.network_interfaces, var.eks_managed_node_group_defaults.network_interfaces, []) placement = try(each.value.placement, var.eks_managed_node_group_defaults.placement, {}) maintenance_options = try(each.value.maintenance_options, var.eks_managed_node_group_defaults.maintenance_options, {}) @@ -363,6 +394,7 @@ module "eks_managed_node_group" { # https://github.com/hashicorp/terraform/issues/31646#issuecomment-1217279031 iam_role_additional_policies = lookup(each.value, "iam_role_additional_policies", lookup(var.eks_managed_node_group_defaults, "iam_role_additional_policies", {})) + # Autoscaling group schedule create_schedule = try(each.value.create_schedule, var.eks_managed_node_group_defaults.create_schedule, true) schedules = try(each.value.schedules, var.eks_managed_node_group_defaults.schedules, {}) @@ -384,8 +416,7 @@ module "self_managed_node_group" { create = try(each.value.create, true) - cluster_name = time_sleep.this[0].triggers["cluster_name"] - cluster_ip_family = var.cluster_ip_family + cluster_name = 
time_sleep.this[0].triggers["cluster_name"] # Autoscaling Group create_autoscaling_group = try(each.value.create_autoscaling_group, var.self_managed_node_group_defaults.create_autoscaling_group, true) @@ -423,26 +454,30 @@ module "self_managed_node_group" { metrics_granularity = try(each.value.metrics_granularity, var.self_managed_node_group_defaults.metrics_granularity, null) service_linked_role_arn = try(each.value.service_linked_role_arn, var.self_managed_node_group_defaults.service_linked_role_arn, null) - initial_lifecycle_hooks = try(each.value.initial_lifecycle_hooks, var.self_managed_node_group_defaults.initial_lifecycle_hooks, []) - instance_refresh = try(each.value.instance_refresh, var.self_managed_node_group_defaults.instance_refresh, local.default_instance_refresh) - use_mixed_instances_policy = try(each.value.use_mixed_instances_policy, var.self_managed_node_group_defaults.use_mixed_instances_policy, false) - mixed_instances_policy = try(each.value.mixed_instances_policy, var.self_managed_node_group_defaults.mixed_instances_policy, null) - warm_pool = try(each.value.warm_pool, var.self_managed_node_group_defaults.warm_pool, {}) - - create_schedule = try(each.value.create_schedule, var.self_managed_node_group_defaults.create_schedule, true) - schedules = try(each.value.schedules, var.self_managed_node_group_defaults.schedules, {}) + initial_lifecycle_hooks = try(each.value.initial_lifecycle_hooks, var.self_managed_node_group_defaults.initial_lifecycle_hooks, []) + instance_maintenance_policy = try(each.value.instance_maintenance_policy, var.self_managed_node_group_defaults.instance_maintenance_policy, {}) + instance_refresh = try(each.value.instance_refresh, var.self_managed_node_group_defaults.instance_refresh, local.default_instance_refresh) + use_mixed_instances_policy = try(each.value.use_mixed_instances_policy, var.self_managed_node_group_defaults.use_mixed_instances_policy, false) + mixed_instances_policy = try(each.value.mixed_instances_policy, 
var.self_managed_node_group_defaults.mixed_instances_policy, null) + warm_pool = try(each.value.warm_pool, var.self_managed_node_group_defaults.warm_pool, {}) delete_timeout = try(each.value.delete_timeout, var.self_managed_node_group_defaults.delete_timeout, null) autoscaling_group_tags = try(each.value.autoscaling_group_tags, var.self_managed_node_group_defaults.autoscaling_group_tags, {}) # User data - platform = try(each.value.platform, var.self_managed_node_group_defaults.platform, "linux") + platform = try(each.value.platform, var.self_managed_node_group_defaults.platform, null) + # TODO - update this when `var.platform` is removed in v21.0 + ami_type = try(each.value.ami_type, var.self_managed_node_group_defaults.ami_type, "AL2_x86_64") cluster_endpoint = try(time_sleep.this[0].triggers["cluster_endpoint"], "") cluster_auth_base64 = try(time_sleep.this[0].triggers["cluster_certificate_authority_data"], "") + cluster_service_cidr = try(time_sleep.this[0].triggers["cluster_service_cidr"], "") + cluster_ip_family = var.cluster_ip_family pre_bootstrap_user_data = try(each.value.pre_bootstrap_user_data, var.self_managed_node_group_defaults.pre_bootstrap_user_data, "") post_bootstrap_user_data = try(each.value.post_bootstrap_user_data, var.self_managed_node_group_defaults.post_bootstrap_user_data, "") bootstrap_extra_args = try(each.value.bootstrap_extra_args, var.self_managed_node_group_defaults.bootstrap_extra_args, "") user_data_template_path = try(each.value.user_data_template_path, var.self_managed_node_group_defaults.user_data_template_path, "") + cloudinit_pre_nodeadm = try(each.value.cloudinit_pre_nodeadm, var.self_managed_node_group_defaults.cloudinit_pre_nodeadm, []) + cloudinit_post_nodeadm = try(each.value.cloudinit_post_nodeadm, var.self_managed_node_group_defaults.cloudinit_post_nodeadm, []) # Launch Template create_launch_template = try(each.value.create_launch_template, var.self_managed_node_group_defaults.create_launch_template, true) @@ -480,6 
+515,7 @@ module "self_managed_node_group" { license_specifications = try(each.value.license_specifications, var.self_managed_node_group_defaults.license_specifications, {}) metadata_options = try(each.value.metadata_options, var.self_managed_node_group_defaults.metadata_options, local.metadata_options) enable_monitoring = try(each.value.enable_monitoring, var.self_managed_node_group_defaults.enable_monitoring, true) + enable_efa_support = try(each.value.enable_efa_support, var.self_managed_node_group_defaults.enable_efa_support, false) network_interfaces = try(each.value.network_interfaces, var.self_managed_node_group_defaults.network_interfaces, []) placement = try(each.value.placement, var.self_managed_node_group_defaults.placement, {}) maintenance_options = try(each.value.maintenance_options, var.self_managed_node_group_defaults.maintenance_options, {}) @@ -499,6 +535,14 @@ module "self_managed_node_group" { # https://github.com/hashicorp/terraform/issues/31646#issuecomment-1217279031 iam_role_additional_policies = lookup(each.value, "iam_role_additional_policies", lookup(var.self_managed_node_group_defaults, "iam_role_additional_policies", {})) + # Access entry + create_access_entry = try(each.value.create_access_entry, var.self_managed_node_group_defaults.create_access_entry, true) + iam_role_arn = try(each.value.iam_role_arn, var.self_managed_node_group_defaults.iam_role_arn, null) + + # Autoscaling group schedule + create_schedule = try(each.value.create_schedule, var.self_managed_node_group_defaults.create_schedule, true) + schedules = try(each.value.schedules, var.self_managed_node_group_defaults.schedules, {}) + # Security group vpc_security_group_ids = compact(concat([local.node_security_group_id], try(each.value.vpc_security_group_ids, var.self_managed_node_group_defaults.vpc_security_group_ids, []))) cluster_primary_security_group_id = try(each.value.attach_cluster_primary_security_group, 
var.self_managed_node_group_defaults.attach_cluster_primary_security_group, false) ? aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id : null diff --git a/outputs.tf b/outputs.tf index ea02a3a8cc..45b68a4a23 100644 --- a/outputs.tf +++ b/outputs.tf @@ -5,16 +5,31 @@ output "cluster_arn" { description = "The Amazon Resource Name (ARN) of the cluster" value = try(aws_eks_cluster.this[0].arn, null) + + depends_on = [ + aws_eks_access_entry.this, + aws_eks_access_policy_association.this, + ] } output "cluster_certificate_authority_data" { description = "Base64 encoded certificate data required to communicate with the cluster" value = try(aws_eks_cluster.this[0].certificate_authority[0].data, null) + + depends_on = [ + aws_eks_access_entry.this, + aws_eks_access_policy_association.this, + ] } output "cluster_endpoint" { description = "Endpoint for your Kubernetes API server" value = try(aws_eks_cluster.this[0].endpoint, null) + + depends_on = [ + aws_eks_access_entry.this, + aws_eks_access_policy_association.this, + ] } output "cluster_id" { @@ -25,6 +40,11 @@ output "cluster_id" { output "cluster_name" { description = "The name of the EKS cluster" value = try(aws_eks_cluster.this[0].name, "") + + depends_on = [ + aws_eks_access_entry.this, + aws_eks_access_policy_association.this, + ] } output "cluster_oidc_issuer_url" { @@ -52,6 +72,30 @@ output "cluster_primary_security_group_id" { value = try(aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id, null) } +output "cluster_service_cidr" { + description = "The CIDR block where Kubernetes pod and service IP addresses are assigned from" + value = var.cluster_ip_family == "ipv6" ? try(aws_eks_cluster.this[0].kubernetes_network_config[0].service_ipv6_cidr, null) : try(aws_eks_cluster.this[0].kubernetes_network_config[0].service_ipv4_cidr, null) +} + +output "cluster_ip_family" { + description = "The IP family used by the cluster (e.g. 
`ipv4` or `ipv6`)" + value = try(aws_eks_cluster.this[0].kubernetes_network_config[0].ip_family, null) +} + +################################################################################ +# Access Entry +################################################################################ + +output "access_entries" { + description = "Map of access entries created and their attributes" + value = aws_eks_access_entry.this +} + +output "access_policy_associations" { + description = "Map of eks cluster access policy associations created and their attributes" + value = aws_eks_access_policy_association.this +} + ################################################################################ # KMS Key ################################################################################ @@ -205,19 +249,3 @@ output "self_managed_node_groups_autoscaling_group_names" { description = "List of the autoscaling group names created by self-managed node groups" value = compact([for group in module.self_managed_node_group : group.autoscaling_group_name]) } - -################################################################################ -# Additional -################################################################################ - -output "aws_auth_configmap_yaml" { - description = "[DEPRECATED - use `var.manage_aws_auth_configmap`] Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles" - value = templatefile("${path.module}/templates/aws_auth_cm.tpl", - { - eks_managed_role_arns = distinct(compact([for group in module.eks_managed_node_group : group.iam_role_arn])) - self_managed_role_arns = distinct(compact([for group in module.self_managed_node_group : group.iam_role_arn if group.platform != "windows"])) - win32_self_managed_role_arns = distinct(compact([for group in module.self_managed_node_group : group.iam_role_arn if group.platform == "windows"])) - fargate_profile_pod_execution_role_arns = distinct(compact([for group in 
module.fargate_profile : group.fargate_profile_pod_execution_role_arn])) - } - ) -} diff --git a/templates/al2023_user_data.tpl b/templates/al2023_user_data.tpl new file mode 100644 index 0000000000..cc360e6d65 --- /dev/null +++ b/templates/al2023_user_data.tpl @@ -0,0 +1,11 @@ +%{ if enable_bootstrap_user_data ~} +--- +apiVersion: node.eks.aws/v1alpha1 +kind: NodeConfig +spec: + cluster: + name: ${cluster_name} + apiServerEndpoint: ${cluster_endpoint} + certificateAuthority: ${cluster_auth_base64} + cidr: ${cluster_service_cidr} +%{ endif ~} diff --git a/templates/aws_auth_cm.tpl b/templates/aws_auth_cm.tpl deleted file mode 100644 index 73a898e966..0000000000 --- a/templates/aws_auth_cm.tpl +++ /dev/null @@ -1,37 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: aws-auth - namespace: kube-system -data: - mapRoles: | -%{ for role in eks_managed_role_arns ~} - - rolearn: ${role} - username: system:node:{{EC2PrivateDNSName}} - groups: - - system:bootstrappers - - system:nodes -%{ endfor ~} -%{ for role in self_managed_role_arns ~} - - rolearn: ${role} - username: system:node:{{EC2PrivateDNSName}} - groups: - - system:bootstrappers - - system:nodes -%{ endfor ~} -%{ for role in win32_self_managed_role_arns ~} - - rolearn: ${role} - username: system:node:{{EC2PrivateDNSName}} - groups: - - eks:kube-proxy-windows - - system:bootstrappers - - system:nodes -%{ endfor ~} -%{ for role in fargate_profile_pod_execution_role_arns ~} - - rolearn: ${role} - username: system:node:{{SessionName}} - groups: - - system:bootstrappers - - system:nodes - - system:node-proxier -%{ endfor ~} diff --git a/templates/bottlerocket_user_data.tpl b/templates/bottlerocket_user_data.tpl index 640c801438..666d666069 100644 --- a/templates/bottlerocket_user_data.tpl +++ b/templates/bottlerocket_user_data.tpl @@ -3,5 +3,6 @@ "cluster-name" = "${cluster_name}" "api-server" = "${cluster_endpoint}" "cluster-certificate" = "${cluster_auth_base64}" +"cluster-dns-ip" = ${cluster_dns_ips} %{ 
endif ~} ${bootstrap_extra_args ~} diff --git a/templates/linux_user_data.tpl b/templates/linux_user_data.tpl index 14acbd2aff..d75d549ccc 100644 --- a/templates/linux_user_data.tpl +++ b/templates/linux_user_data.tpl @@ -3,12 +3,10 @@ set -e %{ endif ~} ${pre_bootstrap_user_data ~} -%{ if length(cluster_service_ipv4_cidr) > 0 ~} -export SERVICE_IPV4_CIDR=${cluster_service_ipv4_cidr} -%{ endif ~} %{ if enable_bootstrap_user_data ~} B64_CLUSTER_CA=${cluster_auth_base64} API_SERVER_URL=${cluster_endpoint} -/etc/eks/bootstrap.sh ${cluster_name} ${bootstrap_extra_args} --b64-cluster-ca $B64_CLUSTER_CA --apiserver-endpoint $API_SERVER_URL +/etc/eks/bootstrap.sh ${cluster_name} ${bootstrap_extra_args} --b64-cluster-ca $B64_CLUSTER_CA --apiserver-endpoint $API_SERVER_URL \ + --ip-family ${cluster_ip_family} --service-${cluster_ip_family}-cidr ${cluster_service_cidr} ${post_bootstrap_user_data ~} %{ endif ~} diff --git a/templates/windows_user_data.tpl b/templates/windows_user_data.tpl index 5000850604..9721d3cc33 100644 --- a/templates/windows_user_data.tpl +++ b/templates/windows_user_data.tpl @@ -1,5 +1,8 @@ +%{ if enable_bootstrap_user_data ~} +%{ endif ~} ${pre_bootstrap_user_data ~} +%{ if enable_bootstrap_user_data ~} [string]$EKSBinDir = "$env:ProgramFiles\Amazon\EKS" [string]$EKSBootstrapScriptName = 'Start-EKSBootstrap.ps1' [string]$EKSBootstrapScriptFile = "$EKSBinDir\$EKSBootstrapScriptName" @@ -7,3 +10,4 @@ ${pre_bootstrap_user_data ~} $LastError = if ($?) 
{ 0 } else { $Error[0].Exception.HResult } ${post_bootstrap_user_data ~} +%{ endif ~} diff --git a/variables.tf b/variables.tf index 988b97970c..639110a9fc 100644 --- a/variables.tf +++ b/variables.tf @@ -1,5 +1,5 @@ variable "create" { - description = "Controls if EKS resources should be created (affects nearly all resources)" + description = "Controls if resources should be created (affects nearly all resources)" type = bool default = true } @@ -38,6 +38,12 @@ variable "cluster_enabled_log_types" { default = ["audit", "api", "authenticator"] } +variable "authentication_mode" { + description = "The authentication mode for the cluster. Valid values are `CONFIG_MAP`, `API` or `API_AND_CONFIG_MAP`" + type = string + default = "API_AND_CONFIG_MAP" +} + variable "cluster_additional_security_group_ids" { description = "List of additional, externally created security group IDs to attach to the cluster control plane" type = list(string) @@ -77,7 +83,7 @@ variable "cluster_endpoint_public_access_cidrs" { variable "cluster_ip_family" { description = "The IP family used to assign Kubernetes pod and service addresses. Valid values are `ipv4` (default) and `ipv6`. 
You can only specify an IP family when you create a cluster, changing this value will force a new cluster to be created" type = string - default = null + default = "ipv4" } variable "cluster_service_ipv4_cidr" { @@ -130,6 +136,22 @@ variable "cluster_timeouts" { default = {} } +################################################################################ +# Access Entry +################################################################################ + +variable "access_entries" { + description = "Map of access entries to add to the cluster" + type = any + default = {} +} + +variable "enable_cluster_creator_admin_permissions" { + description = "Indicates whether or not to add the cluster creator (the identity used by Terraform) as an administrator via access entry" + type = bool + default = false +} + ################################################################################ # KMS Key ################################################################################ @@ -153,15 +175,15 @@ variable "kms_key_deletion_window_in_days" { } variable "enable_kms_key_rotation" { - description = "Specifies whether key rotation is enabled. Defaults to `true`" + description = "Specifies whether key rotation is enabled" type = bool default = true } variable "kms_key_enable_default_policy" { - description = "Specifies whether to enable the default key policy. Defaults to `false`" + description = "Specifies whether to enable the default key policy" type = bool - default = false + default = true } variable "kms_key_owners" { @@ -228,6 +250,18 @@ variable "cloudwatch_log_group_kms_key_id" { default = null } +variable "cloudwatch_log_group_class" { + description = "Specified the log class of the log group. 
Possible values are: `STANDARD` or `INFREQUENT_ACCESS`" + type = string + default = null +} + +variable "cloudwatch_log_group_tags" { + description = "A map of additional tags to add to the cloudwatch log group created" + type = map(string) + default = {} +} + ################################################################################ # Cluster Security Group ################################################################################ @@ -342,6 +376,12 @@ variable "node_security_group_tags" { default = {} } +variable "enable_efa_support" { + description = "Determines whether to enable Elastic Fabric Adapter (EFA) support" + type = bool + default = false +} + ################################################################################ # IRSA ################################################################################ @@ -358,6 +398,12 @@ variable "openid_connect_audiences" { default = [] } +variable "include_oidc_root_ca_thumbprint" { + description = "Determines whether to include the root CA thumbprint in the OpenID Connect (OIDC) identity provider's server certificate(s)" + type = bool + default = true +} + variable "custom_oidc_thumbprints" { description = "Additional list of server certificate thumbprints for the OpenID Connect (OIDC) identity provider's server certificate(s)" type = list(string) @@ -416,14 +462,6 @@ variable "iam_role_additional_policies" { default = {} } -# TODO - hopefully this can be removed once the AWS endpoint is named properly in China -# https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1904 -variable "cluster_iam_role_dns_suffix" { - description = "Base DNS domain name for the current partition (e.g., amazonaws.com in AWS Commercial, amazonaws.com.cn in AWS China)" - type = string - default = null -} - variable "iam_role_tags" { description = "A map of additional tags to add to the IAM role created" type = map(string) @@ -545,55 +583,3 @@ variable "putin_khuylo" { type = bool default = true } - 
-################################################################################ -# aws-auth configmap -################################################################################ - -variable "manage_aws_auth_configmap" { - description = "Determines whether to manage the aws-auth configmap" - type = bool - default = false -} - -variable "create_aws_auth_configmap" { - description = "Determines whether to create the aws-auth configmap. NOTE - this is only intended for scenarios where the configmap does not exist (i.e. - when using only self-managed node groups). Most users should use `manage_aws_auth_configmap`" - type = bool - default = false -} - -variable "aws_auth_node_iam_role_arns_non_windows" { - description = "List of non-Windows based node IAM role ARNs to add to the aws-auth configmap" - type = list(string) - default = [] -} - -variable "aws_auth_node_iam_role_arns_windows" { - description = "List of Windows based node IAM role ARNs to add to the aws-auth configmap" - type = list(string) - default = [] -} - -variable "aws_auth_fargate_profile_pod_execution_role_arns" { - description = "List of Fargate profile pod execution role ARNs to add to the aws-auth configmap" - type = list(string) - default = [] -} - -variable "aws_auth_roles" { - description = "List of role maps to add to the aws-auth configmap" - type = list(any) - default = [] -} - -variable "aws_auth_users" { - description = "List of user maps to add to the aws-auth configmap" - type = list(any) - default = [] -} - -variable "aws_auth_accounts" { - description = "List of account maps to add to the aws-auth configmap" - type = list(any) - default = [] -} diff --git a/versions.tf b/versions.tf index 1dcaa257ea..d0f347a88a 100644 --- a/versions.tf +++ b/versions.tf @@ -1,19 +1,15 @@ terraform { - required_version = ">= 1.2.0" + required_version = ">= 1.3.2" required_providers { aws = { source = "hashicorp/aws" - version = ">= 4.47" + version = ">= 5.40" } tls = { source = "hashicorp/tls" 
version = ">= 3.0" } - kubernetes = { - source = "hashicorp/kubernetes" - version = ">= 2.10" - } time = { source = "hashicorp/time" version = ">= 0.9"