From ecec21044a6236eccc52046e009b3eb312f4a714 Mon Sep 17 00:00:00 2001 From: Richard Case Date: Sat, 7 Aug 2021 14:38:01 +0100 Subject: [PATCH] feat: graduate eks This change graduates the EKS functionality out of experimental, which means it is now enabled by default. As part of the graduation, the separate control plane and bootstrap providers have been removed and merged into the main AWS infrastructure provider. clusterawsadm has been changed so that EKS is enabled by default, which ensures that the correct prerequisites are in place by default. The option in the bootstrap config to enable EKS has been changed to a disable option. The docs have also been updated to reflect that EKS is enabled by default. Fargate profiles are now enabled via the **EKSFargate** feature flag and remain experimental. Signed-off-by: Richard Case --- Makefile | 127 +-------- .../eks/config/certmanager/certificate.yaml | 26 -- .../eks/config/certmanager/kustomization.yaml | 5 - .../config/certmanager/kustomizeconfig.yaml | 19 -- bootstrap/eks/config/crd/kustomization.yaml | 28 -- bootstrap/eks/config/crd/kustomizeconfig.yaml | 17 -- .../eks/config/default/kustomization.yaml | 59 ----- .../eks/config/default/kustomizeconfig.yaml | 4 - .../default/manager_auth_proxy_patch.yaml | 21 -- .../eks/config/default/manager_iam_patch.yaml | 12 - .../config/default/manager_image_patch.yaml | 11 - .../config/default/manager_pull_policy.yaml | 11 - .../manager_service_account_patch.yaml | 11 - .../config/default/manager_webhook_patch.yaml | 23 -- bootstrap/eks/config/default/namespace.yaml | 6 - .../default/webhookcainjection_patch.yaml | 7 - .../eks/config/manager/kustomization.yaml | 2 - bootstrap/eks/config/manager/manager.yaml | 45 ---- .../config/manager/manager_args_patch.yaml | 16 -- .../rbac/auth_proxy_client_clusterrole.yaml | 7 - .../eks/config/rbac/auth_proxy_role.yaml | 13 - .../config/rbac/auth_proxy_role_binding.yaml | 12 - .../eks/config/rbac/auth_proxy_service.yaml | 14 - .../config/rbac/eksconfig_editor_role.yaml | 24 -- .../config/rbac/eksconfig_viewer_role.yaml | 20 -- .../rbac/eksconfigtemplate_editor_role.yaml | 24 -- .../rbac/eksconfigtemplate_viewer_role.yaml | 20 -- bootstrap/eks/config/rbac/kustomization.yaml | 13 - .../eks/config/rbac/leader_election_role.yaml | 44 ---- .../rbac/leader_election_role_binding.yaml | 12 - bootstrap/eks/config/rbac/role.yaml | 65 ----- bootstrap/eks/config/rbac/role_binding.yaml | 12 - bootstrap/eks/config/rbac/serviceaccount.yaml | 11 - .../eks/config/webhook/kustomization.yaml | 6 - .../eks/config/webhook/kustomizeconfig.yaml | 25 -- bootstrap/eks/config/webhook/manifests.yaml | 100 ------- bootstrap/eks/config/webhook/service.yaml | 10 - bootstrap/eks/controllers/suite_test.go | 1 - bootstrap/eks/main.go | 189 -------------- .../api/bootstrap/v1alpha1/defaults.go | 6 +- .../api/bootstrap/v1alpha1/types.go | 4 +- .../bootstrap/cluster_api_controller.go | 2 +- .../bootstrap/cluster_api_node.go | 2 +- .../bootstrap/fixtures/customsuffix.yaml | 108 ++++++++ .../bootstrap/fixtures/default.yaml | 108 ++++++++ .../fixtures/with_all_secret_backends.yaml | 108 ++++++++ .../fixtures/with_bootstrap_user.yaml | 108 ++++++++ .../fixtures/with_custom_bootstrap_user.yaml | 108 ++++++++ .../with_different_instance_profiles.yaml | 108 ++++++++ ..._eks_enable.yaml => with_eks_disable.yaml} | 94 ------- .../fixtures/with_eks_kms_prefix.yaml | 15 ++ .../fixtures/with_extra_statements.yaml | 108 ++++++++ .../fixtures/with_ssm_secret_backend.yaml | 108 ++++++++ 
.../cloudformation/bootstrap/template.go | 6 +- .../cloudformation/bootstrap/template_test.go | 19 +- ...bootstrap.cluster.x-k8s.io_eksconfigs.yaml | 0 ...p.cluster.x-k8s.io_eksconfigtemplates.yaml | 0 ...ster.x-k8s.io_awsmanagedcontrolplanes.yaml | 0 ...e.cluster.x-k8s.io_awsmanagedclusters.yaml | 87 ------- config/crd/kustomization.yaml | 10 +- ...ainjection_in_awsmanagedcontrolplanes.yaml | 0 .../patches/cainjection_in_eksconfigs.yaml | 0 .../cainjection_in_eksconfigtemplates.yaml | 0 .../webhook_in_awsmanagedcontrolplanes.yaml | 0 .../crd/patches/webhook_in_eksconfigs.yaml | 0 .../webhook_in_eksconfigtemplates.yaml | 0 config/manager/manager.yaml | 2 +- config/rbac/role.yaml | 99 ++++++- config/webhook/manifests.yaml | 112 +++++++- controlplane/eks/api/v1alpha4/suite_test.go | 4 +- .../eks/config/certmanager/certificate.yaml | 26 -- .../eks/config/certmanager/kustomization.yaml | 5 - .../config/certmanager/kustomizeconfig.yaml | 19 -- .../eks/config/crd/kustomization.yaml | 25 -- .../eks/config/crd/kustomizeconfig.yaml | 17 -- .../eks/config/default/credentials.yaml | 8 - .../eks/config/default/kustomization.yaml | 58 ----- .../eks/config/default/kustomizeconfig.yaml | 4 - .../default/manager_auth_proxy_patch.yaml | 21 -- .../default/manager_credentials_patch.yaml | 20 -- .../eks/config/default/manager_iam_patch.yaml | 12 - .../config/default/manager_image_patch.yaml | 11 - .../config/default/manager_pull_policy.yaml | 11 - .../manager_service_account_patch.yaml | 11 - .../config/default/manager_webhook_patch.yaml | 23 -- .../eks/config/default/namespace.yaml | 6 - .../default/webhookcainjection_patch.yaml | 13 - .../eks/config/manager/kustomization.yaml | 2 - controlplane/eks/config/manager/manager.yaml | 45 ---- .../config/manager/manager_args_patch.yaml | 17 -- .../eks/config/rbac/auth_proxy_role.yaml | 13 - .../config/rbac/auth_proxy_role_binding.yaml | 12 - .../eks/config/rbac/auth_proxy_service.yaml | 14 - .../awsmanagedcontrolplane_editor_role.yaml | 24 -- .../awsmanagedcontrolplane_viewer_role.yaml | 20 -- .../eks/config/rbac/kustomization.yaml | 12 - .../eks/config/rbac/leader_election_role.yaml | 44 ---- .../rbac/leader_election_role_binding.yaml | 12 - controlplane/eks/config/rbac/role.yaml | 113 -------- .../eks/config/rbac/role_binding.yaml | 12 - .../eks/config/rbac/serviceaccount.yaml | 9 - .../eks/config/webhook/kustomization.yaml | 6 - .../eks/config/webhook/kustomizeconfig.yaml | 25 -- .../eks/config/webhook/manifests.yaml | 58 ----- controlplane/eks/config/webhook/service.yaml | 10 - .../awsmanagedcontrolplane_controller.go | 75 +----- controlplane/eks/main.go | 244 ------------------ docs/book/src/development/development.md | 43 +-- docs/book/src/topics/eks/disabling.md | 30 +++ docs/book/src/topics/eks/enabling.md | 64 +++-- docs/book/src/topics/eks/index.md | 14 +- docs/book/src/topics/eks/prerequisites.md | 2 +- ...-clusterawsadm-to-fulfill-prerequisites.md | 7 +- exp/api/v1alpha3/conversion.go | 28 -- exp/api/v1alpha3/conversion_test.go | 18 +- exp/api/v1alpha3/webhook_suite_test.go | 3 - exp/api/v1alpha3/zz_generated.conversion.go | 160 ------------ exp/api/v1alpha4/awsmanagecluster_webhook.go | 55 ---- exp/api/v1alpha4/awsmanagedcluster_types.go | 71 ----- exp/api/v1alpha4/conversion.go | 6 - exp/api/v1alpha4/zz_generated.deepcopy.go | 97 ------- .../awsmanagedcluster_controller.go | 189 -------------- feature/feature.go | 12 +- main.go | 108 +++++--- templates/cluster-template-eks-fargate.yaml | 6 +- test/e2e/data/e2e_eks_conf.yaml | 41 --- 
test/e2e/shared/identity.go | 18 -- test/e2e/shared/template.go | 2 +- tilt-provider.json | 27 +- 129 files changed, 1325 insertions(+), 3131 deletions(-) delete mode 100644 bootstrap/eks/config/certmanager/certificate.yaml delete mode 100644 bootstrap/eks/config/certmanager/kustomization.yaml delete mode 100644 bootstrap/eks/config/certmanager/kustomizeconfig.yaml delete mode 100644 bootstrap/eks/config/crd/kustomization.yaml delete mode 100644 bootstrap/eks/config/crd/kustomizeconfig.yaml delete mode 100644 bootstrap/eks/config/default/kustomization.yaml delete mode 100644 bootstrap/eks/config/default/kustomizeconfig.yaml delete mode 100644 bootstrap/eks/config/default/manager_auth_proxy_patch.yaml delete mode 100644 bootstrap/eks/config/default/manager_iam_patch.yaml delete mode 100644 bootstrap/eks/config/default/manager_image_patch.yaml delete mode 100644 bootstrap/eks/config/default/manager_pull_policy.yaml delete mode 100644 bootstrap/eks/config/default/manager_service_account_patch.yaml delete mode 100644 bootstrap/eks/config/default/manager_webhook_patch.yaml delete mode 100644 bootstrap/eks/config/default/namespace.yaml delete mode 100644 bootstrap/eks/config/default/webhookcainjection_patch.yaml delete mode 100644 bootstrap/eks/config/manager/kustomization.yaml delete mode 100644 bootstrap/eks/config/manager/manager.yaml delete mode 100644 bootstrap/eks/config/manager/manager_args_patch.yaml delete mode 100644 bootstrap/eks/config/rbac/auth_proxy_client_clusterrole.yaml delete mode 100644 bootstrap/eks/config/rbac/auth_proxy_role.yaml delete mode 100644 bootstrap/eks/config/rbac/auth_proxy_role_binding.yaml delete mode 100644 bootstrap/eks/config/rbac/auth_proxy_service.yaml delete mode 100644 bootstrap/eks/config/rbac/eksconfig_editor_role.yaml delete mode 100644 bootstrap/eks/config/rbac/eksconfig_viewer_role.yaml delete mode 100644 bootstrap/eks/config/rbac/eksconfigtemplate_editor_role.yaml delete mode 100644 bootstrap/eks/config/rbac/eksconfigtemplate_viewer_role.yaml delete mode 100644 bootstrap/eks/config/rbac/kustomization.yaml delete mode 100644 bootstrap/eks/config/rbac/leader_election_role.yaml delete mode 100644 bootstrap/eks/config/rbac/leader_election_role_binding.yaml delete mode 100644 bootstrap/eks/config/rbac/role.yaml delete mode 100644 bootstrap/eks/config/rbac/role_binding.yaml delete mode 100644 bootstrap/eks/config/rbac/serviceaccount.yaml delete mode 100644 bootstrap/eks/config/webhook/kustomization.yaml delete mode 100644 bootstrap/eks/config/webhook/kustomizeconfig.yaml delete mode 100644 bootstrap/eks/config/webhook/manifests.yaml delete mode 100644 bootstrap/eks/config/webhook/service.yaml delete mode 100644 bootstrap/eks/main.go rename cmd/clusterawsadm/cloudformation/bootstrap/fixtures/{with_eks_enable.yaml => with_eks_disable.yaml} (77%) rename {bootstrap/eks/config => config}/crd/bases/bootstrap.cluster.x-k8s.io_eksconfigs.yaml (100%) rename {bootstrap/eks/config => config}/crd/bases/bootstrap.cluster.x-k8s.io_eksconfigtemplates.yaml (100%) rename {controlplane/eks/config => config}/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml (100%) rename {controlplane/eks/config => config}/crd/patches/cainjection_in_awsmanagedcontrolplanes.yaml (100%) rename {bootstrap/eks/config => config}/crd/patches/cainjection_in_eksconfigs.yaml (100%) rename {bootstrap/eks/config => config}/crd/patches/cainjection_in_eksconfigtemplates.yaml (100%) rename {controlplane/eks/config => config}/crd/patches/webhook_in_awsmanagedcontrolplanes.yaml (100%) 
rename {bootstrap/eks/config => config}/crd/patches/webhook_in_eksconfigs.yaml (100%) rename {bootstrap/eks/config => config}/crd/patches/webhook_in_eksconfigtemplates.yaml (100%) delete mode 100644 controlplane/eks/config/certmanager/certificate.yaml delete mode 100644 controlplane/eks/config/certmanager/kustomization.yaml delete mode 100644 controlplane/eks/config/certmanager/kustomizeconfig.yaml delete mode 100644 controlplane/eks/config/crd/kustomization.yaml delete mode 100644 controlplane/eks/config/crd/kustomizeconfig.yaml delete mode 100644 controlplane/eks/config/default/credentials.yaml delete mode 100644 controlplane/eks/config/default/kustomization.yaml delete mode 100644 controlplane/eks/config/default/kustomizeconfig.yaml delete mode 100644 controlplane/eks/config/default/manager_auth_proxy_patch.yaml delete mode 100644 controlplane/eks/config/default/manager_credentials_patch.yaml delete mode 100644 controlplane/eks/config/default/manager_iam_patch.yaml delete mode 100644 controlplane/eks/config/default/manager_image_patch.yaml delete mode 100644 controlplane/eks/config/default/manager_pull_policy.yaml delete mode 100644 controlplane/eks/config/default/manager_service_account_patch.yaml delete mode 100644 controlplane/eks/config/default/manager_webhook_patch.yaml delete mode 100644 controlplane/eks/config/default/namespace.yaml delete mode 100644 controlplane/eks/config/default/webhookcainjection_patch.yaml delete mode 100644 controlplane/eks/config/manager/kustomization.yaml delete mode 100644 controlplane/eks/config/manager/manager.yaml delete mode 100644 controlplane/eks/config/manager/manager_args_patch.yaml delete mode 100644 controlplane/eks/config/rbac/auth_proxy_role.yaml delete mode 100644 controlplane/eks/config/rbac/auth_proxy_role_binding.yaml delete mode 100644 controlplane/eks/config/rbac/auth_proxy_service.yaml delete mode 100644 controlplane/eks/config/rbac/awsmanagedcontrolplane_editor_role.yaml delete mode 100644 controlplane/eks/config/rbac/awsmanagedcontrolplane_viewer_role.yaml delete mode 100644 controlplane/eks/config/rbac/kustomization.yaml delete mode 100644 controlplane/eks/config/rbac/leader_election_role.yaml delete mode 100644 controlplane/eks/config/rbac/leader_election_role_binding.yaml delete mode 100644 controlplane/eks/config/rbac/role.yaml delete mode 100644 controlplane/eks/config/rbac/role_binding.yaml delete mode 100644 controlplane/eks/config/rbac/serviceaccount.yaml delete mode 100644 controlplane/eks/config/webhook/kustomization.yaml delete mode 100644 controlplane/eks/config/webhook/kustomizeconfig.yaml delete mode 100644 controlplane/eks/config/webhook/manifests.yaml delete mode 100644 controlplane/eks/config/webhook/service.yaml delete mode 100644 controlplane/eks/main.go create mode 100644 docs/book/src/topics/eks/disabling.md delete mode 100644 exp/api/v1alpha4/awsmanagecluster_webhook.go delete mode 100644 exp/api/v1alpha4/awsmanagedcluster_types.go delete mode 100644 exp/controllers/awsmanagedcluster_controller.go diff --git a/Makefile b/Makefile index 762e112419..dace75ec31 100644 --- a/Makefile +++ b/Makefile @@ -97,24 +97,6 @@ CORE_MANIFEST_FILE := infrastructure-components CORE_CONFIG_DIR := config/default CORE_NAMESPACE := capa-system -# bootstrap -EKS_BOOTSTRAP_IMAGE_NAME ?= eks-bootstrap-controller -EKS_BOOTSTRAP_CONTROLLER_IMG ?= $(REGISTRY)/$(EKS_BOOTSTRAP_IMAGE_NAME) -EKS_BOOTSTRAP_CONTROLLER_ORIGINAL_IMG := gcr.io/k8s-staging-cluster-api-aws/eks-bootstrap-controller -EKS_BOOTSTRAP_CONTROLLER_NAME := 
capa-eks-bootstrap-controller-manager -EKS_BOOTSTRAP_MANIFEST_FILE := eks-bootstrap-components -EKS_BOOTSTRAP_CONFIG_DIR := bootstrap/eks/config/default -EKS_BOOTSTRAP_NAMESPACE := capa-eks-bootstrap-system - -# bootstrap -EKS_CONTROLPLANE_IMAGE_NAME ?= eks-controlplane-controller -EKS_CONTROLPLANE_CONTROLLER_IMG ?= $(REGISTRY)/$(EKS_CONTROLPLANE_IMAGE_NAME) -EKS_CONTROLPLANE_CONTROLLER_ORIGINAL_IMG := gcr.io/k8s-staging-cluster-api-aws/eks-controlplane-controller -EKS_CONTROLPLANE_CONTROLLER_NAME := capa-eks-control-plane-controller-manager -EKS_CONTROLPLANE_MANIFEST_FILE := eks-controlplane-components -EKS_CONTROLPLANE_CONFIG_DIR := controlplane/eks/config/default -EKS_CONTROLPLANE_NAMESPACE := capa-eks-control-plane-system - # Allow overriding manifest generation destination directory MANIFEST_ROOT ?= config CRD_ROOT ?= $(MANIFEST_ROOT)/crd/bases @@ -207,8 +189,6 @@ test-e2e-eks: $(GINKGO) $(KIND) $(SSM_PLUGIN) $(KUSTOMIZE) e2e-image ## Run eks .PHONY: e2e-image e2e-image: docker-pull-prerequisites docker build -f Dockerfile --tag="gcr.io/k8s-staging-cluster-api/capa-manager:e2e" . - docker build -f Dockerfile --tag="gcr.io/k8s-staging-cluster-api/capa-eks-bootstrap-manager:e2e" --build-arg package=./bootstrap/eks . - docker build -f Dockerfile --tag="gcr.io/k8s-staging-cluster-api/capa-eks-controlplane-manager:e2e" --build-arg package=./controlplane/eks . CONFORMANCE_E2E_ARGS ?= -kubetest.config-file=$(KUBETEST_CONF_PATH) CONFORMANCE_E2E_ARGS += $(E2E_ARGS) @@ -231,22 +211,11 @@ binaries: managers clusterawsadm ## Builds and installs all binaries .PHONY: managers managers: $(MAKE) manager-aws-infrastructure - $(MAKE) manager-eks-bootstrap - $(MAKE) manager-eks-controlplane .PHONY: manager-aws-infrastructure manager-aws-infrastructure: ## Build manager binary. CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags "${LDFLAGS} -extldflags '-static'" -o $(BIN_DIR)/manager . -.PHONY: manager-eks-bootstrap -manager-eks-bootstrap: - go build -ldflags "$(LDFLAGS)" -o $(BIN_DIR)/eks-bootstrap-manager sigs.k8s.io/cluster-api-provider-aws/bootstrap/eks - -.PHONY: manager-eks-controlplane -manager-eks-controlplane: - go build -ldflags "$(LDFLAGS)" -o $(BIN_DIR)/eks-controlplane-manager sigs.k8s.io/cluster-api-provider-aws/controlplane/eks - - .PHONY: clusterawsadm clusterawsadm: ## Build clusterawsadm binary. go build -ldflags "$(LDFLAGS)" -o $(BIN_DIR)/clusterawsadm ./cmd/clusterawsadm @@ -331,16 +300,12 @@ generate-go-eks-controlplane: $(CONTROLLER_GEN) $(CONVERSION_GEN) --go-header-file=./hack/boilerplate/boilerplate.generatego.txt .PHONY: generate-manifests -generate-manifests: - $(MAKE) generate-core-manifests - $(MAKE) generate-eks-bootstrap-manifests - $(MAKE) generate-eks-controlplane-manifests - -.PHONY: generate-core-manifests -generate-core-manifests:$(CONTROLLER_GEN) ## Generate manifests e.g. CRD, RBAC etc. +generate-manifests: $(CONTROLLER_GEN) ## Generate manifests e.g. CRD, RBAC etc. $(CONTROLLER_GEN) \ paths=./api/... \ paths=./$(EXP_DIR)/api/... \ + paths=./controlplane/eks/api/...\ + paths=./bootstrap/eks/api/... \ crd:crdVersions=v1 \ output:crd:dir=$(CRD_ROOT) \ output:webhook:dir=$(WEBHOOK_ROOT) \ @@ -348,60 +313,22 @@ generate-core-manifests:$(CONTROLLER_GEN) ## Generate manifests e.g. CRD, RBAC e $(CONTROLLER_GEN) \ paths=./controllers/... \ paths=./$(EXP_DIR)/controllers/... 
\ - output:rbac:dir=$(RBAC_ROOT) \ - rbac:roleName=manager-role - -.PHONY: generate-eks-bootstrap-manifests -generate-eks-bootstrap-manifests: $(CONTROLLER_GEN) - $(CONTROLLER_GEN) \ - paths=./bootstrap/eks/api/... \ paths=./bootstrap/eks/controllers/... \ - crd:crdVersions=v1 \ - rbac:roleName=manager-role \ - output:crd:dir=./bootstrap/eks/config/crd/bases \ - output:rbac:dir=./bootstrap/eks/config/rbac \ - output:webhook:dir=./bootstrap/eks/config/webhook \ - webhook - -.PHONY: generate-eks-controlplane-manifests -generate-eks-controlplane-manifests: $(CONTROLLER_GEN) - $(CONTROLLER_GEN) \ - paths=./controlplane/eks/api/... \ paths=./controlplane/eks/controllers/... \ - crd:crdVersions=v1 \ - rbac:roleName=manager-role \ - output:crd:dir=./controlplane/eks/config/crd/bases \ - output:rbac:dir=./controlplane/eks/config/rbac \ - output:webhook:dir=./controlplane/eks/config/webhook \ - webhook + output:rbac:dir=$(RBAC_ROOT) \ + rbac:roleName=manager-role ## -------------------------------------- ## Docker ## -------------------------------------- .PHONY: docker-build -docker-build: - $(MAKE) ARCH=$(ARCH) docker-build-core - $(MAKE) ARCH=$(ARCH) docker-build-eks-bootstrap - $(MAKE) ARCH=$(ARCH) docker-build-eks-controlplane - -.PHONY: docker-build-core -docker-build-core: docker-pull-prerequisites ## Build the docker image for controller-manager +docker-build: docker-pull-prerequisites ## Build the docker image for controller-manager docker build --build-arg ARCH=$(ARCH) --build-arg LDFLAGS="$(LDFLAGS)" . -t $(CORE_CONTROLLER_IMG)-$(ARCH):$(TAG) -.PHONY: docker-build-eks-bootstrap -docker-build-eks-bootstrap: docker-pull-prerequisites - docker build --build-arg ARCH=$(ARCH) --build-arg LDFLAGS="$(LDFLAGS)" --build-arg package=./bootstrap/eks . -t $(EKS_BOOTSTRAP_CONTROLLER_IMG)-$(ARCH):$(TAG) - -.PHONY: docker-build-eks-controlplane -docker-build-eks-controlplane: docker-pull-prerequisites - docker build --build-arg ARCH=$(ARCH) --build-arg LDFLAGS="$(LDFLAGS)" --build-arg package=./controlplane/eks . -t $(EKS_CONTROLPLANE_CONTROLLER_IMG)-$(ARCH):$(TAG) - .PHONY: docker-push docker-push: ## Push the docker image docker push $(CORE_CONTROLLER_IMG)-$(ARCH):$(TAG) - docker push $(EKS_BOOTSTRAP_CONTROLLER_IMG)-$(ARCH):$(TAG) - docker push $(EKS_CONTROLPLANE_CONTROLLER_IMG)-$(ARCH):$(TAG) .PHONY: docker-pull-prerequisites docker-pull-prerequisites: @@ -422,8 +349,6 @@ docker-build-%: .PHONY: docker-push-all ## Push all the architecture docker images docker-push-all: $(addprefix docker-push-,$(ALL_ARCH)) $(MAKE) docker-push-core-manifest - $(MAKE) docker-push-eks-bootstrap-manifest - $(MAKE) docker-push-eks-controlplane-manifest docker-push-%: $(MAKE) ARCH=$* docker-push @@ -433,16 +358,6 @@ docker-push-core-manifest: ## Push the fat manifest docker image. ## Minimum docker version 18.06.0 is required for creating and pushing manifest images. $(MAKE) docker-push-manifest CONTROLLER_IMG=$(CORE_CONTROLLER_IMG) MANIFEST_FILE=$(CORE_MANIFEST_FILE) -.PHONY: docker-push-eks-bootstrap-manifest -docker-push-eks-bootstrap-manifest: ## Push the fat manifest docker image. - ## Minimum docker version 18.06.0 is required for creating and pushing manifest images. - $(MAKE) docker-push-manifest CONTROLLER_IMG=$(EKS_BOOTSTRAP_CONTROLLER_IMG) MANIFEST_FILE=$(EKS_BOOTSTRAP_MANIFEST_FILE) - -.PHONY: docker-push-eks-controlplane-manifest -docker-push-eks-controlplane-manifest: ## Push the fat manifest docker image. - ## Minimum docker version 18.06.0 is required for creating and pushing manifest images. 
- $(MAKE) docker-push-manifest CONTROLLER_IMG=$(EKS_CONTROLPLANE_CONTROLLER_IMG) MANIFEST_FILE=$(EKS_CONTROLPLANE_MANIFEST_FILE) - .PHONY: docker-push-manifest docker-push-manifest: docker manifest create --amend $(CONTROLLER_IMG):$(TAG) $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~$(CONTROLLER_IMG)\-&:$(TAG)~g") @@ -452,8 +367,6 @@ docker-push-manifest: .PHONY: staging-manifests staging-manifests: $(MAKE) $(RELEASE_DIR)/$(CORE_MANIFEST_FILE).yaml PULL_POLICY=IfNotPresent TAG=$(RELEASE_ALIAS_TAG) - $(MAKE) $(RELEASE_DIR)/$(EKS_BOOTSTRAP_MANIFEST_FILE).yaml PULL_POLICY=IfNotPresent TAG=$(RELEASE_ALIAS_TAG) - $(MAKE) $(RELEASE_DIR)/$(EKS_CONTROLPLANE_MANIFEST_FILE).yaml PULL_POLICY=IfNotPresent TAG=$(RELEASE_ALIAS_TAG) ## -------------------------------------- ## Release @@ -466,10 +379,6 @@ $(RELEASE_DIR): list-staging-releases: ## List staging images for image promotion @echo $(CORE_IMAGE_NAME): $(MAKE) list-image RELEASE_TAG=$(RELEASE_TAG) IMAGE=$(CORE_IMAGE_NAME) - @echo $(EKS_BOOTSTRAP_IMAGE_NAME): - $(MAKE) list-image RELEASE_TAG=$(RELEASE_TAG) IMAGE=$(EKS_BOOTSTRAP_IMAGE_NAME) - @echo $(EKS_CONTROLPLANE_IMAGE_NAME): - $(MAKE) list-image RELEASE_TAG=$(RELEASE_TAG) IMAGE=$(EKS_CONTROLPLANE_IMAGE_NAME) list-image: gcloud container images list-tags $(STAGING_REGISTRY)/$(IMAGE) --filter="tags=('$(RELEASE_TAG)')" --format=json @@ -513,8 +422,6 @@ $(RELEASE_DIR)/AWSIAMManagedPolicyCloudProviderNodes.json: $(RELEASE_DIR) $(CLUS .PHONY: release-manifests release-manifests: $(MAKE) $(RELEASE_DIR)/$(CORE_MANIFEST_FILE).yaml TAG=$(RELEASE_TAG) PULL_POLICY=IfNotPresent - $(MAKE) $(RELEASE_DIR)/$(EKS_BOOTSTRAP_MANIFEST_FILE).yaml TAG=$(RELEASE_TAG) PULL_POLICY=IfNotPresent - $(MAKE) $(RELEASE_DIR)/$(EKS_CONTROLPLANE_MANIFEST_FILE).yaml TAG=$(RELEASE_TAG) PULL_POLICY=IfNotPresent # Add metadata to the release artifacts cp metadata.yaml $(RELEASE_DIR)/metadata.yaml @@ -575,8 +482,6 @@ upload-gh-artifacts: $(GH) ## Upload artifacts to Github release .PHONY: release-alias-tag release-alias-tag: # Adds the tag to the last build tag. 
gcloud container images add-tag -q $(CORE_CONTROLLER_IMG):$(TAG) $(CORE_CONTROLLER_IMG):$(RELEASE_ALIAS_TAG) - gcloud container images add-tag -q $(EKS_BOOTSTRAP_CONTROLLER_IMG):$(TAG) $(EKS_BOOTSTRAP_CONTROLLER_IMG):$(RELEASE_ALIAS_TAG) - gcloud container images add-tag -q $(EKS_CONTROLPLANE_CONTROLLER_IMG):$(TAG) $(EKS_CONTROLPLANE_CONTROLLER_IMG):$(RELEASE_ALIAS_TAG) .PHONY: release-templates release-templates: $(RELEASE_DIR) ## Generate release templates @@ -597,26 +502,6 @@ $(RELEASE_DIR)/$(CORE_MANIFEST_FILE).yaml: PROVIDER_CONFIG_DIR=$(CORE_CONFIG_DIR) \ NAMESPACE=$(CORE_NAMESPACE) \ -.PHONY: $(RELEASE_DIR)/$(EKS_BOOTSTRAP_MANIFEST_FILE).yaml -$(RELEASE_DIR)/$(EKS_BOOTSTRAP_MANIFEST_FILE).yaml: - $(MAKE) compiled-manifest \ - PROVIDER=$(EKS_BOOTSTRAP_MANIFEST_FILE) \ - OLD_IMG=$(EKS_BOOTSTRAP_CONTROLLER_ORIGINAL_IMG) \ - MANIFEST_IMG=$(EKS_BOOTSTRAP_CONTROLLER_IMG) \ - CONTROLLER_NAME=$(EKS_BOOTSTRAP_CONTROLLER_NAME) \ - PROVIDER_CONFIG_DIR=$(EKS_BOOTSTRAP_CONFIG_DIR) \ - NAMESPACE=$(EKS_BOOTSTRAP_NAMESPACE) - -.PHONY: $(RELEASE_DIR)/$(EKS_CONTROLPLANE_MANIFEST_FILE).yaml -$(RELEASE_DIR)/$(EKS_CONTROLPLANE_MANIFEST_FILE).yaml: - $(MAKE) compiled-manifest \ - PROVIDER=$(EKS_CONTROLPLANE_MANIFEST_FILE) \ - OLD_IMG=$(EKS_CONTROLPLANE_CONTROLLER_ORIGINAL_IMG) \ - MANIFEST_IMG=$(EKS_CONTROLPLANE_CONTROLLER_IMG) \ - CONTROLLER_NAME=$(EKS_CONTROLPLANE_CONTROLLER_NAME) \ - PROVIDER_CONFIG_DIR=$(EKS_CONTROLPLANE_CONFIG_DIR) \ - NAMESPACE=$(EKS_CONTROLPLANE_NAMESPACE) \ - .PHONY: compiled-manifest compiled-manifest: $(RELEASE_DIR) $(KUSTOMIZE) $(MAKE) image-patch-source-manifest diff --git a/bootstrap/eks/config/certmanager/certificate.yaml b/bootstrap/eks/config/certmanager/certificate.yaml deleted file mode 100644 index d3924656c9..0000000000 --- a/bootstrap/eks/config/certmanager/certificate.yaml +++ /dev/null @@ -1,26 +0,0 @@ -# The following manifests contain a self-signed issuer CR and a certificate CR. 
-# More document can be found at https://docs.cert-manager.io -# WARNING: Targets CertManager 0.11 check https://docs.cert-manager.io/en/latest/tasks/upgrading/index.html for -# breaking changes -apiVersion: cert-manager.io/v1 -kind: Issuer -metadata: - name: selfsigned-issuer - namespace: system -spec: - selfSigned: {} ---- -apiVersion: cert-manager.io/v1 -kind: Certificate -metadata: - name: serving-cert # this name should match the one appeared in kustomizeconfig.yaml - namespace: system -spec: - # $(SERVICE_NAME) and $(SERVICE_NAMESPACE) will be substituted by kustomize - dnsNames: - - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc - - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc.cluster.local - issuerRef: - kind: Issuer - name: selfsigned-issuer - secretName: $(SERVICE_NAME)-cert # this secret will not be prefixed, since it's not managed by kustomize diff --git a/bootstrap/eks/config/certmanager/kustomization.yaml b/bootstrap/eks/config/certmanager/kustomization.yaml deleted file mode 100644 index bebea5a595..0000000000 --- a/bootstrap/eks/config/certmanager/kustomization.yaml +++ /dev/null @@ -1,5 +0,0 @@ -resources: -- certificate.yaml - -configurations: -- kustomizeconfig.yaml diff --git a/bootstrap/eks/config/certmanager/kustomizeconfig.yaml b/bootstrap/eks/config/certmanager/kustomizeconfig.yaml deleted file mode 100644 index 28a895a404..0000000000 --- a/bootstrap/eks/config/certmanager/kustomizeconfig.yaml +++ /dev/null @@ -1,19 +0,0 @@ -# This configuration is for teaching kustomize how to update name ref and var substitution -nameReference: -- kind: Issuer - group: cert-manager.io - fieldSpecs: - - kind: Certificate - group: cert-manager.io - path: spec/issuerRef/name - -varReference: -- kind: Certificate - group: cert-manager.io - path: spec/commonName -- kind: Certificate - group: cert-manager.io - path: spec/dnsNames -- kind: Certificate - group: cert-manager.io - path: spec/secretName diff --git a/bootstrap/eks/config/crd/kustomization.yaml b/bootstrap/eks/config/crd/kustomization.yaml deleted file mode 100644 index 8a40cd3849..0000000000 --- a/bootstrap/eks/config/crd/kustomization.yaml +++ /dev/null @@ -1,28 +0,0 @@ -commonLabels: - cluster.x-k8s.io/v1alpha3: v1alpha3 - cluster.x-k8s.io/v1alpha4: v1alpha4 - -# This kustomization.yaml is not intended to be run by itself, -# since it depends on service name and namespace that are out of this kustomize package. -# It should be run by config/ -resources: - - bases/bootstrap.cluster.x-k8s.io_eksconfigs.yaml - - bases/bootstrap.cluster.x-k8s.io_eksconfigtemplates.yaml -# +kubebuilder:scaffold:crdkustomizeresource - -patchesStrategicMerge: - # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. - # patches here are for enabling the conversion webhook for each CRD -- patches/webhook_in_eksconfigs.yaml -- patches/webhook_in_eksconfigtemplates.yaml - # +kubebuilder:scaffold:crdkustomizewebhookpatch - - # [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix. - # patches here are for enabling the CA injection for each CRD -- patches/cainjection_in_eksconfigs.yaml -- patches/cainjection_in_eksconfigtemplates.yaml -# +kubebuilder:scaffold:crdkustomizecainjectionpatch - -# the following config is for teaching kustomize how to do kustomization for CRDs. 
-configurations: - - kustomizeconfig.yaml diff --git a/bootstrap/eks/config/crd/kustomizeconfig.yaml b/bootstrap/eks/config/crd/kustomizeconfig.yaml deleted file mode 100644 index e3fd575d60..0000000000 --- a/bootstrap/eks/config/crd/kustomizeconfig.yaml +++ /dev/null @@ -1,17 +0,0 @@ -# This file is for teaching kustomize how to substitute name and namespace reference in CRD -nameReference: - - kind: Service - version: v1 - fieldSpecs: - - kind: CustomResourceDefinition - group: apiextensions.k8s.io - path: spec/conversion/webhook/clientConfig/service/name - -namespace: - - kind: CustomResourceDefinition - group: apiextensions.k8s.io - path: spec/conversion/webhook/clientConfig/service/namespace - create: false - -varReference: - - path: metadata/annotations diff --git a/bootstrap/eks/config/default/kustomization.yaml b/bootstrap/eks/config/default/kustomization.yaml deleted file mode 100644 index e0b9d1f8d4..0000000000 --- a/bootstrap/eks/config/default/kustomization.yaml +++ /dev/null @@ -1,59 +0,0 @@ -# Adds namespace to all resources. -namespace: capa-eks-bootstrap-system -namePrefix: capa-eks-bootstrap- - -commonLabels: - cluster.x-k8s.io/provider: "bootstrap-eks" - -resources: -- namespace.yaml - -bases: -- ../rbac -- ../manager -- ../crd -- ../certmanager - -patchesStrategicMerge: -- manager_service_account_patch.yaml -- manager_iam_patch.yaml -- manager_auth_proxy_patch.yaml -- manager_image_patch.yaml -- manager_pull_policy.yaml -# UNCOMMENT for webhooks -#- manager_webhook_patch.yaml -#- webhookcainjection_patch.yaml - -configurations: - - kustomizeconfig.yaml - -vars: - # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. - - name: CERTIFICATE_NAMESPACE # namespace of the certificate CR - objref: - kind: Certificate - group: cert-manager.io - version: v1 - name: serving-cert # this name should match the one in certificate.yaml - fieldref: - fieldpath: metadata.namespace - - name: CERTIFICATE_NAME - objref: - kind: Certificate - group: cert-manager.io - version: v1 - name: serving-cert # this name should match the one in certificate.yaml - -# UNCOMMENT for webhooks -# - name: SERVICE_NAMESPACE # namespace of the service -# objref: -# kind: Service -# version: v1 -# name: webhook-service -# fieldref: -# fieldpath: metadata.namespace -# - name: SERVICE_NAME -# objref: -# kind: Service -# version: v1 -# name: webhook-service diff --git a/bootstrap/eks/config/default/kustomizeconfig.yaml b/bootstrap/eks/config/default/kustomizeconfig.yaml deleted file mode 100644 index 524d39cc2b..0000000000 --- a/bootstrap/eks/config/default/kustomizeconfig.yaml +++ /dev/null @@ -1,4 +0,0 @@ -# This configuration is for teaching kustomize how to update name ref and var substitution -varReference: - - kind: Deployment - path: spec/template/spec/volumes/secret/secretName diff --git a/bootstrap/eks/config/default/manager_auth_proxy_patch.yaml b/bootstrap/eks/config/default/manager_auth_proxy_patch.yaml deleted file mode 100644 index a7987a993f..0000000000 --- a/bootstrap/eks/config/default/manager_auth_proxy_patch.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# This patch inject a sidecar container which is a HTTP proxy for the controller manager, -# it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews. 
-apiVersion: apps/v1 -kind: Deployment -metadata: - name: controller-manager - namespace: system -spec: - template: - spec: - containers: - - name: kube-rbac-proxy - image: gcr.io/kubebuilder/kube-rbac-proxy:v0.8.0 - args: - - "--secure-listen-address=0.0.0.0:8443" - - "--upstream=http://127.0.0.1:8080/" - - "--logtostderr=true" - - "--v=10" - ports: - - containerPort: 8443 - name: https diff --git a/bootstrap/eks/config/default/manager_iam_patch.yaml b/bootstrap/eks/config/default/manager_iam_patch.yaml deleted file mode 100644 index 071af144bd..0000000000 --- a/bootstrap/eks/config/default/manager_iam_patch.yaml +++ /dev/null @@ -1,12 +0,0 @@ -# This patch injects annotations to run using KIAM / kube2iam -apiVersion: apps/v1 -kind: Deployment -metadata: - name: controller-manager - namespace: system -spec: - template: - spec: - metadata: - annotations: - iam.amazonaws.com/role: ${AWS_CONTROLLER_IAM_ROLE:=""} diff --git a/bootstrap/eks/config/default/manager_image_patch.yaml b/bootstrap/eks/config/default/manager_image_patch.yaml deleted file mode 100644 index b08a8977fc..0000000000 --- a/bootstrap/eks/config/default/manager_image_patch.yaml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: controller-manager - namespace: system -spec: - template: - spec: - containers: - - image: gcr.io/k8s-staging-cluster-api-aws/eks-bootstrap-controller:latest - name: manager diff --git a/bootstrap/eks/config/default/manager_pull_policy.yaml b/bootstrap/eks/config/default/manager_pull_policy.yaml deleted file mode 100644 index 74a0879c60..0000000000 --- a/bootstrap/eks/config/default/manager_pull_policy.yaml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: controller-manager - namespace: system -spec: - template: - spec: - containers: - - name: manager - imagePullPolicy: Always diff --git a/bootstrap/eks/config/default/manager_service_account_patch.yaml b/bootstrap/eks/config/default/manager_service_account_patch.yaml deleted file mode 100644 index 5892631f46..0000000000 --- a/bootstrap/eks/config/default/manager_service_account_patch.yaml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: controller-manager - namespace: system -spec: - template: - spec: - serviceAccountName: controller-manager - securityContext: - fsGroup: 1000 diff --git a/bootstrap/eks/config/default/manager_webhook_patch.yaml b/bootstrap/eks/config/default/manager_webhook_patch.yaml deleted file mode 100644 index b387eb0eae..0000000000 --- a/bootstrap/eks/config/default/manager_webhook_patch.yaml +++ /dev/null @@ -1,23 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: controller-manager - namespace: system -spec: - template: - spec: - containers: - - name: manager - ports: - - containerPort: 9443 - name: webhook-server - protocol: TCP - volumeMounts: - - mountPath: /tmp/k8s-webhook-server/serving-certs - name: cert - readOnly: true - volumes: - - name: cert - secret: - defaultMode: 420 - secretName: $(SERVICE_NAME)-cert diff --git a/bootstrap/eks/config/default/namespace.yaml b/bootstrap/eks/config/default/namespace.yaml deleted file mode 100644 index 8b55c3cd89..0000000000 --- a/bootstrap/eks/config/default/namespace.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - labels: - control-plane: controller-manager - name: system diff --git a/bootstrap/eks/config/default/webhookcainjection_patch.yaml b/bootstrap/eks/config/default/webhookcainjection_patch.yaml deleted file 
mode 100644 index 7cc9d3580c..0000000000 --- a/bootstrap/eks/config/default/webhookcainjection_patch.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -apiVersion: admissionregistration.k8s.io/v1 -kind: ValidatingWebhookConfiguration -metadata: - name: validating-webhook-configuration - annotations: - cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) diff --git a/bootstrap/eks/config/manager/kustomization.yaml b/bootstrap/eks/config/manager/kustomization.yaml deleted file mode 100644 index 5c5f0b84cb..0000000000 --- a/bootstrap/eks/config/manager/kustomization.yaml +++ /dev/null @@ -1,2 +0,0 @@ -resources: -- manager.yaml diff --git a/bootstrap/eks/config/manager/manager.yaml b/bootstrap/eks/config/manager/manager.yaml deleted file mode 100644 index 79e2b4ffff..0000000000 --- a/bootstrap/eks/config/manager/manager.yaml +++ /dev/null @@ -1,45 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: controller-manager - namespace: system - labels: - control-plane: controller-manager -spec: - selector: - matchLabels: - control-plane: controller-manager - replicas: 1 - template: - metadata: - labels: - control-plane: controller-manager - spec: - containers: - - command: - - /manager - args: - - --leader-elect - - "--metrics-bind-addr=127.0.0.1:8080" - image: controller:latest - name: manager - terminationGracePeriodSeconds: 10 - tolerations: - - effect: NoSchedule - key: ${K8S_CP_LABEL:=node-role.kubernetes.io/control-plane} - - effect: NoSchedule - key: node-role.kubernetes.io/master - affinity: - nodeAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 10 - preference: - matchExpressions: - - key: ${K8S_CP_LABEL:=node-role.kubernetes.io/control-plane} - operator: Exists - # remove once usage of node-role.kubernetes.io/master is removed from Kubernetes - - weight: 10 - preference: - matchExpressions: - - key: node-role.kubernetes.io/master - operator: Exists diff --git a/bootstrap/eks/config/manager/manager_args_patch.yaml b/bootstrap/eks/config/manager/manager_args_patch.yaml deleted file mode 100644 index e1ed22ef6d..0000000000 --- a/bootstrap/eks/config/manager/manager_args_patch.yaml +++ /dev/null @@ -1,16 +0,0 @@ -# This patch inject a sidecar container which is a HTTP proxy for the controller manager, -# it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews. 
-apiVersion: apps/v1 -kind: Deployment -metadata: - name: controller-manager - namespace: system -spec: - template: - spec: - containers: - - name: manager - args: - - "--metrics-bind-addr=127.0.0.1:8080" - - "--leader-elect" - - "--feature-gates=MachinePool=${EXP_MACHINE_POOL:=false}" diff --git a/bootstrap/eks/config/rbac/auth_proxy_client_clusterrole.yaml b/bootstrap/eks/config/rbac/auth_proxy_client_clusterrole.yaml deleted file mode 100644 index bd4af137a9..0000000000 --- a/bootstrap/eks/config/rbac/auth_proxy_client_clusterrole.yaml +++ /dev/null @@ -1,7 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: metrics-reader -rules: -- nonResourceURLs: ["/metrics"] - verbs: ["get"] diff --git a/bootstrap/eks/config/rbac/auth_proxy_role.yaml b/bootstrap/eks/config/rbac/auth_proxy_role.yaml deleted file mode 100644 index 618f5e4177..0000000000 --- a/bootstrap/eks/config/rbac/auth_proxy_role.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: proxy-role -rules: -- apiGroups: ["authentication.k8s.io"] - resources: - - tokenreviews - verbs: ["create"] -- apiGroups: ["authorization.k8s.io"] - resources: - - subjectaccessreviews - verbs: ["create"] diff --git a/bootstrap/eks/config/rbac/auth_proxy_role_binding.yaml b/bootstrap/eks/config/rbac/auth_proxy_role_binding.yaml deleted file mode 100644 index ec7acc0a1b..0000000000 --- a/bootstrap/eks/config/rbac/auth_proxy_role_binding.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: proxy-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: proxy-role -subjects: -- kind: ServiceAccount - name: controller-manager - namespace: system diff --git a/bootstrap/eks/config/rbac/auth_proxy_service.yaml b/bootstrap/eks/config/rbac/auth_proxy_service.yaml deleted file mode 100644 index 6cf656be14..0000000000 --- a/bootstrap/eks/config/rbac/auth_proxy_service.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - labels: - control-plane: controller-manager - name: controller-manager-metrics-service - namespace: system -spec: - ports: - - name: https - port: 8443 - targetPort: https - selector: - control-plane: controller-manager diff --git a/bootstrap/eks/config/rbac/eksconfig_editor_role.yaml b/bootstrap/eks/config/rbac/eksconfig_editor_role.yaml deleted file mode 100644 index 438b3aedf4..0000000000 --- a/bootstrap/eks/config/rbac/eksconfig_editor_role.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# permissions for end users to edit eksconfigs. -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: eksconfig-editor-role -rules: -- apiGroups: - - bootstrap.cluster.x-k8s.io - resources: - - eksconfigs - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - bootstrap.cluster.x-k8s.io - resources: - - eksconfigs/status - verbs: - - get diff --git a/bootstrap/eks/config/rbac/eksconfig_viewer_role.yaml b/bootstrap/eks/config/rbac/eksconfig_viewer_role.yaml deleted file mode 100644 index d2a2caefd2..0000000000 --- a/bootstrap/eks/config/rbac/eksconfig_viewer_role.yaml +++ /dev/null @@ -1,20 +0,0 @@ -# permissions for end users to view eksconfigs. 
-apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: eksconfig-viewer-role -rules: -- apiGroups: - - bootstrap.cluster.x-k8s.io - resources: - - eksconfigs - verbs: - - get - - list - - watch -- apiGroups: - - bootstrap.cluster.x-k8s.io - resources: - - eksconfigs/status - verbs: - - get diff --git a/bootstrap/eks/config/rbac/eksconfigtemplate_editor_role.yaml b/bootstrap/eks/config/rbac/eksconfigtemplate_editor_role.yaml deleted file mode 100644 index 1749b23aa2..0000000000 --- a/bootstrap/eks/config/rbac/eksconfigtemplate_editor_role.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# permissions for end users to edit eksconfigtemplates. -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: eksconfigtemplate-editor-role -rules: -- apiGroups: - - bootstrap.cluster.x-k8s.io - resources: - - eksconfigtemplates - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - bootstrap.cluster.x-k8s.io - resources: - - eksconfigtemplates/status - verbs: - - get diff --git a/bootstrap/eks/config/rbac/eksconfigtemplate_viewer_role.yaml b/bootstrap/eks/config/rbac/eksconfigtemplate_viewer_role.yaml deleted file mode 100644 index 807f88f58a..0000000000 --- a/bootstrap/eks/config/rbac/eksconfigtemplate_viewer_role.yaml +++ /dev/null @@ -1,20 +0,0 @@ -# permissions for end users to view eksconfigtemplates. -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: eksconfigtemplate-viewer-role -rules: -- apiGroups: - - bootstrap.cluster.x-k8s.io - resources: - - eksconfigtemplates - verbs: - - get - - list - - watch -- apiGroups: - - bootstrap.cluster.x-k8s.io - resources: - - eksconfigtemplates/status - verbs: - - get diff --git a/bootstrap/eks/config/rbac/kustomization.yaml b/bootstrap/eks/config/rbac/kustomization.yaml deleted file mode 100644 index 00f0799130..0000000000 --- a/bootstrap/eks/config/rbac/kustomization.yaml +++ /dev/null @@ -1,13 +0,0 @@ -resources: -- role.yaml -- role_binding.yaml -- serviceaccount.yaml -- leader_election_role.yaml -- leader_election_role_binding.yaml -# Comment the following 4 lines if you want to disable -# the auth proxy (https://github.com/brancz/kube-rbac-proxy) -# which protects your /metrics endpoint. -- auth_proxy_service.yaml -- auth_proxy_role.yaml -- auth_proxy_role_binding.yaml -- auth_proxy_client_clusterrole.yaml diff --git a/bootstrap/eks/config/rbac/leader_election_role.yaml b/bootstrap/eks/config/rbac/leader_election_role.yaml deleted file mode 100644 index 7963f6c29b..0000000000 --- a/bootstrap/eks/config/rbac/leader_election_role.yaml +++ /dev/null @@ -1,44 +0,0 @@ -# permissions to do leader election. 
-apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: leader-elect-role -rules: -- apiGroups: - - "" - resources: - - configmaps - verbs: - - get - - list - - watch - - create - - update - - patch - - delete -- apiGroups: - - "" - resources: - - configmaps/status - verbs: - - get - - update - - patch -- apiGroups: - - "" - resources: - - events - verbs: - - create -- apiGroups: - - "coordination.k8s.io" - resources: - - leases - verbs: - - get - - list - - watch - - create - - update - - patch - - delete diff --git a/bootstrap/eks/config/rbac/leader_election_role_binding.yaml b/bootstrap/eks/config/rbac/leader_election_role_binding.yaml deleted file mode 100644 index e77b3e19b4..0000000000 --- a/bootstrap/eks/config/rbac/leader_election_role_binding.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: leader-elect-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: leader-elect-role -subjects: -- kind: ServiceAccount - name: controller-manager - namespace: system diff --git a/bootstrap/eks/config/rbac/role.yaml b/bootstrap/eks/config/rbac/role.yaml deleted file mode 100644 index f79524ea64..0000000000 --- a/bootstrap/eks/config/rbac/role.yaml +++ /dev/null @@ -1,65 +0,0 @@ - ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - creationTimestamp: null - name: manager-role -rules: -- apiGroups: - - "" - resources: - - secrets - verbs: - - create - - delete - - get - - list - - update - - watch -- apiGroups: - - bootstrap.cluster.x-k8s.io - resources: - - eksconfigs - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - bootstrap.cluster.x-k8s.io - resources: - - eksconfigs/status - verbs: - - get - - patch - - update -- apiGroups: - - cluster.x-k8s.io - resources: - - clusters - - machinepools - - machines - verbs: - - get - - list - - watch -- apiGroups: - - cluster.x-k8s.io - resources: - - machinepools - verbs: - - get - - list - - watch -- apiGroups: - - controlplane.cluster.x-k8s.io - resources: - - awsmanagedcontrolplanes - verbs: - - get - - list - - watch diff --git a/bootstrap/eks/config/rbac/role_binding.yaml b/bootstrap/eks/config/rbac/role_binding.yaml deleted file mode 100644 index 2070ede446..0000000000 --- a/bootstrap/eks/config/rbac/role_binding.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: manager-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: manager-role -subjects: -- kind: ServiceAccount - name: controller-manager - namespace: system diff --git a/bootstrap/eks/config/rbac/serviceaccount.yaml b/bootstrap/eks/config/rbac/serviceaccount.yaml deleted file mode 100644 index bd2f723364..0000000000 --- a/bootstrap/eks/config/rbac/serviceaccount.yaml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: controller-manager - namespace: system - labels: - control-plane: controller-manager - annotations: - # The following uses the prefix substitution functionality of envsubst (https://github.com/drone/envsubst) - # Not compatible with GNU envsubst - ${AWS_CONTROLLER_IAM_ROLE/#arn/eks.amazonaws.com/role-arn: arn} diff --git a/bootstrap/eks/config/webhook/kustomization.yaml b/bootstrap/eks/config/webhook/kustomization.yaml deleted file mode 100644 index 9cf26134e4..0000000000 --- a/bootstrap/eks/config/webhook/kustomization.yaml +++ /dev/null @@ -1,6 +0,0 @@ 
-resources: -- manifests.yaml -- service.yaml - -configurations: -- kustomizeconfig.yaml diff --git a/bootstrap/eks/config/webhook/kustomizeconfig.yaml b/bootstrap/eks/config/webhook/kustomizeconfig.yaml deleted file mode 100644 index 25e21e3c96..0000000000 --- a/bootstrap/eks/config/webhook/kustomizeconfig.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# the following config is for teaching kustomize where to look at when substituting vars. -# It requires kustomize v2.1.0 or newer to work properly. -nameReference: -- kind: Service - version: v1 - fieldSpecs: - - kind: MutatingWebhookConfiguration - group: admissionregistration.k8s.io - path: webhooks/clientConfig/service/name - - kind: ValidatingWebhookConfiguration - group: admissionregistration.k8s.io - path: webhooks/clientConfig/service/name - -namespace: -- kind: MutatingWebhookConfiguration - group: admissionregistration.k8s.io - path: webhooks/clientConfig/service/namespace - create: true -- kind: ValidatingWebhookConfiguration - group: admissionregistration.k8s.io - path: webhooks/clientConfig/service/namespace - create: true - -varReference: -- path: metadata/annotations diff --git a/bootstrap/eks/config/webhook/manifests.yaml b/bootstrap/eks/config/webhook/manifests.yaml deleted file mode 100644 index f5e0b18ffc..0000000000 --- a/bootstrap/eks/config/webhook/manifests.yaml +++ /dev/null @@ -1,100 +0,0 @@ - ---- -apiVersion: admissionregistration.k8s.io/v1 -kind: MutatingWebhookConfiguration -metadata: - creationTimestamp: null - name: mutating-webhook-configuration -webhooks: -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: webhook-service - namespace: system - path: /mutate-bootstrap-cluster-x-k8s-io-v1alpha4-eksconfig - failurePolicy: Fail - matchPolicy: Equivalent - name: default.eksconfigs.bootstrap.cluster.x-k8s.io - rules: - - apiGroups: - - bootstrap.cluster.x-k8s.io - apiVersions: - - v1alpha4 - operations: - - CREATE - - UPDATE - resources: - - eksconfig - sideEffects: None -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: webhook-service - namespace: system - path: /mutate-bootstrap-cluster-x-k8s-io-v1alpha4-eksconfigtemplate - failurePolicy: Fail - matchPolicy: Equivalent - name: default.eksconfigtemplates.bootstrap.cluster.x-k8s.io - rules: - - apiGroups: - - bootstrap.cluster.x-k8s.io - apiVersions: - - v1alpha4 - operations: - - CREATE - - UPDATE - resources: - - eksconfigtemplate - sideEffects: None - ---- -apiVersion: admissionregistration.k8s.io/v1 -kind: ValidatingWebhookConfiguration -metadata: - creationTimestamp: null - name: validating-webhook-configuration -webhooks: -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: webhook-service - namespace: system - path: /validate-bootstrap-cluster-x-k8s-io-v1alpha4-eksconfig - failurePolicy: Fail - matchPolicy: Equivalent - name: validation.eksconfigs.bootstrap.cluster.x-k8s.io - rules: - - apiGroups: - - bootstrap.cluster.x-k8s.io - apiVersions: - - v1alpha4 - operations: - - CREATE - - UPDATE - resources: - - eksconfig - sideEffects: None -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: webhook-service - namespace: system - path: /validate-bootstrap-cluster-x-k8s-io-v1alpha4-eksconfigtemplate - failurePolicy: Fail - matchPolicy: Equivalent - name: validation.eksconfigtemplates.bootstrap.cluster.x-k8s.io - rules: - - apiGroups: - - bootstrap.cluster.x-k8s.io - apiVersions: - - v1alpha4 - operations: - - CREATE - - UPDATE - resources: - - eksconfigtemplate - sideEffects: None 
diff --git a/bootstrap/eks/config/webhook/service.yaml b/bootstrap/eks/config/webhook/service.yaml deleted file mode 100644 index 9bc95014fd..0000000000 --- a/bootstrap/eks/config/webhook/service.yaml +++ /dev/null @@ -1,10 +0,0 @@ - -apiVersion: v1 -kind: Service -metadata: - name: webhook-service - namespace: system -spec: - ports: - - port: 443 - targetPort: webhook-server diff --git a/bootstrap/eks/controllers/suite_test.go b/bootstrap/eks/controllers/suite_test.go index 68c72ea69c..460092c61d 100644 --- a/bootstrap/eks/controllers/suite_test.go +++ b/bootstrap/eks/controllers/suite_test.go @@ -50,7 +50,6 @@ func setup() { utilruntime.Must(ekscontrolplanev1.AddToScheme(scheme.Scheme)) testEnvConfig := helpers.NewTestEnvironmentConfiguration([]string{ path.Join("config", "crd", "bases"), - path.Join("controlplane", "eks", "config", "crd", "bases"), }, ) var err error diff --git a/bootstrap/eks/main.go b/bootstrap/eks/main.go deleted file mode 100644 index fe47c21636..0000000000 --- a/bootstrap/eks/main.go +++ /dev/null @@ -1,189 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -import ( - "context" - "flag" - "fmt" - "math/rand" - "net/http" - _ "net/http/pprof" - "os" - "time" - - "github.com/spf13/pflag" - "k8s.io/apimachinery/pkg/runtime" - clientgoscheme "k8s.io/client-go/kubernetes/scheme" - "k8s.io/klog/v2" - "k8s.io/klog/v2/klogr" - bootstrapv1alpha3 "sigs.k8s.io/cluster-api-provider-aws/bootstrap/eks/api/v1alpha3" - bootstrapv1alpha4 "sigs.k8s.io/cluster-api-provider-aws/bootstrap/eks/api/v1alpha4" - bootstrapv1controllers "sigs.k8s.io/cluster-api-provider-aws/bootstrap/eks/controllers" - controlplanev1alpha3 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1alpha3" - controlplanev1alpha4 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1alpha4" - expinfrav1alpha3 "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1alpha3" - expinfrav1alpha4 "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1alpha4" - "sigs.k8s.io/cluster-api-provider-aws/version" - clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1alpha4" - "sigs.k8s.io/cluster-api/feature" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/controller" - // +kubebuilder:scaffold:imports -) - -var ( - scheme = runtime.NewScheme() - setupLog = ctrl.Log.WithName("setup") -) - -func init() { - klog.InitFlags(nil) - - _ = clientgoscheme.AddToScheme(scheme) - _ = clusterv1.AddToScheme(scheme) - _ = bootstrapv1alpha3.AddToScheme(scheme) - _ = bootstrapv1alpha4.AddToScheme(scheme) - _ = expinfrav1alpha3.AddToScheme(scheme) - _ = expinfrav1alpha4.AddToScheme(scheme) - _ = expclusterv1.AddToScheme(scheme) - _ = controlplanev1alpha3.AddToScheme(scheme) - _ = controlplanev1alpha4.AddToScheme(scheme) - - // +kubebuilder:scaffold:scheme -} - -var ( - metricsAddr string - enableLeaderElection bool - leaderElectionLeaseDuration time.Duration - leaderElectionRenewDeadline time.Duration - 
leaderElectionRetryPeriod time.Duration - watchNamespace string - watchFilterValue string - profilerAddress string - eksConfigConcurrency int - syncPeriod time.Duration - webhookPort int - webhookCertDir string -) - -// InitFlags initializes this manager's flags. -func InitFlags(fs *pflag.FlagSet) { - fs.StringVar(&metricsAddr, "metrics-bind-addr", ":8080", - "The address the metric endpoint binds to.") - - fs.BoolVar(&enableLeaderElection, "leader-elect", false, - "Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.") - - fs.DurationVar(&leaderElectionLeaseDuration, "leader-elect-lease-duration", 15*time.Second, - "Interval at which non-leader candidates will wait to force acquire leadership (duration string)") - - fs.DurationVar(&leaderElectionRenewDeadline, "leader-elect-renew-deadline", 10*time.Second, - "Duration that the leading controller manager will retry refreshing leadership before giving up (duration string)") - - fs.DurationVar(&leaderElectionRetryPeriod, "leader-elect-retry-period", 2*time.Second, - "Duration the LeaderElector clients should wait between tries of actions (duration string)") - - fs.StringVar(&watchNamespace, "namespace", "", - "Namespace that the controller watches to reconcile objects. If unspecified, the controller watches for objects across all namespaces.") - - fs.StringVar(&profilerAddress, "profiler-address", "", - "Bind address to expose the pprof profiler (e.g. localhost:6060)") - - fs.IntVar(&eksConfigConcurrency, "eksconfig-concurrency", 10, - "Number of EKS configs to process simultaneously") - - fs.DurationVar(&syncPeriod, "sync-period", 10*time.Minute, - "The minimum interval at which watched resources are reconciled (e.g. 15m)") - - fs.IntVar(&webhookPort, "webhook-port", 9443, - "Webhook Server port, disabled by default. When enabled, the manager will only work as webhook server, no reconcilers are installed.") - - fs.StringVar(&webhookCertDir, "webhook-cert-dir", "/tmp/k8s-webhook-server/serving-certs/", - "Webhook cert dir, only used when webhook-port is specified.") - - fs.StringVar( - &watchFilterValue, - "watch-filter", - "", - fmt.Sprintf("Label value that the controller watches to reconcile cluster-api objects. Label key is always %s. 
If unspecified, the controller watches for all cluster-api objects.", clusterv1.WatchLabel), - ) - - feature.MutableGates.AddFlag(fs) -} - -func main() { - rand.Seed(time.Now().UnixNano()) - - InitFlags(pflag.CommandLine) - pflag.CommandLine.AddGoFlagSet(flag.CommandLine) - pflag.Parse() - - ctrl.SetLogger(klogr.New()) - - if profilerAddress != "" { - klog.Infof("Profiler listening for requests at %s", profilerAddress) - go func() { - klog.Info(http.ListenAndServe(profilerAddress, nil)) - }() - } - - restConfig := ctrl.GetConfigOrDie() - restConfig.UserAgent = "cluster-api-provider-aws-controller" - mgr, err := ctrl.NewManager(restConfig, ctrl.Options{ - Scheme: scheme, - MetricsBindAddress: metricsAddr, - LeaderElection: enableLeaderElection, - LeaderElectionID: "eks-bootstrap-manager-leader-elect-capa", - LeaseDuration: &leaderElectionLeaseDuration, - RenewDeadline: &leaderElectionRenewDeadline, - RetryPeriod: &leaderElectionRetryPeriod, - Namespace: watchNamespace, - SyncPeriod: &syncPeriod, - Port: webhookPort, - CertDir: webhookCertDir, - }) - if err != nil { - setupLog.Error(err, "unable to start manager") - os.Exit(1) - } - ctx := ctrl.SetupSignalHandler() - setupReconcilers(ctx, mgr) - - // +kubebuilder:scaffold:builder - setupLog.Info("starting manager", "version", version.Get().String()) - if err := mgr.Start(ctx); err != nil { - setupLog.Error(err, "problem running manager") - os.Exit(1) - } -} - -func setupReconcilers(ctx context.Context, mgr ctrl.Manager) { - if err := (&bootstrapv1controllers.EKSConfigReconciler{ - Client: mgr.GetClient(), - WatchFilterValue: watchFilterValue, - }).SetupWithManager(ctx, mgr, concurrency(eksConfigConcurrency)); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "EKSConfig") - os.Exit(1) - } -} - -func concurrency(c int) controller.Options { - return controller.Options{MaxConcurrentReconciles: c} -} diff --git a/cmd/clusterawsadm/api/bootstrap/v1alpha1/defaults.go b/cmd/clusterawsadm/api/bootstrap/v1alpha1/defaults.go index 7749ea2cc1..62dc9dd1bc 100644 --- a/cmd/clusterawsadm/api/bootstrap/v1alpha1/defaults.go +++ b/cmd/clusterawsadm/api/bootstrap/v1alpha1/defaults.go @@ -58,13 +58,13 @@ func SetDefaults_AWSIAMConfigurationSpec(obj *AWSIAMConfigurationSpec) { //nolin } if obj.EKS == nil { obj.EKS = &EKSConfig{ - Enable: false, + Disable: false, AllowIAMRoleCreation: false, DefaultControlPlaneRole: AWSIAMRoleSpec{ - Disable: true, + Disable: false, }, } - } else if obj.EKS.Enable { + } else if !obj.EKS.Disable { obj.Nodes.EC2ContainerRegistryReadOnly = true } if obj.EventBridge == nil { diff --git a/cmd/clusterawsadm/api/bootstrap/v1alpha1/types.go b/cmd/clusterawsadm/api/bootstrap/v1alpha1/types.go index ce288dbbde..8395ff0c24 100644 --- a/cmd/clusterawsadm/api/bootstrap/v1alpha1/types.go +++ b/cmd/clusterawsadm/api/bootstrap/v1alpha1/types.go @@ -91,8 +91,8 @@ type AWSIAMRoleSpec struct { // EKSConfig represents the EKS related configuration config. 
type EKSConfig struct { - // Enable controls whether EKS-related permissions are granted - Enable bool `json:"enable"` + // Disable controls whether EKS-related permissions are granted + Disable bool `json:"disable"` // AllowIAMRoleCreation controls whether the EKS controllers have permissions for creating IAM // roles per cluster AllowIAMRoleCreation bool `json:"iamRoleCreation,omitempty"` diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/cluster_api_controller.go b/cmd/clusterawsadm/cloudformation/bootstrap/cluster_api_controller.go index a36a675541..105adfd72d 100644 --- a/cmd/clusterawsadm/cloudformation/bootstrap/cluster_api_controller.go +++ b/cmd/clusterawsadm/cloudformation/bootstrap/cluster_api_controller.go @@ -235,7 +235,7 @@ func (t Template) ControllersPolicy() *infrav1.PolicyDocument { }) } } - if t.Spec.EKS.Enable { + if !t.Spec.EKS.Disable { allowedIAMActions := infrav1.Actions{ "iam:GetRole", "iam:ListAttachedRolePolicies", diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/cluster_api_node.go b/cmd/clusterawsadm/cloudformation/bootstrap/cluster_api_node.go index 818f6a29ef..934631767b 100644 --- a/cmd/clusterawsadm/cloudformation/bootstrap/cluster_api_node.go +++ b/cmd/clusterawsadm/cloudformation/bootstrap/cluster_api_node.go @@ -66,7 +66,7 @@ func (t Template) sessionManagerPolicy() infrav1.StatementEntry { func (t Template) nodeManagedPolicies() []string { policies := t.Spec.Nodes.ExtraPolicyAttachments - if t.Spec.EKS.Enable { + if !t.Spec.EKS.Disable { policies = append(policies, t.generateAWSManagedPolicyARN("AmazonEKSWorkerNodePolicy"), t.generateAWSManagedPolicyARN("AmazonEKS_CNI_Policy"), diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/customsuffix.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/customsuffix.yaml index 7bb35c0cee..af2d1ab687 100644 --- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/customsuffix.yaml +++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/customsuffix.yaml @@ -250,6 +250,96 @@ Resources: Effect: Allow Resource: - arn:*:secretsmanager:*:*:secret:aws.cluster.x-k8s.io/* + - Action: + - ssm:GetParameter + Effect: Allow + Resource: + - arn:*:ssm:*:*:parameter/aws/service/eks/optimized-ami/* + - Action: + - iam:CreateServiceLinkedRole + Condition: + StringLike: + iam:AWSServiceName: eks.amazonaws.com + Effect: Allow + Resource: + - arn:*:iam::*:role/aws-service-role/eks.amazonaws.com/AWSServiceRoleForAmazonEKS + - Action: + - iam:CreateServiceLinkedRole + Condition: + StringLike: + iam:AWSServiceName: eks-nodegroup.amazonaws.com + Effect: Allow + Resource: + - arn:*:iam::*:role/aws-service-role/eks-nodegroup.amazonaws.com/AWSServiceRoleForAmazonEKSNodegroup + - Action: + - iam:CreateServiceLinkedRole + Condition: + StringLike: + iam:AWSServiceName: eks-fargate.amazonaws.com + Effect: Allow + Resource: + - arn:aws:iam::*:role/aws-service-role/eks-fargate-pods.amazonaws.com/AWSServiceRoleForAmazonEKSForFargate + - Action: + - iam:GetRole + - iam:ListAttachedRolePolicies + Effect: Allow + Resource: + - arn:*:iam::*:role/* + - Action: + - iam:GetPolicy + Effect: Allow + Resource: + - arn:aws:iam::aws:policy/AmazonEKSClusterPolicy + - Action: + - eks:DescribeCluster + - eks:ListClusters + - eks:CreateCluster + - eks:TagResource + - eks:UpdateClusterVersion + - eks:DeleteCluster + - eks:UpdateClusterConfig + - eks:UntagResource + - eks:UpdateNodegroupVersion + - eks:DescribeNodegroup + - eks:DeleteNodegroup + - eks:UpdateNodegroupConfig + - eks:CreateNodegroup + - eks:AssociateEncryptionConfig + 
Effect: Allow + Resource: + - arn:*:eks:*:*:cluster/* + - arn:*:eks:*:*:nodegroup/*/*/* + - Action: + - eks:ListAddons + - eks:CreateAddon + - eks:DescribeAddonVersions + - eks:DescribeAddon + - eks:DeleteAddon + - eks:UpdateAddon + - eks:TagResource + - eks:DescribeFargateProfile + - eks:CreateFargateProfile + - eks:DeleteFargateProfile + Effect: Allow + Resource: + - '*' + - Action: + - iam:PassRole + Condition: + StringEquals: + iam:PassedToService: eks.amazonaws.com + Effect: Allow + Resource: + - '*' + - Action: + - kms:CreateGrant + - kms:DescribeKey + Condition: + ForAnyValue:StringLike: + kms:ResourceAliases: alias/cluster-api-provider-aws-* + Effect: Allow + Resource: + - '*' Version: 2012-10-17 Roles: - Ref: AWSIAMRoleControllers @@ -281,6 +371,21 @@ Resources: Version: 2012-10-17 RoleName: controllers.custom-suffix.com Type: AWS::IAM::Role + AWSIAMRoleEKSControlPlane: + Properties: + AssumeRolePolicyDocument: + Statement: + - Action: + - sts:AssumeRole + Effect: Allow + Principal: + Service: + - eks.amazonaws.com + Version: 2012-10-17 + ManagedPolicyArns: + - arn:aws:iam::aws:policy/AmazonEKSClusterPolicy + RoleName: eks-controlplane.cluster-api-provider-aws.sigs.k8s.io + Type: AWS::IAM::Role AWSIAMRoleNodes: Properties: AssumeRolePolicyDocument: @@ -292,5 +397,8 @@ Resources: Service: - ec2.amazonaws.com Version: 2012-10-17 + ManagedPolicyArns: + - arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy + - arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy RoleName: nodes.custom-suffix.com Type: AWS::IAM::Role diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/default.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/default.yaml index 27fda19e3b..c9c6770e8f 100644 --- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/default.yaml +++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/default.yaml @@ -250,6 +250,96 @@ Resources: Effect: Allow Resource: - arn:*:secretsmanager:*:*:secret:aws.cluster.x-k8s.io/* + - Action: + - ssm:GetParameter + Effect: Allow + Resource: + - arn:*:ssm:*:*:parameter/aws/service/eks/optimized-ami/* + - Action: + - iam:CreateServiceLinkedRole + Condition: + StringLike: + iam:AWSServiceName: eks.amazonaws.com + Effect: Allow + Resource: + - arn:*:iam::*:role/aws-service-role/eks.amazonaws.com/AWSServiceRoleForAmazonEKS + - Action: + - iam:CreateServiceLinkedRole + Condition: + StringLike: + iam:AWSServiceName: eks-nodegroup.amazonaws.com + Effect: Allow + Resource: + - arn:*:iam::*:role/aws-service-role/eks-nodegroup.amazonaws.com/AWSServiceRoleForAmazonEKSNodegroup + - Action: + - iam:CreateServiceLinkedRole + Condition: + StringLike: + iam:AWSServiceName: eks-fargate.amazonaws.com + Effect: Allow + Resource: + - arn:aws:iam::*:role/aws-service-role/eks-fargate-pods.amazonaws.com/AWSServiceRoleForAmazonEKSForFargate + - Action: + - iam:GetRole + - iam:ListAttachedRolePolicies + Effect: Allow + Resource: + - arn:*:iam::*:role/* + - Action: + - iam:GetPolicy + Effect: Allow + Resource: + - arn:aws:iam::aws:policy/AmazonEKSClusterPolicy + - Action: + - eks:DescribeCluster + - eks:ListClusters + - eks:CreateCluster + - eks:TagResource + - eks:UpdateClusterVersion + - eks:DeleteCluster + - eks:UpdateClusterConfig + - eks:UntagResource + - eks:UpdateNodegroupVersion + - eks:DescribeNodegroup + - eks:DeleteNodegroup + - eks:UpdateNodegroupConfig + - eks:CreateNodegroup + - eks:AssociateEncryptionConfig + Effect: Allow + Resource: + - arn:*:eks:*:*:cluster/* + - arn:*:eks:*:*:nodegroup/*/*/* + - Action: + - eks:ListAddons + - 
eks:CreateAddon + - eks:DescribeAddonVersions + - eks:DescribeAddon + - eks:DeleteAddon + - eks:UpdateAddon + - eks:TagResource + - eks:DescribeFargateProfile + - eks:CreateFargateProfile + - eks:DeleteFargateProfile + Effect: Allow + Resource: + - '*' + - Action: + - iam:PassRole + Condition: + StringEquals: + iam:PassedToService: eks.amazonaws.com + Effect: Allow + Resource: + - '*' + - Action: + - kms:CreateGrant + - kms:DescribeKey + Condition: + ForAnyValue:StringLike: + kms:ResourceAliases: alias/cluster-api-provider-aws-* + Effect: Allow + Resource: + - '*' Version: 2012-10-17 Roles: - Ref: AWSIAMRoleControllers @@ -281,6 +371,21 @@ Resources: Version: 2012-10-17 RoleName: controllers.cluster-api-provider-aws.sigs.k8s.io Type: AWS::IAM::Role + AWSIAMRoleEKSControlPlane: + Properties: + AssumeRolePolicyDocument: + Statement: + - Action: + - sts:AssumeRole + Effect: Allow + Principal: + Service: + - eks.amazonaws.com + Version: 2012-10-17 + ManagedPolicyArns: + - arn:aws:iam::aws:policy/AmazonEKSClusterPolicy + RoleName: eks-controlplane.cluster-api-provider-aws.sigs.k8s.io + Type: AWS::IAM::Role AWSIAMRoleNodes: Properties: AssumeRolePolicyDocument: @@ -292,5 +397,8 @@ Resources: Service: - ec2.amazonaws.com Version: 2012-10-17 + ManagedPolicyArns: + - arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy + - arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy RoleName: nodes.cluster-api-provider-aws.sigs.k8s.io Type: AWS::IAM::Role diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_all_secret_backends.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_all_secret_backends.yaml index 7a1de222b8..71cbf16577 100644 --- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_all_secret_backends.yaml +++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_all_secret_backends.yaml @@ -263,6 +263,96 @@ Resources: Effect: Allow Resource: - arn:*:ssm:*:*:parameter/cluster.x-k8s.io/* + - Action: + - ssm:GetParameter + Effect: Allow + Resource: + - arn:*:ssm:*:*:parameter/aws/service/eks/optimized-ami/* + - Action: + - iam:CreateServiceLinkedRole + Condition: + StringLike: + iam:AWSServiceName: eks.amazonaws.com + Effect: Allow + Resource: + - arn:*:iam::*:role/aws-service-role/eks.amazonaws.com/AWSServiceRoleForAmazonEKS + - Action: + - iam:CreateServiceLinkedRole + Condition: + StringLike: + iam:AWSServiceName: eks-nodegroup.amazonaws.com + Effect: Allow + Resource: + - arn:*:iam::*:role/aws-service-role/eks-nodegroup.amazonaws.com/AWSServiceRoleForAmazonEKSNodegroup + - Action: + - iam:CreateServiceLinkedRole + Condition: + StringLike: + iam:AWSServiceName: eks-fargate.amazonaws.com + Effect: Allow + Resource: + - arn:aws:iam::*:role/aws-service-role/eks-fargate-pods.amazonaws.com/AWSServiceRoleForAmazonEKSForFargate + - Action: + - iam:GetRole + - iam:ListAttachedRolePolicies + Effect: Allow + Resource: + - arn:*:iam::*:role/* + - Action: + - iam:GetPolicy + Effect: Allow + Resource: + - arn:aws:iam::aws:policy/AmazonEKSClusterPolicy + - Action: + - eks:DescribeCluster + - eks:ListClusters + - eks:CreateCluster + - eks:TagResource + - eks:UpdateClusterVersion + - eks:DeleteCluster + - eks:UpdateClusterConfig + - eks:UntagResource + - eks:UpdateNodegroupVersion + - eks:DescribeNodegroup + - eks:DeleteNodegroup + - eks:UpdateNodegroupConfig + - eks:CreateNodegroup + - eks:AssociateEncryptionConfig + Effect: Allow + Resource: + - arn:*:eks:*:*:cluster/* + - arn:*:eks:*:*:nodegroup/*/*/* + - Action: + - eks:ListAddons + - eks:CreateAddon + - 
eks:DescribeAddonVersions + - eks:DescribeAddon + - eks:DeleteAddon + - eks:UpdateAddon + - eks:TagResource + - eks:DescribeFargateProfile + - eks:CreateFargateProfile + - eks:DeleteFargateProfile + Effect: Allow + Resource: + - '*' + - Action: + - iam:PassRole + Condition: + StringEquals: + iam:PassedToService: eks.amazonaws.com + Effect: Allow + Resource: + - '*' + - Action: + - kms:CreateGrant + - kms:DescribeKey + Condition: + ForAnyValue:StringLike: + kms:ResourceAliases: alias/cluster-api-provider-aws-* + Effect: Allow + Resource: + - '*' Version: 2012-10-17 Roles: - Ref: AWSIAMRoleControllers @@ -294,6 +384,21 @@ Resources: Version: 2012-10-17 RoleName: controllers.cluster-api-provider-aws.sigs.k8s.io Type: AWS::IAM::Role + AWSIAMRoleEKSControlPlane: + Properties: + AssumeRolePolicyDocument: + Statement: + - Action: + - sts:AssumeRole + Effect: Allow + Principal: + Service: + - eks.amazonaws.com + Version: 2012-10-17 + ManagedPolicyArns: + - arn:aws:iam::aws:policy/AmazonEKSClusterPolicy + RoleName: eks-controlplane.cluster-api-provider-aws.sigs.k8s.io + Type: AWS::IAM::Role AWSIAMRoleNodes: Properties: AssumeRolePolicyDocument: @@ -305,5 +410,8 @@ Resources: Service: - ec2.amazonaws.com Version: 2012-10-17 + ManagedPolicyArns: + - arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy + - arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy RoleName: nodes.cluster-api-provider-aws.sigs.k8s.io Type: AWS::IAM::Role diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_bootstrap_user.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_bootstrap_user.yaml index 2d5ff89c37..cfa949ab5e 100644 --- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_bootstrap_user.yaml +++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_bootstrap_user.yaml @@ -255,6 +255,96 @@ Resources: Effect: Allow Resource: - arn:*:secretsmanager:*:*:secret:aws.cluster.x-k8s.io/* + - Action: + - ssm:GetParameter + Effect: Allow + Resource: + - arn:*:ssm:*:*:parameter/aws/service/eks/optimized-ami/* + - Action: + - iam:CreateServiceLinkedRole + Condition: + StringLike: + iam:AWSServiceName: eks.amazonaws.com + Effect: Allow + Resource: + - arn:*:iam::*:role/aws-service-role/eks.amazonaws.com/AWSServiceRoleForAmazonEKS + - Action: + - iam:CreateServiceLinkedRole + Condition: + StringLike: + iam:AWSServiceName: eks-nodegroup.amazonaws.com + Effect: Allow + Resource: + - arn:*:iam::*:role/aws-service-role/eks-nodegroup.amazonaws.com/AWSServiceRoleForAmazonEKSNodegroup + - Action: + - iam:CreateServiceLinkedRole + Condition: + StringLike: + iam:AWSServiceName: eks-fargate.amazonaws.com + Effect: Allow + Resource: + - arn:aws:iam::*:role/aws-service-role/eks-fargate-pods.amazonaws.com/AWSServiceRoleForAmazonEKSForFargate + - Action: + - iam:GetRole + - iam:ListAttachedRolePolicies + Effect: Allow + Resource: + - arn:*:iam::*:role/* + - Action: + - iam:GetPolicy + Effect: Allow + Resource: + - arn:aws:iam::aws:policy/AmazonEKSClusterPolicy + - Action: + - eks:DescribeCluster + - eks:ListClusters + - eks:CreateCluster + - eks:TagResource + - eks:UpdateClusterVersion + - eks:DeleteCluster + - eks:UpdateClusterConfig + - eks:UntagResource + - eks:UpdateNodegroupVersion + - eks:DescribeNodegroup + - eks:DeleteNodegroup + - eks:UpdateNodegroupConfig + - eks:CreateNodegroup + - eks:AssociateEncryptionConfig + Effect: Allow + Resource: + - arn:*:eks:*:*:cluster/* + - arn:*:eks:*:*:nodegroup/*/*/* + - Action: + - eks:ListAddons + - eks:CreateAddon + - eks:DescribeAddonVersions + - eks:DescribeAddon + 
- eks:DeleteAddon + - eks:UpdateAddon + - eks:TagResource + - eks:DescribeFargateProfile + - eks:CreateFargateProfile + - eks:DeleteFargateProfile + Effect: Allow + Resource: + - '*' + - Action: + - iam:PassRole + Condition: + StringEquals: + iam:PassedToService: eks.amazonaws.com + Effect: Allow + Resource: + - '*' + - Action: + - kms:CreateGrant + - kms:DescribeKey + Condition: + ForAnyValue:StringLike: + kms:ResourceAliases: alias/cluster-api-provider-aws-* + Effect: Allow + Resource: + - '*' Version: 2012-10-17 Roles: - Ref: AWSIAMRoleControllers @@ -286,6 +376,21 @@ Resources: Version: 2012-10-17 RoleName: controllers.cluster-api-provider-aws.sigs.k8s.io Type: AWS::IAM::Role + AWSIAMRoleEKSControlPlane: + Properties: + AssumeRolePolicyDocument: + Statement: + - Action: + - sts:AssumeRole + Effect: Allow + Principal: + Service: + - eks.amazonaws.com + Version: 2012-10-17 + ManagedPolicyArns: + - arn:aws:iam::aws:policy/AmazonEKSClusterPolicy + RoleName: eks-controlplane.cluster-api-provider-aws.sigs.k8s.io + Type: AWS::IAM::Role AWSIAMRoleNodes: Properties: AssumeRolePolicyDocument: @@ -297,6 +402,9 @@ Resources: Service: - ec2.amazonaws.com Version: 2012-10-17 + ManagedPolicyArns: + - arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy + - arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy RoleName: nodes.cluster-api-provider-aws.sigs.k8s.io Type: AWS::IAM::Role AWSIAMUserBootstrapper: diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_custom_bootstrap_user.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_custom_bootstrap_user.yaml index ba31a18134..a0210432b2 100644 --- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_custom_bootstrap_user.yaml +++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_custom_bootstrap_user.yaml @@ -255,6 +255,96 @@ Resources: Effect: Allow Resource: - arn:*:secretsmanager:*:*:secret:aws.cluster.x-k8s.io/* + - Action: + - ssm:GetParameter + Effect: Allow + Resource: + - arn:*:ssm:*:*:parameter/aws/service/eks/optimized-ami/* + - Action: + - iam:CreateServiceLinkedRole + Condition: + StringLike: + iam:AWSServiceName: eks.amazonaws.com + Effect: Allow + Resource: + - arn:*:iam::*:role/aws-service-role/eks.amazonaws.com/AWSServiceRoleForAmazonEKS + - Action: + - iam:CreateServiceLinkedRole + Condition: + StringLike: + iam:AWSServiceName: eks-nodegroup.amazonaws.com + Effect: Allow + Resource: + - arn:*:iam::*:role/aws-service-role/eks-nodegroup.amazonaws.com/AWSServiceRoleForAmazonEKSNodegroup + - Action: + - iam:CreateServiceLinkedRole + Condition: + StringLike: + iam:AWSServiceName: eks-fargate.amazonaws.com + Effect: Allow + Resource: + - arn:aws:iam::*:role/aws-service-role/eks-fargate-pods.amazonaws.com/AWSServiceRoleForAmazonEKSForFargate + - Action: + - iam:GetRole + - iam:ListAttachedRolePolicies + Effect: Allow + Resource: + - arn:*:iam::*:role/* + - Action: + - iam:GetPolicy + Effect: Allow + Resource: + - arn:aws:iam::aws:policy/AmazonEKSClusterPolicy + - Action: + - eks:DescribeCluster + - eks:ListClusters + - eks:CreateCluster + - eks:TagResource + - eks:UpdateClusterVersion + - eks:DeleteCluster + - eks:UpdateClusterConfig + - eks:UntagResource + - eks:UpdateNodegroupVersion + - eks:DescribeNodegroup + - eks:DeleteNodegroup + - eks:UpdateNodegroupConfig + - eks:CreateNodegroup + - eks:AssociateEncryptionConfig + Effect: Allow + Resource: + - arn:*:eks:*:*:cluster/* + - arn:*:eks:*:*:nodegroup/*/*/* + - Action: + - eks:ListAddons + - eks:CreateAddon + - eks:DescribeAddonVersions + - eks:DescribeAddon 
+ - eks:DeleteAddon + - eks:UpdateAddon + - eks:TagResource + - eks:DescribeFargateProfile + - eks:CreateFargateProfile + - eks:DeleteFargateProfile + Effect: Allow + Resource: + - '*' + - Action: + - iam:PassRole + Condition: + StringEquals: + iam:PassedToService: eks.amazonaws.com + Effect: Allow + Resource: + - '*' + - Action: + - kms:CreateGrant + - kms:DescribeKey + Condition: + ForAnyValue:StringLike: + kms:ResourceAliases: alias/cluster-api-provider-aws-* + Effect: Allow + Resource: + - '*' Version: 2012-10-17 Roles: - Ref: AWSIAMRoleControllers @@ -286,6 +376,21 @@ Resources: Version: 2012-10-17 RoleName: controllers.cluster-api-provider-aws.sigs.k8s.io Type: AWS::IAM::Role + AWSIAMRoleEKSControlPlane: + Properties: + AssumeRolePolicyDocument: + Statement: + - Action: + - sts:AssumeRole + Effect: Allow + Principal: + Service: + - eks.amazonaws.com + Version: 2012-10-17 + ManagedPolicyArns: + - arn:aws:iam::aws:policy/AmazonEKSClusterPolicy + RoleName: eks-controlplane.cluster-api-provider-aws.sigs.k8s.io + Type: AWS::IAM::Role AWSIAMRoleNodes: Properties: AssumeRolePolicyDocument: @@ -297,6 +402,9 @@ Resources: Service: - ec2.amazonaws.com Version: 2012-10-17 + ManagedPolicyArns: + - arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy + - arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy RoleName: nodes.cluster-api-provider-aws.sigs.k8s.io Type: AWS::IAM::Role AWSIAMUserBootstrapper: diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_different_instance_profiles.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_different_instance_profiles.yaml index 63bfc6214c..378bd628c9 100644 --- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_different_instance_profiles.yaml +++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_different_instance_profiles.yaml @@ -250,6 +250,96 @@ Resources: Effect: Allow Resource: - arn:*:secretsmanager:*:*:secret:aws.cluster.x-k8s.io/* + - Action: + - ssm:GetParameter + Effect: Allow + Resource: + - arn:*:ssm:*:*:parameter/aws/service/eks/optimized-ami/* + - Action: + - iam:CreateServiceLinkedRole + Condition: + StringLike: + iam:AWSServiceName: eks.amazonaws.com + Effect: Allow + Resource: + - arn:*:iam::*:role/aws-service-role/eks.amazonaws.com/AWSServiceRoleForAmazonEKS + - Action: + - iam:CreateServiceLinkedRole + Condition: + StringLike: + iam:AWSServiceName: eks-nodegroup.amazonaws.com + Effect: Allow + Resource: + - arn:*:iam::*:role/aws-service-role/eks-nodegroup.amazonaws.com/AWSServiceRoleForAmazonEKSNodegroup + - Action: + - iam:CreateServiceLinkedRole + Condition: + StringLike: + iam:AWSServiceName: eks-fargate.amazonaws.com + Effect: Allow + Resource: + - arn:aws:iam::*:role/aws-service-role/eks-fargate-pods.amazonaws.com/AWSServiceRoleForAmazonEKSForFargate + - Action: + - iam:GetRole + - iam:ListAttachedRolePolicies + Effect: Allow + Resource: + - arn:*:iam::*:role/* + - Action: + - iam:GetPolicy + Effect: Allow + Resource: + - arn:aws:iam::aws:policy/AmazonEKSClusterPolicy + - Action: + - eks:DescribeCluster + - eks:ListClusters + - eks:CreateCluster + - eks:TagResource + - eks:UpdateClusterVersion + - eks:DeleteCluster + - eks:UpdateClusterConfig + - eks:UntagResource + - eks:UpdateNodegroupVersion + - eks:DescribeNodegroup + - eks:DeleteNodegroup + - eks:UpdateNodegroupConfig + - eks:CreateNodegroup + - eks:AssociateEncryptionConfig + Effect: Allow + Resource: + - arn:*:eks:*:*:cluster/* + - arn:*:eks:*:*:nodegroup/*/*/* + - Action: + - eks:ListAddons + - eks:CreateAddon + - 
eks:DescribeAddonVersions + - eks:DescribeAddon + - eks:DeleteAddon + - eks:UpdateAddon + - eks:TagResource + - eks:DescribeFargateProfile + - eks:CreateFargateProfile + - eks:DeleteFargateProfile + Effect: Allow + Resource: + - '*' + - Action: + - iam:PassRole + Condition: + StringEquals: + iam:PassedToService: eks.amazonaws.com + Effect: Allow + Resource: + - '*' + - Action: + - kms:CreateGrant + - kms:DescribeKey + Condition: + ForAnyValue:StringLike: + kms:ResourceAliases: alias/cluster-api-provider-aws-* + Effect: Allow + Resource: + - '*' Version: 2012-10-17 Roles: - Ref: AWSIAMRoleControllers @@ -281,6 +371,21 @@ Resources: Version: 2012-10-17 RoleName: controllers.cluster-api-provider-aws.sigs.k8s.io Type: AWS::IAM::Role + AWSIAMRoleEKSControlPlane: + Properties: + AssumeRolePolicyDocument: + Statement: + - Action: + - sts:AssumeRole + Effect: Allow + Principal: + Service: + - eks.amazonaws.com + Version: 2012-10-17 + ManagedPolicyArns: + - arn:aws:iam::aws:policy/AmazonEKSClusterPolicy + RoleName: eks-controlplane.cluster-api-provider-aws.sigs.k8s.io + Type: AWS::IAM::Role AWSIAMRoleNodes: Properties: AssumeRolePolicyDocument: @@ -292,5 +397,8 @@ Resources: Service: - ec2.amazonaws.com Version: 2012-10-17 + ManagedPolicyArns: + - arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy + - arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy RoleName: nodes.cluster-api-provider-aws.sigs.k8s.io Type: AWS::IAM::Role diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_enable.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_disable.yaml similarity index 77% rename from cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_enable.yaml rename to cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_disable.yaml index db0f45af48..27fda19e3b 100644 --- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_enable.yaml +++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_disable.yaml @@ -250,96 +250,6 @@ Resources: Effect: Allow Resource: - arn:*:secretsmanager:*:*:secret:aws.cluster.x-k8s.io/* - - Action: - - ssm:GetParameter - Effect: Allow - Resource: - - arn:*:ssm:*:*:parameter/aws/service/eks/optimized-ami/* - - Action: - - iam:CreateServiceLinkedRole - Condition: - StringLike: - iam:AWSServiceName: eks.amazonaws.com - Effect: Allow - Resource: - - arn:*:iam::*:role/aws-service-role/eks.amazonaws.com/AWSServiceRoleForAmazonEKS - - Action: - - iam:CreateServiceLinkedRole - Condition: - StringLike: - iam:AWSServiceName: eks-nodegroup.amazonaws.com - Effect: Allow - Resource: - - arn:*:iam::*:role/aws-service-role/eks-nodegroup.amazonaws.com/AWSServiceRoleForAmazonEKSNodegroup - - Action: - - iam:CreateServiceLinkedRole - Condition: - StringLike: - iam:AWSServiceName: eks-fargate.amazonaws.com - Effect: Allow - Resource: - - arn:aws:iam::*:role/aws-service-role/eks-fargate-pods.amazonaws.com/AWSServiceRoleForAmazonEKSForFargate - - Action: - - iam:GetRole - - iam:ListAttachedRolePolicies - Effect: Allow - Resource: - - arn:*:iam::*:role/* - - Action: - - iam:GetPolicy - Effect: Allow - Resource: - - arn:aws:iam::aws:policy/AmazonEKSClusterPolicy - - Action: - - eks:DescribeCluster - - eks:ListClusters - - eks:CreateCluster - - eks:TagResource - - eks:UpdateClusterVersion - - eks:DeleteCluster - - eks:UpdateClusterConfig - - eks:UntagResource - - eks:UpdateNodegroupVersion - - eks:DescribeNodegroup - - eks:DeleteNodegroup - - eks:UpdateNodegroupConfig - - eks:CreateNodegroup - - eks:AssociateEncryptionConfig - Effect: Allow - 
Resource: - - arn:*:eks:*:*:cluster/* - - arn:*:eks:*:*:nodegroup/*/*/* - - Action: - - eks:ListAddons - - eks:CreateAddon - - eks:DescribeAddonVersions - - eks:DescribeAddon - - eks:DeleteAddon - - eks:UpdateAddon - - eks:TagResource - - eks:DescribeFargateProfile - - eks:CreateFargateProfile - - eks:DeleteFargateProfile - Effect: Allow - Resource: - - '*' - - Action: - - iam:PassRole - Condition: - StringEquals: - iam:PassedToService: eks.amazonaws.com - Effect: Allow - Resource: - - '*' - - Action: - - kms:CreateGrant - - kms:DescribeKey - Condition: - ForAnyValue:StringLike: - kms:ResourceAliases: alias/cluster-api-provider-aws-* - Effect: Allow - Resource: - - '*' Version: 2012-10-17 Roles: - Ref: AWSIAMRoleControllers @@ -382,9 +292,5 @@ Resources: Service: - ec2.amazonaws.com Version: 2012-10-17 - ManagedPolicyArns: - - arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy - - arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy - - arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly RoleName: nodes.cluster-api-provider-aws.sigs.k8s.io Type: AWS::IAM::Role diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_kms_prefix.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_kms_prefix.yaml index 2657d650f6..749a9d23c9 100644 --- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_kms_prefix.yaml +++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_kms_prefix.yaml @@ -371,6 +371,21 @@ Resources: Version: 2012-10-17 RoleName: controllers.cluster-api-provider-aws.sigs.k8s.io Type: AWS::IAM::Role + AWSIAMRoleEKSControlPlane: + Properties: + AssumeRolePolicyDocument: + Statement: + - Action: + - sts:AssumeRole + Effect: Allow + Principal: + Service: + - eks.amazonaws.com + Version: 2012-10-17 + ManagedPolicyArns: + - arn:aws:iam::aws:policy/AmazonEKSClusterPolicy + RoleName: eks-controlplane.cluster-api-provider-aws.sigs.k8s.io + Type: AWS::IAM::Role AWSIAMRoleNodes: Properties: AssumeRolePolicyDocument: diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_extra_statements.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_extra_statements.yaml index 23986d43c0..d0d38042de 100644 --- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_extra_statements.yaml +++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_extra_statements.yaml @@ -255,6 +255,96 @@ Resources: Effect: Allow Resource: - arn:*:secretsmanager:*:*:secret:aws.cluster.x-k8s.io/* + - Action: + - ssm:GetParameter + Effect: Allow + Resource: + - arn:*:ssm:*:*:parameter/aws/service/eks/optimized-ami/* + - Action: + - iam:CreateServiceLinkedRole + Condition: + StringLike: + iam:AWSServiceName: eks.amazonaws.com + Effect: Allow + Resource: + - arn:*:iam::*:role/aws-service-role/eks.amazonaws.com/AWSServiceRoleForAmazonEKS + - Action: + - iam:CreateServiceLinkedRole + Condition: + StringLike: + iam:AWSServiceName: eks-nodegroup.amazonaws.com + Effect: Allow + Resource: + - arn:*:iam::*:role/aws-service-role/eks-nodegroup.amazonaws.com/AWSServiceRoleForAmazonEKSNodegroup + - Action: + - iam:CreateServiceLinkedRole + Condition: + StringLike: + iam:AWSServiceName: eks-fargate.amazonaws.com + Effect: Allow + Resource: + - arn:aws:iam::*:role/aws-service-role/eks-fargate-pods.amazonaws.com/AWSServiceRoleForAmazonEKSForFargate + - Action: + - iam:GetRole + - iam:ListAttachedRolePolicies + Effect: Allow + Resource: + - arn:*:iam::*:role/* + - Action: + - iam:GetPolicy + Effect: Allow + Resource: + - 
arn:aws:iam::aws:policy/AmazonEKSClusterPolicy + - Action: + - eks:DescribeCluster + - eks:ListClusters + - eks:CreateCluster + - eks:TagResource + - eks:UpdateClusterVersion + - eks:DeleteCluster + - eks:UpdateClusterConfig + - eks:UntagResource + - eks:UpdateNodegroupVersion + - eks:DescribeNodegroup + - eks:DeleteNodegroup + - eks:UpdateNodegroupConfig + - eks:CreateNodegroup + - eks:AssociateEncryptionConfig + Effect: Allow + Resource: + - arn:*:eks:*:*:cluster/* + - arn:*:eks:*:*:nodegroup/*/*/* + - Action: + - eks:ListAddons + - eks:CreateAddon + - eks:DescribeAddonVersions + - eks:DescribeAddon + - eks:DeleteAddon + - eks:UpdateAddon + - eks:TagResource + - eks:DescribeFargateProfile + - eks:CreateFargateProfile + - eks:DeleteFargateProfile + Effect: Allow + Resource: + - '*' + - Action: + - iam:PassRole + Condition: + StringEquals: + iam:PassedToService: eks.amazonaws.com + Effect: Allow + Resource: + - '*' + - Action: + - kms:CreateGrant + - kms:DescribeKey + Condition: + ForAnyValue:StringLike: + kms:ResourceAliases: alias/cluster-api-provider-aws-* + Effect: Allow + Resource: + - '*' Version: 2012-10-17 Roles: - Ref: AWSIAMRoleControllers @@ -306,6 +396,21 @@ Resources: PolicyName: cluster-api-provider-aws-sigs-k8s-io RoleName: controllers.cluster-api-provider-aws.sigs.k8s.io Type: AWS::IAM::Role + AWSIAMRoleEKSControlPlane: + Properties: + AssumeRolePolicyDocument: + Statement: + - Action: + - sts:AssumeRole + Effect: Allow + Principal: + Service: + - eks.amazonaws.com + Version: 2012-10-17 + ManagedPolicyArns: + - arn:aws:iam::aws:policy/AmazonEKSClusterPolicy + RoleName: eks-controlplane.cluster-api-provider-aws.sigs.k8s.io + Type: AWS::IAM::Role AWSIAMRoleNodes: Properties: AssumeRolePolicyDocument: @@ -317,6 +422,9 @@ Resources: Service: - ec2.amazonaws.com Version: 2012-10-17 + ManagedPolicyArns: + - arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy + - arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy Policies: - PolicyDocument: Statement: diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_ssm_secret_backend.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_ssm_secret_backend.yaml index 573bf41673..7a10384c38 100644 --- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_ssm_secret_backend.yaml +++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_ssm_secret_backend.yaml @@ -250,6 +250,96 @@ Resources: Effect: Allow Resource: - arn:*:ssm:*:*:parameter/cluster.x-k8s.io/* + - Action: + - ssm:GetParameter + Effect: Allow + Resource: + - arn:*:ssm:*:*:parameter/aws/service/eks/optimized-ami/* + - Action: + - iam:CreateServiceLinkedRole + Condition: + StringLike: + iam:AWSServiceName: eks.amazonaws.com + Effect: Allow + Resource: + - arn:*:iam::*:role/aws-service-role/eks.amazonaws.com/AWSServiceRoleForAmazonEKS + - Action: + - iam:CreateServiceLinkedRole + Condition: + StringLike: + iam:AWSServiceName: eks-nodegroup.amazonaws.com + Effect: Allow + Resource: + - arn:*:iam::*:role/aws-service-role/eks-nodegroup.amazonaws.com/AWSServiceRoleForAmazonEKSNodegroup + - Action: + - iam:CreateServiceLinkedRole + Condition: + StringLike: + iam:AWSServiceName: eks-fargate.amazonaws.com + Effect: Allow + Resource: + - arn:aws:iam::*:role/aws-service-role/eks-fargate-pods.amazonaws.com/AWSServiceRoleForAmazonEKSForFargate + - Action: + - iam:GetRole + - iam:ListAttachedRolePolicies + Effect: Allow + Resource: + - arn:*:iam::*:role/* + - Action: + - iam:GetPolicy + Effect: Allow + Resource: + - arn:aws:iam::aws:policy/AmazonEKSClusterPolicy + - 
Action: + - eks:DescribeCluster + - eks:ListClusters + - eks:CreateCluster + - eks:TagResource + - eks:UpdateClusterVersion + - eks:DeleteCluster + - eks:UpdateClusterConfig + - eks:UntagResource + - eks:UpdateNodegroupVersion + - eks:DescribeNodegroup + - eks:DeleteNodegroup + - eks:UpdateNodegroupConfig + - eks:CreateNodegroup + - eks:AssociateEncryptionConfig + Effect: Allow + Resource: + - arn:*:eks:*:*:cluster/* + - arn:*:eks:*:*:nodegroup/*/*/* + - Action: + - eks:ListAddons + - eks:CreateAddon + - eks:DescribeAddonVersions + - eks:DescribeAddon + - eks:DeleteAddon + - eks:UpdateAddon + - eks:TagResource + - eks:DescribeFargateProfile + - eks:CreateFargateProfile + - eks:DeleteFargateProfile + Effect: Allow + Resource: + - '*' + - Action: + - iam:PassRole + Condition: + StringEquals: + iam:PassedToService: eks.amazonaws.com + Effect: Allow + Resource: + - '*' + - Action: + - kms:CreateGrant + - kms:DescribeKey + Condition: + ForAnyValue:StringLike: + kms:ResourceAliases: alias/cluster-api-provider-aws-* + Effect: Allow + Resource: + - '*' Version: 2012-10-17 Roles: - Ref: AWSIAMRoleControllers @@ -281,6 +371,21 @@ Resources: Version: 2012-10-17 RoleName: controllers.cluster-api-provider-aws.sigs.k8s.io Type: AWS::IAM::Role + AWSIAMRoleEKSControlPlane: + Properties: + AssumeRolePolicyDocument: + Statement: + - Action: + - sts:AssumeRole + Effect: Allow + Principal: + Service: + - eks.amazonaws.com + Version: 2012-10-17 + ManagedPolicyArns: + - arn:aws:iam::aws:policy/AmazonEKSClusterPolicy + RoleName: eks-controlplane.cluster-api-provider-aws.sigs.k8s.io + Type: AWS::IAM::Role AWSIAMRoleNodes: Properties: AssumeRolePolicyDocument: @@ -292,5 +397,8 @@ Resources: Service: - ec2.amazonaws.com Version: 2012-10-17 + ManagedPolicyArns: + - arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy + - arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy RoleName: nodes.cluster-api-provider-aws.sigs.k8s.io Type: AWS::IAM::Role diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/template.go b/cmd/clusterawsadm/cloudformation/bootstrap/template.go index 739b9f947d..c357eedee6 100644 --- a/cmd/clusterawsadm/cloudformation/bootstrap/template.go +++ b/cmd/clusterawsadm/cloudformation/bootstrap/template.go @@ -167,7 +167,7 @@ func (t Template) RenderCloudFormation() *cloudformation.Template { }, } - if !t.Spec.EKS.DefaultControlPlaneRole.Disable { + if !t.Spec.EKS.DefaultControlPlaneRole.Disable && !t.Spec.EKS.Disable { template.Resources[AWSIAMRoleEKSControlPlane] = &cfn_iam.Role{ RoleName: ekscontrolplanev1.DefaultEKSControlPlaneRole, AssumeRolePolicyDocument: AssumeRolePolicy(v1alpha4.PrincipalService, []string{"eks.amazonaws.com"}), @@ -176,7 +176,7 @@ func (t Template) RenderCloudFormation() *cloudformation.Template { } } - if !t.Spec.EKS.ManagedMachinePool.Disable { + if !t.Spec.EKS.ManagedMachinePool.Disable && !t.Spec.EKS.Disable { template.Resources[AWSIAMRoleEKSNodegroup] = &cfn_iam.Role{ RoleName: infrav1exp.DefaultEKSNodegroupRole, AssumeRolePolicyDocument: AssumeRolePolicy(v1alpha4.PrincipalService, []string{"ec2.amazonaws.com", "eks.amazonaws.com"}), @@ -185,7 +185,7 @@ func (t Template) RenderCloudFormation() *cloudformation.Template { } } - if !t.Spec.EKS.Fargate.Disable { + if !t.Spec.EKS.Fargate.Disable && !t.Spec.EKS.Disable { template.Resources[AWSIAMRoleEKSFargate] = &cfn_iam.Role{ RoleName: infrav1exp.DefaultEKSFargateRole, AssumeRolePolicyDocument: AssumeRolePolicy(v1alpha4.PrincipalService, []string{eksiam.EKSFargateService}), diff --git 
a/cmd/clusterawsadm/cloudformation/bootstrap/template_test.go b/cmd/clusterawsadm/cloudformation/bootstrap/template_test.go index 27abfc33bc..6b92481bb6 100644 --- a/cmd/clusterawsadm/cloudformation/bootstrap/template_test.go +++ b/cmd/clusterawsadm/cloudformation/bootstrap/template_test.go @@ -94,20 +94,10 @@ func Test_RenderCloudformation(t *testing.T) { return t }, }, - { - fixture: "with_eks_enable", - template: func() Template { - t := NewTemplate() - t.Spec.EKS.Enable = true - t.Spec.Nodes.EC2ContainerRegistryReadOnly = true - return t - }, - }, { fixture: "with_eks_default_roles", template: func() Template { t := NewTemplate() - t.Spec.EKS.Enable = true t.Spec.Nodes.EC2ContainerRegistryReadOnly = true t.Spec.EKS.DefaultControlPlaneRole.Disable = false t.Spec.EKS.ManagedMachinePool.Disable = false @@ -119,7 +109,6 @@ func Test_RenderCloudformation(t *testing.T) { fixture: "with_eks_kms_prefix", template: func() Template { t := NewTemplate() - t.Spec.EKS.Enable = true t.Spec.Nodes.EC2ContainerRegistryReadOnly = true t.Spec.EKS.KMSAliasPrefix = "custom-prefix-*" return t @@ -161,6 +150,14 @@ func Test_RenderCloudformation(t *testing.T) { return t }, }, + { + fixture: "with_eks_disable", + template: func() Template { + t := NewTemplate() + t.Spec.EKS.Disable = true + return t + }, + }, } for _, c := range cases { diff --git a/bootstrap/eks/config/crd/bases/bootstrap.cluster.x-k8s.io_eksconfigs.yaml b/config/crd/bases/bootstrap.cluster.x-k8s.io_eksconfigs.yaml similarity index 100% rename from bootstrap/eks/config/crd/bases/bootstrap.cluster.x-k8s.io_eksconfigs.yaml rename to config/crd/bases/bootstrap.cluster.x-k8s.io_eksconfigs.yaml diff --git a/bootstrap/eks/config/crd/bases/bootstrap.cluster.x-k8s.io_eksconfigtemplates.yaml b/config/crd/bases/bootstrap.cluster.x-k8s.io_eksconfigtemplates.yaml similarity index 100% rename from bootstrap/eks/config/crd/bases/bootstrap.cluster.x-k8s.io_eksconfigtemplates.yaml rename to config/crd/bases/bootstrap.cluster.x-k8s.io_eksconfigtemplates.yaml diff --git a/controlplane/eks/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml b/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml similarity index 100% rename from controlplane/eks/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml rename to config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedclusters.yaml index 983b48527a..65ef97312e 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedclusters.yaml @@ -104,93 +104,6 @@ spec: type: object type: object served: true - storage: false - subresources: - status: {} - - additionalPrinterColumns: - - description: Cluster to which this AWSManagedControl belongs - jsonPath: .metadata.labels.cluster\.x-k8s\.io/cluster-name - name: Cluster - type: string - - description: Control plane infrastructure is ready for worker nodes - jsonPath: .status.ready - name: Ready - type: string - - description: AWS VPC the control plane is using - jsonPath: .spec.network.vpc.id - name: VPC - type: string - - description: API Endpoint - jsonPath: .spec.controlPlaneEndpoint.host - name: Endpoint - priority: 1 - type: string - name: v1alpha4 - schema: - openAPIV3Schema: - description: AWSManagedCluster is the Schema for the awsmanagedclusters 
API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: AWSManagedClusterSpec defines the desired state of AWSManagedCluster - properties: - controlPlaneEndpoint: - description: ControlPlaneEndpoint represents the endpoint used to - communicate with the control plane. - properties: - host: - description: The hostname on which the API server is serving. - type: string - port: - description: The port on which the API server is serving. - format: int32 - type: integer - required: - - host - - port - type: object - type: object - status: - description: AWSManagedClusterStatus defines the observed state of AWSManagedCluster - properties: - failureDomains: - additionalProperties: - description: FailureDomainSpec is the Schema for Cluster API failure - domains. It allows controllers to understand how many failure - domains a cluster can optionally span across. - properties: - attributes: - additionalProperties: - type: string - description: Attributes is a free form map of attributes an - infrastructure provider might use or require. - type: object - controlPlane: - description: ControlPlane determines if this failure domain - is suitable for use by control plane machines. - type: boolean - type: object - description: FailureDomains specifies a list fo available availability - zones that can be used - type: object - ready: - description: Ready is when the AWSManagedControlPlane has a API server - URL. 
-                type: boolean
-            type: object
-        type: object
-    served: true
     storage: true
     subresources:
       status: {}
diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml
index dbf2dd3b0c..527b925026 100644
--- a/config/crd/kustomization.yaml
+++ b/config/crd/kustomization.yaml
@@ -10,13 +10,15 @@ resources:
 - bases/infrastructure.cluster.x-k8s.io_awsclusters.yaml
 - bases/infrastructure.cluster.x-k8s.io_awsfargateprofiles.yaml
 - bases/infrastructure.cluster.x-k8s.io_awsmachinetemplates.yaml
-- bases/infrastructure.cluster.x-k8s.io_awsmanagedclusters.yaml
 - bases/infrastructure.cluster.x-k8s.io_awsmachinepools.yaml
 - bases/infrastructure.cluster.x-k8s.io_awsmanagedmachinepools.yaml
 - bases/infrastructure.cluster.x-k8s.io_awsclusterroleidentities.yaml
 - bases/infrastructure.cluster.x-k8s.io_awsclusterstaticidentities.yaml
 - bases/infrastructure.cluster.x-k8s.io_awsclustercontrolleridentities.yaml
 - bases/infrastructure.cluster.x-k8s.io_awsclustertemplates.yaml
+- bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml
+- bases/bootstrap.cluster.x-k8s.io_eksconfigs.yaml
+- bases/bootstrap.cluster.x-k8s.io_eksconfigtemplates.yaml
 # +kubebuilder:scaffold:crdkustomizeresource

 patchesStrategicMerge:
@@ -28,6 +30,9 @@ patchesStrategicMerge:
 - patches/webhook_in_awsclustercontrolleridentities.yaml
 - patches/webhook_in_awsclusterroleidentities.yaml
 - patches/webhook_in_awsclustertemplates.yaml
+- patches/webhook_in_awsmanagedcontrolplanes.yaml
+- patches/webhook_in_eksconfigs.yaml
+- patches/webhook_in_eksconfigtemplates.yaml
 # +kubebuilder:scaffold:crdkustomizewebhookpatch

 # [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix.
@@ -38,6 +43,9 @@ patchesStrategicMerge:
 - patches/cainjection_in_awsclustercontrolleridentities.yaml
 - patches/cainjection_in_awsclusterroleidentities.yaml
 - patches/cainjection_in_awsclustertemplates.yaml
+- patches/cainjection_in_awsmanagedcontrolplanes.yaml
+- patches/cainjection_in_eksconfigs.yaml
+- patches/cainjection_in_eksconfigtemplates.yaml
 # +kubebuilder:scaffold:crdkustomizecainjectionpatch

 # [LABEL] To enable label, uncomment all the sections with [LABEL] prefix.
diff --git a/controlplane/eks/config/crd/patches/cainjection_in_awsmanagedcontrolplanes.yaml b/config/crd/patches/cainjection_in_awsmanagedcontrolplanes.yaml similarity index 100% rename from controlplane/eks/config/crd/patches/cainjection_in_awsmanagedcontrolplanes.yaml rename to config/crd/patches/cainjection_in_awsmanagedcontrolplanes.yaml diff --git a/bootstrap/eks/config/crd/patches/cainjection_in_eksconfigs.yaml b/config/crd/patches/cainjection_in_eksconfigs.yaml similarity index 100% rename from bootstrap/eks/config/crd/patches/cainjection_in_eksconfigs.yaml rename to config/crd/patches/cainjection_in_eksconfigs.yaml diff --git a/bootstrap/eks/config/crd/patches/cainjection_in_eksconfigtemplates.yaml b/config/crd/patches/cainjection_in_eksconfigtemplates.yaml similarity index 100% rename from bootstrap/eks/config/crd/patches/cainjection_in_eksconfigtemplates.yaml rename to config/crd/patches/cainjection_in_eksconfigtemplates.yaml diff --git a/controlplane/eks/config/crd/patches/webhook_in_awsmanagedcontrolplanes.yaml b/config/crd/patches/webhook_in_awsmanagedcontrolplanes.yaml similarity index 100% rename from controlplane/eks/config/crd/patches/webhook_in_awsmanagedcontrolplanes.yaml rename to config/crd/patches/webhook_in_awsmanagedcontrolplanes.yaml diff --git a/bootstrap/eks/config/crd/patches/webhook_in_eksconfigs.yaml b/config/crd/patches/webhook_in_eksconfigs.yaml similarity index 100% rename from bootstrap/eks/config/crd/patches/webhook_in_eksconfigs.yaml rename to config/crd/patches/webhook_in_eksconfigs.yaml diff --git a/bootstrap/eks/config/crd/patches/webhook_in_eksconfigtemplates.yaml b/config/crd/patches/webhook_in_eksconfigtemplates.yaml similarity index 100% rename from bootstrap/eks/config/crd/patches/webhook_in_eksconfigtemplates.yaml rename to config/crd/patches/webhook_in_eksconfigtemplates.yaml diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index 54454a7121..59f3635bdd 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -20,7 +20,7 @@ spec: - args: - "--metrics-bind-addr=127.0.0.1:8080" - "--leader-elect" - - "--feature-gates=EKS=${EXP_EKS:=false},EKSEnableIAM=${EXP_EKS_IAM:=false},MachinePool=${EXP_MACHINE_POOL:=false},EventBridgeInstanceState=${EVENT_BRIDGE_INSTANCE_STATE:=false},AutoControllerIdentityCreator=${AUTO_CONTROLLER_IDENTITY_CREATOR:=true}" + - "--feature-gates=EKS=${CAPA_EKS:=true},EKSEnableIAM=${CAPA_EKS_IAM:=false},EKSAllowAddRoles=${CAPA_EKS_ADD_ROLES:=false},EKSFargate=${EXP_EKS_FARGATE:=false},MachinePool=${EXP_MACHINE_POOL:=false},EventBridgeInstanceState=${EVENT_BRIDGE_INSTANCE_STATE:=false},AutoControllerIdentityCreator=${AUTO_CONTROLLER_IDENTITY_CREATOR:=true}" image: controller:latest imagePullPolicy: Always name: manager diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index b81ce3a832..4d4043d854 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -37,6 +37,26 @@ rules: - patch - update - watch +- apiGroups: + - bootstrap.cluster.x-k8s.io + resources: + - eksconfigs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - bootstrap.cluster.x-k8s.io + resources: + - eksconfigs/status + verbs: + - get + - patch + - update - apiGroups: - cluster.x-k8s.io resources: @@ -46,6 +66,24 @@ rules: - get - list - watch +- apiGroups: + - cluster.x-k8s.io + resources: + - clusters + - machinepools + - machines + verbs: + - get + - list + - watch +- apiGroups: + - cluster.x-k8s.io + resources: + - machinepools + verbs: + - get + - list + 
- watch - apiGroups: - cluster.x-k8s.io resources: @@ -64,6 +102,18 @@ rules: - get - list - watch +- apiGroups: + - controlplane.cluster.x-k8s.io + resources: + - awsmanagedcontrolplanes + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - controlplane.cluster.x-k8s.io resources: @@ -73,6 +123,14 @@ rules: - get - list - watch +- apiGroups: + - controlplane.cluster.x-k8s.io + resources: + - awsmanagedcontrolplanes/status + verbs: + - get + - patch + - update - apiGroups: - "" resources: @@ -92,6 +150,16 @@ rules: - get - list - watch +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - awsclustercontrolleridentities + - awsclusterroleidentities + - awsclusterstaticidentities + verbs: + - get + - list + - watch - apiGroups: - infrastructure.cluster.x-k8s.io resources: @@ -153,6 +221,15 @@ rules: - patch - update - watch +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - awsmachinepools + - awsmachinepools/status + verbs: + - get + - list + - watch - apiGroups: - infrastructure.cluster.x-k8s.io resources: @@ -176,42 +253,40 @@ rules: - apiGroups: - infrastructure.cluster.x-k8s.io resources: + - awsmachines - awsmachines/status verbs: - get - - patch - - update + - list + - watch - apiGroups: - infrastructure.cluster.x-k8s.io resources: - - awsmanagedclusters + - awsmachines/status verbs: - - create - - delete - get - - list - patch - update - - watch - apiGroups: - infrastructure.cluster.x-k8s.io resources: - - awsmanagedclusters/status + - awsmanagedmachinepools verbs: + - create + - delete - get + - list - patch - update + - watch - apiGroups: - infrastructure.cluster.x-k8s.io resources: - awsmanagedmachinepools + - awsmanagedmachinepools/status verbs: - - create - - delete - get - list - - patch - - update - watch - apiGroups: - infrastructure.cluster.x-k8s.io diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml index 0fe8111f10..2fe853da89 100644 --- a/config/webhook/manifests.yaml +++ b/config/webhook/manifests.yaml @@ -179,10 +179,10 @@ webhooks: service: name: webhook-service namespace: system - path: /mutate-infrastructure-cluster-x-k8s-io-v1alpha4-awsmanagedcluster + path: /mutate-infrastructure-cluster-x-k8s-io-v1alpha4-awsmanagedmachinepool failurePolicy: Fail matchPolicy: Equivalent - name: default.awsmanagedcluster.infrastructure.cluster.x-k8s.io + name: default.awsmanagedmachinepool.infrastructure.cluster.x-k8s.io rules: - apiGroups: - infrastructure.cluster.x-k8s.io @@ -192,7 +192,7 @@ webhooks: - CREATE - UPDATE resources: - - awsmanagedclusters + - awsmanagedmachinepools sideEffects: None - admissionReviewVersions: - v1beta1 @@ -200,20 +200,62 @@ webhooks: service: name: webhook-service namespace: system - path: /mutate-infrastructure-cluster-x-k8s-io-v1alpha4-awsmanagedmachinepool + path: /mutate-controlplane-cluster-x-k8s-io-v1alpha4-awsmanagedcontrolplane failurePolicy: Fail matchPolicy: Equivalent - name: default.awsmanagedmachinepool.infrastructure.cluster.x-k8s.io + name: default.awsmanagedcontrolplanes.controlplane.cluster.x-k8s.io rules: - apiGroups: - - infrastructure.cluster.x-k8s.io + - controlplane.cluster.x-k8s.io apiVersions: - v1alpha4 operations: - CREATE - UPDATE resources: - - awsmanagedmachinepools + - awsmanagedcontrolplanes + sideEffects: None +- admissionReviewVersions: + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-bootstrap-cluster-x-k8s-io-v1alpha4-eksconfig + failurePolicy: Fail + matchPolicy: Equivalent + 
name: default.eksconfigs.bootstrap.cluster.x-k8s.io + rules: + - apiGroups: + - bootstrap.cluster.x-k8s.io + apiVersions: + - v1alpha4 + operations: + - CREATE + - UPDATE + resources: + - eksconfig + sideEffects: None +- admissionReviewVersions: + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-bootstrap-cluster-x-k8s-io-v1alpha4-eksconfigtemplate + failurePolicy: Fail + matchPolicy: Equivalent + name: default.eksconfigtemplates.bootstrap.cluster.x-k8s.io + rules: + - apiGroups: + - bootstrap.cluster.x-k8s.io + apiVersions: + - v1alpha4 + operations: + - CREATE + - UPDATE + resources: + - eksconfigtemplate sideEffects: None --- @@ -418,10 +460,10 @@ webhooks: service: name: webhook-service namespace: system - path: /validate-infrastructure-cluster-x-k8s-io-v1alpha4-awsmanagedcluster + path: /validate-infrastructure-cluster-x-k8s-io-v1alpha4-awsmanagedmachinepool failurePolicy: Fail matchPolicy: Equivalent - name: validation.awsmanagedcluster.infrastructure.cluster.x-k8s.io + name: validation.awsmanagedmachinepool.infrastructure.cluster.x-k8s.io rules: - apiGroups: - infrastructure.cluster.x-k8s.io @@ -431,7 +473,7 @@ webhooks: - CREATE - UPDATE resources: - - awsmanagedclusters + - awsmanagedmachinepools sideEffects: None - admissionReviewVersions: - v1beta1 @@ -439,18 +481,60 @@ webhooks: service: name: webhook-service namespace: system - path: /validate-infrastructure-cluster-x-k8s-io-v1alpha4-awsmanagedmachinepool + path: /validate-controlplane-cluster-x-k8s-io-v1alpha4-awsmanagedcontrolplane failurePolicy: Fail matchPolicy: Equivalent - name: validation.awsmanagedmachinepool.infrastructure.cluster.x-k8s.io + name: validation.awsmanagedcontrolplanes.controlplane.cluster.x-k8s.io rules: - apiGroups: - - infrastructure.cluster.x-k8s.io + - controlplane.cluster.x-k8s.io apiVersions: - v1alpha4 operations: - CREATE - UPDATE resources: - - awsmanagedmachinepools + - awsmanagedcontrolplanes + sideEffects: None +- admissionReviewVersions: + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-bootstrap-cluster-x-k8s-io-v1alpha4-eksconfig + failurePolicy: Fail + matchPolicy: Equivalent + name: validation.eksconfigs.bootstrap.cluster.x-k8s.io + rules: + - apiGroups: + - bootstrap.cluster.x-k8s.io + apiVersions: + - v1alpha4 + operations: + - CREATE + - UPDATE + resources: + - eksconfig + sideEffects: None +- admissionReviewVersions: + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-bootstrap-cluster-x-k8s-io-v1alpha4-eksconfigtemplate + failurePolicy: Fail + matchPolicy: Equivalent + name: validation.eksconfigtemplates.bootstrap.cluster.x-k8s.io + rules: + - apiGroups: + - bootstrap.cluster.x-k8s.io + apiVersions: + - v1alpha4 + operations: + - CREATE + - UPDATE + resources: + - eksconfigtemplate sideEffects: None diff --git a/controlplane/eks/api/v1alpha4/suite_test.go b/controlplane/eks/api/v1alpha4/suite_test.go index 82c11b5716..06fc483659 100644 --- a/controlplane/eks/api/v1alpha4/suite_test.go +++ b/controlplane/eks/api/v1alpha4/suite_test.go @@ -43,9 +43,9 @@ func TestMain(m *testing.M) { func setup() { utilruntime.Must(AddToScheme(scheme.Scheme)) testEnvConfig := helpers.NewTestEnvironmentConfiguration([]string{ - path.Join("controlplane", "eks", "config", "crd", "bases"), + path.Join("config", "crd", "bases"), }, - ).WithWebhookConfiguration("managed", path.Join("controlplane", "eks", "config", "webhook", "manifests.yaml")) + 
).WithWebhookConfiguration("managed", path.Join("config", "webhook", "manifests.yaml")) var err error testEnv, err = testEnvConfig.Build() if err != nil { diff --git a/controlplane/eks/config/certmanager/certificate.yaml b/controlplane/eks/config/certmanager/certificate.yaml deleted file mode 100644 index d3924656c9..0000000000 --- a/controlplane/eks/config/certmanager/certificate.yaml +++ /dev/null @@ -1,26 +0,0 @@ -# The following manifests contain a self-signed issuer CR and a certificate CR. -# More document can be found at https://docs.cert-manager.io -# WARNING: Targets CertManager 0.11 check https://docs.cert-manager.io/en/latest/tasks/upgrading/index.html for -# breaking changes -apiVersion: cert-manager.io/v1 -kind: Issuer -metadata: - name: selfsigned-issuer - namespace: system -spec: - selfSigned: {} ---- -apiVersion: cert-manager.io/v1 -kind: Certificate -metadata: - name: serving-cert # this name should match the one appeared in kustomizeconfig.yaml - namespace: system -spec: - # $(SERVICE_NAME) and $(SERVICE_NAMESPACE) will be substituted by kustomize - dnsNames: - - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc - - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc.cluster.local - issuerRef: - kind: Issuer - name: selfsigned-issuer - secretName: $(SERVICE_NAME)-cert # this secret will not be prefixed, since it's not managed by kustomize diff --git a/controlplane/eks/config/certmanager/kustomization.yaml b/controlplane/eks/config/certmanager/kustomization.yaml deleted file mode 100644 index bebea5a595..0000000000 --- a/controlplane/eks/config/certmanager/kustomization.yaml +++ /dev/null @@ -1,5 +0,0 @@ -resources: -- certificate.yaml - -configurations: -- kustomizeconfig.yaml diff --git a/controlplane/eks/config/certmanager/kustomizeconfig.yaml b/controlplane/eks/config/certmanager/kustomizeconfig.yaml deleted file mode 100644 index d6109c9d88..0000000000 --- a/controlplane/eks/config/certmanager/kustomizeconfig.yaml +++ /dev/null @@ -1,19 +0,0 @@ -# This configuration is for teaching kustomize how to update name ref and var substitution -nameReference: -- kind: Issuer - group: cert-manager.io - fieldSpecs: - - kind: Certificate - group: cert-manager.io - path: spec/issuerRef/name - -varReference: -- kind: Certificate - group: cert-manager.io - path: spec/commonName -- kind: Certificate - group: cert-manager.io - path: spec/dnsNames -- kind: Certificate - group: cert-manager.io - path: spec/secretName diff --git a/controlplane/eks/config/crd/kustomization.yaml b/controlplane/eks/config/crd/kustomization.yaml deleted file mode 100644 index b922cb3cc4..0000000000 --- a/controlplane/eks/config/crd/kustomization.yaml +++ /dev/null @@ -1,25 +0,0 @@ -commonLabels: - cluster.x-k8s.io/v1alpha3: v1alpha3 - cluster.x-k8s.io/v1alpha4: v1alpha4 - -# This kustomization.yaml is not intended to be run by itself, -# since it depends on service name and namespace that are out of this kustomize package. -# It should be run by config/default -resources: -- bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml -# +kubebuilder:scaffold:crdkustomizeresource - -patchesStrategicMerge: -# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. -# patches here are for enabling the conversion webhook for each CRD -- patches/webhook_in_awsmanagedcontrolplanes.yaml -# +kubebuilder:scaffold:crdkustomizewebhookpatch - -# [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix. 
-# patches here are for enabling the CA injection for each CRD -- patches/cainjection_in_awsmanagedcontrolplanes.yaml -# +kubebuilder:scaffold:crdkustomizecainjectionpatch - -# the following config is for teaching kustomize how to do kustomization for CRDs. -configurations: -- kustomizeconfig.yaml diff --git a/controlplane/eks/config/crd/kustomizeconfig.yaml b/controlplane/eks/config/crd/kustomizeconfig.yaml deleted file mode 100644 index 8e2d8d6b17..0000000000 --- a/controlplane/eks/config/crd/kustomizeconfig.yaml +++ /dev/null @@ -1,17 +0,0 @@ -# This file is for teaching kustomize how to substitute name and namespace reference in CRD -nameReference: -- kind: Service - version: v1 - fieldSpecs: - - kind: CustomResourceDefinition - group: apiextensions.k8s.io - path: spec/conversion/webhook/clientConfig/service/name - -namespace: -- kind: CustomResourceDefinition - group: apiextensions.k8s.io - path: spec/conversion/webhook/clientConfig/service/namespace - create: false - -varReference: -- path: metadata/annotations diff --git a/controlplane/eks/config/default/credentials.yaml b/controlplane/eks/config/default/credentials.yaml deleted file mode 100644 index aeff2b5337..0000000000 --- a/controlplane/eks/config/default/credentials.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: v1 -kind: Secret -metadata: - name: controlplane-manager-bootstrap-credentials - namespace: system -type: Opaque -data: - credentials: ${AWS_B64ENCODED_CREDENTIALS} \ No newline at end of file diff --git a/controlplane/eks/config/default/kustomization.yaml b/controlplane/eks/config/default/kustomization.yaml deleted file mode 100644 index e9a5662b2d..0000000000 --- a/controlplane/eks/config/default/kustomization.yaml +++ /dev/null @@ -1,58 +0,0 @@ -namespace: capa-eks-control-plane-system -namePrefix: capa-eks-control-plane- - -commonLabels: - cluster.x-k8s.io/provider: "control-plane-eks" - -resources: -- namespace.yaml -- credentials.yaml - -bases: -- ../rbac -- ../manager -- ../crd -- ../certmanager -- ../webhook - -patchesStrategicMerge: -- manager_credentials_patch.yaml -- manager_service_account_patch.yaml -- manager_iam_patch.yaml -- manager_auth_proxy_patch.yaml -- manager_image_patch.yaml -- manager_pull_policy.yaml -- manager_webhook_patch.yaml -- webhookcainjection_patch.yaml - -configurations: - - kustomizeconfig.yaml - -vars: - # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. 
- - name: CERTIFICATE_NAMESPACE # namespace of the certificate CR - objref: - kind: Certificate - group: cert-manager.io - version: v1 - name: serving-cert # this name should match the one in certificate.yaml - fieldref: - fieldpath: metadata.namespace - - name: CERTIFICATE_NAME - objref: - kind: Certificate - group: cert-manager.io - version: v1 - name: serving-cert # this name should match the one in certificate.yaml - - name: SERVICE_NAMESPACE # namespace of the service - objref: - kind: Service - version: v1 - name: webhook-service - fieldref: - fieldpath: metadata.namespace - - name: SERVICE_NAME - objref: - kind: Service - version: v1 - name: webhook-service diff --git a/controlplane/eks/config/default/kustomizeconfig.yaml b/controlplane/eks/config/default/kustomizeconfig.yaml deleted file mode 100644 index 524d39cc2b..0000000000 --- a/controlplane/eks/config/default/kustomizeconfig.yaml +++ /dev/null @@ -1,4 +0,0 @@ -# This configuration is for teaching kustomize how to update name ref and var substitution -varReference: - - kind: Deployment - path: spec/template/spec/volumes/secret/secretName diff --git a/controlplane/eks/config/default/manager_auth_proxy_patch.yaml b/controlplane/eks/config/default/manager_auth_proxy_patch.yaml deleted file mode 100644 index 955b3577d7..0000000000 --- a/controlplane/eks/config/default/manager_auth_proxy_patch.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# This patch inject a sidecar container which is a HTTP proxy for the controller manager, -# it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews. -apiVersion: apps/v1 -kind: Deployment -metadata: - name: controller-manager - namespace: system -spec: - template: - spec: - containers: - - name: kube-rbac-proxy - image: gcr.io/kubebuilder/kube-rbac-proxy:v0.8.0 - args: - - "--secure-listen-address=0.0.0.0:8443" - - "--upstream=http://127.0.0.1:8080/" - - "--logtostderr=true" - - "--v=10" - ports: - - containerPort: 8443 - name: http \ No newline at end of file diff --git a/controlplane/eks/config/default/manager_credentials_patch.yaml b/controlplane/eks/config/default/manager_credentials_patch.yaml deleted file mode 100644 index 36f626ac2f..0000000000 --- a/controlplane/eks/config/default/manager_credentials_patch.yaml +++ /dev/null @@ -1,20 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: controller-manager - namespace: system -spec: - template: - spec: - containers: - - name: manager - env: - - name: AWS_SHARED_CREDENTIALS_FILE - value: /home/.aws/credentials - volumeMounts: - - name: credentials - mountPath: /home/.aws - volumes: - - name: credentials - secret: - secretName: controlplane-manager-bootstrap-credentials diff --git a/controlplane/eks/config/default/manager_iam_patch.yaml b/controlplane/eks/config/default/manager_iam_patch.yaml deleted file mode 100644 index 071af144bd..0000000000 --- a/controlplane/eks/config/default/manager_iam_patch.yaml +++ /dev/null @@ -1,12 +0,0 @@ -# This patch injects annotations to run using KIAM / kube2iam -apiVersion: apps/v1 -kind: Deployment -metadata: - name: controller-manager - namespace: system -spec: - template: - spec: - metadata: - annotations: - iam.amazonaws.com/role: ${AWS_CONTROLLER_IAM_ROLE:=""} diff --git a/controlplane/eks/config/default/manager_image_patch.yaml b/controlplane/eks/config/default/manager_image_patch.yaml deleted file mode 100644 index 9f9f4e3a0f..0000000000 --- a/controlplane/eks/config/default/manager_image_patch.yaml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: apps/v1 -kind: 
Deployment -metadata: - name: controller-manager - namespace: system -spec: - template: - spec: - containers: - - image: gcr.io/k8s-staging-cluster-api-aws/eks-controlplane-controller:latest - name: manager \ No newline at end of file diff --git a/controlplane/eks/config/default/manager_pull_policy.yaml b/controlplane/eks/config/default/manager_pull_policy.yaml deleted file mode 100644 index f115cf9e80..0000000000 --- a/controlplane/eks/config/default/manager_pull_policy.yaml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: controller-manager - namespace: system -spec: - template: - spec: - containers: - - name: manager - imagePullPolicy: Always \ No newline at end of file diff --git a/controlplane/eks/config/default/manager_service_account_patch.yaml b/controlplane/eks/config/default/manager_service_account_patch.yaml deleted file mode 100644 index 5892631f46..0000000000 --- a/controlplane/eks/config/default/manager_service_account_patch.yaml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: controller-manager - namespace: system -spec: - template: - spec: - serviceAccountName: controller-manager - securityContext: - fsGroup: 1000 diff --git a/controlplane/eks/config/default/manager_webhook_patch.yaml b/controlplane/eks/config/default/manager_webhook_patch.yaml deleted file mode 100644 index f346436f63..0000000000 --- a/controlplane/eks/config/default/manager_webhook_patch.yaml +++ /dev/null @@ -1,23 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: controller-manager - namespace: system -spec: - template: - spec: - containers: - - name: manager - ports: - - containerPort: 9443 - name: webhook-server - protocol: TCP - volumeMounts: - - mountPath: /tmp/k8s-webhook-server/serving-certs - name: cert - readOnly: true - volumes: - - name: cert - secret: - defaultMode: 420 - secretName: $(SERVICE_NAME)-cert \ No newline at end of file diff --git a/controlplane/eks/config/default/namespace.yaml b/controlplane/eks/config/default/namespace.yaml deleted file mode 100644 index 550ec6d6ec..0000000000 --- a/controlplane/eks/config/default/namespace.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - labels: - control-plane: controller-manager - name: system \ No newline at end of file diff --git a/controlplane/eks/config/default/webhookcainjection_patch.yaml b/controlplane/eks/config/default/webhookcainjection_patch.yaml deleted file mode 100644 index 1a16db503c..0000000000 --- a/controlplane/eks/config/default/webhookcainjection_patch.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: admissionregistration.k8s.io/v1 -kind: MutatingWebhookConfiguration -metadata: - name: mutating-webhook-configuration - annotations: - cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) ---- -apiVersion: admissionregistration.k8s.io/v1 -kind: ValidatingWebhookConfiguration -metadata: - name: validating-webhook-configuration - annotations: - cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) \ No newline at end of file diff --git a/controlplane/eks/config/manager/kustomization.yaml b/controlplane/eks/config/manager/kustomization.yaml deleted file mode 100644 index 5c5f0b84cb..0000000000 --- a/controlplane/eks/config/manager/kustomization.yaml +++ /dev/null @@ -1,2 +0,0 @@ -resources: -- manager.yaml diff --git a/controlplane/eks/config/manager/manager.yaml b/controlplane/eks/config/manager/manager.yaml deleted file mode 100644 index 
a59055b473..0000000000 --- a/controlplane/eks/config/manager/manager.yaml +++ /dev/null @@ -1,45 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: controller-manager - namespace: system - labels: - control-plane: controller-manager -spec: - selector: - matchLabels: - control-plane: controller-manager - replicas: 1 - template: - metadata: - labels: - control-plane: controller-manager - spec: - containers: - - command: - - /manager - args: - - --leader-elect - - "--metrics-bind-addr=127.0.0.1:8080" - image: controller:latest - name: manager - terminationGracePeriodSeconds: 10 - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/master - - effect: NoSchedule - key: node-role.kubernetes.io/control-plane - affinity: - nodeAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 10 - preference: - matchExpressions: - - key: ${K8S_CP_LABEL:=node-role.kubernetes.io/control-plane} - operator: Exists - # remove once usage of node-role.kubernetes.io/master is removed from Kubernetes - - weight: 10 - preference: - matchExpressions: - - key: node-role.kubernetes.io/master - operator: Exists diff --git a/controlplane/eks/config/manager/manager_args_patch.yaml b/controlplane/eks/config/manager/manager_args_patch.yaml deleted file mode 100644 index 9141c61828..0000000000 --- a/controlplane/eks/config/manager/manager_args_patch.yaml +++ /dev/null @@ -1,17 +0,0 @@ -# This patch inject a sidecar container which is a HTTP proxy for the controller manager, -# it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews. -apiVersion: apps/v1 -kind: Deployment -metadata: - name: controller-manager - namespace: system -spec: - template: - spec: - containers: - - name: manager - args: - - "--metrics-bind-addr=127.0.0.1:8080" - - "--leader-elect" - - "--feature-gates=EKSEnableIAM=${EXP_EKS_IAM:=false},EKSAllowAddRoles=${EXP_EKS_ADD_ROLES:=false},MachinePool=${EXP_MACHINE_POOL:=false}" - - "--v=${EKS_CP_LOGLEVEL:=4}" diff --git a/controlplane/eks/config/rbac/auth_proxy_role.yaml b/controlplane/eks/config/rbac/auth_proxy_role.yaml deleted file mode 100644 index 618f5e4177..0000000000 --- a/controlplane/eks/config/rbac/auth_proxy_role.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: proxy-role -rules: -- apiGroups: ["authentication.k8s.io"] - resources: - - tokenreviews - verbs: ["create"] -- apiGroups: ["authorization.k8s.io"] - resources: - - subjectaccessreviews - verbs: ["create"] diff --git a/controlplane/eks/config/rbac/auth_proxy_role_binding.yaml b/controlplane/eks/config/rbac/auth_proxy_role_binding.yaml deleted file mode 100644 index ec7acc0a1b..0000000000 --- a/controlplane/eks/config/rbac/auth_proxy_role_binding.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: proxy-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: proxy-role -subjects: -- kind: ServiceAccount - name: controller-manager - namespace: system diff --git a/controlplane/eks/config/rbac/auth_proxy_service.yaml b/controlplane/eks/config/rbac/auth_proxy_service.yaml deleted file mode 100644 index 6cf656be14..0000000000 --- a/controlplane/eks/config/rbac/auth_proxy_service.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - labels: - control-plane: controller-manager - name: controller-manager-metrics-service - namespace: system -spec: - ports: - - name: https - port: 8443 - 
targetPort: https - selector: - control-plane: controller-manager diff --git a/controlplane/eks/config/rbac/awsmanagedcontrolplane_editor_role.yaml b/controlplane/eks/config/rbac/awsmanagedcontrolplane_editor_role.yaml deleted file mode 100644 index 68933d04d3..0000000000 --- a/controlplane/eks/config/rbac/awsmanagedcontrolplane_editor_role.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# permissions for end users to edit awsmanagedcontrolplanes. -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: awsmanagedcontrolplane-editor-role -rules: -- apiGroups: - - controlplane.cluster.x-k8s.io - resources: - - awsmanagedcontrolplanes - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - controlplane.cluster.x-k8s.io - resources: - - awsmanagedcontrolplanes/status - verbs: - - get diff --git a/controlplane/eks/config/rbac/awsmanagedcontrolplane_viewer_role.yaml b/controlplane/eks/config/rbac/awsmanagedcontrolplane_viewer_role.yaml deleted file mode 100644 index e52bcc3a69..0000000000 --- a/controlplane/eks/config/rbac/awsmanagedcontrolplane_viewer_role.yaml +++ /dev/null @@ -1,20 +0,0 @@ -# permissions for end users to view awsmanagedcontrolplanes. -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: awsmanagedcontrolplane-viewer-role -rules: -- apiGroups: - - controlplane.cluster.x-k8s.io - resources: - - awsmanagedcontrolplanes - verbs: - - get - - list - - watch -- apiGroups: - - controlplane.cluster.x-k8s.io - resources: - - awsmanagedcontrolplanes/status - verbs: - - get diff --git a/controlplane/eks/config/rbac/kustomization.yaml b/controlplane/eks/config/rbac/kustomization.yaml deleted file mode 100644 index 12892ff8ca..0000000000 --- a/controlplane/eks/config/rbac/kustomization.yaml +++ /dev/null @@ -1,12 +0,0 @@ -resources: -- role.yaml -- role_binding.yaml -- serviceaccount.yaml -- leader_election_role.yaml -- leader_election_role_binding.yaml -# Comment the following 4 lines if you want to disable -# the auth proxy (https://github.com/brancz/kube-rbac-proxy) -# which protects your /metrics endpoint. -- auth_proxy_service.yaml -- auth_proxy_role.yaml -- auth_proxy_role_binding.yaml diff --git a/controlplane/eks/config/rbac/leader_election_role.yaml b/controlplane/eks/config/rbac/leader_election_role.yaml deleted file mode 100644 index cf511f9f7a..0000000000 --- a/controlplane/eks/config/rbac/leader_election_role.yaml +++ /dev/null @@ -1,44 +0,0 @@ -# permissions to do leader election. 
-apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: leader-elect-role -rules: -- apiGroups: - - "" - resources: - - configmaps - verbs: - - get - - list - - watch - - create - - update - - patch - - delete -- apiGroups: - - "" - resources: - - configmaps/status - verbs: - - get - - update - - patch -- apiGroups: - - "" - resources: - - events - verbs: - - create -- apiGroups: - - "coordination.k8s.io" - resources: - - leases - verbs: - - get - - list - - watch - - create - - update - - patch - - delete diff --git a/controlplane/eks/config/rbac/leader_election_role_binding.yaml b/controlplane/eks/config/rbac/leader_election_role_binding.yaml deleted file mode 100644 index e77b3e19b4..0000000000 --- a/controlplane/eks/config/rbac/leader_election_role_binding.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: leader-elect-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: leader-elect-role -subjects: -- kind: ServiceAccount - name: controller-manager - namespace: system diff --git a/controlplane/eks/config/rbac/role.yaml b/controlplane/eks/config/rbac/role.yaml deleted file mode 100644 index 221d504d54..0000000000 --- a/controlplane/eks/config/rbac/role.yaml +++ /dev/null @@ -1,113 +0,0 @@ - ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - creationTimestamp: null - name: manager-role -rules: -- apiGroups: - - "" - resources: - - namespaces - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - secrets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - cluster.x-k8s.io - resources: - - clusters - - clusters/status - verbs: - - get - - list - - watch -- apiGroups: - - controlplane.cluster.x-k8s.io - resources: - - awsmanagedcontrolplanes - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - controlplane.cluster.x-k8s.io - resources: - - awsmanagedcontrolplanes/status - verbs: - - get - - patch - - update -- apiGroups: - - "" - resources: - - events - verbs: - - create - - get - - list - - patch - - watch -- apiGroups: - - infrastructure.cluster.x-k8s.io - resources: - - awsclustercontrolleridentities - - awsclusterroleidentities - - awsclusterstaticidentities - verbs: - - get - - list - - watch -- apiGroups: - - infrastructure.cluster.x-k8s.io - resources: - - awsmachinepools - - awsmachinepools/status - verbs: - - get - - list - - watch -- apiGroups: - - infrastructure.cluster.x-k8s.io - resources: - - awsmachines - - awsmachines/status - verbs: - - get - - list - - watch -- apiGroups: - - infrastructure.cluster.x-k8s.io - resources: - - awsmanagedclusters - - awsmanagedclusters/status - verbs: - - get - - list - - watch -- apiGroups: - - infrastructure.cluster.x-k8s.io - resources: - - awsmanagedmachinepools - - awsmanagedmachinepools/status - verbs: - - get - - list - - watch diff --git a/controlplane/eks/config/rbac/role_binding.yaml b/controlplane/eks/config/rbac/role_binding.yaml deleted file mode 100644 index 2070ede446..0000000000 --- a/controlplane/eks/config/rbac/role_binding.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: manager-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: manager-role -subjects: -- kind: ServiceAccount - name: controller-manager - namespace: system diff --git 
a/controlplane/eks/config/rbac/serviceaccount.yaml b/controlplane/eks/config/rbac/serviceaccount.yaml deleted file mode 100644 index 263e4e3b92..0000000000 --- a/controlplane/eks/config/rbac/serviceaccount.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: controller-manager - namespace: system - labels: - control-plane: controller-manager - annotations: - ${AWS_CONTROLLER_IAM_ROLE/#arn/eks.amazonaws.com/role-arn: arn} diff --git a/controlplane/eks/config/webhook/kustomization.yaml b/controlplane/eks/config/webhook/kustomization.yaml deleted file mode 100644 index 9cf26134e4..0000000000 --- a/controlplane/eks/config/webhook/kustomization.yaml +++ /dev/null @@ -1,6 +0,0 @@ -resources: -- manifests.yaml -- service.yaml - -configurations: -- kustomizeconfig.yaml diff --git a/controlplane/eks/config/webhook/kustomizeconfig.yaml b/controlplane/eks/config/webhook/kustomizeconfig.yaml deleted file mode 100644 index 25e21e3c96..0000000000 --- a/controlplane/eks/config/webhook/kustomizeconfig.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# the following config is for teaching kustomize where to look at when substituting vars. -# It requires kustomize v2.1.0 or newer to work properly. -nameReference: -- kind: Service - version: v1 - fieldSpecs: - - kind: MutatingWebhookConfiguration - group: admissionregistration.k8s.io - path: webhooks/clientConfig/service/name - - kind: ValidatingWebhookConfiguration - group: admissionregistration.k8s.io - path: webhooks/clientConfig/service/name - -namespace: -- kind: MutatingWebhookConfiguration - group: admissionregistration.k8s.io - path: webhooks/clientConfig/service/namespace - create: true -- kind: ValidatingWebhookConfiguration - group: admissionregistration.k8s.io - path: webhooks/clientConfig/service/namespace - create: true - -varReference: -- path: metadata/annotations diff --git a/controlplane/eks/config/webhook/manifests.yaml b/controlplane/eks/config/webhook/manifests.yaml deleted file mode 100644 index 628956a198..0000000000 --- a/controlplane/eks/config/webhook/manifests.yaml +++ /dev/null @@ -1,58 +0,0 @@ - ---- -apiVersion: admissionregistration.k8s.io/v1 -kind: MutatingWebhookConfiguration -metadata: - creationTimestamp: null - name: mutating-webhook-configuration -webhooks: -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: webhook-service - namespace: system - path: /mutate-controlplane-cluster-x-k8s-io-v1alpha4-awsmanagedcontrolplane - failurePolicy: Fail - matchPolicy: Equivalent - name: default.awsmanagedcontrolplanes.controlplane.cluster.x-k8s.io - rules: - - apiGroups: - - controlplane.cluster.x-k8s.io - apiVersions: - - v1alpha4 - operations: - - CREATE - - UPDATE - resources: - - awsmanagedcontrolplanes - sideEffects: None - ---- -apiVersion: admissionregistration.k8s.io/v1 -kind: ValidatingWebhookConfiguration -metadata: - creationTimestamp: null - name: validating-webhook-configuration -webhooks: -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: webhook-service - namespace: system - path: /validate-controlplane-cluster-x-k8s-io-v1alpha4-awsmanagedcontrolplane - failurePolicy: Fail - matchPolicy: Equivalent - name: validation.awsmanagedcontrolplanes.controlplane.cluster.x-k8s.io - rules: - - apiGroups: - - controlplane.cluster.x-k8s.io - apiVersions: - - v1alpha4 - operations: - - CREATE - - UPDATE - resources: - - awsmanagedcontrolplanes - sideEffects: None diff --git a/controlplane/eks/config/webhook/service.yaml 
b/controlplane/eks/config/webhook/service.yaml deleted file mode 100644 index 9bc95014fd..0000000000 --- a/controlplane/eks/config/webhook/service.yaml +++ /dev/null @@ -1,10 +0,0 @@ - -apiVersion: v1 -kind: Service -metadata: - name: webhook-service - namespace: system -spec: - ports: - - port: 443 - targetPort: webhook-server diff --git a/controlplane/eks/controllers/awsmanagedcontrolplane_controller.go b/controlplane/eks/controllers/awsmanagedcontrolplane_controller.go index 3dc1402eb0..db4032119f 100644 --- a/controlplane/eks/controllers/awsmanagedcontrolplane_controller.go +++ b/controlplane/eks/controllers/awsmanagedcontrolplane_controller.go @@ -21,22 +21,10 @@ import ( "fmt" "time" - "github.com/go-logr/logr" "github.com/pkg/errors" apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" - infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha4" - controlplanev1 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1alpha4" - infrav1exp "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1alpha4" - "sigs.k8s.io/cluster-api-provider-aws/feature" - "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope" - "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/awsnode" - "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/ec2" - "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/eks" - "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/iamauth" - "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/network" - "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/securitygroup" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" @@ -49,6 +37,18 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" + + infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha4" + controlplanev1 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1alpha4" + infrav1exp "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1alpha4" + "sigs.k8s.io/cluster-api-provider-aws/feature" + "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope" + "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/awsnode" + "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/ec2" + "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/eks" + "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/iamauth" + "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/network" + "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/securitygroup" ) const ( @@ -77,10 +77,6 @@ func (r *AWSManagedControlPlaneReconciler) SetupWithManager(ctx context.Context, For(awsManagedControlPlane). WithOptions(options). WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(log, r.WatchFilterValue)). - Watches( - &source.Kind{Type: &infrav1exp.AWSManagedCluster{}}, - handler.EnqueueRequestsFromMapFunc(r.managedClusterToManagedControlPlane(log)), - ). 
Build(r) if err != nil { @@ -102,7 +98,6 @@ func (r *AWSManagedControlPlaneReconciler) SetupWithManager(ctx context.Context, // +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;delete;patch // +kubebuilder:rbac:groups="",resources=namespaces,verbs=get;list;watch // +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch -// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmanagedclusters;awsmanagedclusters/status,verbs=get;list;watch // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmachines;awsmachines/status,verbs=get;list;watch // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmanagedmachinepools;awsmanagedmachinepools/status,verbs=get;list;watch // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmachinepools;awsmachinepools/status,verbs=get;list;watch @@ -254,7 +249,7 @@ func (r *AWSManagedControlPlaneReconciler) reconcileNormal(ctx context.Context, func (r *AWSManagedControlPlaneReconciler) reconcileDelete(ctx context.Context, managedScope *scope.ManagedControlPlaneScope) (_ ctrl.Result, reterr error) { log := ctrl.LoggerFrom(ctx) - managedScope.Info("Reconciling AWSManagedClusterPlane delete") + managedScope.Info("Reconciling AWSManagedControlPlane delete") controlPlane := managedScope.ControlPlane @@ -319,48 +314,6 @@ func (r *AWSManagedControlPlaneReconciler) ClusterToAWSManagedControlPlane(o cli return nil } -func (r *AWSManagedControlPlaneReconciler) managedClusterToManagedControlPlane(log logr.Logger) handler.MapFunc { - return func(o client.Object) []ctrl.Request { - ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) - defer cancel() - - awsManagedCluster, ok := o.(*infrav1exp.AWSManagedCluster) - if !ok { - panic(fmt.Sprintf("Expected a AWSManagedCluster but got a %T", o)) - } - - if !awsManagedCluster.ObjectMeta.DeletionTimestamp.IsZero() { - log.V(4).Info("AWSManagedCluster has a deletion timestamp, skipping mapping") - return nil - } - - cluster, err := util.GetOwnerCluster(ctx, r.Client, awsManagedCluster.ObjectMeta) - if err != nil { - log.Error(err, "failed to get owning cluster") - return nil - } - if cluster == nil { - log.V(4).Info("Owning cluster not set on AWSManagedCluster, skipping mapping") - return nil - } - - controlPlaneRef := cluster.Spec.ControlPlaneRef - if controlPlaneRef == nil || controlPlaneRef.Kind != "AWSManagedControlPlane" { - log.V(4).Info("ControlPlaneRef is nil or not AWSManagedControlPlane, skipping mapping") - return nil - } - - return []ctrl.Request{ - { - NamespacedName: types.NamespacedName{ - Name: controlPlaneRef.Name, - Namespace: controlPlaneRef.Namespace, - }, - }, - } - } -} - func (r *AWSManagedControlPlaneReconciler) dependencyCount(ctx context.Context, managedScope *scope.ManagedControlPlaneScope) (int, error) { log := ctrl.LoggerFrom(ctx) diff --git a/controlplane/eks/main.go b/controlplane/eks/main.go deleted file mode 100644 index a28fa51480..0000000000 --- a/controlplane/eks/main.go +++ /dev/null @@ -1,244 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -import ( - "context" - "errors" - "flag" - "fmt" - "math/rand" - "net/http" - "os" - "time" - - "github.com/spf13/pflag" - "k8s.io/apimachinery/pkg/runtime" - clientgoscheme "k8s.io/client-go/kubernetes/scheme" - cgrecord "k8s.io/client-go/tools/record" - "k8s.io/klog/v2" - "k8s.io/klog/v2/klogr" - infrav1alpha3 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha3" - infrav1alpha4 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha4" - controlplanev1alpha3 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1alpha3" - controlplanev1alpha4 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1alpha4" - "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/controllers" - expinfrav1alpha3 "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1alpha3" - expinfrav1alpha4 "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1alpha4" - "sigs.k8s.io/cluster-api-provider-aws/feature" - "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/endpoints" - "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope" - "sigs.k8s.io/cluster-api-provider-aws/pkg/record" - "sigs.k8s.io/cluster-api-provider-aws/version" - clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/healthz" - // +kubebuilder:scaffold:imports -) - -var ( - scheme = runtime.NewScheme() - setupLog = ctrl.Log.WithName("setup") -) - -func init() { - _ = clientgoscheme.AddToScheme(scheme) - _ = controlplanev1alpha3.AddToScheme(scheme) - _ = controlplanev1alpha4.AddToScheme(scheme) - _ = infrav1alpha3.AddToScheme(scheme) - _ = infrav1alpha4.AddToScheme(scheme) - _ = expinfrav1alpha3.AddToScheme(scheme) - _ = expinfrav1alpha4.AddToScheme(scheme) - _ = clusterv1.AddToScheme(scheme) - // +kubebuilder:scaffold:scheme -} - -var ( - metricsBindAddr string - enableLeaderElection bool - watchNamespace string - watchFilterValue string - profilerAddress string - eksControlPlaneConcurrency int - syncPeriod time.Duration - webhookPort int - webhookCertDir string - healthAddr string - serviceEndpoints string - - maxEKSSyncPeriod = time.Minute * 10 - errMaxSyncPeriodExceeded = errors.New("sync period greater than maximum allowed") - errEKSInvalidFlags = errors.New("invalid EKS flag combination") -) - -// InitFlags initializes this manager's flags. -func InitFlags(fs *pflag.FlagSet) { - fs.StringVar(&metricsBindAddr, "metrics-bind-addr", ":8080", - "The address the metric endpoint binds to.") - - fs.BoolVar(&enableLeaderElection, "leader-elect", false, - "Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.") - - fs.StringVar(&watchNamespace, "namespace", "", - "Namespace that the controller watches to reconcile objects. If unspecified, the controller watches for objects across all namespaces.") - - fs.StringVar(&profilerAddress, "profiler-address", "", - "Bind address to expose the pprof profiler (e.g. 
localhost:6060)") - - fs.IntVar(&eksControlPlaneConcurrency, "ekscontrolplane-concurrency", 10, - "Number of EKS control planes to process simultaneously") - - fs.DurationVar(&syncPeriod, "sync-period", 10*time.Minute, - "The minimum interval at which watched resources are reconciled (e.g. 15m)") - - fs.IntVar(&webhookPort, "webhook-port", 9443, - "Webhook Server port, disabled by default. When enabled, the manager will only work as webhook server, no reconcilers are installed.") - - fs.StringVar(&webhookCertDir, "webhook-cert-dir", "/tmp/k8s-webhook-server/serving-certs/", - "Webhook cert dir, only used when webhook-port is specified.") - - fs.StringVar(&serviceEndpoints, "service-endpoints", "", - "Set custom AWS service endpoins in semi-colon separated format: ${SigningRegion1}:${ServiceID1}=${URL},${ServiceID2}=${URL};${SigningRegion2}...") - - fs.StringVar( - &watchFilterValue, - "watch-filter", - "", - fmt.Sprintf("Label value that the controller watches to reconcile cluster-api objects. Label key is always %s. If unspecified, the controller watches for all cluster-api objects.", clusterv1.WatchLabel), - ) - - feature.MutableGates.AddFlag(fs) -} - -func main() { - klog.InitFlags(nil) - - rand.Seed(time.Now().UnixNano()) - InitFlags(pflag.CommandLine) - pflag.CommandLine.AddGoFlagSet(flag.CommandLine) - pflag.Parse() - - ctrl.SetLogger(klogr.New()) - - if watchNamespace != "" { - setupLog.Info("Watching cluster-api objects only in namespace for reconciliation", "namespace", watchNamespace) - } - - if profilerAddress != "" { - klog.Infof("Profiler listening for requests at %s", profilerAddress) - go func() { - klog.Info(http.ListenAndServe(profilerAddress, nil)) - }() - } - - if syncPeriod > maxEKSSyncPeriod { - setupLog.Error(errMaxSyncPeriodExceeded, "sync period exceeded maximum allowed when using EKS", "max-sync-period", maxEKSSyncPeriod) - os.Exit(1) - } - - // Parse service endpoints. - AWSServiceEndpoints, err := endpoints.ParseFlag(serviceEndpoints) - if err != nil { - setupLog.Error(err, "unable to parse service endpoints", "controller", "AWSCluster") - os.Exit(1) - } - - enableIAM := feature.Gates.Enabled(feature.EKSEnableIAM) - allowAddRoles := feature.Gates.Enabled(feature.EKSAllowAddRoles) - setupLog.Info("EKS IAM role creation", "enabled", enableIAM) - setupLog.Info("EKS IAM additional roles", "enabled", allowAddRoles) - if allowAddRoles && !enableIAM { - setupLog.Error(errEKSInvalidFlags, "cannot use EKSAllowAddRoles flag without EKSEnableIAM") - os.Exit(1) - } - - // Machine and cluster operations can create enough events to trigger the event recorder spam filter - // Setting the burst size higher ensures all events will be recorded and submitted to the API - broadcaster := cgrecord.NewBroadcasterWithCorrelatorOptions(cgrecord.CorrelatorOptions{ - BurstSize: 100, - }) - - restConfig := ctrl.GetConfigOrDie() - restConfig.UserAgent = "cluster-api-provider-aws-controller" - mgr, err := ctrl.NewManager(restConfig, ctrl.Options{ - Scheme: scheme, - MetricsBindAddress: metricsBindAddr, - LeaderElection: enableLeaderElection, - LeaderElectionID: "eks-controlplane-manager-leader-elect-capa", - SyncPeriod: &syncPeriod, - Namespace: watchNamespace, - EventBroadcaster: broadcaster, - CertDir: webhookCertDir, - Port: webhookPort, - HealthProbeBindAddress: healthAddr, - }) - if err != nil { - setupLog.Error(err, "unable to start manager") - os.Exit(1) - } - - // Initialize event recorder. 
- record.InitFromRecorder(mgr.GetEventRecorderFor("aws-controller")) - - setupLog.V(1).Info(fmt.Sprintf("%+v\n", feature.Gates)) - ctx := ctrl.SetupSignalHandler() - setupReconcilers(ctx, mgr, enableIAM, allowAddRoles, AWSServiceEndpoints) - setupWebhooks(mgr) - - // +kubebuilder:scaffold:builder - - if err := mgr.AddReadyzCheck("ping", healthz.Ping); err != nil { - setupLog.Error(err, "unable to create ready check") - os.Exit(1) - } - - if err := mgr.AddHealthzCheck("ping", healthz.Ping); err != nil { - setupLog.Error(err, "unable to create health check") - os.Exit(1) - } - - setupLog.Info("starting manager", "version", version.Get().String()) - if err := mgr.Start(ctx); err != nil { - setupLog.Error(err, "problem running manager") - os.Exit(1) - } -} - -func setupReconcilers(ctx context.Context, mgr ctrl.Manager, enableIAM bool, allowAddRoles bool, serviceEndpoints []scope.ServiceEndpoint) { - if err := (&controllers.AWSManagedControlPlaneReconciler{ - Client: mgr.GetClient(), - EnableIAM: enableIAM, - AllowAdditionalRoles: allowAddRoles, - Endpoints: serviceEndpoints, - WatchFilterValue: watchFilterValue, - }).SetupWithManager(ctx, mgr, concurrency(eksControlPlaneConcurrency)); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "AWSManagedControlPlane") - os.Exit(1) - } -} - -func setupWebhooks(mgr ctrl.Manager) { - if err := (&controlplanev1alpha4.AWSManagedControlPlane{}).SetupWebhookWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create webhook", "webhook", "AWSManagedControlPlane") - os.Exit(1) - } -} - -func concurrency(c int) controller.Options { - return controller.Options{MaxConcurrentReconciles: c} -} diff --git a/docs/book/src/development/development.md b/docs/book/src/development/development.md index 91a3e15d5d..169887af43 100644 --- a/docs/book/src/development/development.md +++ b/docs/book/src/development/development.md @@ -1,7 +1,9 @@ # Developer Guide ## Initial setup for development environment + ### Install prerequisites + 1. Install [go][go] - Get the latest patch version for go v1.16. 2. Install [jq][jq] @@ -15,12 +17,13 @@ - `choco install kustomize` on Windows. - [install instructions][kustomizelinux] on Linux 5. Install [envsubst][envsubst] -7. Install make. -8. Install direnv +6. Install make. +7. Install direnv - `brew install direnv` on macOS. ### Get the source -Fork cluster-api-provider-aws repo: https://github.com/kubernetes-sigs/cluster-api-provider-aws + +Fork the [cluster-api-provider-aws repo](https://github.com/kubernetes-sigs/cluster-api-provider-aws): ```bash cd "$(go env GOPATH)" @@ -32,23 +35,23 @@ git remote add upstream git@github.com:kubernetes-sigs/cluster-api-provider-aws. 
git fetch upstream ``` -### Build clusterawsadm +### Build clusterawsadm -Build `clusterawsadm` in `cluster-api-provider-aws` +Build `clusterawsadm` in `cluster-api-provider-aws`: ```bash -$ cd "$(go env GOPATH)"/src/sigs.k8s.io/cluster-api-provider-aws -$ make clusterawsadm -$ mv ./bin/clusterawsadm /usr/local/bin/clusterawsadm +cd "$(go env GOPATH)"/src/sigs.k8s.io/cluster-api-provider-aws +make clusterawsadm +mv ./bin/clusterawsadm /usr/local/bin/clusterawsadm ``` -### Setup AWS Environment -**For cluster-api-provider-aws managed clusters** +### Setup AWS Environment Create bootstrap file and bootstrap IAM roles and policies using `clusterawsadm` ```bash $ cat config-bootstrap.yaml + apiVersion: bootstrap.aws.infrastructure.cluster.x-k8s.io/v1alpha1 kind: AWSIAMConfiguration spec: @@ -59,9 +62,9 @@ $ clusterawsadm bootstrap iam create-cloudformation-stack Attempting to create AWS CloudFormation stack cluster-api-provider-aws-sigs-k8s-io ``` -**For EKS clusters** +#### Customizing the bootstrap permission -Create IAM Resources that will be needed for bootstrapping EKS +The IAM permissions can be customized by using a configuration file with **clusterawsadm**. For example, to create the default IAM role for use with managed machine pools: ```bash $ cat config-bootstrap.yaml @@ -71,22 +74,23 @@ spec: bootstrapUser: enable: true eks: - enable: true iamRoleCreation: false # Set to true if you plan to use the EKSEnableIAM feature flag to enable automatic creation of IAM roles - defaultControlPlaneRole: - disable: false # Set to false to enable creation of the default control plane role managedMachinePool: disable: false # Set to false to enable creation of the default node role for managed machine pools ``` -create IAM Resources that will be needed for bootstrapping EKS +Use the configuration file to create the additional IAM role: ```bash $ ./bin/clusterawsadm bootstrap iam create-cloudformation-stack --config=config-bootstrap.yaml Attempting to create AWS CloudFormation stack cluster-api-provider-aws-sigs-k8s-io ``` -This will create cloudformation stack for those IAM resources +> If you don't plan on using EKS then see the [documentation on disabling EKS support](../topics/eks/disabling.md). + +#### Sample Output + +When creating the CloudFormation stack using **clusterawsadm** you will see output similar to this: ```bash Following resources are in the stack: @@ -112,7 +116,6 @@ AWS::IAM::User |bootstrapper.cluster-api-provider-aws.sigs.k8s.io - Create a security credentials in the `bootstrapper.cluster-api-provider-aws.sigs.k8s.io` IAM user that is created by cloud-formation stack and copy the `AWS_ACCESS_KEY_ID` and `AWS_SECRETS_ACCESS_KEY`. (Or use admin user credentials instead) - - Set AWS_B64ENCODED_CREDENTIALS environment variable ```bash @@ -122,7 +125,6 @@ AWS::IAM::User |bootstrapper.cluster-api-provider-aws.sigs.k8s.io export AWS_B64ENCODED_CREDENTIALS=$(clusterawsadm bootstrap credentials encode-as-profile) ``` - ## Running local management cluster for development Before the next steps, make sure [initial setup for development environment][Initial-setup-for-development-environment] steps are complete. @@ -142,6 +144,7 @@ Many of the Cluster API engineers use it for quick iteration. Please see our [Ti ### Option 2: The Old-fashioned way Running cluster-api and cluster-api-provider-aws controllers in a kind cluster: + 1. Create a local kind cluster - `kind create cluster` 2. 
Install core cluster-api controllers (the version must match the cluster-api version in [go.mod][go.mod]) @@ -152,7 +155,7 @@ Running cluster-api and cluster-api-provider-aws controllers in a kind cluster: - `RELEASE_TAG="e2e" make release-manifests` 5. Apply the manifests - `kubectl apply -f ./out/infrastructure.yaml` - + [go]: https://golang.org/doc/install [jq]: https://stedolan.github.io/jq/download/ [go.mod]: https://github.com/kubernetes-sigs/cluster-api-provider-aws/blob/master/go.mod diff --git a/docs/book/src/topics/eks/disabling.md b/docs/book/src/topics/eks/disabling.md new file mode 100644 index 0000000000..5d197ca49b --- /dev/null +++ b/docs/book/src/topics/eks/disabling.md @@ -0,0 +1,30 @@ +# Disabling EKS Support + +Support for EKS is enabled by default when you use the AWS infrastructure provider. If you never plan to use EKS then you can disable EKS support. The following sections describe the process. + +## Disabling IAM objects for EKS + +To ensure that there are no IAM objects created for EKS, you will need to use a configuration file with `clusterawsadm` and specify that EKS is disabled: + +```yaml +apiVersion: bootstrap.aws.infrastructure.cluster.x-k8s.io/v1alpha1 +kind: AWSIAMConfiguration +spec: + eks: + disable: true +``` + +and then use that configuration file: + +```bash +clusterawsadm bootstrap iam create-cloudformation-stack --config bootstrap-config.yaml +``` + +## Disabling EKS in the provider + +Disabling EKS support is done via the **EKS** feature flag by setting it to false. This can be done before running `clusterctl init` by using the **CAPA_EKS** environment variable: + +```shell +export CAPA_EKS=false +clusterctl init --infrastructure aws +``` diff --git a/docs/book/src/topics/eks/enabling.md b/docs/book/src/topics/eks/enabling.md index 7efffaac8e..88e058b6b2 100644 --- a/docs/book/src/topics/eks/enabling.md +++ b/docs/book/src/topics/eks/enabling.md @@ -1,32 +1,58 @@ # Enabling EKS Support -You must explicitly enable the EKS support in the provider by doing the following: +Support for EKS is enabled by default when you use the AWS infrastructure provider. For example: -- Enabling support in the infrastructure manager (capa-controller-manager) by enabling the **EKS** feature flags (see below) -- Add the EKS Control Plane Provider (aws-eks) -- Add the EKS Bootstrap Provider (aws-eks) +```shell +clusterctl init --infrastructure aws +``` + +## Enabling optional **EKS** features + +There are additional EKS experimental features that are disabled by default. The sections below cover how to enable these features. + +### Machine Pools + +To enable support for machine pools, the **MachinePool** feature flag must be set to **true**. This can be done using the **EXP_MACHINE_POOL** environment variable: + +```shell +export EXP_MACHINE_POOL=true +clusterctl init --infrastructure aws +``` -## Enabling the **EKS** features +See the [machine pool documentation](../machinepools.md) for further information. -Enabling the **EKS** functionality is done using the following feature flags: +NOTE: you will need to enable the creation of the default IAM role. The easiest way is to use `clusterawsadm`; for instructions, see the [prerequisites](../using-clusterawsadm-to-fulfill-prerequisites.md). -- **EKS** - this enables the core EKS functionality and is required for the other EKS feature flags -- **EKSEnableIAM** - by enabling this the controllers will create any IAM roles required by EKS and the roles will be cluster specific.
If this isn't enabled then you can manually create a role and specify the role name in the AWSManagedControlPlane spec otherwise the default rolename will be used. -- **EKSAllowAddRoles** - by enabling this you can add additional roles to the control plane role that is created. This has no affect unless used wtih __EKSEnableIAM__ +### IAM Roles Per Cluster -Enabling the feature flags can be done using `clusterctl` by setting the following environment variables to **true** (they all default to **false**): +By default, EKS clusters will use the same IAM roles (i.e. the control plane and node group roles). There is a feature that allows each cluster to have its own IAM roles. This is done by enabling the **EKSEnableIAM** feature flag. This can be done before running `clusterctl init` by using the **CAPA_EKS_IAM** environment variable: + +```shell +export CAPA_EKS_IAM=true +clusterctl init --infrastructure aws +``` + +NOTE: you will need the correct prerequisites for this. The easiest way is to use `clusterawsadm` and set `iamRoleCreation` to true; for instructions, see the [prerequisites](../using-clusterawsadm-to-fulfill-prerequisites.md). + +### Additional Control Plane Roles + +You can add additional roles to the control plane role that is created for an EKS cluster. To use this you must enable the **EKSAllowAddRoles** feature flag. This can be done before running `clusterctl init` by using the **CAPA_EKS_ADD_ROLES** environment variable: + +```shell +export CAPA_EKS_IAM=true +export CAPA_EKS_ADD_ROLES=true +clusterctl init --infrastructure aws +``` -- **EXP_EKS** - this is used to set the value of the **EKS** feature flag -- **EXP_EKS_IAM** - this is used to set the value of the **EKSEnableIAM** feature flag -- **EXP_EKS_ADD_ROLES** - this is used to set the value of the **EKSAllowAddRoles** feature flag +NOTE: to use this feature you must also enable the **EKSEnableIAM** feature via **CAPA_EKS_IAM**. -As an example: +### EKS Fargate Profiles -```bash -export EXP_EKS=true -export EXP_EKS_IAM=true -export EXP_EKS_ADD_ROLES=true +You can use Fargate Profiles with EKS. To use this you must enable the **EKSFargate** feature flag. This can be done before running `clusterctl init` by using the **EXP_EKS_FARGATE** environment variable: -clusterctl init --infrastructure=aws --control-plane aws-eks --bootstrap aws-eks +```shell +export EXP_EKS_FARGATE=true +clusterctl init --infrastructure aws ``` +NOTE: you will need to enable the creation of the default Fargate IAM role. The easiest way is to use `clusterawsadm` with the `fargate` configuration option; for instructions, see the [prerequisites](../using-clusterawsadm-to-fulfill-prerequisites.md). diff --git a/docs/book/src/topics/eks/index.md b/docs/book/src/topics/eks/index.md index c3e5b233d0..9312cc4eaa 100644 --- a/docs/book/src/topics/eks/index.md +++ b/docs/book/src/topics/eks/index.md @@ -1,21 +1,25 @@ # EKS Support in the AWS Provider -- **Feature status:** Experimental +- **Feature status:** Stable - **Feature gate (required):** EKS=true - **Feature gate (optional):** EKSEnableIAM=true,EKSAllowAddRoles=true ## Overview -Experimental support for EKS has been introduced in the AWS provider. Currently the following features are supported: +The AWS provider supports creating EKS-based clusters.
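For illustration, the control plane of an EKS cluster is declared with an AWSManagedControlPlane resource; the minimal sketch below uses placeholder values and assumes typical spec fields (`region`, `sshKeyName`, `version`), so consult the templates folder for complete, authoritative examples:

```yaml
# Hypothetical minimal AWSManagedControlPlane; names and values are placeholders.
apiVersion: controlplane.cluster.x-k8s.io/v1alpha4
kind: AWSManagedControlPlane
metadata:
  name: my-cluster-control-plane
spec:
  region: eu-west-1       # AWS region in which to create the EKS cluster
  sshKeyName: default     # existing EC2 key pair used for node SSH access
  version: v1.20.2        # desired Kubernetes version of the EKS control plane
```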
Currently the following features are supported: - Provisioning/managing an Amazon EKS Cluster - Upgrading the Kubernetes version of the EKS Cluster - Attaching a self-managed machines as nodes to the EKS cluster -- Creating a machine pool and attaching it to the EKS cluster. See [machine pool docs for details](../machinepools.md) +- Creating a machine pool and attaching it to the EKS cluster. See [machine pool docs for details](../machinepools.md). - Creating a managed machine pool and attaching it to the EKS cluster. See [machine pool docs for details](../machinepools.md) - Managing "EKS Addons". See [addons for further details](./addons.md) +- Creating an EKS Fargate profile +- Managing aws-iam-authenticator configuration -The implementation introduces new CRD kinds: +Note: machine pools and Fargate profiles are still classed as experimental. + +The implementation introduces the following CRD kinds: - AWSManagedControlPlane - specifies the EKS Cluster in AWS and used by the Cluster API AWS Managed Control plane (MACP) - AWSManagedMachinePool - defines the managed node pool for the cluster @@ -23,11 +27,11 @@ And a number of new templates are available in the templates folder for creating a managed workload cluster. - ## SEE ALSO * [Prerequisites](prerequisites.md) * [Enabling EKS Support](enabling.md) +* [Disabling EKS Support](disabling.md) * [Creating a cluster](creating-a-cluster.md) * [Using EKS Console](eks-console.md) * [Using EKS Addons](addons.md) diff --git a/docs/book/src/topics/eks/prerequisites.md b/docs/book/src/topics/eks/prerequisites.md index 6f7ecea117..c00520210f 100644 --- a/docs/book/src/topics/eks/prerequisites.md +++ b/docs/book/src/topics/eks/prerequisites.md @@ -2,6 +2,6 @@ To use EKS you must give the controller the required permissions. The easiest way to do this is by using `clusterawasadm`. For instructions on how to do this see the [prerequisites](../using-clusterawsadm-to-fulfill-prerequisites.md). -When using `clusterawsadm` and enabling EKS support a new IAM role will be created for you called **eks-controlplane.cluster-api-provider-aws.sigs.k8s.io**. This role is the IAM role that will be used for the EKS control plane if you don't specify your own role and if **EKSEnableIAM** isn't enabled. +When using `clusterawsadm` with EKS support enabled, a new IAM role called **eks-controlplane.cluster-api-provider-aws.sigs.k8s.io** will be created for you. This role will be used for the EKS control plane if you don't specify your own role and if **EKSEnableIAM** isn't enabled (see the [enabling docs](enabling.md) for further information). Additionally using `clusterawsadm` will add permissions to the **controllers.cluster-api-provider-aws.sigs.k8s.io** policy for EKS to function properly. diff --git a/docs/book/src/topics/using-clusterawsadm-to-fulfill-prerequisites.md b/docs/book/src/topics/using-clusterawsadm-to-fulfill-prerequisites.md index 0273bdf119..9b039c0c01 100644 --- a/docs/book/src/topics/using-clusterawsadm-to-fulfill-prerequisites.md +++ b/docs/book/src/topics/using-clusterawsadm-to-fulfill-prerequisites.md @@ -71,19 +71,18 @@ These will be added to the control plane and node roles respectively when they a #### With EKS Support -If you want to use the the EKS support in the provider then you will need to enable these features via the configuration file. For example: +The prerequisites for EKS are enabled by default.
However, if you want to use some of the optional features of EKS (see [here](eks/enabling.md) for more information on what these are) then you will need to enable these features via the configuration file. For example: ```yaml apiVersion: bootstrap.aws.infrastructure.cluster.x-k8s.io/v1alpha1 kind: AWSIAMConfiguration spec: eks: - enable: true iamRoleCreation: false # Set to true if you plan to use the EKSEnableIAM feature flag to enable automatic creation of IAM roles - defaultControlPlaneRole: - disable: false # Set to false to enable creation of the default control plane role managedMachinePool: disable: false # Set to false to enable creation of the default node role for managed machine pools + fargate: + disable: false # Set to false to enable creation of the default role for the fargate profiles ``` and then use that configuration file: diff --git a/exp/api/v1alpha3/conversion.go b/exp/api/v1alpha3/conversion.go index 459187090e..b1c2530d67 100644 --- a/exp/api/v1alpha3/conversion.go +++ b/exp/api/v1alpha3/conversion.go @@ -121,34 +121,6 @@ func (r *AWSManagedMachinePoolList) ConvertFrom(srcRaw conversion.Hub) error { return Convert_v1alpha4_AWSManagedMachinePoolList_To_v1alpha3_AWSManagedMachinePoolList(src, r, nil) } -// ConvertTo converts the v1alpha3 AWSManagedCluster receiver to a v1alpha4 AWSManagedCluster. -func (r *AWSManagedCluster) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1alpha4.AWSManagedCluster) - - return Convert_v1alpha3_AWSManagedCluster_To_v1alpha4_AWSManagedCluster(r, dst, nil) -} - -// ConvertFrom converts the v1alpha4 AWSManagedCluster receiver to a v1alpha3 AWSManagedCluster. -func (r *AWSManagedCluster) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1alpha4.AWSManagedCluster) - - return Convert_v1alpha4_AWSManagedCluster_To_v1alpha3_AWSManagedCluster(src, r, nil) -} - -// ConvertTo converts the v1alpha3 AWSManagedClusterList receiver to a v1alpha4 AWSManagedClusterList. -func (r *AWSManagedClusterList) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1alpha4.AWSManagedClusterList) - - return Convert_v1alpha3_AWSManagedClusterList_To_v1alpha4_AWSManagedClusterList(r, dst, nil) -} - -// ConvertFrom converts the v1alpha4 AWSManagedClusterList receiver to a v1alpha3 AWSManagedClusterList. -func (r *AWSManagedClusterList) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1alpha4.AWSManagedClusterList) - - return Convert_v1alpha4_AWSManagedClusterList_To_v1alpha3_AWSManagedClusterList(src, r, nil) -} - // ConvertTo converts the v1alpha3 AWSFargateProfile receiver to a v1alpha4 AWSFargateProfile. func (r *AWSFargateProfile) ConvertTo(dstRaw conversion.Hub) error { dst := dstRaw.(*v1alpha4.AWSFargateProfile) diff --git a/exp/api/v1alpha3/conversion_test.go b/exp/api/v1alpha3/conversion_test.go index 2edc74cad2..e1192611d0 100644 --- a/exp/api/v1alpha3/conversion_test.go +++ b/exp/api/v1alpha3/conversion_test.go @@ -35,12 +35,12 @@ func fuzzFuncs(_ runtimeserializer.CodecFactory) []interface{} { } } -func AWSMachinePoolFuzzer(obj *AWSMachinePool, c fuzz.Continue){ +func AWSMachinePoolFuzzer(obj *AWSMachinePool, c fuzz.Continue) { c.FuzzNoCustom(obj) // AWSMachinePool.Spec.AWSLaunchTemplate.AMI.ARN and AWSMachinePool.Spec.AWSLaunchTemplate.AMI.Filters has been removed in v1alpha4, so setting it to nil in order to avoid v1alpha3 --> v1alpha4 --> v1alpha3 round trip errors. 
- obj.Spec.AWSLaunchTemplate.AMI.ARN= nil - obj.Spec.AWSLaunchTemplate.AMI.Filters= nil + obj.Spec.AWSLaunchTemplate.AMI.ARN = nil + obj.Spec.AWSLaunchTemplate.AMI.Filters = nil } func TestFuzzyConversion(t *testing.T) { @@ -50,9 +50,9 @@ func TestFuzzyConversion(t *testing.T) { g.Expect(v1alpha4.AddToScheme(scheme)).To(Succeed()) t.Run("for AWSMachinePool", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Scheme: scheme, - Hub: &v1alpha4.AWSMachinePool{}, - Spoke: &AWSMachinePool{}, + Scheme: scheme, + Hub: &v1alpha4.AWSMachinePool{}, + Spoke: &AWSMachinePool{}, FuzzerFuncs: []fuzzer.FuzzerFuncs{fuzzFuncs}, })) @@ -62,12 +62,6 @@ func TestFuzzyConversion(t *testing.T) { Spoke: &AWSManagedMachinePool{}, })) - t.Run("for AWSManagedCluster", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Scheme: scheme, - Hub: &v1alpha4.AWSManagedCluster{}, - Spoke: &AWSManagedCluster{}, - })) - t.Run("for AWSFargateProfile", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ Scheme: scheme, Hub: &v1alpha4.AWSFargateProfile{}, diff --git a/exp/api/v1alpha3/webhook_suite_test.go b/exp/api/v1alpha3/webhook_suite_test.go index 016757b241..e343e768b8 100644 --- a/exp/api/v1alpha3/webhook_suite_test.go +++ b/exp/api/v1alpha3/webhook_suite_test.go @@ -74,9 +74,6 @@ func setup() { if err := (&expv1alpha4.AWSMachinePool{}).SetupWebhookWithManager(testEnv); err != nil { panic(fmt.Sprintf("Unable to setup AWSMachinePool webhook: %v", err)) } - if err := (&expv1alpha4.AWSManagedCluster{}).SetupWebhookWithManager(testEnv); err != nil { - panic(fmt.Sprintf("Unable to setup AWSManagedCluster webhook: %v", err)) - } if err := (&expv1alpha4.AWSManagedMachinePool{}).SetupWebhookWithManager(testEnv); err != nil { panic(fmt.Sprintf("Unable to setup AWSManagedMachinePool webhook: %v", err)) } diff --git a/exp/api/v1alpha3/zz_generated.conversion.go b/exp/api/v1alpha3/zz_generated.conversion.go index a60c11bc36..f1817f3b81 100644 --- a/exp/api/v1alpha3/zz_generated.conversion.go +++ b/exp/api/v1alpha3/zz_generated.conversion.go @@ -120,46 +120,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*AWSManagedCluster)(nil), (*v1alpha4.AWSManagedCluster)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_AWSManagedCluster_To_v1alpha4_AWSManagedCluster(a.(*AWSManagedCluster), b.(*v1alpha4.AWSManagedCluster), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.AWSManagedCluster)(nil), (*AWSManagedCluster)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_AWSManagedCluster_To_v1alpha3_AWSManagedCluster(a.(*v1alpha4.AWSManagedCluster), b.(*AWSManagedCluster), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*AWSManagedClusterList)(nil), (*v1alpha4.AWSManagedClusterList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_AWSManagedClusterList_To_v1alpha4_AWSManagedClusterList(a.(*AWSManagedClusterList), b.(*v1alpha4.AWSManagedClusterList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.AWSManagedClusterList)(nil), (*AWSManagedClusterList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_AWSManagedClusterList_To_v1alpha3_AWSManagedClusterList(a.(*v1alpha4.AWSManagedClusterList), b.(*AWSManagedClusterList), scope) - }); err != nil { - return err - } - if 
err := s.AddGeneratedConversionFunc((*AWSManagedClusterSpec)(nil), (*v1alpha4.AWSManagedClusterSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_AWSManagedClusterSpec_To_v1alpha4_AWSManagedClusterSpec(a.(*AWSManagedClusterSpec), b.(*v1alpha4.AWSManagedClusterSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.AWSManagedClusterSpec)(nil), (*AWSManagedClusterSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_AWSManagedClusterSpec_To_v1alpha3_AWSManagedClusterSpec(a.(*v1alpha4.AWSManagedClusterSpec), b.(*AWSManagedClusterSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*AWSManagedClusterStatus)(nil), (*v1alpha4.AWSManagedClusterStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_AWSManagedClusterStatus_To_v1alpha4_AWSManagedClusterStatus(a.(*AWSManagedClusterStatus), b.(*v1alpha4.AWSManagedClusterStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha4.AWSManagedClusterStatus)(nil), (*AWSManagedClusterStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_AWSManagedClusterStatus_To_v1alpha3_AWSManagedClusterStatus(a.(*v1alpha4.AWSManagedClusterStatus), b.(*AWSManagedClusterStatus), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*AWSManagedMachinePool)(nil), (*v1alpha4.AWSManagedMachinePool)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha3_AWSManagedMachinePool_To_v1alpha4_AWSManagedMachinePool(a.(*AWSManagedMachinePool), b.(*v1alpha4.AWSManagedMachinePool), scope) }); err != nil { @@ -703,126 +663,6 @@ func Convert_v1alpha4_AWSMachinePoolStatus_To_v1alpha3_AWSMachinePoolStatus(in * return autoConvert_v1alpha4_AWSMachinePoolStatus_To_v1alpha3_AWSMachinePoolStatus(in, out, s) } -func autoConvert_v1alpha3_AWSManagedCluster_To_v1alpha4_AWSManagedCluster(in *AWSManagedCluster, out *v1alpha4.AWSManagedCluster, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha3_AWSManagedClusterSpec_To_v1alpha4_AWSManagedClusterSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1alpha3_AWSManagedClusterStatus_To_v1alpha4_AWSManagedClusterStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha3_AWSManagedCluster_To_v1alpha4_AWSManagedCluster is an autogenerated conversion function. -func Convert_v1alpha3_AWSManagedCluster_To_v1alpha4_AWSManagedCluster(in *AWSManagedCluster, out *v1alpha4.AWSManagedCluster, s conversion.Scope) error { - return autoConvert_v1alpha3_AWSManagedCluster_To_v1alpha4_AWSManagedCluster(in, out, s) -} - -func autoConvert_v1alpha4_AWSManagedCluster_To_v1alpha3_AWSManagedCluster(in *v1alpha4.AWSManagedCluster, out *AWSManagedCluster, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha4_AWSManagedClusterSpec_To_v1alpha3_AWSManagedClusterSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1alpha4_AWSManagedClusterStatus_To_v1alpha3_AWSManagedClusterStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha4_AWSManagedCluster_To_v1alpha3_AWSManagedCluster is an autogenerated conversion function. 
-func Convert_v1alpha4_AWSManagedCluster_To_v1alpha3_AWSManagedCluster(in *v1alpha4.AWSManagedCluster, out *AWSManagedCluster, s conversion.Scope) error { - return autoConvert_v1alpha4_AWSManagedCluster_To_v1alpha3_AWSManagedCluster(in, out, s) -} - -func autoConvert_v1alpha3_AWSManagedClusterList_To_v1alpha4_AWSManagedClusterList(in *AWSManagedClusterList, out *v1alpha4.AWSManagedClusterList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]v1alpha4.AWSManagedCluster, len(*in)) - for i := range *in { - if err := Convert_v1alpha3_AWSManagedCluster_To_v1alpha4_AWSManagedCluster(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_v1alpha3_AWSManagedClusterList_To_v1alpha4_AWSManagedClusterList is an autogenerated conversion function. -func Convert_v1alpha3_AWSManagedClusterList_To_v1alpha4_AWSManagedClusterList(in *AWSManagedClusterList, out *v1alpha4.AWSManagedClusterList, s conversion.Scope) error { - return autoConvert_v1alpha3_AWSManagedClusterList_To_v1alpha4_AWSManagedClusterList(in, out, s) -} - -func autoConvert_v1alpha4_AWSManagedClusterList_To_v1alpha3_AWSManagedClusterList(in *v1alpha4.AWSManagedClusterList, out *AWSManagedClusterList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]AWSManagedCluster, len(*in)) - for i := range *in { - if err := Convert_v1alpha4_AWSManagedCluster_To_v1alpha3_AWSManagedCluster(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_v1alpha4_AWSManagedClusterList_To_v1alpha3_AWSManagedClusterList is an autogenerated conversion function. -func Convert_v1alpha4_AWSManagedClusterList_To_v1alpha3_AWSManagedClusterList(in *v1alpha4.AWSManagedClusterList, out *AWSManagedClusterList, s conversion.Scope) error { - return autoConvert_v1alpha4_AWSManagedClusterList_To_v1alpha3_AWSManagedClusterList(in, out, s) -} - -func autoConvert_v1alpha3_AWSManagedClusterSpec_To_v1alpha4_AWSManagedClusterSpec(in *AWSManagedClusterSpec, out *v1alpha4.AWSManagedClusterSpec, s conversion.Scope) error { - if err := Convert_v1alpha3_APIEndpoint_To_v1alpha4_APIEndpoint(&in.ControlPlaneEndpoint, &out.ControlPlaneEndpoint, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha3_AWSManagedClusterSpec_To_v1alpha4_AWSManagedClusterSpec is an autogenerated conversion function. -func Convert_v1alpha3_AWSManagedClusterSpec_To_v1alpha4_AWSManagedClusterSpec(in *AWSManagedClusterSpec, out *v1alpha4.AWSManagedClusterSpec, s conversion.Scope) error { - return autoConvert_v1alpha3_AWSManagedClusterSpec_To_v1alpha4_AWSManagedClusterSpec(in, out, s) -} - -func autoConvert_v1alpha4_AWSManagedClusterSpec_To_v1alpha3_AWSManagedClusterSpec(in *v1alpha4.AWSManagedClusterSpec, out *AWSManagedClusterSpec, s conversion.Scope) error { - if err := Convert_v1alpha4_APIEndpoint_To_v1alpha3_APIEndpoint(&in.ControlPlaneEndpoint, &out.ControlPlaneEndpoint, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha4_AWSManagedClusterSpec_To_v1alpha3_AWSManagedClusterSpec is an autogenerated conversion function. 
-func Convert_v1alpha4_AWSManagedClusterSpec_To_v1alpha3_AWSManagedClusterSpec(in *v1alpha4.AWSManagedClusterSpec, out *AWSManagedClusterSpec, s conversion.Scope) error { - return autoConvert_v1alpha4_AWSManagedClusterSpec_To_v1alpha3_AWSManagedClusterSpec(in, out, s) -} - -func autoConvert_v1alpha3_AWSManagedClusterStatus_To_v1alpha4_AWSManagedClusterStatus(in *AWSManagedClusterStatus, out *v1alpha4.AWSManagedClusterStatus, s conversion.Scope) error { - out.Ready = in.Ready - out.FailureDomains = *(*apiv1alpha4.FailureDomains)(unsafe.Pointer(&in.FailureDomains)) - return nil -} - -// Convert_v1alpha3_AWSManagedClusterStatus_To_v1alpha4_AWSManagedClusterStatus is an autogenerated conversion function. -func Convert_v1alpha3_AWSManagedClusterStatus_To_v1alpha4_AWSManagedClusterStatus(in *AWSManagedClusterStatus, out *v1alpha4.AWSManagedClusterStatus, s conversion.Scope) error { - return autoConvert_v1alpha3_AWSManagedClusterStatus_To_v1alpha4_AWSManagedClusterStatus(in, out, s) -} - -func autoConvert_v1alpha4_AWSManagedClusterStatus_To_v1alpha3_AWSManagedClusterStatus(in *v1alpha4.AWSManagedClusterStatus, out *AWSManagedClusterStatus, s conversion.Scope) error { - out.Ready = in.Ready - out.FailureDomains = *(*apiv1alpha3.FailureDomains)(unsafe.Pointer(&in.FailureDomains)) - return nil -} - -// Convert_v1alpha4_AWSManagedClusterStatus_To_v1alpha3_AWSManagedClusterStatus is an autogenerated conversion function. -func Convert_v1alpha4_AWSManagedClusterStatus_To_v1alpha3_AWSManagedClusterStatus(in *v1alpha4.AWSManagedClusterStatus, out *AWSManagedClusterStatus, s conversion.Scope) error { - return autoConvert_v1alpha4_AWSManagedClusterStatus_To_v1alpha3_AWSManagedClusterStatus(in, out, s) -} - func autoConvert_v1alpha3_AWSManagedMachinePool_To_v1alpha4_AWSManagedMachinePool(in *AWSManagedMachinePool, out *v1alpha4.AWSManagedMachinePool, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1alpha3_AWSManagedMachinePoolSpec_To_v1alpha4_AWSManagedMachinePoolSpec(&in.Spec, &out.Spec, s); err != nil { diff --git a/exp/api/v1alpha4/awsmanagecluster_webhook.go b/exp/api/v1alpha4/awsmanagecluster_webhook.go deleted file mode 100644 index a9fb57a0c1..0000000000 --- a/exp/api/v1alpha4/awsmanagecluster_webhook.go +++ /dev/null @@ -1,55 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha4 - -import ( - "k8s.io/apimachinery/pkg/runtime" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/webhook" -) - -// SetupWebhookWithManager will setup the webhooks for the AWSManagedCluster. -func (r *AWSManagedCluster) SetupWebhookWithManager(mgr ctrl.Manager) error { - return ctrl.NewWebhookManagedBy(mgr). - For(r). 
- Complete() -} - -// +kubebuilder:webhook:verbs=create;update,path=/mutate-infrastructure-cluster-x-k8s-io-v1alpha4-awsmanagedcluster,mutating=true,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsmanagedclusters,versions=v1alpha4,name=default.awsmanagedcluster.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1beta1 -// +kubebuilder:webhook:verbs=create;update,path=/validate-infrastructure-cluster-x-k8s-io-v1alpha4-awsmanagedcluster,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=awsmanagedclusters,versions=v1alpha4,name=validation.awsmanagedcluster.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1beta1 - -var _ webhook.Defaulter = &AWSManagedCluster{} -var _ webhook.Validator = &AWSManagedCluster{} - -// Default will set default values for the AWSManagedCluster. -func (r *AWSManagedCluster) Default() { -} - -// ValidateCreate will do any extra validation when creating a AWSManagedCluster. -func (r *AWSManagedCluster) ValidateCreate() error { - return nil -} - -// ValidateUpdate will do any extra validation when updating a AWSManagedCluster. -func (r *AWSManagedCluster) ValidateUpdate(old runtime.Object) error { - return nil -} - -// ValidateDelete allows you to add any extra validation when deleting. -func (r *AWSManagedCluster) ValidateDelete() error { - return nil -} diff --git a/exp/api/v1alpha4/awsmanagedcluster_types.go b/exp/api/v1alpha4/awsmanagedcluster_types.go deleted file mode 100644 index 255e133cac..0000000000 --- a/exp/api/v1alpha4/awsmanagedcluster_types.go +++ /dev/null @@ -1,71 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha4 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" -) - -// AWSManagedClusterSpec defines the desired state of AWSManagedCluster -type AWSManagedClusterSpec struct { - // ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. - // +optional - ControlPlaneEndpoint clusterv1.APIEndpoint `json:"controlPlaneEndpoint"` -} - -// AWSManagedClusterStatus defines the observed state of AWSManagedCluster -type AWSManagedClusterStatus struct { - // Ready is when the AWSManagedControlPlane has a API server URL. 
- // +optional - Ready bool `json:"ready,omitempty"` - - // FailureDomains specifies a list fo available availability zones that can be used - // +optional - FailureDomains clusterv1.FailureDomains `json:"failureDomains,omitempty"` -} - -// +kubebuilder:object:root=true -// +kubebuilder:resource:path=awsmanagedclusters,scope=Namespaced,categories=cluster-api,shortName=awsmc -// +kubebuilder:storageversion -// +kubebuilder:subresource:status -// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".metadata.labels.cluster\\.x-k8s\\.io/cluster-name",description="Cluster to which this AWSManagedControl belongs" -// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="Control plane infrastructure is ready for worker nodes" -// +kubebuilder:printcolumn:name="VPC",type="string",JSONPath=".spec.network.vpc.id",description="AWS VPC the control plane is using" -// +kubebuilder:printcolumn:name="Endpoint",type="string",JSONPath=".spec.controlPlaneEndpoint.host",description="API Endpoint",priority=1 - -// AWSManagedCluster is the Schema for the awsmanagedclusters API -type AWSManagedCluster struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec AWSManagedClusterSpec `json:"spec,omitempty"` - Status AWSManagedClusterStatus `json:"status,omitempty"` -} - -// +kubebuilder:object:root=true - -// AWSManagedClusterList contains a list of AWSManagedCluster. -type AWSManagedClusterList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []AWSManagedCluster `json:"items"` -} - -func init() { - SchemeBuilder.Register(&AWSManagedCluster{}, &AWSManagedClusterList{}) -} diff --git a/exp/api/v1alpha4/conversion.go b/exp/api/v1alpha4/conversion.go index c4520b6cfe..3cade6334a 100644 --- a/exp/api/v1alpha4/conversion.go +++ b/exp/api/v1alpha4/conversion.go @@ -28,12 +28,6 @@ func (*AWSManagedMachinePool) Hub() {} // Hub marks AWSManagedMachinePoolList as a conversion hub. func (*AWSManagedMachinePoolList) Hub() {} -// Hub marks AWSManagedCluster as a conversion hub. -func (*AWSManagedCluster) Hub() {} - -// Hub marks AWSManagedClusterList as a conversion hub. -func (*AWSManagedClusterList) Hub() {} - // Hub marks AWSFargateProfile as a conversion hub. func (*AWSFargateProfile) Hub() {} diff --git a/exp/api/v1alpha4/zz_generated.deepcopy.go b/exp/api/v1alpha4/zz_generated.deepcopy.go index 49310dd202..0c2c7e49af 100644 --- a/exp/api/v1alpha4/zz_generated.deepcopy.go +++ b/exp/api/v1alpha4/zz_generated.deepcopy.go @@ -298,103 +298,6 @@ func (in *AWSMachinePoolStatus) DeepCopy() *AWSMachinePoolStatus { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSManagedCluster) DeepCopyInto(out *AWSManagedCluster) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSManagedCluster. -func (in *AWSManagedCluster) DeepCopy() *AWSManagedCluster { - if in == nil { - return nil - } - out := new(AWSManagedCluster) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *AWSManagedCluster) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSManagedClusterList) DeepCopyInto(out *AWSManagedClusterList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]AWSManagedCluster, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSManagedClusterList. -func (in *AWSManagedClusterList) DeepCopy() *AWSManagedClusterList { - if in == nil { - return nil - } - out := new(AWSManagedClusterList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *AWSManagedClusterList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSManagedClusterSpec) DeepCopyInto(out *AWSManagedClusterSpec) { - *out = *in - out.ControlPlaneEndpoint = in.ControlPlaneEndpoint -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSManagedClusterSpec. -func (in *AWSManagedClusterSpec) DeepCopy() *AWSManagedClusterSpec { - if in == nil { - return nil - } - out := new(AWSManagedClusterSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSManagedClusterStatus) DeepCopyInto(out *AWSManagedClusterStatus) { - *out = *in - if in.FailureDomains != nil { - in, out := &in.FailureDomains, &out.FailureDomains - *out = make(cluster_apiapiv1alpha4.FailureDomains, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSManagedClusterStatus. -func (in *AWSManagedClusterStatus) DeepCopy() *AWSManagedClusterStatus { - if in == nil { - return nil - } - out := new(AWSManagedClusterStatus) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AWSManagedMachinePool) DeepCopyInto(out *AWSManagedMachinePool) { *out = *in diff --git a/exp/controllers/awsmanagedcluster_controller.go b/exp/controllers/awsmanagedcluster_controller.go deleted file mode 100644 index afde5d13cd..0000000000 --- a/exp/controllers/awsmanagedcluster_controller.go +++ /dev/null @@ -1,189 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package controllers - -import ( - "context" - "fmt" - "time" - - "github.com/go-logr/logr" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/tools/record" - ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1alpha4" - infrav1exp "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1alpha4" - clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" - "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/annotations" - "sigs.k8s.io/cluster-api/util/patch" - "sigs.k8s.io/cluster-api/util/predicates" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" -) - -// AWSManagedClusterReconciler reconciles AWSManagedCluster. -type AWSManagedClusterReconciler struct { - client.Client - Log logr.Logger - Recorder record.EventRecorder - WatchFilterValue string -} - -// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmanagedclusters,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmanagedclusters/status,verbs=get;update;patch -// +kubebuilder:rbac:groups=controlplane.cluster.x-k8s.io,resources=awsmanagedcontrolplanes;awsmanagedcontrolplanes/status,verbs=get;list;watch -// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch -// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;patch;delete - -func (r *AWSManagedClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { - log := ctrl.LoggerFrom(ctx) - - // Fetch the AWSManagedCluster instance - awsManagedCluster := &infrav1exp.AWSManagedCluster{} - err := r.Get(ctx, req.NamespacedName, awsManagedCluster) - if err != nil { - if apierrors.IsNotFound(err) { - return reconcile.Result{}, nil - } - return reconcile.Result{}, err - } - - // Fetch the Cluster. - cluster, err := util.GetOwnerCluster(ctx, r.Client, awsManagedCluster.ObjectMeta) - if err != nil { - return reconcile.Result{}, err - } - if cluster == nil { - log.Info("Cluster Controller has not yet set OwnerRef") - return reconcile.Result{}, nil - } - - if annotations.IsPaused(cluster, awsManagedCluster) { - log.Info("AWSManagedCluster or linked Cluster is marked as paused. 
Won't reconcile") - return reconcile.Result{}, nil - } - - log = log.WithValues("cluster", cluster.Name) - - controlPlane := &ekscontrolplanev1.AWSManagedControlPlane{} - controlPlaneRef := types.NamespacedName{ - Name: cluster.Spec.ControlPlaneRef.Name, - Namespace: cluster.Spec.ControlPlaneRef.Namespace, - } - - if err := r.Get(ctx, controlPlaneRef, controlPlane); err != nil { - return reconcile.Result{}, fmt.Errorf("failed to get control plane ref: %w", err) - } - - log = log.WithValues("controlPlane", controlPlaneRef.Name) - - patchHelper, err := patch.NewHelper(awsManagedCluster, r.Client) - if err != nil { - return reconcile.Result{}, fmt.Errorf("failed to init patch helper: %w", err) - } - - // Set the values from the managed control plane - awsManagedCluster.Status.Ready = controlPlane.Status.Ready - awsManagedCluster.Spec.ControlPlaneEndpoint = controlPlane.Spec.ControlPlaneEndpoint - awsManagedCluster.Status.FailureDomains = controlPlane.Status.FailureDomains - - if err := patchHelper.Patch(ctx, awsManagedCluster); err != nil { - return reconcile.Result{}, fmt.Errorf("failed to patch AWSManagedCluster: %w", err) - } - - log.Info("Successfully reconciled AWSManagedCluster") - - return reconcile.Result{}, nil -} - -func (r *AWSManagedClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { - awsManagedCluster := &infrav1exp.AWSManagedCluster{} - - controller, err := ctrl.NewControllerManagedBy(mgr). - WithOptions(options). - For(awsManagedCluster). - WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)). - Build(r) - - if err != nil { - return fmt.Errorf("error creating controller: %w", err) - } - - // Add a watch for clusterv1.Cluster unpaise - if err = controller.Watch( - &source.Kind{Type: &clusterv1.Cluster{}}, - handler.EnqueueRequestsFromMapFunc(util.ClusterToInfrastructureMapFunc(awsManagedCluster.GroupVersionKind())), - predicates.ClusterUnpaused(r.Log), - ); err != nil { - return fmt.Errorf("failed adding a watch for ready clusters: %w", err) - } - - // Add a watch for AWSManagedControlPlane - if err = controller.Watch( - &source.Kind{Type: &ekscontrolplanev1.AWSManagedControlPlane{}}, - handler.EnqueueRequestsFromMapFunc(r.managedControlPlaneToManagedCluster), - ); err != nil { - return fmt.Errorf("failed adding watch on AWSManagedControlPlane: %w", err) - } - - return nil -} - -func (r *AWSManagedClusterReconciler) managedControlPlaneToManagedCluster(o client.Object) []ctrl.Request { - ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) - defer cancel() - - awsManagedControlPlane, ok := o.(*ekscontrolplanev1.AWSManagedControlPlane) - if !ok { - panic(fmt.Sprintf("Expected a managedControlPlane but got a %T", o)) - } - - if !awsManagedControlPlane.ObjectMeta.DeletionTimestamp.IsZero() { - r.Log.V(4).Info("AWSManagedControlPlane has a deletion timestamp, skipping mapping") - return nil - } - - cluster, err := util.GetOwnerCluster(ctx, r.Client, awsManagedControlPlane.ObjectMeta) - if err != nil { - r.Log.Error(err, "failed to get owning cluster") - return nil - } - if cluster == nil { - r.Log.Info("no owning cluster, skipping mapping") - return nil - } - - managedClusterRef := cluster.Spec.InfrastructureRef - if managedClusterRef == nil || managedClusterRef.Kind != "AWSManagedCluster" { - r.Log.V(4).Info("InfrastructureRef is nil or not AWSManagedCluster, skipping mapping") - return nil - } - - return []ctrl.Request{ - { - NamespacedName: 
types.NamespacedName{ - Name: managedClusterRef.Name, - Namespace: managedClusterRef.Namespace, - }, - }, - } -} diff --git a/feature/feature.go b/feature/feature.go index 48928c9207..0ed4934a7b 100644 --- a/feature/feature.go +++ b/feature/feature.go @@ -43,6 +43,11 @@ const ( // alpha: v0.4 EKSAllowAddRoles featuregate.Feature = "EKSAllowAddRoles" + // EKSFargate is used to enable the usage of EKS fargate profiles + // owner: @richardcase + // alpha: v0.4 + EKSFargate featuregate.Feature = "EKSFargate" + // MachinePool is used to enable ASG support // owner: @mytunguyen // alpha: v0.1 @@ -67,9 +72,10 @@ func init() { // To add a new feature, define a key for it above and add it here. var defaultCAPAFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{ // Every feature should be initiated here: - EKS: {Default: false, PreRelease: featuregate.Alpha}, - EKSEnableIAM: {Default: false, PreRelease: featuregate.Alpha}, - EKSAllowAddRoles: {Default: false, PreRelease: featuregate.Alpha}, + EKS: {Default: true, PreRelease: featuregate.Beta}, + EKSEnableIAM: {Default: false, PreRelease: featuregate.Beta}, + EKSAllowAddRoles: {Default: false, PreRelease: featuregate.Beta}, + EKSFargate: {Default: false, PreRelease: featuregate.Alpha}, EventBridgeInstanceState: {Default: false, PreRelease: featuregate.Alpha}, MachinePool: {Default: false, PreRelease: featuregate.Alpha}, AutoControllerIdentityCreator: {Default: true, PreRelease: featuregate.Alpha}, diff --git a/main.go b/main.go index a32121c243..4c41415ee2 100644 --- a/main.go +++ b/main.go @@ -18,6 +18,7 @@ package main import ( "context" + "errors" "flag" "fmt" "math/rand" @@ -27,16 +28,28 @@ import ( "time" "github.com/spf13/pflag" + "k8s.io/apimachinery/pkg/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" cgrecord "k8s.io/client-go/tools/record" "k8s.io/klog/v2" "k8s.io/klog/v2/klogr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/healthz" + + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" + clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1alpha4" + infrav1alpha3 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha3" infrav1alpha4 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha4" + bootstrapv1alpha3 "sigs.k8s.io/cluster-api-provider-aws/bootstrap/eks/api/v1alpha3" + bootstrapv1alpha4 "sigs.k8s.io/cluster-api-provider-aws/bootstrap/eks/api/v1alpha4" + bootstrapv1controllers "sigs.k8s.io/cluster-api-provider-aws/bootstrap/eks/controllers" "sigs.k8s.io/cluster-api-provider-aws/controllers" controlplanev1alpha3 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1alpha3" controlplanev1alpha4 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1alpha4" + controlplanev1controllers "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/controllers" infrav1alpha3exp "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1alpha3" infrav1alpha4exp "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1alpha4" "sigs.k8s.io/cluster-api-provider-aws/exp/controlleridentitycreator" @@ -47,11 +60,6 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/pkg/record" "sigs.k8s.io/cluster-api-provider-aws/version" - clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" - clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1alpha4" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/healthz" // +kubebuilder:scaffold:imports ) @@ -70,6 +78,8 @@ func 
init() { _ = controlplanev1alpha3.AddToScheme(scheme) _ = controlplanev1alpha4.AddToScheme(scheme) _ = clusterv1exp.AddToScheme(scheme) + _ = bootstrapv1alpha3.AddToScheme(scheme) + _ = bootstrapv1alpha4.AddToScheme(scheme) // +kubebuilder:scaffold:scheme } @@ -88,6 +98,8 @@ var ( webhookCertDir string healthAddr string serviceEndpoints string + + errEKSInvalidFlags = errors.New("invalid EKS flag combination") ) func main() { @@ -222,13 +234,21 @@ func main() { } if feature.Gates.Enabled(feature.EKS) { setupLog.Info("enabling EKS webhooks") - if err = (&infrav1alpha4exp.AWSManagedMachinePool{}).SetupWebhookWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create webhook", "webhook", "AWSManagedMachinePool") + if err := (&controlplanev1alpha4.AWSManagedControlPlane{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "AWSManagedControlPlane") os.Exit(1) } - if err = (&infrav1alpha4exp.AWSFargateProfile{}).SetupWebhookWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create webhook", "webhook", "AWSFargateProfile") - os.Exit(1) + if feature.Gates.Enabled(feature.EKSFargate) { + if err = (&infrav1alpha4exp.AWSFargateProfile{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "AWSFargateProfile") + os.Exit(1) + } + } + if feature.Gates.Enabled(feature.MachinePool) { + if err = (&infrav1alpha4exp.AWSManagedMachinePool{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "AWSManagedMachinePool") + os.Exit(1) + } } } if feature.Gates.Enabled(feature.MachinePool) { @@ -263,36 +283,64 @@ func enableGates(ctx context.Context, mgr ctrl.Manager, awsServiceEndpoints []sc setupLog.Info("enabling EKS controllers") enableIAM := feature.Gates.Enabled(feature.EKSEnableIAM) - - if err := (&controllersexp.AWSManagedMachinePoolReconciler{ - Client: mgr.GetClient(), - Recorder: mgr.GetEventRecorderFor("awsmanagedmachinepool-reconciler"), - EnableIAM: enableIAM, - Endpoints: awsServiceEndpoints, - WatchFilterValue: watchFilterValue, - }).SetupWithManager(ctx, mgr, controller.Options{}); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "AWSManagedMachinePool") + allowAddRoles := feature.Gates.Enabled(feature.EKSAllowAddRoles) + setupLog.V(2).Info("EKS IAM role creation", "enabled", enableIAM) + setupLog.V(2).Info("EKS IAM additional roles", "enabled", allowAddRoles) + if allowAddRoles && !enableIAM { + setupLog.Error(errEKSInvalidFlags, "cannot use EKSAllowAddRoles flag without EKSEnableIAM") os.Exit(1) } - if err := (&controllersexp.AWSManagedClusterReconciler{ - Client: mgr.GetClient(), - Log: ctrl.Log.WithName("controllers").WithName("AWSManagedCluster"), - Recorder: mgr.GetEventRecorderFor("awsmanagedcluster-reconciler"), - WatchFilterValue: watchFilterValue, + + setupLog.V(2).Info("enabling EKS control plane controller") + if err := (&controlplanev1controllers.AWSManagedControlPlaneReconciler{ + Client: mgr.GetClient(), + EnableIAM: enableIAM, + AllowAdditionalRoles: allowAddRoles, + Endpoints: awsServiceEndpoints, + WatchFilterValue: watchFilterValue, }).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: awsClusterConcurrency}); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "AWSManagedCluster") + setupLog.Error(err, "unable to create controller", "controller", "AWSManagedControlPlane") + os.Exit(1) } - if err := 
(&controllersexp.AWSFargateProfileReconciler{ + + setupLog.V(2).Info("enabling EKS bootstrap controller") + if err := (&bootstrapv1controllers.EKSConfigReconciler{ Client: mgr.GetClient(), - Recorder: mgr.GetEventRecorderFor("awsfargateprofile-reconciler"), - EnableIAM: enableIAM, - Endpoints: awsServiceEndpoints, WatchFilterValue: watchFilterValue, }).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: awsClusterConcurrency}); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "AWSFargateProfile") + setupLog.Error(err, "unable to create controller", "controller", "EKSConfig") + os.Exit(1) + } + + if feature.Gates.Enabled(feature.EKSFargate) { + setupLog.V(2).Info("enabling EKS fargate profile controller") + if err := (&controllersexp.AWSFargateProfileReconciler{ + Client: mgr.GetClient(), + Recorder: mgr.GetEventRecorderFor("awsfargateprofile-reconciler"), + EnableIAM: enableIAM, + Endpoints: awsServiceEndpoints, + WatchFilterValue: watchFilterValue, + }).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: awsClusterConcurrency}); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "AWSFargateProfile") + } + } + + if feature.Gates.Enabled(feature.MachinePool) { + setupLog.V(2).Info("enabling EKS managed machine pool controller") + if err := (&controllersexp.AWSManagedMachinePoolReconciler{ + Client: mgr.GetClient(), + Recorder: mgr.GetEventRecorderFor("awsmanagedmachinepool-reconciler"), + EnableIAM: enableIAM, + Endpoints: awsServiceEndpoints, + WatchFilterValue: watchFilterValue, + }).SetupWithManager(ctx, mgr, controller.Options{}); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "AWSManagedMachinePool") + os.Exit(1) + } } } if feature.Gates.Enabled(feature.MachinePool) { + setupLog.V(2).Info("enabling machine pool controller") if err := (&controllersexp.AWSMachinePoolReconciler{ Client: mgr.GetClient(), Recorder: mgr.GetEventRecorderFor("awsmachinepool-controller"), diff --git a/templates/cluster-template-eks-fargate.yaml b/templates/cluster-template-eks-fargate.yaml index 50afe5e19c..3dd5999b40 100644 --- a/templates/cluster-template-eks-fargate.yaml +++ b/templates/cluster-template-eks-fargate.yaml @@ -8,9 +8,9 @@ spec: pods: cidrBlocks: ["192.168.0.0/16"] infrastructureRef: - apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4 - kind: AWSManagedCluster - name: "${CLUSTER_NAME}" + kind: AWSManagedControlPlane + apiVersion: controlplane.cluster.x-k8s.io/v1alpha4 + name: "${CLUSTER_NAME}-control-plane" controlPlaneRef: kind: AWSManagedControlPlane apiVersion: controlplane.cluster.x-k8s.io/v1alpha4 diff --git a/test/e2e/data/e2e_eks_conf.yaml b/test/e2e/data/e2e_eks_conf.yaml index 2557d9e312..9bd51d0c78 100644 --- a/test/e2e/data/e2e_eks_conf.yaml +++ b/test/e2e/data/e2e_eks_conf.yaml @@ -17,10 +17,6 @@ images: # Use local dev images built source tree; - name: gcr.io/k8s-staging-cluster-api/capa-manager:e2e loadBehavior: mustLoad - - name: gcr.io/k8s-staging-cluster-api/capa-eks-bootstrap-manager:e2e - loadBehavior: mustLoad - - name: gcr.io/k8s-staging-cluster-api/capa-eks-controlplane-manager:e2e - loadBehavior: mustLoad ## PLEASE KEEP THESE UP TO DATE WITH THE COMPONENTS - name: quay.io/jetstack/cert-manager-cainjector:v1.1.0 @@ -56,42 +52,6 @@ providers: - old: --metrics-bind-addr=127.0.0.1:8080 new: --metrics-bind-addr=:8080 - - name: aws-eks - type: ControlPlaneProvider - versions: - - name: v0.6.0 - # Use manifest from source files - value: 
../../../controlplane/eks/config/default - files: - - sourcePath: "./shared/v1alpha4_provider/metadata.yaml" - replacements: - - old: "imagePullPolicy: Always" - new: "imagePullPolicy: IfNotPresent" - - old: "--leader-elect" - new: "--leader-elect=false" - - old: --metrics-bind-addr=127.0.0.1:8080 - new: --metrics-bind-addr=:8080 - - old: gcr.io/k8s-staging-cluster-api-aws/eks-controlplane-controller:latest - new: gcr.io/k8s-staging-cluster-api/capa-eks-controlplane-manager:e2e - - - name: aws-eks - type: BootstrapProvider - versions: - - name: v0.6.0 - # Use manifest from source files - value: ../../../bootstrap/eks/config/default - files: - - sourcePath: "./shared/v1alpha4_provider/metadata.yaml" - replacements: - - old: "imagePullPolicy: Always" - new: "imagePullPolicy: IfNotPresent" - - old: "--leader-elect" - new: "--leader-elect=false" - - old: --metrics-bind-addr=127.0.0.1:8080 - new: --metrics-bind-addr=:8080 - - old: gcr.io/k8s-staging-cluster-api-aws/eks-bootstrap-controller:latest - new: gcr.io/k8s-staging-cluster-api/capa-eks-bootstrap-manager:e2e - - name: aws type: InfrastructureProvider versions: @@ -133,7 +93,6 @@ variables: EXP_CLUSTER_RESOURCE_SET: "true" AWS_NODE_MACHINE_TYPE: t3.large AWS_SSH_KEY_NAME: "cluster-api-provider-aws-sigs-k8s-io" - EXP_EKS: "true" EXP_EKS_IAM: "false" EXP_EKS_ADD_ROLES: "false" VPC_ADDON_VERSION: "v1.6.3-eksbuild.1" diff --git a/test/e2e/shared/identity.go b/test/e2e/shared/identity.go index 3a911c521e..e2c7e77b5c 100644 --- a/test/e2e/shared/identity.go +++ b/test/e2e/shared/identity.go @@ -59,24 +59,6 @@ func SetupStaticCredentials(ctx context.Context, namespace *corev1.Namespace, e2 return client.Create(ctx, secret) }, e2eCtx.E2EConfig.GetIntervals("", "wait-create-identity")...).Should(Succeed()) - if e2eCtx.IsManaged { - //TODO: this doesn't feel right to be creating the secret in 2 places. 
- cpSecret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: credsSecretName, - Namespace: eksNamespace, - }, - StringData: map[string]string{ - "AccessKeyID": *e2eCtx.Environment.BootstrapAccessKey.AccessKeyId, - "SecretAccessKey": *e2eCtx.Environment.BootstrapAccessKey.SecretAccessKey, - }, - } - Byf("Creating credentials secret %s in namespace %s", cpSecret.Name, cpSecret.Namespace) - Eventually(func() error { - return client.Create(ctx, cpSecret) - }, e2eCtx.E2EConfig.GetIntervals("", "wait-create-identity")...).Should(Succeed()) - } - id := &infrav1.AWSClusterStaticIdentity{ ObjectMeta: metav1.ObjectMeta{ Name: idName, diff --git a/test/e2e/shared/template.go b/test/e2e/shared/template.go index 5f633b988c..f71137e22d 100644 --- a/test/e2e/shared/template.go +++ b/test/e2e/shared/template.go @@ -73,7 +73,7 @@ func newBootstrapTemplate(e2eCtx *E2EContext) *cfn_bootstrap.Template { region, err := credentials.ResolveRegion("") Expect(err).NotTo(HaveOccurred()) t.Spec.Region = region - t.Spec.EKS.Enable = true + t.Spec.EKS.Disable = false t.Spec.EKS.AllowIAMRoleCreation = false t.Spec.EKS.DefaultControlPlaneRole.Disable = false t.Spec.EKS.ManagedMachinePool.Disable = false diff --git a/tilt-provider.json b/tilt-provider.json index 0f998cbd8f..efc94a1a04 100644 --- a/tilt-provider.json +++ b/tilt-provider.json @@ -11,30 +11,9 @@ "cmd", "controllers", "exp", - "pkg" - ] - } - }, - { - "name": "eks-bootstrap", - "config": { - "context": "bootstrap/eks", - "image": "gcr.io/k8s-staging-cluster-api-aws/eks-bootstrap-controller", - "live_reload_deps": [ - "main.go", - "api", - "controllers", - "internal" - ] - } - }, - { - "name": "eks-controlplane", - "config": { - "context": "controlplane/eks", - "image": "gcr.io/k8s-staging-cluster-api-aws/eks-controlplane-controller", - "live_reload_deps": [ - "main.go", "api", "controllers", "../../pkg" + "pkg", + "controlplane/eks", + "bootstrap/eks" ] } }
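
Note on the feature-gate change above: the user-visible core of this graduation is that the EKS gate now defaults to enabled while Fargate support moves behind the new experimental EKSFargate gate. Below is a minimal, illustrative Go sketch of how those defaults can be exercised against the feature package; the test name and file placement are assumptions and are not part of this patch, but the feature.Gates.Enabled calls and gate constants are the same ones used in main.go above.

package feature_test

import (
	"testing"

	"sigs.k8s.io/cluster-api-provider-aws/feature"
)

// Illustrative check of the defaults set in feature/feature.go in this patch:
// EKS is enabled out of the box, while EKSFargate remains an opt-in
// experimental gate.
func TestEKSGateDefaults(t *testing.T) {
	if !feature.Gates.Enabled(feature.EKS) {
		t.Fatal("expected the EKS feature gate to be enabled by default")
	}
	if feature.Gates.Enabled(feature.EKSFargate) {
		t.Fatal("expected the EKSFargate feature gate to be disabled by default")
	}
}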