From 240c7c0ecce0787b6cbc223ea08bf4c81267c9c2 Mon Sep 17 00:00:00 2001 From: Stefan Bueringer Date: Fri, 10 Jun 2022 14:48:49 +0200 Subject: [PATCH] impl test extension --- .gitignore | 3 + Makefile | 10 + scripts/ci-e2e-lib.sh | 8 + test/e2e/Makefile | 8 +- test/e2e/cluster_upgrade_runtimesdk_test.go | 254 ++++++++++++++ test/e2e/config/docker.yaml | 6 + .../cluster-runtimesdk.yaml | 8 + .../kustomization.yaml | 5 + .../cluster-runtimesdk.yaml | 8 + .../kustomization.yaml | 5 + .../clusterclass-quick-start-runtimesdk.yaml | 302 +++++++++++++++++ test/extension/Dockerfile | 66 ++++ .../config/certmanager/certificate.yaml | 29 ++ .../config/certmanager/kustomization.yaml | 7 + .../config/certmanager/kustomizeconfig.yaml | 19 ++ test/extension/config/default/extension.yaml | 27 ++ .../config/default/extension_image_patch.yaml | 11 + .../config/default/extension_pull_policy.yaml | 11 + .../default/extension_webhook_patch.yaml | 22 ++ .../config/default/kustomization.yaml | 35 ++ .../config/default/kustomizeconfig.yaml | 4 + test/extension/config/default/namespace.yaml | 6 + test/extension/config/default/service.yaml | 11 + test/extension/handler.go | 167 ++++++++++ test/extension/main.go | 312 ++++++++++++++++++ test/framework/convenience.go | 4 + test/go.mod | 3 +- test/go.sum | 1 + 28 files changed, 1350 insertions(+), 2 deletions(-) create mode 100644 test/e2e/cluster_upgrade_runtimesdk_test.go create mode 100644 test/e2e/data/infrastructure-docker/v1beta1/cluster-template-upgrades-runtimesdk-cgroupfs/cluster-runtimesdk.yaml create mode 100644 test/e2e/data/infrastructure-docker/v1beta1/cluster-template-upgrades-runtimesdk-cgroupfs/kustomization.yaml create mode 100644 test/e2e/data/infrastructure-docker/v1beta1/cluster-template-upgrades-runtimesdk/cluster-runtimesdk.yaml create mode 100644 test/e2e/data/infrastructure-docker/v1beta1/cluster-template-upgrades-runtimesdk/kustomization.yaml create mode 100644 test/e2e/data/infrastructure-docker/v1beta1/clusterclass-quick-start-runtimesdk.yaml create mode 100644 test/extension/Dockerfile create mode 100644 test/extension/config/certmanager/certificate.yaml create mode 100644 test/extension/config/certmanager/kustomization.yaml create mode 100644 test/extension/config/certmanager/kustomizeconfig.yaml create mode 100644 test/extension/config/default/extension.yaml create mode 100644 test/extension/config/default/extension_image_patch.yaml create mode 100644 test/extension/config/default/extension_pull_policy.yaml create mode 100644 test/extension/config/default/extension_webhook_patch.yaml create mode 100644 test/extension/config/default/kustomization.yaml create mode 100644 test/extension/config/default/kustomizeconfig.yaml create mode 100644 test/extension/config/default/namespace.yaml create mode 100644 test/extension/config/default/service.yaml create mode 100644 test/extension/handler.go create mode 100644 test/extension/main.go diff --git a/.gitignore b/.gitignore index 3fecc4d01a67..d04e6cf999cd 100644 --- a/.gitignore +++ b/.gitignore @@ -15,6 +15,9 @@ test/e2e/data/infrastructure-docker/v1alpha3/cluster-template*.yaml test/e2e/data/infrastructure-docker/v1alpha4/cluster-template*.yaml test/e2e/data/infrastructure-docker/v1beta1/cluster-template*.yaml +# E2e test extension deployment +test/e2e/data/test-extension/deployment.yaml + # Output of the go coverage tool, specifically when used with LiteIDE *.out diff --git a/Makefile b/Makefile index c2f623ae64e8..9a13d6707baa 100644 --- a/Makefile +++ b/Makefile @@ -158,6 +158,10 @@ 
CLUSTERCTL_MANIFEST_DIR := cmd/clusterctl/config
 CLUSTERCTL_IMAGE_NAME ?= clusterctl
 CLUSTERCTL_IMG ?= $(REGISTRY)/$(CLUSTERCTL_IMAGE_NAME)
+# test extension
+TEST_EXTENSION_IMAGE_NAME ?= test-extension
+TEST_EXTENSION_IMG ?= $(REGISTRY)/$(TEST_EXTENSION_IMAGE_NAME)
+
 # It is set by Prow GIT_TAG, a git-based tag of the form vYYYYMMDD-hash, e.g., v20210120-v0.3.10-308-gc61521971
 TAG ?= dev
@@ -516,6 +520,12 @@ docker-build-kubeadm-control-plane: ## Build the docker image for kubeadm contro
 docker-build-clusterctl: ## Build the docker image for clusterctl with output binary name as 'clusterctl'
 	DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ARCH=$(ARCH) --build-arg package=./cmd/clusterctl --build-arg ldflags="$(LDFLAGS)" -f ./cmd/clusterctl/Dockerfile . -t $(CLUSTERCTL_IMG)-$(ARCH):$(TAG)
 
+.PHONY: docker-build-test-extension
+docker-build-test-extension: ## Build the docker image for the test extension
+	DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ARCH=$(ARCH) --build-arg ldflags="$(LDFLAGS)" . -t $(TEST_EXTENSION_IMG)-$(ARCH):$(TAG) --file ./test/extension/Dockerfile
+	$(MAKE) set-manifest-image MANIFEST_IMG=$(TEST_EXTENSION_IMG)-$(ARCH) MANIFEST_TAG=$(TAG) TARGET_RESOURCE="./test/extension/config/default/extension_image_patch.yaml"
+	$(MAKE) set-manifest-pull-policy TARGET_RESOURCE="./test/extension/config/default/extension_pull_policy.yaml"
+
 .PHONY: e2e-framework
 e2e-framework: ## Builds the CAPI e2e framework
 	cd $(E2E_FRAMEWORK_DIR); go build ./...
diff --git a/scripts/ci-e2e-lib.sh b/scripts/ci-e2e-lib.sh
index 1a9df93d0740..97843a5e3cdc 100644
--- a/scripts/ci-e2e-lib.sh
+++ b/scripts/ci-e2e-lib.sh
@@ -39,6 +39,14 @@ capi:buildDockerImages () {
   else
     echo "+ CAPD images already present in the system, skipping make"
   fi
+
+  ## Build test extension images, if missing
+  if [[ "$(docker images -q "$REGISTRY/test-extension-$ARCH:$TAG" 2> /dev/null)" == "" ]]; then
+    echo "+ Building test-extension image"
+    make docker-build-test-extension
+  else
+    echo "+ test-extension image already present in the system, skipping make"
+  fi
 }
 
 # k8s::prepareKindestImages checks all the e2e test variables representing a Kubernetes version,
diff --git a/test/e2e/Makefile b/test/e2e/Makefile
index af2fa2d9eb17..e5ca2dd39502 100644
--- a/test/e2e/Makefile
+++ b/test/e2e/Makefile
@@ -95,11 +95,17 @@ cluster-templates-v1beta1: $(KUSTOMIZE) ## Generate cluster templates for v1beta
 	$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1beta1/cluster-template-node-drain --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1beta1/cluster-template-node-drain.yaml
 	$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1beta1/cluster-template-upgrades --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1beta1/cluster-template-upgrades.yaml
 	$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1beta1/cluster-template-upgrades-cgroupfs --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1beta1/cluster-template-upgrades-cgroupfs.yaml
+	$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1beta1/cluster-template-upgrades-runtimesdk --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1beta1/cluster-template-upgrades-runtimesdk.yaml
+	$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1beta1/cluster-template-upgrades-runtimesdk-cgroupfs --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1beta1/cluster-template-upgrades-runtimesdk-cgroupfs.yaml
 	$(KUSTOMIZE) build
$(DOCKER_TEMPLATES)/v1beta1/cluster-template-kcp-scale-in --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1beta1/cluster-template-kcp-scale-in.yaml
 	$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1beta1/cluster-template-ipv6 --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1beta1/cluster-template-ipv6.yaml
 	$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1beta1/cluster-template-topology --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1beta1/cluster-template-topology.yaml
 	$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1beta1/cluster-template-ignition --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1beta1/cluster-template-ignition.yaml
 
+test-extension-deployment: $(KUSTOMIZE) ## Generate the deployment manifest for the test extension
+	mkdir -p $(REPO_ROOT)/test/e2e/data/test-extension
+	$(KUSTOMIZE) build $(REPO_ROOT)/test/extension/config/default > $(REPO_ROOT)/test/e2e/data/test-extension/deployment.yaml
+
 ## --------------------------------------
 ## Testing
 ## --------------------------------------
@@ -119,7 +125,7 @@ _SKIP_ARGS := $(foreach arg,$(strip $(GINKGO_SKIP)),-skip="$(arg)")
 endif
 
 .PHONY: run
-run: $(GINKGO) cluster-templates ## Run the end-to-end tests
+run: $(GINKGO) cluster-templates test-extension-deployment ## Run the end-to-end tests
 	$(GINKGO) -v -trace -tags=e2e -focus="$(GINKGO_FOCUS)" $(_SKIP_ARGS) -nodes=$(GINKGO_NODES) --noColor=$(GINKGO_NOCOLOR) $(GINKGO_ARGS) . -- \
 	    -e2e.artifacts-folder="$(ARTIFACTS)" \
 	    -e2e.config="$(E2E_CONF_FILE)" \
diff --git a/test/e2e/cluster_upgrade_runtimesdk_test.go b/test/e2e/cluster_upgrade_runtimesdk_test.go
new file mode 100644
index 000000000000..6f5870cce89c
--- /dev/null
+++ b/test/e2e/cluster_upgrade_runtimesdk_test.go
@@ -0,0 +1,254 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package e2e
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/blang/semver"
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/utils/pointer"
+
+	runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1"
+	"sigs.k8s.io/cluster-api/test/framework"
+	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
+	"sigs.k8s.io/cluster-api/util"
+)
+
+var _ = Describe("When upgrading a workload cluster using ClusterClass with RuntimeSDK [PR-Informing]", func() {
+	clusterUpgradeWithRuntimeSDKConformanceSpec(ctx, func() ClusterUpgradeWithRuntimeSDKConformanceSpecInput {
+		// "upgrades" is the same as the "topology" flavor but with an additional MachinePool.
+		flavor := pointer.String("upgrades-runtimesdk")
+		// For KubernetesVersionUpgradeFrom < v1.24 we have to use the upgrades-cgroupfs flavor.
+		// This is because kind and CAPD only support:
+		// * cgroupDriver cgroupfs for Kubernetes < v1.24
+		// * cgroupDriver systemd for Kubernetes >= v1.24.
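+		// (kubeadm >= v1.21 defaults the kubelet cgroupDriver to systemd, which the kind node images for Kubernetes < v1.24 do not support.)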
+		// Notes:
+		// * We always use a ClusterClass-based cluster-template for the upgrade test
+		// * The ClusterClass will automatically adjust the cgroupDriver for KCP and MDs.
+		// * We have to handle the MachinePool ourselves
+		//   * The upgrades-cgroupfs flavor uses an MP which is pinned to cgroupfs
+		//   * During the upgrade UpgradeMachinePoolAndWait automatically drops the cgroupfs pinning
+		//     when the target version is >= v1.24.
+		// We can remove this as soon as we don't test upgrades from Kubernetes < v1.24 anymore with CAPD
+		// or MachinePools are supported in ClusterClass.
+		version, err := semver.ParseTolerant(e2eConfig.GetVariable(KubernetesVersionUpgradeFrom))
+		Expect(err).ToNot(HaveOccurred(), "Invalid argument, KUBERNETES_VERSION_UPGRADE_FROM is not a valid version")
+		if version.LT(semver.MustParse("1.24.0")) {
+			// "upgrades-cgroupfs" is the same as the "topology" flavor but with an additional MachinePool
+			// with pinned cgroupDriver to cgroupfs.
+			flavor = pointer.String("upgrades-runtimesdk-cgroupfs")
+		}
+
+		return ClusterUpgradeWithRuntimeSDKConformanceSpecInput{
+			E2EConfig:             e2eConfig,
+			ClusterctlConfigPath:  clusterctlConfigPath,
+			BootstrapClusterProxy: bootstrapClusterProxy,
+			ArtifactFolder:        artifactFolder,
+			SkipCleanup:           skipCleanup,
+			Flavor:                flavor,
+		}
+	})
+})
+
+// ClusterUpgradeWithRuntimeSDKConformanceSpecInput is the input for clusterUpgradeWithRuntimeSDKConformanceSpec.
+type ClusterUpgradeWithRuntimeSDKConformanceSpecInput struct {
+	E2EConfig             *clusterctl.E2EConfig
+	ClusterctlConfigPath  string
+	BootstrapClusterProxy framework.ClusterProxy
+	ArtifactFolder        string
+	SkipCleanup           bool
+
+	// ControlPlaneMachineCount is used in `config cluster` to configure the count of the control plane machines used in the test.
+	// Default is 1.
+	ControlPlaneMachineCount *int64
+	// WorkerMachineCount is used in `config cluster` to configure the count of the worker machines used in the test.
+	// NOTE: If the WORKER_MACHINE_COUNT var is used multiple times in the cluster template, the absolute count of
+	// worker machines is a multiple of WorkerMachineCount.
+	// Default is 2.
+	WorkerMachineCount *int64
+
+	// Flavor to use when creating the cluster for testing, "upgrades" is used if not specified.
+	Flavor *string
+}
+
+// clusterUpgradeWithRuntimeSDKConformanceSpec implements a spec that upgrades a cluster and runs the Kubernetes conformance suite.
+// Upgrading a cluster refers to upgrading the control-plane and worker nodes (managed by MDs and machine pools).
+// NOTE: This test only works with a KubeadmControlPlane.
+// NOTE: This test requires a ClusterClass-based Cluster, i.e. all flavors used with this spec must be ClusterClass-based.
+// The ClusterClass must have the variables "etcdImageTag" and "coreDNSImageTag" of type string.
+// Those variables should have corresponding patches which set the etcd and CoreDNS tags in KCP.
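+// In addition to the plain upgrade flow, this spec deploys the test extension into the management
+// cluster and registers it via an ExtensionConfig, so the external generate-patches and
+// validate-topology hooks are exercised while the topology controller reconciles the Cluster.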
+func clusterUpgradeWithRuntimeSDKConformanceSpec(ctx context.Context, inputGetter func() ClusterUpgradeWithRuntimeSDKConformanceSpecInput) {
+	const (
+		testExtensionPathVariable = "TEST_EXTENSION"
+		specName                  = "k8s-upgrade-with-runtimesdk"
+	)
+	var (
+		input         ClusterUpgradeWithRuntimeSDKConformanceSpecInput
+		namespace     *corev1.Namespace
+		cancelWatches context.CancelFunc
+
+		controlPlaneMachineCount int64
+		workerMachineCount       int64
+
+		clusterResources  *clusterctl.ApplyClusterTemplateAndWaitResult
+		testExtensionPath string
+	)
+
+	BeforeEach(func() {
+		Expect(ctx).NotTo(BeNil(), "ctx is required for %s spec", specName)
+		input = inputGetter()
+		Expect(input.E2EConfig).ToNot(BeNil(), "Invalid argument. input.E2EConfig can't be nil when calling %s spec", specName)
+		Expect(input.ClusterctlConfigPath).To(BeAnExistingFile(), "Invalid argument. input.ClusterctlConfigPath must be an existing file when calling %s spec", specName)
+		Expect(input.BootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. input.BootstrapClusterProxy can't be nil when calling %s spec", specName)
+		Expect(os.MkdirAll(input.ArtifactFolder, 0750)).To(Succeed(), "Invalid argument. input.ArtifactFolder can't be created for %s spec", specName)
+
+		Expect(input.E2EConfig.Variables).To(HaveKey(KubernetesVersionUpgradeFrom))
+		Expect(input.E2EConfig.Variables).To(HaveKey(KubernetesVersionUpgradeTo))
+		Expect(input.E2EConfig.Variables).To(HaveKey(EtcdVersionUpgradeTo))
+		Expect(input.E2EConfig.Variables).To(HaveKey(CoreDNSVersionUpgradeTo))
+
+		if input.ControlPlaneMachineCount == nil {
+			controlPlaneMachineCount = 1
+		} else {
+			controlPlaneMachineCount = *input.ControlPlaneMachineCount
+		}
+
+		if input.WorkerMachineCount == nil {
+			workerMachineCount = 2
+		} else {
+			workerMachineCount = *input.WorkerMachineCount
+		}
+
+		testExtensionPath = input.E2EConfig.GetVariable(testExtensionPathVariable)
+		Expect(testExtensionPath).To(BeAnExistingFile(), "The %s variable should resolve to an existing file", testExtensionPathVariable)
+
+		// Set up a Namespace to host objects for this spec and create a watcher for the Namespace events.
+		namespace, cancelWatches = setupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder)
+		clusterResources = new(clusterctl.ApplyClusterTemplateAndWaitResult)
+	})
+
+	It("Should create and upgrade a workload cluster", func() {
+		// FIXME(sbueringer): should we create an additional cluster and then deploy the extension there? (like self-hosted, ...)
+ By("Deploy Test Extension") + testExtensionDeployment, err := os.ReadFile(testExtensionPath) //nolint:gosec + Expect(err).ToNot(HaveOccurred(), "Failed to read the extension config deployment manifest file") + Expect(testExtensionDeployment).ToNot(BeEmpty(), "Test Extension deployment manifest file should not be empty") + Expect(input.BootstrapClusterProxy.Apply(ctx, testExtensionDeployment)).To(Succeed()) + + By("Deploy Test Extension ExtensionConfig") + err = input.BootstrapClusterProxy.GetClient().Create(ctx, extensionConfig(namespace)) + Expect(err).ToNot(HaveOccurred(), "Failed to create the extension config") + + By("Creating a workload cluster") + + clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ + ClusterProxy: input.BootstrapClusterProxy, + ConfigCluster: clusterctl.ConfigClusterInput{ + LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()), + ClusterctlConfigPath: input.ClusterctlConfigPath, + KubeconfigPath: input.BootstrapClusterProxy.GetKubeconfigPath(), + InfrastructureProvider: clusterctl.DefaultInfrastructureProvider, + Flavor: pointer.StringDeref(input.Flavor, "upgrades"), + Namespace: namespace.Name, + ClusterName: fmt.Sprintf("%s-%s", specName, util.RandomString(6)), + KubernetesVersion: input.E2EConfig.GetVariable(KubernetesVersionUpgradeFrom), + ControlPlaneMachineCount: pointer.Int64Ptr(controlPlaneMachineCount), + WorkerMachineCount: pointer.Int64Ptr(workerMachineCount), + }, + WaitForClusterIntervals: input.E2EConfig.GetIntervals(specName, "wait-cluster"), + WaitForControlPlaneIntervals: input.E2EConfig.GetIntervals(specName, "wait-control-plane"), + WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"), + WaitForMachinePools: input.E2EConfig.GetIntervals(specName, "wait-machine-pool-nodes"), + }, clusterResources) + + // Cluster is using ClusterClass, upgrade via topology. + By("Upgrading the Cluster topology") + framework.UpgradeClusterTopologyAndWaitForUpgrade(ctx, framework.UpgradeClusterTopologyAndWaitForUpgradeInput{ + ClusterProxy: input.BootstrapClusterProxy, + Cluster: clusterResources.Cluster, + ControlPlane: clusterResources.ControlPlane, + EtcdImageTag: input.E2EConfig.GetVariable(EtcdVersionUpgradeTo), + DNSImageTag: input.E2EConfig.GetVariable(CoreDNSVersionUpgradeTo), + MachineDeployments: clusterResources.MachineDeployments, + KubernetesUpgradeVersion: input.E2EConfig.GetVariable(KubernetesVersionUpgradeTo), + WaitForMachinesToBeUpgraded: input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"), + WaitForKubeProxyUpgrade: input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"), + WaitForDNSUpgrade: input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"), + WaitForEtcdUpgrade: input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"), + }) + + // Only attempt to upgrade MachinePools if they were provided in the template. 
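+		// (Both runtimesdk flavors include an additional MachinePool, so this branch normally runs unless WorkerMachineCount is set to 0.)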
+		if len(clusterResources.MachinePools) > 0 && workerMachineCount > 0 {
+			By("Upgrading the machinepool instances")
+			framework.UpgradeMachinePoolAndWait(ctx, framework.UpgradeMachinePoolAndWaitInput{
+				ClusterProxy:                   input.BootstrapClusterProxy,
+				Cluster:                        clusterResources.Cluster,
+				UpgradeVersion:                 input.E2EConfig.GetVariable(KubernetesVersionUpgradeTo),
+				WaitForMachinePoolToBeUpgraded: input.E2EConfig.GetIntervals(specName, "wait-machine-pool-upgrade"),
+				MachinePools:                   clusterResources.MachinePools,
+			})
+		}
+
+		By("Waiting until nodes are ready")
+		workloadProxy := input.BootstrapClusterProxy.GetWorkloadCluster(ctx, namespace.Name, clusterResources.Cluster.Name)
+		workloadClient := workloadProxy.GetClient()
+		framework.WaitForNodesReady(ctx, framework.WaitForNodesReadyInput{
+			Lister:            workloadClient,
+			KubernetesVersion: input.E2EConfig.GetVariable(KubernetesVersionUpgradeTo),
+			Count:             int(clusterResources.ExpectedTotalNodes()),
+			WaitForNodesReady: input.E2EConfig.GetIntervals(specName, "wait-nodes-ready"),
+		})
+
+		By("PASSED!")
+	})
+
+	AfterEach(func() {
+		// Dump all the resources in the spec Namespace, then clean up the cluster object and the spec Namespace itself.
+		dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
+	})
+}
+
+// extensionConfig returns the ExtensionConfig used to register the test extension, scoped to the given namespace.
+func extensionConfig(namespace *corev1.Namespace) *runtimev1.ExtensionConfig {
+	return &runtimev1.ExtensionConfig{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "test-extension-config",
+			Annotations: map[string]string{
+				"cert-manager.io/inject-ca-from-secret": "test-extension-system/webhook-service-cert",
+			},
+		},
+		Spec: runtimev1.ExtensionConfigSpec{
+			ClientConfig: runtimev1.ClientConfig{
+				Service: &runtimev1.ServiceReference{
+					Name:      "webhook-service",
+					Namespace: "test-extension-system",
+				},
+			},
+			NamespaceSelector: &metav1.LabelSelector{
+				MatchLabels: map[string]string{
+					"kubernetes.io/metadata.name": namespace.Name,
+				},
+			},
+		},
+	}
+}
diff --git a/test/e2e/config/docker.yaml b/test/e2e/config/docker.yaml
index 397ff98ff42a..fa98c30dc8d9 100644
--- a/test/e2e/config/docker.yaml
+++ b/test/e2e/config/docker.yaml
@@ -17,6 +17,8 @@ images:
   loadBehavior: tryLoad
 - name: gcr.io/k8s-staging-cluster-api/capd-manager-{ARCH}:dev
   loadBehavior: tryLoad
+- name: gcr.io/k8s-staging-cluster-api/test-extension-{ARCH}:dev
+  loadBehavior: tryLoad
 - name: quay.io/jetstack/cert-manager-cainjector:v1.7.2
   loadBehavior: tryLoad
 - name: quay.io/jetstack/cert-manager-webhook:v1.7.2
@@ -188,11 +190,14 @@ providers:
       - sourcePath: "../data/infrastructure-docker/v1beta1/cluster-template-node-drain.yaml"
       - sourcePath: "../data/infrastructure-docker/v1beta1/cluster-template-upgrades.yaml"
       - sourcePath: "../data/infrastructure-docker/v1beta1/cluster-template-upgrades-cgroupfs.yaml"
+      - sourcePath: "../data/infrastructure-docker/v1beta1/cluster-template-upgrades-runtimesdk.yaml"
+      - sourcePath: "../data/infrastructure-docker/v1beta1/cluster-template-upgrades-runtimesdk-cgroupfs.yaml"
       - sourcePath: "../data/infrastructure-docker/v1beta1/cluster-template-kcp-scale-in.yaml"
       - sourcePath: "../data/infrastructure-docker/v1beta1/cluster-template-ipv6.yaml"
       - sourcePath: "../data/infrastructure-docker/v1beta1/cluster-template-topology.yaml"
       - sourcePath: "../data/infrastructure-docker/v1beta1/cluster-template-ignition.yaml"
       - sourcePath: "../data/infrastructure-docker/v1beta1/clusterclass-quick-start.yaml"
+      - sourcePath:
"../data/infrastructure-docker/v1beta1/clusterclass-quick-start-runtimesdk.yaml" - sourcePath: "../data/shared/v1beta1/metadata.yaml" variables: @@ -214,6 +219,7 @@ variables: DOCKER_POD_IPV6_CIDRS: "fd00:100:96::/48" CNI: "./data/cni/kindnet/kindnet.yaml" KUBETEST_CONFIGURATION: "./data/kubetest/conformance.yaml" + TEST_EXTENSION: "./data/test-extension/deployment.yaml" NODE_DRAIN_TIMEOUT: "60s" # Enabling the feature flags by setting the env variables. EXP_CLUSTER_RESOURCE_SET: "true" diff --git a/test/e2e/data/infrastructure-docker/v1beta1/cluster-template-upgrades-runtimesdk-cgroupfs/cluster-runtimesdk.yaml b/test/e2e/data/infrastructure-docker/v1beta1/cluster-template-upgrades-runtimesdk-cgroupfs/cluster-runtimesdk.yaml new file mode 100644 index 000000000000..b0d438e585ae --- /dev/null +++ b/test/e2e/data/infrastructure-docker/v1beta1/cluster-template-upgrades-runtimesdk-cgroupfs/cluster-runtimesdk.yaml @@ -0,0 +1,8 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: '${CLUSTER_NAME}' + namespace: default +spec: + topology: + class: "quick-start-runtimesdk" diff --git a/test/e2e/data/infrastructure-docker/v1beta1/cluster-template-upgrades-runtimesdk-cgroupfs/kustomization.yaml b/test/e2e/data/infrastructure-docker/v1beta1/cluster-template-upgrades-runtimesdk-cgroupfs/kustomization.yaml new file mode 100644 index 000000000000..4e0073d2ce2d --- /dev/null +++ b/test/e2e/data/infrastructure-docker/v1beta1/cluster-template-upgrades-runtimesdk-cgroupfs/kustomization.yaml @@ -0,0 +1,5 @@ +resources: +- ../cluster-template-upgrades-cgroupfs + +patches: +- ./cluster-runtimesdk.yaml diff --git a/test/e2e/data/infrastructure-docker/v1beta1/cluster-template-upgrades-runtimesdk/cluster-runtimesdk.yaml b/test/e2e/data/infrastructure-docker/v1beta1/cluster-template-upgrades-runtimesdk/cluster-runtimesdk.yaml new file mode 100644 index 000000000000..b0d438e585ae --- /dev/null +++ b/test/e2e/data/infrastructure-docker/v1beta1/cluster-template-upgrades-runtimesdk/cluster-runtimesdk.yaml @@ -0,0 +1,8 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: '${CLUSTER_NAME}' + namespace: default +spec: + topology: + class: "quick-start-runtimesdk" diff --git a/test/e2e/data/infrastructure-docker/v1beta1/cluster-template-upgrades-runtimesdk/kustomization.yaml b/test/e2e/data/infrastructure-docker/v1beta1/cluster-template-upgrades-runtimesdk/kustomization.yaml new file mode 100644 index 000000000000..8555d6df9920 --- /dev/null +++ b/test/e2e/data/infrastructure-docker/v1beta1/cluster-template-upgrades-runtimesdk/kustomization.yaml @@ -0,0 +1,5 @@ +resources: +- ../cluster-template-upgrades + +patches: +- ./cluster-runtimesdk.yaml diff --git a/test/e2e/data/infrastructure-docker/v1beta1/clusterclass-quick-start-runtimesdk.yaml b/test/e2e/data/infrastructure-docker/v1beta1/clusterclass-quick-start-runtimesdk.yaml new file mode 100644 index 000000000000..c4d5d7253526 --- /dev/null +++ b/test/e2e/data/infrastructure-docker/v1beta1/clusterclass-quick-start-runtimesdk.yaml @@ -0,0 +1,302 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: ClusterClass +metadata: + name: quick-start-runtimesdk +spec: + controlPlane: + ref: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + name: quick-start-control-plane + machineInfrastructure: + ref: + kind: DockerMachineTemplate + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + name: quick-start-control-plane + infrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: 
DockerClusterTemplate + name: quick-start-cluster + workers: + machineDeployments: + - class: default-worker + template: + bootstrap: + ref: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: quick-start-default-worker-bootstraptemplate + infrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerMachineTemplate + name: quick-start-default-worker-machinetemplate + variables: + - name: lbImageRepository + required: true + schema: + openAPIV3Schema: + type: string + default: kindest + - name: imageRepository + required: true + schema: + openAPIV3Schema: + type: string + default: "k8s.gcr.io" + example: "k8s.gcr.io" + description: "imageRepository sets the container registry to pull images from. If empty, `k8s.gcr.io` will be used by default." + - name: etcdImageTag + required: true + schema: + openAPIV3Schema: + type: string + default: "" + example: "3.5.3-0" + description: "etcdImageTag sets the tag for the etcd image." + - name: coreDNSImageTag + required: true + schema: + openAPIV3Schema: + type: string + default: "" + example: "v1.8.5" + description: "coreDNSImageTag sets the tag for the coreDNS image." + - name: kubeadmControlPlaneMaxSurge + required: false + schema: + openAPIV3Schema: + type: string + default: "" + example: "0" + description: "kubeadmControlPlaneMaxSurge is the maximum number of control planes that can be scheduled above or under the desired number of control plane machines." + patches: + - name: lbImageRepository + external: + generateExtension: generate-patches.test-extension-config + validateExtension: validate-topology.test-extension-config +# definitions: +# - selector: +# apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +# kind: DockerClusterTemplate +# matchResources: +# infrastructureCluster: true +# jsonPatches: +# - op: add +# path: "/spec/template/spec/loadBalancer" +# valueFrom: +# template: | +# imageRepository: {{ .lbImageRepository }} + - name: imageRepository + description: "Sets the imageRepository used for the KubeadmControlPlane." + definitions: + - selector: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add + path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/imageRepository" + valueFrom: + variable: imageRepository + # We have to pin the cgroupDriver to cgroupfs for Kubernetes < v1.24 because kind does not support systemd for those versions, but kubeadm >= 1.21 defaults to systemd. + - name: cgroupDriver-controlPlane + description: | + Sets the cgroupDriver to cgroupfs if a Kubernetes version < v1.24 is referenced. + This is required because kind and the node images do not support the default + systemd cgroupDriver for kubernetes < v1.24. + enabledIf: '{{ semverCompare "<= v1.23" .builtin.controlPlane.version }}' + definitions: + - selector: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + matchResources: + controlPlane: true + jsonPatches: + - op: add + path: "/spec/template/spec/kubeadmConfigSpec/initConfiguration/nodeRegistration/kubeletExtraArgs/cgroup-driver" + value: cgroupfs + - op: add + path: "/spec/template/spec/kubeadmConfigSpec/joinConfiguration/nodeRegistration/kubeletExtraArgs/cgroup-driver" + value: cgroupfs + - name: cgroupDriver-machineDeployment + description: | + Sets the cgroupDriver to cgroupfs if a Kubernetes version < v1.24 is referenced. 
+      This is required because kind and the node images do not support the default
+      systemd cgroupDriver for kubernetes < v1.24.
+    enabledIf: '{{ semverCompare "<= v1.23" .builtin.machineDeployment.version }}'
+    definitions:
+    - selector:
+        apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+        kind: KubeadmConfigTemplate
+        matchResources:
+          machineDeploymentClass:
+            names:
+            - default-worker
+      jsonPatches:
+      - op: add
+        path: "/spec/template/spec/joinConfiguration/nodeRegistration/kubeletExtraArgs/cgroup-driver"
+        value: cgroupfs
+  - name: etcdImageTag
+    description: "Sets the tag to use for the etcd image in the KubeadmControlPlane."
+    definitions:
+    - selector:
+        apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+        kind: KubeadmControlPlaneTemplate
+        matchResources:
+          controlPlane: true
+      jsonPatches:
+      - op: add
+        path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/etcd"
+        valueFrom:
+          template: |
+            local:
+              imageTag: {{ .etcdImageTag }}
+  - name: coreDNSImageTag
+    description: "Sets the tag to use for the CoreDNS image in the KubeadmControlPlane."
+    definitions:
+    - selector:
+        apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+        kind: KubeadmControlPlaneTemplate
+        matchResources:
+          controlPlane: true
+      jsonPatches:
+      - op: add
+        path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/dns"
+        valueFrom:
+          template: |
+            imageTag: {{ .coreDNSImageTag }}
+  - name: customImage
+    description: "Sets the container image that is used for running dockerMachines for the controlPlane and default-worker machineDeployments."
+    definitions:
+    - selector:
+        apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+        kind: DockerMachineTemplate
+        matchResources:
+          machineDeploymentClass:
+            names:
+            - default-worker
+      jsonPatches:
+      - op: add
+        path: "/spec/template/spec/customImage"
+        valueFrom:
+          template: |
+            kindest/node:{{ .builtin.machineDeployment.version | replace "+" "_" }}
+    - selector:
+        apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+        kind: DockerMachineTemplate
+        matchResources:
+          controlPlane: true
+      jsonPatches:
+      - op: add
+        path: "/spec/template/spec/customImage"
+        valueFrom:
+          template: |
+            kindest/node:{{ .builtin.controlPlane.version | replace "+" "_" }}
+  - name: kubeadmControlPlaneMaxSurge
+    description: "Sets the maxSurge value used for rolloutStrategy in the KubeadmControlPlane."
+    enabledIf: '{{ ne .kubeadmControlPlaneMaxSurge "" }}'
+    definitions:
+    - selector:
+        apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+        kind: KubeadmControlPlaneTemplate
+        matchResources:
+          controlPlane: true
+      jsonPatches:
+      - op: add
+        path: /spec/template/spec/rolloutStrategy/rollingUpdate/maxSurge
+        valueFrom:
+          template: "{{ .kubeadmControlPlaneMaxSurge }}"
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: DockerClusterTemplate
+metadata:
+  name: quick-start-cluster
+spec:
+  template:
+    spec:
+      failureDomains:
+        fd1:
+          controlPlane: true
+        fd2:
+          controlPlane: true
+        fd3:
+          controlPlane: true
+        fd4:
+          controlPlane: false
+        fd5:
+          controlPlane: false
+        fd6:
+          controlPlane: false
+        fd7:
+          controlPlane: false
+        fd8:
+          controlPlane: false
+---
+kind: KubeadmControlPlaneTemplate
+apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+metadata:
+  name: quick-start-control-plane
+spec:
+  template:
+    spec:
+      machineTemplate:
+        nodeDrainTimeout: 1s
+      kubeadmConfigSpec:
+        clusterConfiguration:
+          controllerManager:
+            extraArgs: { enable-hostpath-provisioner: 'true' }
+          apiServer:
+            # host.docker.internal is required by kubetest when running on MacOS because of the way ports are proxied.
+ certSANs: [localhost, 127.0.0.1, 0.0.0.0, host.docker.internal] + initConfiguration: + nodeRegistration: + # We have to set the criSocket to containerd as kubeadm defaults to docker runtime if both containerd and docker sockets are found + criSocket: unix:///var/run/containerd/containerd.sock + kubeletExtraArgs: + eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' + joinConfiguration: + nodeRegistration: + # We have to set the criSocket to containerd as kubeadm defaults to docker runtime if both containerd and docker sockets are found + criSocket: unix:///var/run/containerd/containerd.sock + kubeletExtraArgs: + eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: DockerMachineTemplate +metadata: + name: quick-start-control-plane +spec: + template: + spec: + extraMounts: + - containerPath: "/var/run/docker.sock" + hostPath: "/var/run/docker.sock" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: DockerMachineTemplate +metadata: + name: quick-start-default-worker-machinetemplate +spec: + template: + spec: + extraMounts: + - containerPath: "/var/run/docker.sock" + hostPath: "/var/run/docker.sock" +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: quick-start-default-worker-bootstraptemplate +spec: + template: + spec: + joinConfiguration: + nodeRegistration: + # We have to set the criSocket to containerd as kubeadm defaults to docker runtime if both containerd and docker sockets are found + criSocket: unix:///var/run/containerd/containerd.sock + kubeletExtraArgs: + eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' + diff --git a/test/extension/Dockerfile b/test/extension/Dockerfile new file mode 100644 index 000000000000..ea418235356e --- /dev/null +++ b/test/extension/Dockerfile @@ -0,0 +1,66 @@ +# syntax=docker/dockerfile:1.1-experimental + +# Copyright 2022 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
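+
+# Example invocation from the repo root (illustrative only; in CI the
+# docker-build-test-extension Makefile target added in this patch supplies the build args,
+# and the builder image/tag below are placeholders):
+#   DOCKER_BUILDKIT=1 docker build --build-arg builder_image=docker.io/library/golang:1.18 \
+#     --build-arg ARCH=amd64 -f ./test/extension/Dockerfile -t test-extension-amd64:dev .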
+
+# Build the extension binary
+# Run this with docker build --build-arg builder_image=
+ARG builder_image
+FROM ${builder_image} as builder
+WORKDIR /workspace
+
+# Run this with docker build --build-arg goproxy=$(go env GOPROXY) to override the goproxy
+ARG goproxy=https://proxy.golang.org
+ENV GOPROXY=$goproxy
+
+# Copy the Go Modules manifests
+COPY go.mod go.mod
+COPY go.sum go.sum
+
+# Cache deps before building and copying source so that we don't need to re-download as much
+# and so that source changes don't invalidate our downloaded layer
+RUN --mount=type=cache,target=/go/pkg/mod \
+    go mod download
+
+# Copy the sources
+COPY ./ ./
+
+# Cache the go build into Go's compiler cache folder so we benefit from compiler caching across docker build calls
+RUN --mount=type=cache,target=/root/.cache/go-build \
+    --mount=type=cache,target=/go/pkg/mod \
+    go build .
+
+# Build
+ARG package=.
+ARG ARCH
+ARG ldflags
+
+# Change directories into the test extension
+WORKDIR /workspace/test/extension
+
+# Do not force rebuild of up-to-date packages (do not use -a) and use the compiler cache folder
+RUN --mount=type=cache,target=/root/.cache/go-build \
+    --mount=type=cache,target=/go/pkg/mod \
+    CGO_ENABLED=0 GOOS=linux GOARCH=${ARCH} \
+    go build -trimpath -ldflags "${ldflags} -extldflags '-static'" \
+    -o /workspace/extension ${package}
+
+# Production image
+FROM gcr.io/distroless/static:nonroot
+WORKDIR /
+COPY --from=builder /workspace/extension .
+# Use uid of nonroot user (65532) because kubernetes expects numeric user when applying pod security policies
+USER 65532
+ENTRYPOINT ["/extension"]
diff --git a/test/extension/config/certmanager/certificate.yaml b/test/extension/config/certmanager/certificate.yaml
new file mode 100644
index 000000000000..941a71577e95
--- /dev/null
+++ b/test/extension/config/certmanager/certificate.yaml
@@ -0,0 +1,29 @@
+# The following manifests contain a self-signed issuer CR and a certificate CR.
+# More documentation can be found at https://docs.cert-manager.io
+apiVersion: cert-manager.io/v1
+kind: Issuer
+metadata:
+  name: selfsigned-issuer
+  namespace: system
+spec:
+  selfSigned: {}
+---
+apiVersion: cert-manager.io/v1
+kind: Certificate
+metadata:
+  name: serving-cert # this name should match the one that appears in kustomizeconfig.yaml
+  namespace: system
+spec:
+  # $(SERVICE_NAME) and $(SERVICE_NAMESPACE) will be substituted by kustomize
+  dnsNames:
+  - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc
+  - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc.cluster.local
+  # for local testing.
+ - localhost + issuerRef: + kind: Issuer + name: selfsigned-issuer + secretName: $(SERVICE_NAME)-cert # this secret will not be prefixed, since it's not managed by kustomize + subject: + organizations: + - k8s-sig-cluster-lifecycle diff --git a/test/extension/config/certmanager/kustomization.yaml b/test/extension/config/certmanager/kustomization.yaml new file mode 100644 index 000000000000..e0182475f0a2 --- /dev/null +++ b/test/extension/config/certmanager/kustomization.yaml @@ -0,0 +1,7 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +- certificate.yaml + +configurations: +- kustomizeconfig.yaml diff --git a/test/extension/config/certmanager/kustomizeconfig.yaml b/test/extension/config/certmanager/kustomizeconfig.yaml new file mode 100644 index 000000000000..28a895a404a9 --- /dev/null +++ b/test/extension/config/certmanager/kustomizeconfig.yaml @@ -0,0 +1,19 @@ +# This configuration is for teaching kustomize how to update name ref and var substitution +nameReference: +- kind: Issuer + group: cert-manager.io + fieldSpecs: + - kind: Certificate + group: cert-manager.io + path: spec/issuerRef/name + +varReference: +- kind: Certificate + group: cert-manager.io + path: spec/commonName +- kind: Certificate + group: cert-manager.io + path: spec/dnsNames +- kind: Certificate + group: cert-manager.io + path: spec/secretName diff --git a/test/extension/config/default/extension.yaml b/test/extension/config/default/extension.yaml new file mode 100644 index 000000000000..7cf0a142fd30 --- /dev/null +++ b/test/extension/config/default/extension.yaml @@ -0,0 +1,27 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-extension + namespace: system +spec: + selector: + matchLabels: + app: test-extension + replicas: 1 + template: + metadata: + labels: + app: test-extension + spec: + containers: + - command: + - /extension + image: controller:latest + name: extension + terminationGracePeriodSeconds: 10 + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane diff --git a/test/extension/config/default/extension_image_patch.yaml b/test/extension/config/default/extension_image_patch.yaml new file mode 100644 index 000000000000..a8924d92cafd --- /dev/null +++ b/test/extension/config/default/extension_image_patch.yaml @@ -0,0 +1,11 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-extension + namespace: system +spec: + template: + spec: + containers: + - image: gcr.io/k8s-staging-cluster-api/test-extension:main + name: extension diff --git a/test/extension/config/default/extension_pull_policy.yaml b/test/extension/config/default/extension_pull_policy.yaml new file mode 100644 index 000000000000..67703cd1e955 --- /dev/null +++ b/test/extension/config/default/extension_pull_policy.yaml @@ -0,0 +1,11 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-extension + namespace: system +spec: + template: + spec: + containers: + - name: extension + imagePullPolicy: Always diff --git a/test/extension/config/default/extension_webhook_patch.yaml b/test/extension/config/default/extension_webhook_patch.yaml new file mode 100644 index 000000000000..997c0730ac68 --- /dev/null +++ b/test/extension/config/default/extension_webhook_patch.yaml @@ -0,0 +1,22 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-extension + namespace: system +spec: + template: + spec: + containers: + - name: extension + ports: + - containerPort: 9443 + name: webhook-server + 
protocol: TCP
+        volumeMounts:
+        - mountPath: /tmp/k8s-webhook-server/serving-certs
+          name: cert
+          readOnly: true
+      volumes:
+      - name: cert
+        secret:
+          secretName: $(SERVICE_NAME)-cert
diff --git a/test/extension/config/default/kustomization.yaml b/test/extension/config/default/kustomization.yaml
new file mode 100644
index 000000000000..9f29cf7a977f
--- /dev/null
+++ b/test/extension/config/default/kustomization.yaml
@@ -0,0 +1,35 @@
+namespace: test-extension-system
+
+commonLabels:
+
+resources:
+- extension.yaml
+- namespace.yaml
+- service.yaml
+
+bases:
+- ../certmanager
+
+patchesStrategicMerge:
+# Provide customizable hook for make targets.
+- extension_image_patch.yaml
+- extension_pull_policy.yaml
+# Enable webhook.
+- extension_webhook_patch.yaml
+
+vars:
+  - name: SERVICE_NAMESPACE # namespace of the service
+    objref:
+      kind: Service
+      version: v1
+      name: webhook-service
+    fieldref:
+      fieldpath: metadata.namespace
+  - name: SERVICE_NAME
+    objref:
+      kind: Service
+      version: v1
+      name: webhook-service
+
+configurations:
+  - kustomizeconfig.yaml
diff --git a/test/extension/config/default/kustomizeconfig.yaml b/test/extension/config/default/kustomizeconfig.yaml
new file mode 100644
index 000000000000..eb191e64d056
--- /dev/null
+++ b/test/extension/config/default/kustomizeconfig.yaml
@@ -0,0 +1,4 @@
+# This configuration is for teaching kustomize how to update name ref and var substitution
+varReference:
+- kind: Deployment
+  path: spec/template/spec/volumes/secret/secretName
diff --git a/test/extension/config/default/namespace.yaml b/test/extension/config/default/namespace.yaml
new file mode 100644
index 000000000000..952a18c2f2c1
--- /dev/null
+++ b/test/extension/config/default/namespace.yaml
@@ -0,0 +1,6 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  labels:
+    app: test-extension
+  name: system
diff --git a/test/extension/config/default/service.yaml b/test/extension/config/default/service.yaml
new file mode 100644
index 000000000000..f3c9e6bc70cd
--- /dev/null
+++ b/test/extension/config/default/service.yaml
@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: webhook-service
+  namespace: system
+spec:
+  ports:
+  - port: 443
+    targetPort: webhook-server
+  selector:
+    app: test-extension
diff --git a/test/extension/handler.go b/test/extension/handler.go
new file mode 100644
index 000000000000..fc691071c58d
--- /dev/null
+++ b/test/extension/handler.go
@@ -0,0 +1,167 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package main + +import ( + "encoding/json" + "io/ioutil" + "net/http" + "reflect" + + "github.com/gorilla/mux" + "github.com/pkg/errors" + + runtimecatalog "sigs.k8s.io/cluster-api/internal/runtime/catalog" +) + +type Handler interface{} + +type GroupVersionHookName struct { + runtimecatalog.GroupVersionHook + Name string +} + +type HandlerBuilder struct { + catalog *runtimecatalog.Catalog + hookToHandler map[GroupVersionHookName]Handler +} + +func NewHandlerBuilder() *HandlerBuilder { + return &HandlerBuilder{ + hookToHandler: map[GroupVersionHookName]Handler{}, + } +} + +func (bld *HandlerBuilder) WithCatalog(c *runtimecatalog.Catalog) *HandlerBuilder { + bld.catalog = c + return bld +} + +func (bld *HandlerBuilder) AddDiscovery(hook runtimecatalog.Hook, h Handler) *HandlerBuilder { + return bld.AddExtension(hook, "", h) +} + +func (bld *HandlerBuilder) AddExtension(hook runtimecatalog.Hook, name string, h Handler) *HandlerBuilder { + gvh, err := bld.catalog.GroupVersionHook(hook) + if err != nil { + panic(errors.Wrapf(err, "hook does not exist in catalog")) + } + gvhn := GroupVersionHookName{ + GroupVersionHook: gvh, + Name: name, + } + + bld.hookToHandler[gvhn] = h + return bld +} + +func (bld *HandlerBuilder) Build() (http.Handler, error) { + if bld.catalog == nil { + + } + + r := mux.NewRouter() + + for g, h := range bld.hookToHandler { + gvhn := g + handler := h + + in, err := bld.catalog.NewRequest(gvhn.GroupVersionHook) + if err != nil { + return nil, err + } + + out, err := bld.catalog.NewResponse(gvhn.GroupVersionHook) + if err != nil { + return nil, err + } + + // TODO: please use catalog.ValidateRequest/Response. + // TODO: add context + if err := validateF(handler, in, out); err != nil { + return nil, err + } + + fWrapper := func(w http.ResponseWriter, r *http.Request) { + + reqBody, err := ioutil.ReadAll(r.Body) + if err != nil { + // TODO: handle error + } + + request, err := bld.catalog.NewRequest(gvhn.GroupVersionHook) + if err != nil { + // TODO: handle error + } + + if err := json.Unmarshal(reqBody, request); err != nil { + // TODO: handle error + } + + response, err := bld.catalog.NewResponse(gvhn.GroupVersionHook) + if err != nil { + // TODO: handle error + } + + // TODO: build new context with correlation ID and pass it to the call + // TODO: context with Cancel to enforce timeout? enforce timeout on caller side? both? 
+			v := reflect.ValueOf(handler)
+			ret := v.Call([]reflect.Value{
+				reflect.ValueOf(request),
+				reflect.ValueOf(response),
+			})
+			if !ret[0].IsNil() {
+				err := ret[0].Interface().(error)
+				http.Error(w, err.Error(), http.StatusInternalServerError)
+				return
+			}
+
+			respBody, err := json.Marshal(response)
+			if err != nil {
+				http.Error(w, errors.Wrap(err, "failed to marshal response").Error(), http.StatusInternalServerError)
+				return
+			}
+
+			w.WriteHeader(http.StatusOK)
+			_, _ = w.Write(respBody)
+		}
+
+		r.HandleFunc(runtimecatalog.GVHToPath(gvhn.GroupVersionHook, gvhn.Name), fWrapper).Methods("POST")
+	}
+
+	return r, nil
+}
+
+// validateF checks that f is a function accepting exactly the given parameters (e.g. func(request, response) error).
+func validateF(f interface{}, params ...interface{}) error {
+	funcType := reflect.TypeOf(f)
+
+	if funcType.NumIn() != len(params) {
+		return errors.New("validateF called with a function and an incorrect number of parameter(s)")
+	}
+
+	for paramIndex, paramValue := range params {
+		expectedType := funcType.In(paramIndex)
+		actualType := reflect.TypeOf(paramValue)
+
+		if actualType != expectedType {
+			return errors.Errorf("validateF called with a mismatched parameter type [parameter #%v: expected %v; got %v]", paramIndex, expectedType, actualType)
+		}
+	}
+
+	// TODO: check return is error
+
+	return nil
+}
diff --git a/test/extension/main.go b/test/extension/main.go
new file mode 100644
index 000000000000..f786bbbbc226
--- /dev/null
+++ b/test/extension/main.go
@@ -0,0 +1,312 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+	"encoding/json"
+	"flag"
+	"fmt"
+	"net/http"
+	"os"
+	"strconv"
+
+	"github.com/pkg/errors"
+	"github.com/spf13/pflag"
+	"gomodules.xyz/jsonpatch/v2"
+	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/serializer"
+	cliflag "k8s.io/component-base/cli/flag"
+	"k8s.io/component-base/logs"
+	"k8s.io/utils/pointer"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/webhook"
+
+	runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
+	patchvariables "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/patches/variables"
+	runtimecatalog "sigs.k8s.io/cluster-api/internal/runtime/catalog"
+	infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1"
+)
+
+var (
+	catalog = runtimecatalog.New()
+	scheme  = runtime.NewScheme()
+	decoder runtime.Decoder
+
+	setupLog = ctrl.Log.WithName("setup")
+
+	webhookPort    int
+	webhookCertDir string
+	logOptions     = logs.NewOptions()
+)
+
+func init() {
+	_ = infrav1.AddToScheme(scheme)
+	decoder = serializer.NewCodecFactory(scheme).UniversalDecoder(infrav1.GroupVersion)
+
+	_ = runtimehooksv1.AddToCatalog(catalog)
+}
+
+// InitFlags initializes the flags.
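+// The defaults below match the manifests under test/extension/config/default: the webhook
+// serves on port 9443 and reads its serving certificate from /tmp/k8s-webhook-server/serving-certs/.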
+func InitFlags(fs *pflag.FlagSet) { + logs.AddFlags(fs, logs.SkipLoggingConfigurationFlags()) + logOptions.AddFlags(fs) + + fs.IntVar(&webhookPort, "webhook-port", 9443, + "Webhook Server port") + + fs.StringVar(&webhookCertDir, "webhook-cert-dir", "/tmp/k8s-webhook-server/serving-certs/", + "Webhook cert dir, only used when webhook-port is specified.") +} + +func main() { + InitFlags(pflag.CommandLine) + pflag.CommandLine.SetNormalizeFunc(cliflag.WordSepNormalizeFunc) + pflag.CommandLine.AddGoFlagSet(flag.CommandLine) + pflag.Parse() + + if err := logOptions.ValidateAndApply(); err != nil { + setupLog.Error(err, "unable to start extension") + os.Exit(1) + } + + ctx := ctrl.SetupSignalHandler() + + // FIXME(sbueringer) think about defaults, cf with CR. + srv := webhook.Server{ + Host: "", + Port: webhookPort, + CertDir: webhookCertDir, + CertName: "tls.crt", + KeyName: "tls.key", + WebhookMux: http.NewServeMux(), + TLSMinVersion: "1.2", + } + + operation1Handler, err := NewHandlerBuilder(). + WithCatalog(catalog). + AddDiscovery(runtimehooksv1.Discovery, doDiscovery). // TODO: this is not strongly typed, but there are type checks when the service starts + AddExtension(runtimehooksv1.GeneratePatches, "generate-patches", generatePatches). + AddExtension(runtimehooksv1.ValidateTopology, "validate-topology", validateTopology). + // TODO: test with more services + Build() + if err != nil { + panic(err) + } + + srv.WebhookMux.Handle("/", operation1Handler) + + setupLog.Info("starting RuntimeExtension") + if err := srv.StartStandalone(ctx, nil); err != nil { + panic(err) + } +} + +// TODO: consider registering extensions with all required data and then auto-generating the discovery func based on that. +// If we want folks to write it manually, make it nicer to do. +func doDiscovery(request *runtimehooksv1.DiscoveryRequest, response *runtimehooksv1.DiscoveryResponse) error { + fmt.Println("Discovery/v1alpha1 called") + + response.Status = runtimehooksv1.ResponseStatusSuccess + response.Handlers = append(response.Handlers, runtimehooksv1.ExtensionHandler{ + Name: "generate-patches", + RequestHook: runtimehooksv1.GroupVersionHook{ + APIVersion: runtimehooksv1.GroupVersion.String(), + Hook: "GeneratePatches", + }, + TimeoutSeconds: pointer.Int32(10), + FailurePolicy: toPtr(runtimehooksv1.FailurePolicyFail), + }) + response.Handlers = append(response.Handlers, runtimehooksv1.ExtensionHandler{ + Name: "validate-topology", + RequestHook: runtimehooksv1.GroupVersionHook{ + APIVersion: runtimehooksv1.GroupVersion.String(), + Hook: "ValidateTopology", + }, + TimeoutSeconds: pointer.Int32(10), + FailurePolicy: toPtr(runtimehooksv1.FailurePolicyFail), + }) + + return nil +} + +func generatePatches(req *runtimehooksv1.GeneratePatchesRequest, resp *runtimehooksv1.GeneratePatchesResponse) error { + // FIXME(sbueringer): try to implement actual patching + validation below, includes + // * parse object + // * similar to inline patching e2e test: modify object based on variables (we probably would need some libs if we do it right) + + globalVariables := toMap(req.Variables) + + for _, requestItem := range req.Items { + templateVariables, err := mergeVariableMaps(globalVariables, toMap(requestItem.Variables)) + if err != nil { + return err + } + + obj, _, err := decoder.Decode(requestItem.Object.Raw, nil, requestItem.Object.Object) + if err != nil { + // Continue, object has a type which hasn't been registered with the scheme. 
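+			// (The decoder only knows the Docker infrastructure types registered in init();
+			// templates from other API groups, e.g. KubeadmControlPlaneTemplate, fail to
+			// decode and are skipped here.)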
+			continue
+		}
+
+		original := obj.DeepCopyObject()
+		var modified runtime.Object
+
+		switch v := obj.(type) {
+		case *infrav1.DockerClusterTemplate:
+			if err := patchDockerClusterTemplate(v, templateVariables); err != nil {
+				return err
+			}
+			modified = v
+		}
+
+		if modified == nil {
+			// No patching was done, let's continue with the next object.
+			continue
+		}
+
+		patch, err := createPatch(original, modified)
+		if err != nil {
+			return err
+		}
+
+		resp.Items = append(resp.Items, runtimehooksv1.GeneratePatchesResponseItem{
+			UID:       requestItem.UID,
+			PatchType: runtimehooksv1.JSONPatchType,
+			Patch:     patch,
+		})
+
+		fmt.Printf("Generated patch (uid: %q): %q\n", requestItem.UID, string(patch))
+	}
+
+	resp.Status = runtimehooksv1.ResponseStatusSuccess
+	fmt.Println("GeneratePatches called")
+	return nil
+}
+
+// createPatch returns a JSON patch computed from the diff between original and modified.
+func createPatch(original, modified runtime.Object) ([]byte, error) {
+	marshalledOriginal, err := json.Marshal(original)
+	if err != nil {
+		return nil, err
+	}
+
+	marshalledModified, err := json.Marshal(modified)
+	if err != nil {
+		return nil, err
+	}
+
+	patch, err := jsonpatch.CreatePatch(marshalledOriginal, marshalledModified)
+	if err != nil {
+		return nil, err
+	}
+
+	patchBytes, err := json.Marshal(patch)
+	if err != nil {
+		return nil, err
+	}
+
+	return patchBytes, nil
+}
+
+// patchDockerClusterTemplate sets the load balancer image repository on the DockerClusterTemplate based on the lbImageRepository variable.
+func patchDockerClusterTemplate(dockerClusterTemplate *infrav1.DockerClusterTemplate, templateVariables map[string]apiextensionsv1.JSON) error {
+	value, err := patchvariables.GetVariableValue(templateVariables, "lbImageRepository")
+	if err != nil {
+		// FIXME(sbueringer): need better error semantics (like variable not found/not set)
+		return err
+	}
+	stringValue, err := strconv.Unquote(string(value.Raw))
+	if err != nil {
+		return err
+	}
+
+	dockerClusterTemplate.Spec.Template.Spec.LoadBalancer.ImageRepository = stringValue
+	return nil
+}
+
+func validateTopology(req *runtimehooksv1.ValidateTopologyRequest, resp *runtimehooksv1.ValidateTopologyResponse) error {
+	fmt.Println("ValidateTopology called")
+	resp.Status = runtimehooksv1.ResponseStatusSuccess
+	return nil
+}
+
+func toPtr(f runtimehooksv1.FailurePolicy) *runtimehooksv1.FailurePolicy {
+	return &f
+}
+
+// FIXME(sbueringer): deduplicate
+
+// toMap converts a list of Variables to a map of JSON (name is the map key).
+func toMap(variables []runtimehooksv1.Variable) map[string]apiextensionsv1.JSON {
+	variablesMap := map[string]apiextensionsv1.JSON{}
+
+	for i := range variables {
+		variablesMap[variables[i].Name] = variables[i].Value
+	}
+	return variablesMap
+}
+
+// mergeVariableMaps merges variables.
+// NOTE: In case a variable exists in multiple maps, the variable from the latter map is preserved.
+// NOTE: The builtin variable object is merged instead of simply overwritten.
+func mergeVariableMaps(variableMaps ...map[string]apiextensionsv1.JSON) (map[string]apiextensionsv1.JSON, error) {
+	res := make(map[string]apiextensionsv1.JSON)
+
+	for _, variableMap := range variableMaps {
+		for variableName, variableValue := range variableMap {
+			// If the variable already exists and is the builtin variable, merge it.
+			if _, ok := res[variableName]; ok && variableName == patchvariables.BuiltinsName {
+				mergedV, err := mergeBuiltinVariables(res[variableName], variableValue)
+				if err != nil {
+					return nil, errors.Wrapf(err, "failed to merge builtin variables")
+				}
+				res[variableName] = *mergedV
+				continue
+			}
+			res[variableName] = variableValue
+		}
+	}
+
+	return res, nil
+}
+
+// mergeBuiltinVariables merges builtin variable objects.
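+// For example (schematically): merging {"cluster":{"name":"c1"}} with {"controlPlane":{"version":"v1.24.0"}}
+// yields {"cluster":{"name":"c1"},"controlPlane":{"version":"v1.24.0"}}.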
+// NOTE: In case a variable exists in multiple builtin variables, the variable from the latter map is preserved. +func mergeBuiltinVariables(variableList ...apiextensionsv1.JSON) (*apiextensionsv1.JSON, error) { + builtins := &patchvariables.Builtins{} + + // Unmarshal all variables into builtins. + // NOTE: This accumulates the fields on the builtins. + // Fields will be overwritten by later Unmarshals if fields are + // set on multiple variables. + for _, variable := range variableList { + if err := json.Unmarshal(variable.Raw, builtins); err != nil { + return nil, errors.Wrapf(err, "failed to unmarshal builtin variable") + } + } + + // Marshal builtins to JSON. + builtinVariableJSON, err := json.Marshal(builtins) + if err != nil { + return nil, errors.Wrapf(err, "failed to marshal builtin variable") + } + + return &apiextensionsv1.JSON{ + Raw: builtinVariableJSON, + }, nil +} diff --git a/test/framework/convenience.go b/test/framework/convenience.go index 9632dfc158ce..6a883e779b2f 100644 --- a/test/framework/convenience.go +++ b/test/framework/convenience.go @@ -32,6 +32,7 @@ import ( controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" addonsv1 "sigs.k8s.io/cluster-api/exp/addons/api/v1beta1" expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1" ) // TryAddDefaultSchemes tries to add the following schemes: @@ -68,6 +69,9 @@ func TryAddDefaultSchemes(scheme *runtime.Scheme) { _ = apiextensionsv1beta.AddToScheme(scheme) _ = apiextensionsv1.AddToScheme(scheme) + // Add RuntimeSDK to the scheme. + _ = runtimev1.AddToScheme(scheme) + // Add rbac to the scheme. _ = rbacv1.AddToScheme(scheme) } diff --git a/test/go.mod b/test/go.mod index 52673760f28a..74d9e6dc6c13 100644 --- a/test/go.mod +++ b/test/go.mod @@ -10,11 +10,13 @@ require ( github.com/docker/go-connections v0.4.0 github.com/flatcar-linux/ignition v0.36.1 github.com/go-logr/logr v1.2.0 + github.com/gorilla/mux v1.8.0 github.com/onsi/ginkgo v1.16.5 github.com/onsi/gomega v1.18.1 github.com/pkg/errors v0.9.1 github.com/spf13/pflag v1.0.5 github.com/vincent-petithory/dataurl v1.0.0 + gomodules.xyz/jsonpatch/v2 v2.2.0 k8s.io/api v0.24.0 k8s.io/apiextensions-apiserver v0.24.0 k8s.io/apimachinery v0.24.0 @@ -115,7 +117,6 @@ require ( golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect golang.org/x/text v0.3.7 // indirect golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect - gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368 // indirect google.golang.org/protobuf v1.27.1 // indirect diff --git a/test/go.sum b/test/go.sum index 522c45e9da84..2fa6c775486d 100644 --- a/test/go.sum +++ b/test/go.sum @@ -358,6 +358,7 @@ github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0 github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod 
h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
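---
For reference, the ExtensionConfig created by extensionConfig(namespace) in
cluster_upgrade_runtimesdk_test.go corresponds roughly to the following manifest.
This is an illustrative sketch only: the group/version is assumed to be
runtime.cluster.x-k8s.io/v1alpha1, and the namespaceSelector value depends on the
namespace generated for each test run.

apiVersion: runtime.cluster.x-k8s.io/v1alpha1
kind: ExtensionConfig
metadata:
  name: test-extension-config
  annotations:
    cert-manager.io/inject-ca-from-secret: test-extension-system/webhook-service-cert
spec:
  clientConfig:
    service:
      name: webhook-service
      namespace: test-extension-system
  namespaceSelector:
    matchLabels:
      kubernetes.io/metadata.name: <spec-namespace>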