RuntimeSDK: Add Test Extension and initial e2e test
sbueringer committed Jun 13, 2022
1 parent 88dc60e commit 490f972
Showing 26 changed files with 994 additions and 1 deletion.
3 changes: 3 additions & 0 deletions .gitignore
@@ -15,6 +15,9 @@ test/e2e/data/infrastructure-docker/v1alpha3/cluster-template*.yaml
test/e2e/data/infrastructure-docker/v1alpha4/cluster-template*.yaml
test/e2e/data/infrastructure-docker/v1beta1/cluster-template*.yaml

# E2e test extension deployment
test/e2e/data/test-extension/deployment.yaml

# Output of the go coverage tool, specifically when used with LiteIDE
*.out

10 changes: 10 additions & 0 deletions Makefile
@@ -162,6 +162,10 @@ CLUSTERCTL_MANIFEST_DIR := cmd/clusterctl/config
CLUSTERCTL_IMAGE_NAME ?= clusterctl
CLUSTERCTL_IMG ?= $(REGISTRY)/$(CLUSTERCTL_IMAGE_NAME)

# test extension
TEST_EXTENSION_IMAGE_NAME ?= test-extension
TEST_EXTENSION_IMG ?= $(REGISTRY)/$(TEST_EXTENSION_IMAGE_NAME)

# It is set by Prow GIT_TAG, a git-based tag of the form vYYYYMMDD-hash, e.g., v20210120-v0.3.10-308-gc61521971

TAG ?= dev
@@ -554,6 +558,12 @@ docker-build-clusterctl: ## Build the docker image for clusterctl with output bi
.PHONY: docker-capd-build-all
docker-capd-build-all: $(addprefix docker-capd-build-,$(ALL_ARCH)) ## Build capd docker images for all architectures

.PHONY: docker-build-test-extension
docker-build-test-extension: ## Build the docker image for the test extension
DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ARCH=$(ARCH) --build-arg ldflags="$(LDFLAGS)" . -t $(TEST_EXTENSION_IMG)-$(ARCH):$(TAG) --file ./test/extension/Dockerfile
$(MAKE) set-manifest-image MANIFEST_IMG=$(TEST_EXTENSION_IMG)-$(ARCH) MANIFEST_TAG=$(TAG) TARGET_RESOURCE="./test/extension/config/default/extension_image_patch.yaml"
$(MAKE) set-manifest-pull-policy TARGET_RESOURCE="./test/extension/config/default/extension_pull_policy.yaml"
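# Note (illustrative): set-manifest-image/set-manifest-pull-policy point the test extension's
# kustomize patches at the image that was just built, so that image is what gets deployed.
# Example invocation with an explicit registry and tag (REGISTRY, ARCH and TAG otherwise
# default to the values defined in this Makefile):
#   make docker-build-test-extension REGISTRY=gcr.io/my-project TAG=dev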

.PHONY: e2e-framework
e2e-framework: ## Builds the CAPI e2e framework
cd $(E2E_FRAMEWORK_DIR); go build ./...
8 changes: 8 additions & 0 deletions scripts/ci-e2e-lib.sh
@@ -39,6 +39,14 @@ capi:buildDockerImages () {
else
echo "+ CAPD images already present in the system, skipping make"
fi

## Build test extension images, if missing
if [[ "$(docker images -q "$REGISTRY/test-extension-$ARCH:$TAG" 2> /dev/null)" == "" ]]; then
echo "+ Building test-extension image"
make docker-build-test-extension
else
echo "+ test-extension image already present in the system, skipping make"
fi
}

# k8s::prepareKindestImages checks all the e2e test variables representing a Kubernetes version,
8 changes: 7 additions & 1 deletion test/e2e/Makefile
@@ -95,11 +95,17 @@ cluster-templates-v1beta1: $(KUSTOMIZE) ## Generate cluster templates for v1beta
$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1beta1/cluster-template-node-drain --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1beta1/cluster-template-node-drain.yaml
$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1beta1/cluster-template-upgrades --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1beta1/cluster-template-upgrades.yaml
$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1beta1/cluster-template-upgrades-cgroupfs --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1beta1/cluster-template-upgrades-cgroupfs.yaml
$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1beta1/cluster-template-upgrades-runtimesdk --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1beta1/cluster-template-upgrades-runtimesdk.yaml
$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1beta1/cluster-template-upgrades-runtimesdk-cgroupfs --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1beta1/cluster-template-upgrades-runtimesdk-cgroupfs.yaml
$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1beta1/cluster-template-kcp-scale-in --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1beta1/cluster-template-kcp-scale-in.yaml
$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1beta1/cluster-template-ipv6 --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1beta1/cluster-template-ipv6.yaml
$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1beta1/cluster-template-topology --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1beta1/cluster-template-topology.yaml
$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1beta1/cluster-template-ignition --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/v1beta1/cluster-template-ignition.yaml

test-extension-deployment: $(KUSTOMIZE) ## Generate deployment for test extension
mkdir -p $(REPO_ROOT)/test/e2e/data/test-extension
$(KUSTOMIZE) build $(REPO_ROOT)/test/extension/config/default > $(REPO_ROOT)/test/e2e/data/test-extension/deployment.yaml
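# The generated manifest is gitignored (see the .gitignore change above) and contains a
# ${SERVICE_NAMESPACE} placeholder; the k8s-upgrade-with-runtimesdk spec replaces it with
# the per-test namespace at runtime before applying the manifest to the bootstrap cluster.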

## --------------------------------------
## Testing
## --------------------------------------
@@ -119,7 +125,7 @@ _SKIP_ARGS := $(foreach arg,$(strip $(GINKGO_SKIP)),-skip="$(arg)")
endif

.PHONY: run
run: $(GINKGO) cluster-templates ## Run the end-to-end tests
run: $(GINKGO) cluster-templates test-extension-deployment ## Run the end-to-end tests
$(GINKGO) -v -trace -tags=e2e -focus="$(GINKGO_FOCUS)" $(_SKIP_ARGS) -nodes=$(GINKGO_NODES) --noColor=$(GINKGO_NOCOLOR) $(GINKGO_ARGS) . -- \
-e2e.artifacts-folder="$(ARTIFACTS)" \
-e2e.config="$(E2E_CONF_FILE)" \
2 changes: 2 additions & 0 deletions test/e2e/cluster_upgrade.go
@@ -45,6 +45,7 @@ type ClusterUpgradeConformanceSpecInput struct {
// ControlPlaneMachineCount is used in `config cluster` to configure the count of the control plane machines used in the test.
// Default is 1.
ControlPlaneMachineCount *int64

// WorkerMachineCount is used in `config cluster` to configure the count of the worker machines used in the test.
// NOTE: If the WORKER_MACHINE_COUNT var is used multiple times in the cluster template, the absolute count of
// worker machines is a multiple of WorkerMachineCount.
@@ -66,6 +67,7 @@ func ClusterUpgradeConformanceSpec(ctx context.Context, inputGetter func() Clust
kubetestConfigurationVariable = "KUBETEST_CONFIGURATION"
specName = "k8s-upgrade-and-conformance"
)

var (
input ClusterUpgradeConformanceSpecInput
namespace *corev1.Namespace
236 changes: 236 additions & 0 deletions test/e2e/cluster_upgrade_runtimesdk.go
@@ -0,0 +1,236 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
"context"
"fmt"
"os"
"path/filepath"
"strings"
"time"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/pointer"

runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1"
"sigs.k8s.io/cluster-api/test/framework"
"sigs.k8s.io/cluster-api/test/framework/clusterctl"
"sigs.k8s.io/cluster-api/util"
)

// clusterUpgradeWithRuntimeSDKSpecInput is the input for clusterUpgradeWithRuntimeSDKSpec.
type clusterUpgradeWithRuntimeSDKSpecInput struct {
E2EConfig *clusterctl.E2EConfig
ClusterctlConfigPath string
BootstrapClusterProxy framework.ClusterProxy
ArtifactFolder string
SkipCleanup bool

// ControlPlaneMachineCount is used in `config cluster` to configure the count of the control plane machines used in the test.
// Default is 1.
ControlPlaneMachineCount *int64

// WorkerMachineCount is used in `config cluster` to configure the count of the worker machines used in the test.
// NOTE: If the WORKER_MACHINE_COUNT var is used multiple times in the cluster template, the absolute count of
// worker machines is a multiple of WorkerMachineCount.
// Default is 2.
WorkerMachineCount *int64

// Flavor to use when creating the cluster for testing, "upgrades" is used if not specified.
Flavor *string
}

// clusterUpgradeWithRuntimeSDKSpec implements a spec that upgrades a cluster with the Runtime SDK test extension registered.
// Upgrading a cluster refers to upgrading the control-plane and worker nodes (managed by MD and machine pools).
// NOTE: This test only works with a KubeadmControlPlane.
// NOTE: This test works with Clusters with and without ClusterClass.
// When using ClusterClass the ClusterClass must have the variables "etcdImageTag" and "coreDNSImageTag" of type string.
// Those variables should have corresponding patches which set the etcd and CoreDNS tags in KCP.
func clusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() clusterUpgradeWithRuntimeSDKSpecInput) {
const (
testExtensionPathVariable = "TEST_EXTENSION"
specName = "k8s-upgrade-with-runtimesdk"
)

var (
input clusterUpgradeWithRuntimeSDKSpecInput
namespace *corev1.Namespace
ext *runtimev1.ExtensionConfig
cancelWatches context.CancelFunc

controlPlaneMachineCount int64
workerMachineCount int64

clusterResources *clusterctl.ApplyClusterTemplateAndWaitResult
testExtensionPath string
)

BeforeEach(func() {
Expect(ctx).NotTo(BeNil(), "ctx is required for %s spec", specName)
input = inputGetter()
Expect(input.E2EConfig).ToNot(BeNil(), "Invalid argument. input.E2EConfig can't be nil when calling %s spec", specName)
Expect(input.ClusterctlConfigPath).To(BeAnExistingFile(), "Invalid argument. input.ClusterctlConfigPath must be an existing file when calling %s spec", specName)
Expect(input.BootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. input.BootstrapClusterProxy can't be nil when calling %s spec", specName)
Expect(os.MkdirAll(input.ArtifactFolder, 0750)).To(Succeed(), "Invalid argument. input.ArtifactFolder can't be created for %s spec", specName)

Expect(input.E2EConfig.Variables).To(HaveKey(KubernetesVersionUpgradeFrom))
Expect(input.E2EConfig.Variables).To(HaveKey(KubernetesVersionUpgradeTo))
Expect(input.E2EConfig.Variables).To(HaveKey(EtcdVersionUpgradeTo))
Expect(input.E2EConfig.Variables).To(HaveKey(CoreDNSVersionUpgradeTo))

testExtensionPath = input.E2EConfig.GetVariable(testExtensionPathVariable)
Expect(testExtensionPath).To(BeAnExistingFile(), "The %s variable should resolve to an existing file", testExtensionPathVariable)

if input.ControlPlaneMachineCount == nil {
controlPlaneMachineCount = 1
} else {
controlPlaneMachineCount = *input.ControlPlaneMachineCount
}

if input.WorkerMachineCount == nil {
workerMachineCount = 2
} else {
workerMachineCount = *input.WorkerMachineCount
}

// Set up a Namespace to host objects for this spec and create a watcher for the Namespace events.
namespace, cancelWatches = setupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder)
clusterResources = new(clusterctl.ApplyClusterTemplateAndWaitResult)
})

It("Should create and upgrade a workload cluster", func() {
By("Deploy Test Extension")
testExtensionDeploymentTemplate, err := os.ReadFile(testExtensionPath) //nolint:gosec
Expect(err).ToNot(HaveOccurred(), "Failed to read the extension config deployment manifest file")

// Set the SERVICE_NAMESPACE, which is used in the cert-manager Certificate CR.
// We have to dynamically set the namespace here, because it depends on the test run and thus
// cannot be set when rendering the test extension YAML with kustomize.
testExtensionDeployment := strings.ReplaceAll(string(testExtensionDeploymentTemplate), "${SERVICE_NAMESPACE}", namespace.Name)
Expect(testExtensionDeployment).ToNot(BeEmpty(), "Test Extension deployment manifest file should not be empty")

Expect(input.BootstrapClusterProxy.Apply(ctx, []byte(testExtensionDeployment), "--namespace", namespace.Name)).To(Succeed())

By("Deploy Test Extension ExtensionConfig")
ext = extensionConfig(specName, namespace)
err = input.BootstrapClusterProxy.GetClient().Create(ctx, ext)
Expect(err).ToNot(HaveOccurred(), "Failed to create the extension config")

By("Creating a workload cluster")

clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
ClusterProxy: input.BootstrapClusterProxy,
ConfigCluster: clusterctl.ConfigClusterInput{
LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()),
ClusterctlConfigPath: input.ClusterctlConfigPath,
KubeconfigPath: input.BootstrapClusterProxy.GetKubeconfigPath(),
InfrastructureProvider: clusterctl.DefaultInfrastructureProvider,
Flavor: pointer.StringDeref(input.Flavor, "upgrades"),
Namespace: namespace.Name,
ClusterName: fmt.Sprintf("%s-%s", specName, util.RandomString(6)),
KubernetesVersion: input.E2EConfig.GetVariable(KubernetesVersionUpgradeFrom),
ControlPlaneMachineCount: pointer.Int64Ptr(controlPlaneMachineCount),
WorkerMachineCount: pointer.Int64Ptr(workerMachineCount),
},
WaitForClusterIntervals: input.E2EConfig.GetIntervals(specName, "wait-cluster"),
WaitForControlPlaneIntervals: input.E2EConfig.GetIntervals(specName, "wait-control-plane"),
WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"),
WaitForMachinePools: input.E2EConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
}, clusterResources)

// Upgrade the Cluster topology to run through an entire cluster lifecycle to test the lifecycle hooks.
By("Upgrading the Cluster topology")
framework.UpgradeClusterTopologyAndWaitForUpgrade(ctx, framework.UpgradeClusterTopologyAndWaitForUpgradeInput{
ClusterProxy: input.BootstrapClusterProxy,
Cluster: clusterResources.Cluster,
ControlPlane: clusterResources.ControlPlane,
EtcdImageTag: input.E2EConfig.GetVariable(EtcdVersionUpgradeTo),
DNSImageTag: input.E2EConfig.GetVariable(CoreDNSVersionUpgradeTo),
MachineDeployments: clusterResources.MachineDeployments,
KubernetesUpgradeVersion: input.E2EConfig.GetVariable(KubernetesVersionUpgradeTo),
WaitForMachinesToBeUpgraded: input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"),
WaitForKubeProxyUpgrade: input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"),
WaitForDNSUpgrade: input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"),
WaitForEtcdUpgrade: input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"),
})

// Only attempt to upgrade MachinePools if they were provided in the template.
if len(clusterResources.MachinePools) > 0 && workerMachineCount > 0 {
By("Upgrading the machinepool instances")
framework.UpgradeMachinePoolAndWait(ctx, framework.UpgradeMachinePoolAndWaitInput{
ClusterProxy: input.BootstrapClusterProxy,
Cluster: clusterResources.Cluster,
UpgradeVersion: input.E2EConfig.GetVariable(KubernetesVersionUpgradeTo),
WaitForMachinePoolToBeUpgraded: input.E2EConfig.GetIntervals(specName, "wait-machine-pool-upgrade"),
MachinePools: clusterResources.MachinePools,
})
}

By("Waiting until nodes are ready")
workloadProxy := input.BootstrapClusterProxy.GetWorkloadCluster(ctx, namespace.Name, clusterResources.Cluster.Name)
workloadClient := workloadProxy.GetClient()
framework.WaitForNodesReady(ctx, framework.WaitForNodesReadyInput{
Lister: workloadClient,
KubernetesVersion: input.E2EConfig.GetVariable(KubernetesVersionUpgradeTo),
Count: int(clusterResources.ExpectedTotalNodes()),
WaitForNodesReady: input.E2EConfig.GetIntervals(specName, "wait-nodes-ready"),
})

By("PASSED!")
})

AfterEach(func() {
// Dumps all the resources in the spec Namespace, then cleans up the Cluster object and the spec Namespace itself.
dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)

Eventually(func() error {
return input.BootstrapClusterProxy.GetClient().Delete(ctx, ext)
}, 10*time.Second, 1*time.Second).Should(Succeed())
})
}

// extensionConfig generates an ExtensionConfig.
// We make sure this cluster-wide object does not conflict with others by using a randomly generated
// name and a NamespaceSelector selecting on the namespace of the current test.
// Thus, this object is "namespaced" to the current test even though it's a cluster-wide object.
func extensionConfig(specName string, namespace *corev1.Namespace) *runtimev1.ExtensionConfig {
return &runtimev1.ExtensionConfig{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-%s", specName, util.RandomString(6)),
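// The annotation below asks for the CA bundle of the Test Extension's serving certificate
// (the webhook-service-cert Secret in the test namespace) to be injected into this
// ExtensionConfig, so that Runtime SDK calls to the extension can be made over TLS.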
Annotations: map[string]string{
"cert-manager.io/inject-ca-from-secret": fmt.Sprintf("%s/webhook-service-cert", namespace.Name),
},
},
Spec: runtimev1.ExtensionConfigSpec{
ClientConfig: runtimev1.ClientConfig{
Service: &runtimev1.ServiceReference{
Name: "webhook-service",
Namespace: namespace.Name,
},
},
NamespaceSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"kubernetes.io/metadata.name:": namespace.Name,
},
},
},
}
}
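The ExtensionConfig above only registers the Test Extension with the Runtime SDK; the hook handlers themselves live in the new test/extension package, which is part of this commit but not shown in this excerpt. As a rough, minimal sketch of what a lifecycle-hook handler for such an extension can look like (assuming the Runtime Hooks API under exp/runtime/hooks/api/v1alpha1; the package name, handler name and logging below are illustrative, not the commit's actual code):

package extension // illustrative sketch only

import (
	"context"

	"k8s.io/klog/v2"

	runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
)

// doBeforeClusterUpgrade sketches a BeforeClusterUpgrade handler: it logs the requested
// upgrade and returns Success so the topology upgrade is allowed to proceed.
func doBeforeClusterUpgrade(_ context.Context, request *runtimehooksv1.BeforeClusterUpgradeRequest, response *runtimehooksv1.BeforeClusterUpgradeResponse) {
	klog.Infof("BeforeClusterUpgrade called for Cluster %s: %s -> %s",
		request.Cluster.GetName(), request.FromKubernetesVersion, request.ToKubernetesVersion)
	response.Status = runtimehooksv1.ResponseStatusSuccess
}

The spec above exercises this path end to end: it registers the extension and then drives a full topology upgrade so that registered lifecycle hooks get called (see the "Upgrading the Cluster topology" step).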
62 changes: 62 additions & 0 deletions test/e2e/cluster_upgrade_runtimesdk_test.go
@@ -0,0 +1,62 @@
//go:build e2e
// +build e2e

/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
"github.com/blang/semver"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/utils/pointer"
)

var _ = Describe("When upgrading a workload cluster using ClusterClass with RuntimeSDK [PR-Informing] [ClusterClass]", func() {
clusterUpgradeWithRuntimeSDKSpec(ctx, func() clusterUpgradeWithRuntimeSDKSpecInput {
// "upgrades" is the same as the "topology" flavor but with an additional MachinePool.
flavor := pointer.String("upgrades-runtimesdk")
// For KubernetesVersionUpgradeFrom < v1.24 we have to use upgrades-cgroupfs flavor.
// This is because kind and CAPD only support:
// * cgroupDriver cgroupfs for Kubernetes < v1.24
// * cgroupDriver systemd for Kubernetes >= v1.24.
// Notes:
// * We always use a ClusterClass-based cluster-template for the upgrade test
// * The ClusterClass will automatically adjust the cgroupDriver for KCP and MDs.
// * We have to handle the MachinePool ourselves
// * The upgrades-cgroupfs flavor uses an MP which is pinned to cgroupfs
// * During the upgrade UpgradeMachinePoolAndWait automatically drops the cgroupfs pinning
// when the target version is >= v1.24.
// TODO: We can remove this after the v1.25 release as we then only test the v1.24=>v1.25 upgrade.
version, err := semver.ParseTolerant(e2eConfig.GetVariable(KubernetesVersionUpgradeFrom))
Expect(err).ToNot(HaveOccurred(), "Invalid argument, KUBERNETES_VERSION_UPGRADE_FROM is not a valid version")
if version.LT(semver.MustParse("1.24.0")) {
// "upgrades-cgroupfs" is the same as the "topology" flavor but with an additional MachinePool
// with pinned cgroupDriver to cgroupfs.
flavor = pointer.String("upgrades-runtimesdk-cgroupfs")
}

return clusterUpgradeWithRuntimeSDKSpecInput{
E2EConfig: e2eConfig,
ClusterctlConfigPath: clusterctlConfigPath,
BootstrapClusterProxy: bootstrapClusterProxy,
ArtifactFolder: artifactFolder,
SkipCleanup: skipCleanup,
Flavor: flavor,
}
})
})