From 276075c6ea4e70496fc549ba1574adcc0b675453 Mon Sep 17 00:00:00 2001 From: jan-est Date: Tue, 16 Mar 2021 14:43:43 +0200 Subject: [PATCH] Add E2E for scale in rollout --- test/e2e/Makefile | 2 +- test/e2e/config/docker.yaml | 1 + .../v1alpha4/bases/cluster-with-kcp.yaml | 2 +- .../cluster-with-kcp.yaml | 9 +++++ .../kustomization.yaml | 7 ++++ test/e2e/kcp_upgrade.go | 39 ++++++++++++++++++- 6 files changed, 56 insertions(+), 4 deletions(-) create mode 100644 test/e2e/data/infrastructure-docker/v1alpha4/cluster-template-kcp-scale-in/cluster-with-kcp.yaml create mode 100644 test/e2e/data/infrastructure-docker/v1alpha4/cluster-template-kcp-scale-in/kustomization.yaml diff --git a/test/e2e/Makefile b/test/e2e/Makefile index 111d00b4068b..654e3bc0ff52 100644 --- a/test/e2e/Makefile +++ b/test/e2e/Makefile @@ -67,7 +67,7 @@ cluster-templates-v1alpha4: $(KUSTOMIZE) ## Generate cluster templates for v1alp $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1alpha4/cluster-template-kcp-adoption/step2 --load_restrictor none >> $(DOCKER_TEMPLATES)/v1alpha4/cluster-template-kcp-adoption.yaml $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1alpha4/cluster-template-machine-pool --load_restrictor none > $(DOCKER_TEMPLATES)/v1alpha4/cluster-template-machine-pool.yaml $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1alpha4/cluster-template-node-drain --load_restrictor none > $(DOCKER_TEMPLATES)/v1alpha4/cluster-template-node-drain.yaml - + $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/v1alpha4/cluster-template-kcp-scale-in --load_restrictor none > $(DOCKER_TEMPLATES)/v1alpha4/cluster-template-kcp-scale-in.yaml ## -------------------------------------- ## Testing ## -------------------------------------- diff --git a/test/e2e/config/docker.yaml b/test/e2e/config/docker.yaml index a1f4ddc4fa09..58075792c344 100644 --- a/test/e2e/config/docker.yaml +++ b/test/e2e/config/docker.yaml @@ -79,6 +79,7 @@ providers: - sourcePath: "../data/infrastructure-docker/v1alpha4/cluster-template-kcp-adoption.yaml" - sourcePath: 
"../data/infrastructure-docker/v1alpha4/cluster-template-machine-pool.yaml" - sourcePath: "../data/infrastructure-docker/v1alpha4/cluster-template-node-drain.yaml" + - sourcePath: "../data/infrastructure-docker/v1alpha4/cluster-template-kcp-scale-in.yaml" - sourcePath: "../data/shared/v1alpha4/metadata.yaml" variables: diff --git a/test/e2e/data/infrastructure-docker/v1alpha4/bases/cluster-with-kcp.yaml b/test/e2e/data/infrastructure-docker/v1alpha4/bases/cluster-with-kcp.yaml index a0082e45decc..e883dadbbdcf 100644 --- a/test/e2e/data/infrastructure-docker/v1alpha4/bases/cluster-with-kcp.yaml +++ b/test/e2e/data/infrastructure-docker/v1alpha4/bases/cluster-with-kcp.yaml @@ -71,4 +71,4 @@ spec: nodeRegistration: criSocket: /var/run/containerd/containerd.sock kubeletExtraArgs: {eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%'} - version: "${KUBERNETES_VERSION}" + version: "${KUBERNETES_VERSION}" \ No newline at end of file diff --git a/test/e2e/data/infrastructure-docker/v1alpha4/cluster-template-kcp-scale-in/cluster-with-kcp.yaml b/test/e2e/data/infrastructure-docker/v1alpha4/cluster-template-kcp-scale-in/cluster-with-kcp.yaml new file mode 100644 index 000000000000..f1d6e65a4aa5 --- /dev/null +++ b/test/e2e/data/infrastructure-docker/v1alpha4/cluster-template-kcp-scale-in/cluster-with-kcp.yaml @@ -0,0 +1,9 @@ +# KubeadmControlPlane referenced by the Cluster object with a scale-in rollout strategy (maxSurge: 0) +kind: KubeadmControlPlane +apiVersion: controlplane.cluster.x-k8s.io/v1alpha4 +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + rolloutStrategy: + rollingUpdate: + maxSurge: 0 \ No newline at end of file diff --git a/test/e2e/data/infrastructure-docker/v1alpha4/cluster-template-kcp-scale-in/kustomization.yaml b/test/e2e/data/infrastructure-docker/v1alpha4/cluster-template-kcp-scale-in/kustomization.yaml new file mode 100644 index 000000000000..50546094a8a1 --- /dev/null +++ 
b/test/e2e/data/infrastructure-docker/v1alpha4/cluster-template-kcp-scale-in/kustomization.yaml @@ -0,0 +1,7 @@ +bases: +- ../bases/crs.yaml +- ../bases/md.yaml +- ../bases/cluster-with-kcp.yaml + +patchesStrategicMerge: +- ./cluster-with-kcp.yaml \ No newline at end of file diff --git a/test/e2e/kcp_upgrade.go b/test/e2e/kcp_upgrade.go index bee323481817..b0d746fbafbf 100644 --- a/test/e2e/kcp_upgrade.go +++ b/test/e2e/kcp_upgrade.go @@ -108,9 +108,7 @@ func KCPUpgradeSpec(ctx context.Context, inputGetter func() KCPUpgradeSpecInput) }) It("Should successfully upgrade Kubernetes, DNS, kube-proxy, and etcd in a HA cluster", func() { - By("Creating a workload cluster") - clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ ClusterProxy: input.BootstrapClusterProxy, ConfigCluster: clusterctl.ConfigClusterInput{ @@ -146,6 +144,43 @@ func KCPUpgradeSpec(ctx context.Context, inputGetter func() KCPUpgradeSpecInput) By("PASSED!") }) + It("Should successfully upgrade Kubernetes, DNS, kube-proxy, and etcd in a HA cluster using scale in rollout", func() { + By("Creating a workload cluster") + clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ + ClusterProxy: input.BootstrapClusterProxy, + ConfigCluster: clusterctl.ConfigClusterInput{ + LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()), + ClusterctlConfigPath: input.ClusterctlConfigPath, + KubeconfigPath: input.BootstrapClusterProxy.GetKubeconfigPath(), + InfrastructureProvider: clusterctl.DefaultInfrastructureProvider, + Flavor: "kcp-scale-in", + Namespace: namespace.Name, + ClusterName: fmt.Sprintf("%s-%s", specName, util.RandomString(6)), + KubernetesVersion: input.E2EConfig.GetVariable(KubernetesVersionUpgradeFrom), + ControlPlaneMachineCount: pointer.Int64Ptr(3), + WorkerMachineCount: pointer.Int64Ptr(1), + }, + WaitForClusterIntervals: input.E2EConfig.GetIntervals(specName, "wait-cluster"), + 
WaitForControlPlaneIntervals: input.E2EConfig.GetIntervals(specName, "wait-control-plane"), + WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"), + }, clusterResources) + + By("Upgrading Kubernetes, DNS, kube-proxy, and etcd versions") + framework.UpgradeControlPlaneAndWaitForUpgrade(ctx, framework.UpgradeControlPlaneAndWaitForUpgradeInput{ + ClusterProxy: input.BootstrapClusterProxy, + Cluster: clusterResources.Cluster, + ControlPlane: clusterResources.ControlPlane, + EtcdImageTag: input.E2EConfig.GetVariable(EtcdVersionUpgradeTo), + DNSImageTag: input.E2EConfig.GetVariable(CoreDNSVersionUpgradeTo), + KubernetesUpgradeVersion: input.E2EConfig.GetVariable(KubernetesVersionUpgradeTo), + WaitForMachinesToBeUpgraded: input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"), + WaitForDNSUpgrade: input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"), + WaitForEtcdUpgrade: input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"), + }) + + By("PASSED!") + }) + AfterEach(func() { // Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself. dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)