Skip to content

Commit

Permalink
clusterctl-upgrade-e2e-test
Browse files Browse the repository at this point in the history
  • Loading branch information
fabriziopandini committed Nov 19, 2020
1 parent e6b0039 commit 66d342e
Show file tree
Hide file tree
Showing 8 changed files with 445 additions and 20 deletions.
2 changes: 1 addition & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ out
*.test

# E2E test templates
test/e2e/data/infrastructure-docker/*-template
test/e2e/data/infrastructure-docker/cluster-template*.yaml

# Output of the go coverage tool, specifically when used with LiteIDE
*.out
Expand Down
228 changes: 228 additions & 0 deletions test/e2e/clusterctl_upgrade.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,228 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
"context"
"fmt"
"os"
"path/filepath"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
"k8s.io/utils/pointer"
"sigs.k8s.io/cluster-api/cmd/clusterctl/client/config"
"sigs.k8s.io/cluster-api/test/framework"
"sigs.k8s.io/cluster-api/test/framework/bootstrap"
"sigs.k8s.io/cluster-api/test/framework/clusterctl"
"sigs.k8s.io/cluster-api/util"
)

// ClusterctlUpgradeSpecInput is the input for ClusterctlUpgradeSpec.
type ClusterctlUpgradeSpecInput struct {
	// E2EConfig is the e2e test configuration (providers, images, variables, intervals).
	E2EConfig *clusterctl.E2EConfig
	// ClusterctlConfigPath is the path to an existing clusterctl config file.
	ClusterctlConfigPath string
	// BootstrapClusterProxy is the proxy to the bootstrap cluster; the spec creates a
	// workload cluster on it and turns that into a new management cluster.
	BootstrapClusterProxy framework.ClusterProxy
	// ArtifactFolder is the folder where logs and resource dumps are written.
	ArtifactFolder string
	// SkipCleanup, if true, skips deletion of the test cluster and its namespace.
	SkipCleanup bool
}

const (
	// ClusterctlUpgradeContract is the name of the e2e config variable holding the
	// API contract to upgrade to (e.g. "v1alpha3").
	ClusterctlUpgradeContract = "CLUSTERCTL_CONTRACT_UPGRADE_TO"
)

// ClusterctlUpgradeSpec implements a test that verifies clusterctl upgrade of a management cluster.
//
// The spec goes through the following steps:
//  1. Create a workload cluster on the bootstrap cluster.
//  2. Turn that workload cluster into a management cluster by initializing it with
//     the oldest versions of the providers defined in the e2e config.
//  3. Create a test workload cluster on the new management cluster, proving the
//     old providers work.
//  4. Run clusterctl upgrade apply, then scale a MachineDeployment of the test
//     cluster to prove the upgraded management cluster still works.
func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpgradeSpecInput) {
	var (
		specName = "clusterctl-upgrade"
		input    ClusterctlUpgradeSpecInput

		// Resources for the workload cluster that is turned into a management cluster.
		managementClusterNamespace     *corev1.Namespace
		managementClusterCancelWatches context.CancelFunc
		managementClusterResources     *clusterctl.ApplyClusterTemplateAndWaitResult
		managementClusterProxy         framework.ClusterProxy

		// Resources for the test workload cluster used to exercise the management cluster.
		testNamespace        *corev1.Namespace
		testCancelWatches    context.CancelFunc
		testClusterResources *clusterctl.ApplyClusterTemplateAndWaitResult
	)

	BeforeEach(func() {
		Expect(ctx).NotTo(BeNil(), "ctx is required for %s spec", specName)
		input = inputGetter()
		Expect(input.E2EConfig).ToNot(BeNil(), "Invalid argument. input.E2EConfig can't be nil when calling %s spec", specName)
		Expect(input.ClusterctlConfigPath).To(BeAnExistingFile(), "Invalid argument. input.ClusterctlConfigPath must be an existing file when calling %s spec", specName)
		Expect(input.BootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. input.BootstrapClusterProxy can't be nil when calling %s spec", specName)
		Expect(os.MkdirAll(input.ArtifactFolder, 0755)).To(Succeed(), "Invalid argument. input.ArtifactFolder can't be created for %s spec", specName)
		Expect(input.E2EConfig.Variables).To(HaveKey(KubernetesVersion))
		Expect(input.E2EConfig.Variables).To(HaveKey(ClusterctlUpgradeContract))

		// Setup a Namespace where to host objects for this spec and create a watcher for the namespace events.
		// NOTE: use the spec's ctx (not context.TODO()) so watches are cancelled together with the test context.
		managementClusterNamespace, managementClusterCancelWatches = setupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder)
	})

	It("Should create a management cluster and then upgrade all the providers", func() {

		By("Creating a workload cluster to be used as a new management cluster")
		// NOTE: given that the bootstrap cluster is shared by many tests, it is not possible to use it for testing clusterctl upgrades.
		// So we are creating a workload cluster that will be used as a new management cluster where to install older version of providers

		managementClusterResources = clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
			ClusterProxy: input.BootstrapClusterProxy,
			ConfigCluster: clusterctl.ConfigClusterInput{
				LogFolder:                filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()),
				ClusterctlConfigPath:     input.ClusterctlConfigPath,
				KubeconfigPath:           input.BootstrapClusterProxy.GetKubeconfigPath(),
				InfrastructureProvider:   clusterctl.DefaultInfrastructureProvider,
				Flavor:                   clusterctl.DefaultFlavor,
				Namespace:                managementClusterNamespace.Name,
				ClusterName:              fmt.Sprintf("%s-%s", specName, util.RandomString(6)),
				KubernetesVersion:        input.E2EConfig.GetVariable(KubernetesVersion),
				ControlPlaneMachineCount: pointer.Int64Ptr(1),
				WorkerMachineCount:       pointer.Int64Ptr(1),
			},
			WaitForClusterIntervals:      input.E2EConfig.GetIntervals(specName, "wait-cluster"),
			WaitForControlPlaneIntervals: input.E2EConfig.GetIntervals(specName, "wait-control-plane"),
			WaitForMachineDeployments:    input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"),
		})

		By("Turning the workload cluster into a management cluster with older versions of providers")

		// In case the cluster is a DockerCluster, we should load controller images into the nodes.
		// Nb. this can be achieved also by changing the DockerMachine spec, but for the time being we are using
		// this approach because this allows to have a single source of truth for images, the e2e config
		// Nb. the images for official version of the providers will be pulled from internet, but the latest images must be
		// built locally and loaded into kind
		cluster := managementClusterResources.Cluster
		if cluster.Spec.InfrastructureRef.Kind == "DockerCluster" {
			Expect(bootstrap.LoadImagesToKindCluster(ctx, bootstrap.LoadImagesToKindClusterInput{
				Name:   cluster.Name,
				Images: input.E2EConfig.Images,
			})).To(Succeed())
		}

		// Get a ClusterBroker so we can interact with the workload cluster
		managementClusterProxy = input.BootstrapClusterProxy.GetWorkloadCluster(ctx, cluster.Namespace, cluster.Name)

		By("Initializing the workload cluster with older versions of providers")
		clusterctl.InitManagementClusterAndWatchControllerLogs(ctx, clusterctl.InitManagementClusterAndWatchControllerLogsInput{
			ClusterProxy:            managementClusterProxy,
			ClusterctlConfigPath:    input.ClusterctlConfigPath,
			CoreProvider:            input.E2EConfig.GetProvidersWithOldestVersion(config.ClusterAPIProviderName)[0],
			BootstrapProviders:      input.E2EConfig.GetProvidersWithOldestVersion(config.KubeadmBootstrapProviderName),
			ControlPlaneProviders:   input.E2EConfig.GetProvidersWithOldestVersion(config.KubeadmControlPlaneProviderName),
			InfrastructureProviders: input.E2EConfig.GetProvidersWithOldestVersion(input.E2EConfig.InfrastructureProviders()...),
			LogFolder:               filepath.Join(input.ArtifactFolder, "clusters", cluster.Name),
		}, input.E2EConfig.GetIntervals(specName, "wait-controllers")...)

		By("THE MANAGEMENT CLUSTER WITH THE OLDER VERSION OF PROVIDERS IS UP&RUNNING!")

		Byf("Creating a namespace for hosting the %s test workload cluster", specName)
		testNamespace, testCancelWatches = framework.CreateNamespaceAndWatchEvents(ctx, framework.CreateNamespaceAndWatchEventsInput{
			Creator:   managementClusterProxy.GetClient(),
			ClientSet: managementClusterProxy.GetClientSet(),
			Name:      specName,
			// NOTE(review): logs go under "clusters/bootstrap" rather than the management cluster's
			// name — confirm this is intentional before changing it.
			LogFolder: filepath.Join(input.ArtifactFolder, "clusters", "bootstrap"),
		})

		By("Creating a test workload cluster")
		// NOTE: this workload cluster is used to check the management cluster works fine

		testClusterResources = clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
			ClusterProxy: managementClusterProxy,
			ConfigCluster: clusterctl.ConfigClusterInput{
				LogFolder:                filepath.Join(input.ArtifactFolder, "clusters", managementClusterProxy.GetName()),
				ClusterctlConfigPath:     input.ClusterctlConfigPath,
				KubeconfigPath:           managementClusterProxy.GetKubeconfigPath(),
				InfrastructureProvider:   clusterctl.DefaultInfrastructureProvider,
				Flavor:                   clusterctl.DefaultFlavor,
				Namespace:                testNamespace.Name,
				ClusterName:              fmt.Sprintf("%s-%s", specName, util.RandomString(6)),
				KubernetesVersion:        input.E2EConfig.GetVariable(KubernetesVersion),
				ControlPlaneMachineCount: pointer.Int64Ptr(1),
				WorkerMachineCount:       pointer.Int64Ptr(1),
			},
			WaitForClusterIntervals:      input.E2EConfig.GetIntervals(specName, "wait-cluster"),
			WaitForControlPlaneIntervals: input.E2EConfig.GetIntervals(specName, "wait-control-plane"),
			WaitForMachineDeployments:    input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"),
		})

		By("THE MANAGEMENT CLUSTER WITH OLDER VERSION OF PROVIDERS WORKS!")

		By("Upgrading providers to the latest version available")
		clusterctl.UpgradeManagementClusterAndWait(ctx, clusterctl.UpgradeManagementClusterAndWaitInput{
			ClusterctlConfigPath: input.ClusterctlConfigPath,
			ClusterProxy:         managementClusterProxy,
			ManagementGroup:      fmt.Sprintf("capi-system/%s", config.ClusterAPIProviderName),
			Contract:             input.E2EConfig.GetVariable(ClusterctlUpgradeContract),
			LogFolder:            filepath.Join(input.ArtifactFolder, "clusters", cluster.Name),
		}, input.E2EConfig.GetIntervals(specName, "wait-controllers")...)

		By("THE MANAGEMENT CLUSTER WAS SUCCESSFULLY UPGRADED!")

		// Scale a MachineDeployment of the test cluster to verify the upgraded
		// management cluster is still able to reconcile workload clusters.
		testMachineDeployments := framework.GetMachineDeploymentsByCluster(ctx, framework.GetMachineDeploymentsByClusterInput{
			Lister:      managementClusterProxy.GetClient(),
			ClusterName: testClusterResources.Cluster.Name,
			Namespace:   testNamespace.Name,
		})
		// Fail cleanly instead of panicking with an index-out-of-range if no
		// MachineDeployment was found.
		Expect(testMachineDeployments).ToNot(BeEmpty(), "There must be at least one MachineDeployment for the %s test workload cluster", specName)

		framework.ScaleAndWaitMachineDeployment(ctx, framework.ScaleAndWaitMachineDeploymentInput{
			ClusterProxy:              managementClusterProxy,
			Cluster:                   testClusterResources.Cluster,
			MachineDeployment:         testMachineDeployments[0],
			Replicas:                  2,
			WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"),
		})

		By("THE UPGRADED MANAGEMENT CLUSTER WORKS!")

		By("PASSED!")
	})

	AfterEach(func() {
		if testNamespace != nil {
			// Dump all the logs from the workload cluster before deleting them.
			managementClusterProxy.CollectWorkloadClusterLogs(ctx, testNamespace.Name, testClusterResources.Cluster.Name, filepath.Join(input.ArtifactFolder, "clusters", testClusterResources.Cluster.Name, "machines"))

			// Dump all Cluster API related resources to artifacts before pivoting back.
			framework.DumpAllResources(ctx, framework.DumpAllResourcesInput{
				Lister:    managementClusterProxy.GetClient(),
				Namespace: managementClusterNamespace.Name,
				LogPath:   filepath.Join(input.ArtifactFolder, "clusters", managementClusterResources.Cluster.Name, "resources"),
			})

			if !input.SkipCleanup {
				Byf("Deleting cluster %s/%s", testNamespace.Name, testClusterResources.Cluster.Name)
				framework.DeleteAllClustersAndWait(ctx, framework.DeleteAllClustersAndWaitInput{
					Client:    managementClusterProxy.GetClient(),
					Namespace: testNamespace.Name,
				}, input.E2EConfig.GetIntervals(specName, "wait-delete-cluster")...)

				Byf("Deleting namespace used for hosting the %q test", specName)
				framework.DeleteNamespace(ctx, framework.DeleteNamespaceInput{
					Deleter: managementClusterProxy.GetClient(),
					Name:    testNamespace.Name,
				})
			}
			testCancelWatches()
		}

		// Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself.
		dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, managementClusterNamespace, managementClusterCancelWatches, managementClusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
	})
}
39 changes: 39 additions & 0 deletions test/e2e/clusterctl_upgrade_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
// +build e2e

/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
"context"

. "github.com/onsi/ginkgo"
)

// Registers the clusterctl upgrade spec with the e2e suite.
// Only compiled when the "e2e" build tag is set (see the build constraint at the
// top of this file). The inputGetter closure defers reading the suite-level
// globals (e2eConfig, clusterctlConfigPath, ...) until the spec actually runs.
var _ = Describe("When testing clusterctl upgrades", func() {

	ClusterctlUpgradeSpec(context.TODO(), func() ClusterctlUpgradeSpecInput {
		return ClusterctlUpgradeSpecInput{
			E2EConfig:             e2eConfig,
			ClusterctlConfigPath:  clusterctlConfigPath,
			BootstrapClusterProxy: bootstrapClusterProxy,
			ArtifactFolder:        artifactFolder,
			SkipCleanup:           skipCleanup,
		}
	})

})
37 changes: 29 additions & 8 deletions test/e2e/config/docker.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -29,8 +29,13 @@ providers:
- name: cluster-api
type: CoreProvider
versions:
- name: v0.3.0
# Use manifest from source files
- name: v0.3.10 # latest published release
value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.3.10/core-components.yaml"
type: "url"
replacements:
- old: --metrics-addr=127.0.0.1:8080
new: --metrics-addr=:8080
- name: v0.3.11 # next; use manifest from source files
value: ../../../config
replacements:
- old: --metrics-addr=127.0.0.1:8080
Expand All @@ -39,8 +44,13 @@ providers:
- name: kubeadm
type: BootstrapProvider
versions:
- name: v0.3.0
# Use manifest from source files
- name: v0.3.10 # latest published release
value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.3.10/bootstrap-components.yaml"
type: "url"
replacements:
- old: --metrics-addr=127.0.0.1:8080
new: --metrics-addr=:8080
- name: v0.3.11 # next; use manifest from source files
value: ../../../bootstrap/kubeadm/config
replacements:
- old: --metrics-addr=127.0.0.1:8080
Expand All @@ -49,8 +59,13 @@ providers:
- name: kubeadm
type: ControlPlaneProvider
versions:
- name: v0.3.0
# Use manifest from source files
- name: v0.3.10 # latest published release
value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.3.10/control-plane-components.yaml"
type: "url"
replacements:
- old: --metrics-addr=127.0.0.1:8080
new: --metrics-addr=:8080
- name: v0.3.11 # next; use manifest from source files
value: ../../../controlplane/kubeadm/config
replacements:
- old: --metrics-addr=127.0.0.1:8080
Expand All @@ -59,8 +74,13 @@ providers:
- name: docker
type: InfrastructureProvider
versions:
- name: v0.3.0
# Use manifest from source files
- name: v0.3.10 # latest published release
value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.3.10/infrastructure-components-development.yaml"
type: "url"
replacements:
- old: --metrics-addr=127.0.0.1:8080
new: --metrics-addr=:8080
- name: v0.3.11 # next; use manifest from source files
value: ../../../test/infrastructure/docker/config
replacements:
- old: --metrics-addr=127.0.0.1:8080
Expand All @@ -85,6 +105,7 @@ variables:
CNI: "./data/cni/kindnet/kindnet.yaml"
EXP_CLUSTER_RESOURCE_SET: "true"
EXP_MACHINE_POOL: "true"
CLUSTERCTL_CONTRACT_UPGRADE_TO: "v1alpha3"

intervals:
default/wait-controllers: ["3m", "10s"]
Expand Down
32 changes: 32 additions & 0 deletions test/framework/clusterctl/client.go
Original file line number Diff line number Diff line change
Expand Up @@ -82,6 +82,38 @@ func Init(ctx context.Context, input InitInput) {
Expect(err).ToNot(HaveOccurred(), "failed to run clusterctl init")
}

// UpgradeInput is the input for Upgrade.
type UpgradeInput struct {
	// LogFolder is the folder where the clusterctl-upgrade.log file is written.
	LogFolder string
	// ClusterctlConfigPath is the path to the clusterctl config file to use.
	ClusterctlConfigPath string
	// KubeconfigPath is the kubeconfig of the management cluster to upgrade.
	KubeconfigPath string
	// ManagementGroup identifies the management group to upgrade,
	// in the form "namespace/provider-name" (e.g. "capi-system/cluster-api").
	ManagementGroup string
	// Contract is the API contract to upgrade to (e.g. "v1alpha3").
	Contract string
}

// Upgrade calls clusterctl upgrade apply with the list of providers defined in the local repository.
func Upgrade(ctx context.Context, input UpgradeInput) {
	log.Logf("clusterctl upgrade apply --management-group %s --contract %s",
		input.ManagementGroup,
		input.Contract,
	)

	upgradeOpt := clusterctlclient.ApplyUpgradeOptions{
		Kubeconfig: clusterctlclient.Kubeconfig{
			Path:    input.KubeconfigPath,
			Context: "",
		},
		ManagementGroup: input.ManagementGroup,
		Contract:        input.Contract,
	}

	// Name the returned log file "logFile" so it does not shadow the package-level
	// log used by log.Logf above.
	clusterctlClient, logFile := getClusterctlClientWithLogger(input.ClusterctlConfigPath, "clusterctl-upgrade.log", input.LogFolder)
	defer logFile.Close()

	err := clusterctlClient.ApplyUpgrade(upgradeOpt)
	Expect(err).ToNot(HaveOccurred(), "failed to run clusterctl upgrade")
}

// ConfigClusterInput is the input for ConfigCluster.
type ConfigClusterInput struct {
LogFolder string
Expand Down
Loading

0 comments on commit 66d342e

Please sign in to comment.