Skip to content

Commit

Permalink
🌱 add machine pool wait support to test framework
Browse files Browse the repository at this point in the history
  • Loading branch information
devigned committed Sep 9, 2020
1 parent 73624ed commit 45fd2ac
Show file tree
Hide file tree
Showing 9 changed files with 135 additions and 42 deletions.
1 change: 1 addition & 0 deletions test/e2e/config/docker-ci.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -91,6 +91,7 @@ intervals:
default/wait-cluster: ["3m", "10s"]
default/wait-control-plane: ["10m", "10s"]
default/wait-worker-nodes: ["5m", "10s"]
default/wait-machine-pool-nodes: ["5m", "10s"]
default/wait-delete-cluster: ["3m", "10s"]
default/wait-machine-upgrade: ["20m", "10s"]
default/wait-machine-remediation: ["5m", "10s"]
1 change: 1 addition & 0 deletions test/e2e/config/docker-dev.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -118,6 +118,7 @@ intervals:
default/wait-cluster: ["3m", "10s"]
default/wait-control-plane: ["10m", "10s"]
default/wait-worker-nodes: ["5m", "10s"]
default/wait-machine-pool-nodes: ["5m", "10s"]
default/wait-delete-cluster: ["3m", "10s"]
default/wait-machine-upgrade: ["20m", "10s"]
default/wait-machine-remediation: ["5m", "10s"]
20 changes: 9 additions & 11 deletions test/e2e/kcp_upgrade.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,8 +27,7 @@ import (

corev1 "k8s.io/api/core/v1"
"k8s.io/utils/pointer"
clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha3"

"sigs.k8s.io/cluster-api/test/framework"
"sigs.k8s.io/cluster-api/test/framework/clusterctl"
"sigs.k8s.io/cluster-api/util"
Expand All @@ -50,8 +49,7 @@ func KCPUpgradeSpec(ctx context.Context, inputGetter func() KCPUpgradeSpecInput)
input KCPUpgradeSpecInput
namespace *corev1.Namespace
cancelWatches context.CancelFunc
cluster *clusterv1.Cluster
controlPlane *controlplanev1.KubeadmControlPlane
result *clusterctl.ApplyClusterTemplateAndWaitResult
)

BeforeEach(func() {
Expand All @@ -73,7 +71,7 @@ func KCPUpgradeSpec(ctx context.Context, inputGetter func() KCPUpgradeSpecInput)
It("Should successfully upgrade Kubernetes, DNS, kube-proxy, and etcd in a single control plane cluster", func() {

By("Creating a workload cluster")
cluster, controlPlane, _ = clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
result = clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
ClusterProxy: input.BootstrapClusterProxy,
ConfigCluster: clusterctl.ConfigClusterInput{
LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()),
Expand All @@ -95,8 +93,8 @@ func KCPUpgradeSpec(ctx context.Context, inputGetter func() KCPUpgradeSpecInput)
By("Upgrading Kubernetes, DNS, kube-proxy, and etcd versions")
framework.UpgradeControlPlaneAndWaitForUpgrade(ctx, framework.UpgradeControlPlaneAndWaitForUpgradeInput{
ClusterProxy: input.BootstrapClusterProxy,
Cluster: cluster,
ControlPlane: controlPlane,
Cluster: result.Cluster,
ControlPlane: result.ControlPlane,
EtcdImageTag: input.E2EConfig.GetVariable(EtcdVersionUpgradeTo),
DNSImageTag: input.E2EConfig.GetVariable(CoreDNSVersionUpgradeTo),
KubernetesUpgradeVersion: input.E2EConfig.GetVariable(KubernetesVersionUpgradeTo),
Expand All @@ -112,7 +110,7 @@ func KCPUpgradeSpec(ctx context.Context, inputGetter func() KCPUpgradeSpecInput)

By("Creating a workload cluster")

cluster, controlPlane, _ = clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
result = clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
ClusterProxy: input.BootstrapClusterProxy,
ConfigCluster: clusterctl.ConfigClusterInput{
LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()),
Expand All @@ -134,8 +132,8 @@ func KCPUpgradeSpec(ctx context.Context, inputGetter func() KCPUpgradeSpecInput)
By("Upgrading Kubernetes, DNS, kube-proxy, and etcd versions")
framework.UpgradeControlPlaneAndWaitForUpgrade(ctx, framework.UpgradeControlPlaneAndWaitForUpgradeInput{
ClusterProxy: input.BootstrapClusterProxy,
Cluster: cluster,
ControlPlane: controlPlane,
Cluster: result.Cluster,
ControlPlane: result.ControlPlane,
EtcdImageTag: input.E2EConfig.GetVariable(EtcdVersionUpgradeTo),
DNSImageTag: input.E2EConfig.GetVariable(CoreDNSVersionUpgradeTo),
KubernetesUpgradeVersion: input.E2EConfig.GetVariable(KubernetesVersionUpgradeTo),
Expand All @@ -149,6 +147,6 @@ func KCPUpgradeSpec(ctx context.Context, inputGetter func() KCPUpgradeSpecInput)

AfterEach(func() {
// Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself.
dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, result.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
})
}
18 changes: 8 additions & 10 deletions test/e2e/md_upgrades.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ import (

corev1 "k8s.io/api/core/v1"
"k8s.io/utils/pointer"
clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"

"sigs.k8s.io/cluster-api/test/framework"
"sigs.k8s.io/cluster-api/test/framework/clusterctl"
"sigs.k8s.io/cluster-api/util"
Expand All @@ -49,7 +49,7 @@ func MachineDeploymentUpgradesSpec(ctx context.Context, inputGetter func() Machi
input MachineDeploymentUpgradesSpecInput
namespace *corev1.Namespace
cancelWatches context.CancelFunc
cluster *clusterv1.Cluster
result *clusterctl.ApplyClusterTemplateAndWaitResult
)

BeforeEach(func() {
Expand All @@ -71,9 +71,7 @@ func MachineDeploymentUpgradesSpec(ctx context.Context, inputGetter func() Machi
It("Should successfully upgrade Machines upon changes in relevant MachineDeployment fields", func() {

By("Creating a workload cluster")

var mds []*clusterv1.MachineDeployment
cluster, _, mds = clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
result = clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
ClusterProxy: input.BootstrapClusterProxy,
ConfigCluster: clusterctl.ConfigClusterInput{
LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()),
Expand All @@ -95,24 +93,24 @@ func MachineDeploymentUpgradesSpec(ctx context.Context, inputGetter func() Machi
By("Upgrading MachineDeployment's Kubernetes version to a valid version")
framework.UpgradeMachineDeploymentsAndWait(context.TODO(), framework.UpgradeMachineDeploymentsAndWaitInput{
ClusterProxy: input.BootstrapClusterProxy,
Cluster: cluster,
Cluster: result.Cluster,
UpgradeVersion: input.E2EConfig.GetVariable(KubernetesVersion),
WaitForMachinesToBeUpgraded: input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"),
MachineDeployments: mds,
MachineDeployments: result.MachineDeployments,
})

By("Upgrading MachineDeployment Infrastructure ref and wait for rolling upgrade")
framework.UpgradeMachineDeploymentInfrastructureRefAndWait(context.TODO(), framework.UpgradeMachineDeploymentInfrastructureRefAndWaitInput{
ClusterProxy: input.BootstrapClusterProxy,
Cluster: cluster,
Cluster: result.Cluster,
WaitForMachinesToBeUpgraded: input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"),
MachineDeployments: mds,
MachineDeployments: result.MachineDeployments,
})
By("PASSED!")
})

AfterEach(func() {
// Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself.
dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, result.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
})
}
10 changes: 5 additions & 5 deletions test/e2e/mhc_remediations.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ import (

corev1 "k8s.io/api/core/v1"
"k8s.io/utils/pointer"
clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"

"sigs.k8s.io/cluster-api/test/framework"
"sigs.k8s.io/cluster-api/test/framework/clusterctl"
"sigs.k8s.io/cluster-api/util"
Expand All @@ -49,7 +49,7 @@ func MachineRemediationSpec(ctx context.Context, inputGetter func() MachineRemed
input MachineRemediationSpecInput
namespace *corev1.Namespace
cancelWatches context.CancelFunc
cluster *clusterv1.Cluster
result *clusterctl.ApplyClusterTemplateAndWaitResult
)

BeforeEach(func() {
Expand All @@ -69,7 +69,7 @@ func MachineRemediationSpec(ctx context.Context, inputGetter func() MachineRemed

By("Creating a workload cluster")

cluster, _, _ = clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
result = clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
ClusterProxy: input.BootstrapClusterProxy,
ConfigCluster: clusterctl.ConfigClusterInput{
LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()),
Expand All @@ -91,7 +91,7 @@ func MachineRemediationSpec(ctx context.Context, inputGetter func() MachineRemed
By("Waiting for MachineHealthCheck remediation")
framework.DiscoverMachineHealthChecksAndWaitForRemediation(ctx, framework.DiscoverMachineHealthCheckAndWaitForRemediationInput{
ClusterProxy: input.BootstrapClusterProxy,
Cluster: cluster,
Cluster: result.Cluster,
WaitForMachineRemediation: input.E2EConfig.GetIntervals(specName, "wait-machine-remediation"),
})

Expand All @@ -100,6 +100,6 @@ func MachineRemediationSpec(ctx context.Context, inputGetter func() MachineRemed

AfterEach(func() {
// Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself.
dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, result.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
})
}
8 changes: 4 additions & 4 deletions test/e2e/quick_start.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ import (

corev1 "k8s.io/api/core/v1"
"k8s.io/utils/pointer"
clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"

"sigs.k8s.io/cluster-api/test/framework"
"sigs.k8s.io/cluster-api/test/framework/clusterctl"
"sigs.k8s.io/cluster-api/util"
Expand All @@ -51,7 +51,7 @@ func QuickStartSpec(ctx context.Context, inputGetter func() QuickStartSpecInput)
input QuickStartSpecInput
namespace *corev1.Namespace
cancelWatches context.CancelFunc
cluster *clusterv1.Cluster
result *clusterctl.ApplyClusterTemplateAndWaitResult
)

BeforeEach(func() {
Expand All @@ -72,7 +72,7 @@ func QuickStartSpec(ctx context.Context, inputGetter func() QuickStartSpecInput)

By("Creating a workload cluster")

cluster, _, _ = clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
result = clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
ClusterProxy: input.BootstrapClusterProxy,
ConfigCluster: clusterctl.ConfigClusterInput{
LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()),
Expand All @@ -96,6 +96,6 @@ func QuickStartSpec(ctx context.Context, inputGetter func() QuickStartSpecInput)

AfterEach(func() {
// Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself.
dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, result.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
})
}
11 changes: 6 additions & 5 deletions test/e2e/self_hosted.go
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@ func SelfHostedSpec(ctx context.Context, inputGetter func() SelfHostedSpecInput)
input SelfHostedSpecInput
namespace *corev1.Namespace
cancelWatches context.CancelFunc
cluster *clusterv1.Cluster
result *clusterctl.ApplyClusterTemplateAndWaitResult

selfHostedClusterProxy framework.ClusterProxy
selfHostedNamespace *corev1.Namespace
Expand All @@ -76,7 +76,7 @@ func SelfHostedSpec(ctx context.Context, inputGetter func() SelfHostedSpecInput)

By("Creating a workload cluster")

cluster, _, _ = clusterctl.ApplyClusterTemplateAndWait(context.TODO(), clusterctl.ApplyClusterTemplateAndWaitInput{
result = clusterctl.ApplyClusterTemplateAndWait(context.TODO(), clusterctl.ApplyClusterTemplateAndWaitInput{
ClusterProxy: input.BootstrapClusterProxy,
ConfigCluster: clusterctl.ConfigClusterInput{
LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()),
Expand All @@ -101,6 +101,7 @@ func SelfHostedSpec(ctx context.Context, inputGetter func() SelfHostedSpecInput)
// In case the cluster is a DockerCluster, we should load controller images into the nodes.
// Nb. this can be achieved also by changing the DockerMachine spec, but for the time being we are using
// this approach because this allows to have a single source of truth for images, the e2e config
cluster := result.Cluster
if cluster.Spec.InfrastructureRef.Kind == "DockerCluster" {
bootstrap.LoadImagesToKindCluster(context.TODO(), bootstrap.LoadImagesToKindClusterInput{
Name: cluster.Name,
Expand Down Expand Up @@ -175,17 +176,17 @@ func SelfHostedSpec(ctx context.Context, inputGetter func() SelfHostedSpecInput)
})

log.Logf("Waiting for the cluster infrastructure to be provisioned")
cluster = framework.DiscoveryAndWaitForCluster(ctx, framework.DiscoveryAndWaitForClusterInput{
result.Cluster = framework.DiscoveryAndWaitForCluster(ctx, framework.DiscoveryAndWaitForClusterInput{
Getter: input.BootstrapClusterProxy.GetClient(),
Namespace: namespace.Name,
Name: cluster.Name,
Name: result.Cluster.Name,
}, input.E2EConfig.GetIntervals(specName, "wait-cluster")...)
}
if selfHostedCancelWatches != nil {
selfHostedCancelWatches()
}

// Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself.
dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, cancelWatches, result.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
})
}
27 changes: 24 additions & 3 deletions test/framework/clusterctl/clusterctl_helpers.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ import (
clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
"sigs.k8s.io/cluster-api/cmd/clusterctl/client/config"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha3"
clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1alpha3"
"sigs.k8s.io/cluster-api/test/framework"
"sigs.k8s.io/cluster-api/test/framework/internal/log"
)
Expand Down Expand Up @@ -109,11 +110,19 @@ type ApplyClusterTemplateAndWaitInput struct {
WaitForClusterIntervals []interface{}
WaitForControlPlaneIntervals []interface{}
WaitForMachineDeployments []interface{}
WaitForMachinePools []interface{}
}

// ApplyClusterTemplateAndWaitResult is the output of ApplyClusterTemplateAndWait.
// It bundles the objects the helper discovered and waited on, so specs can
// reference them (e.g. for upgrades or cleanup) without re-fetching.
type ApplyClusterTemplateAndWaitResult struct {
	// Cluster is the workload cluster object created from the template.
	Cluster *clusterv1.Cluster
	// ControlPlane is the cluster's KubeadmControlPlane, once provisioned.
	ControlPlane *controlplanev1.KubeadmControlPlane
	// MachineDeployments are the cluster's MachineDeployments, once provisioned.
	MachineDeployments []*clusterv1.MachineDeployment
	// MachinePools are the cluster's MachinePools, once provisioned.
	MachinePools []*clusterv1exp.MachinePool
}

// ApplyClusterTemplateAndWait gets a cluster template using clusterctl, and waits for the cluster to be ready.
// Important! This method assumes the cluster uses a KubeadmControlPlane and MachineDeployments.
func ApplyClusterTemplateAndWait(ctx context.Context, input ApplyClusterTemplateAndWaitInput) (*clusterv1.Cluster, *controlplanev1.KubeadmControlPlane, []*clusterv1.MachineDeployment) {
func ApplyClusterTemplateAndWait(ctx context.Context, input ApplyClusterTemplateAndWaitInput) *ApplyClusterTemplateAndWaitResult {
Expect(ctx).NotTo(BeNil(), "ctx is required for ApplyClusterTemplateAndWait")

Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling ApplyClusterTemplateAndWait")
Expand Down Expand Up @@ -174,11 +183,23 @@ func ApplyClusterTemplateAndWait(ctx context.Context, input ApplyClusterTemplate
ControlPlane: controlPlane,
}, input.WaitForControlPlaneIntervals...)

log.Logf("Waiting for the worker machines to be provisioned")
log.Logf("Waiting for the machine deployments to be provisioned")
machineDeployments := framework.DiscoveryAndWaitForMachineDeployments(ctx, framework.DiscoveryAndWaitForMachineDeploymentsInput{
Lister: input.ClusterProxy.GetClient(),
Cluster: cluster,
}, input.WaitForMachineDeployments...)

return cluster, controlPlane, machineDeployments
log.Logf("Waiting for the machine pools to be provisioned")
// Wait on machine pools with their own intervals; using the machine
// deployment intervals here (as before) ignored the WaitForMachinePools
// field added to ApplyClusterTemplateAndWaitInput.
machinePools := framework.DiscoveryAndWaitForMachinePools(ctx, framework.DiscoveryAndWaitForMachinePoolsInput{
	Getter: input.ClusterProxy.GetClient(),
	Lister: input.ClusterProxy.GetClient(),
	Cluster: cluster,
}, input.WaitForMachinePools...)

return &ApplyClusterTemplateAndWaitResult{
Cluster: cluster,
ControlPlane: controlPlane,
MachineDeployments: machineDeployments,
MachinePools: machinePools,
}
}
Loading

0 comments on commit 45fd2ac

Please sign in to comment.