From dda38ae860a82b9200d94d5cdce19d15e307e9a3 Mon Sep 17 00:00:00 2001 From: Chuck Ha Date: Sat, 8 Feb 2020 13:19:02 -0500 Subject: [PATCH] Refactor the e2e framework Signed-off-by: Chuck Ha --- test/framework/control_plane.go | 319 +++++++++++------- test/framework/deprecated.go | 270 +++++++++++++++ test/framework/types.go | 19 -- .../docker/e2e/custom_assertions.go | 62 ++++ .../docker/e2e/docker_suite_test.go | 26 -- test/infrastructure/docker/e2e/docker_test.go | 136 ++++++-- 6 files changed, 640 insertions(+), 192 deletions(-) create mode 100644 test/framework/deprecated.go create mode 100644 test/infrastructure/docker/e2e/custom_assertions.go diff --git a/test/framework/control_plane.go b/test/framework/control_plane.go index e7ea7dccdd5f..01306e7fb5a1 100644 --- a/test/framework/control_plane.go +++ b/test/framework/control_plane.go @@ -19,13 +19,13 @@ package framework import ( "context" "fmt" - "time" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" @@ -34,85 +34,144 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -const ( - // eventuallyInterval is the polling interval used by gomega's Eventually - eventuallyInterval = 10 * time.Second -) +// Interfaces to scope down client.Client -// ControlplaneClusterInput defines the necessary dependencies to run a multi-node control plane cluster. -type ControlplaneClusterInput struct { - Management ManagementCluster - Cluster *clusterv1.Cluster - InfraCluster runtime.Object - Nodes []Node - MachineDeployment MachineDeployment - RelatedResources []runtime.Object - CreateTimeout time.Duration - DeleteTimeout time.Duration +// Getter can get resources. +type Getter interface { + Get(ctx context.Context, key client.ObjectKey, obj runtime.Object) error +} - ControlPlane *controlplanev1.KubeadmControlPlane - MachineTemplate runtime.Object +// Creator can creates resources. +type Creator interface { + Create(ctx context.Context, obj runtime.Object, opts ...client.CreateOption) error } -// SetDefaults defaults the struct fields if necessary. -func (input *ControlplaneClusterInput) SetDefaults() { - if input.CreateTimeout == 0 { - input.CreateTimeout = 10 * time.Minute - } +// Lister can lists resources. +type Lister interface { + List(ctx context.Context, list runtime.Object, opts ...client.ListOption) error +} - if input.DeleteTimeout == 0 { - input.DeleteTimeout = 5 * time.Minute - } +// Deleter can delete resources. +type Deleter interface { + Delete(ctx context.Context, obj runtime.Object, opts ...client.DeleteOption) error +} + +// CreateRelatedResourcesInput is the input type for CreateRelatedResources. +type CreateRelatedResourcesInput struct { + Creator Creator + RelatedResources []runtime.Object } -// ControlPlaneCluster creates an n node control plane cluster. -// Assertions: -// * The number of nodes in the created cluster will equal the number -// of control plane nodes plus the number of replicas in the machine -// deployment. -func (input *ControlplaneClusterInput) ControlPlaneCluster() { - ctx := context.Background() - Expect(input.Management).ToNot(BeNil()) +// CreateRelatedResources is used to create runtime.Objects. 
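+//
+// A minimal usage sketch; mgmtClient and the "3m"/"10s" Eventually intervals
+// below are illustrative placeholders from a hypothetical test, not part of
+// this function's contract:
+//
+//	CreateRelatedResources(ctx, CreateRelatedResourcesInput{
+//		Creator:          mgmtClient,
+//		RelatedResources: []runtime.Object{extraSecret, extraConfigMap},
+//	}, "3m", "10s")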
+func CreateRelatedResources(ctx context.Context, input CreateRelatedResourcesInput, intervals ...interface{}) { + By("creating related resources") + for i := range input.RelatedResources { + obj := input.RelatedResources[i] + By(fmt.Sprintf("creating a/an %s resource", obj.GetObjectKind().GroupVersionKind())) + Eventually(func() error { + return input.Creator.Create(ctx, obj) + }, intervals...).Should(BeNil()) + } +} - mgmtClient, err := input.Management.GetClient() - Expect(err).NotTo(HaveOccurred(), "stack: %+v", err) +// CreateClusterInput is the input for CreateCluster. +type CreateClusterInput struct { + Creator Creator + Cluster *clusterv1.Cluster + InfraCluster runtime.Object +} +// CreateCluster will create the Cluster and InfraCluster objects. +func CreateCluster(ctx context.Context, input CreateClusterInput, intervals ...interface{}) { By("creating an InfrastructureCluster resource") - Expect(mgmtClient.Create(ctx, input.InfraCluster)).To(Succeed()) + Expect(input.Creator.Create(ctx, input.InfraCluster)).To(Succeed()) // This call happens in an eventually because of a race condition with the // webhook server. If the latter isn't fully online then this call will // fail. By("creating a Cluster resource linked to the InfrastructureCluster resource") Eventually(func() error { - if err := mgmtClient.Create(ctx, input.Cluster); err != nil { + if err := input.Creator.Create(ctx, input.Cluster); err != nil { fmt.Printf("%+v\n", err) return err } return nil - }, input.CreateTimeout, eventuallyInterval).Should(BeNil()) + }, intervals...).Should(BeNil()) +} - By("creating related resources") - for i := range input.RelatedResources { - obj := input.RelatedResources[i] - By(fmt.Sprintf("creating a/an %s resource", obj.GetObjectKind().GroupVersionKind())) - Eventually(func() error { - return mgmtClient.Create(ctx, obj) - }, input.CreateTimeout, eventuallyInterval).Should(BeNil()) - } +// CreateKubeadmControlPlaneInput is the input for CreateKubeadmControlPlane. +type CreateKubeadmControlPlaneInput struct { + Creator Creator + ControlPlane *controlplanev1.KubeadmControlPlane + MachineTemplate runtime.Object +} +// CreateKubeadmControlPlane creates the control plane object and necessary dependencies. +func CreateKubeadmControlPlane(ctx context.Context, input CreateKubeadmControlPlaneInput, intervals ...interface{}) { By("creating the machine template") - Expect(mgmtClient.Create(ctx, input.MachineTemplate)).To(Succeed()) + Expect(input.Creator.Create(ctx, input.MachineTemplate)).To(Succeed()) By("creating a KubeadmControlPlane") Eventually(func() error { - err := mgmtClient.Create(ctx, input.ControlPlane) + err := input.Creator.Create(ctx, input.ControlPlane) if err != nil { fmt.Println(err) } return err - }, input.CreateTimeout, 10*time.Second).Should(BeNil()) + }, intervals...).Should(Succeed()) +} + +// CreateMachineDeploymentInput is the input for CreateMachineDeployment. +type CreateMachineDeploymentInput struct { + Creator Creator + MachineDeployment *clusterv1.MachineDeployment + BootstrapConfigTemplate runtime.Object + InfraMachineTemplate runtime.Object +} + +// CreateMachineDeployment creates the machine deployment and dependencies. 
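+//
+// A hypothetical call, assuming md, bootstrapTemplate and infraTemplate were
+// generated earlier in the test:
+//
+//	CreateMachineDeployment(ctx, CreateMachineDeploymentInput{
+//		Creator:                 mgmtClient,
+//		MachineDeployment:       md,
+//		BootstrapConfigTemplate: bootstrapTemplate,
+//		InfraMachineTemplate:    infraTemplate,
+//	})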
+func CreateMachineDeployment(ctx context.Context, input CreateMachineDeploymentInput) { + By("creating a core MachineDeployment resource") + Expect(input.Creator.Create(ctx, input.MachineDeployment)).To(Succeed()) + + By("creating a BootstrapConfigTemplate resource") + Expect(input.Creator.Create(ctx, input.BootstrapConfigTemplate)).To(Succeed()) + + By("creating an InfrastructureMachineTemplate resource") + Expect(input.Creator.Create(ctx, input.InfraMachineTemplate)).To(Succeed()) +} + +// WaitForMachineDeploymentNodesToExistInput is the input for WaitForMachineDeploymentNodesToExist. +type WaitForMachineDeploymentNodesToExistInput struct { + Management ManagementCluster + Cluster *clusterv1.Cluster + MachineDeployment *clusterv1.MachineDeployment +} +// WaitForMachineDeploymentNodesToExist waits until all nodes associated with a machine deployment exist. +func WaitForMachineDeploymentNodesToExist(ctx context.Context, input WaitForMachineDeploymentNodesToExistInput, intervals ...interface{}) { + By("waiting for the workload nodes to exist") + Eventually(func() ([]v1.Node, error) { + workloadClient, err := input.Management.GetWorkloadClient(ctx, input.Cluster.Namespace, input.Cluster.Name) + if err != nil { + return nil, errors.Wrap(err, "failed to get workload client") + } + nodeList := v1.NodeList{} + if err := workloadClient.List(ctx, &nodeList); err != nil { + return nil, err + } + return nodeList.Items, nil + }, intervals...).Should(HaveLen(int(*input.MachineDeployment.Spec.Replicas))) +} + +// WaitForClusterToProvisionInput is the input for WaitForClusterToProvision. +type WaitForClusterToProvisionInput struct { + Getter Getter + Cluster *clusterv1.Cluster +} + +// WaitForClusterToProvision will wait for a cluster to have a phase status of provisioned. +func WaitForClusterToProvision(ctx context.Context, input WaitForClusterToProvisionInput, intervals ...interface{}) { By("waiting for cluster to enter the provisioned phase") Eventually(func() (string, error) { cluster := &clusterv1.Cluster{} @@ -120,127 +179,131 @@ func (input *ControlplaneClusterInput) ControlPlaneCluster() { Namespace: input.Cluster.GetNamespace(), Name: input.Cluster.GetName(), } - if err := mgmtClient.Get(ctx, key, cluster); err != nil { + if err := input.Getter.Get(ctx, key, cluster); err != nil { return "", err } return cluster.Status.Phase, nil - }, input.CreateTimeout, eventuallyInterval).Should(Equal(string(clusterv1.ClusterPhaseProvisioned))) - - // Create the machine deployment if the replica count >0. - if machineDeployment := input.MachineDeployment.MachineDeployment; machineDeployment != nil { - if replicas := machineDeployment.Spec.Replicas; replicas != nil && *replicas > 0 { - By("creating a core MachineDeployment resource") - Expect(mgmtClient.Create(ctx, machineDeployment)).To(Succeed()) - - By("creating a BootstrapConfigTemplate resource") - Expect(mgmtClient.Create(ctx, input.MachineDeployment.BootstrapConfigTemplate)).To(Succeed()) + }, intervals...).Should(Equal(string(clusterv1.ClusterPhaseProvisioned))) +} - By("creating an InfrastructureMachineTemplate resource") - Expect(mgmtClient.Create(ctx, input.MachineDeployment.InfraMachineTemplate)).To(Succeed()) - } +// WaitForKubeadmControlPlaneMachinesToExistInput is the input for WaitForKubeadmControlPlaneMachinesToExist. 
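+//
+// It is consumed by WaitForKubeadmControlPlaneMachinesToExist; for example
+// (mgmtClient, cluster, controlPlane and the intervals are placeholders):
+//
+//	WaitForKubeadmControlPlaneMachinesToExist(ctx, WaitForKubeadmControlPlaneMachinesToExistInput{
+//		Lister:       mgmtClient,
+//		Cluster:      cluster,
+//		ControlPlane: controlPlane,
+//	}, "10m", "10s")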
+type WaitForKubeadmControlPlaneMachinesToExistInput struct {
+	Lister       Lister
+	Cluster      *clusterv1.Cluster
+	ControlPlane *controlplanev1.KubeadmControlPlane
+}
-	By("waiting for the workload nodes to exist")
-	Eventually(func() ([]v1.Node, error) {
-		workloadClient, err := input.Management.GetWorkloadClient(ctx, input.Cluster.Namespace, input.Cluster.Name)
-		if err != nil {
-			return nil, errors.Wrap(err, "failed to get workload client")
-		}
-		nodeList := v1.NodeList{}
-		if err := workloadClient.List(ctx, &nodeList); err != nil {
-			return nil, err
-		}
-		return nodeList.Items, nil
-	}, input.CreateTimeout, 10*time.Second).Should(HaveLen(int(*machineDeployment.Spec.Replicas)))
+// WaitForKubeadmControlPlaneMachinesToExist will wait until all control plane machines exist.
+func WaitForKubeadmControlPlaneMachinesToExist(ctx context.Context, input WaitForKubeadmControlPlaneMachinesToExistInput, intervals ...interface{}) {
+	By("waiting for all control plane nodes to exist")
+	inClustersNamespaceListOption := client.InNamespace(input.Cluster.Namespace)
+	// ControlPlane labels
+	matchClusterListOption := client.MatchingLabels{
+		clusterv1.MachineControlPlaneLabelName: "",
+		clusterv1.ClusterLabelName:             input.Cluster.Name,
 	}
-	By("waiting for all machines to be running")
-	inClustersNamespaceListOption := client.InNamespace(input.Cluster.Namespace)
-	matchClusterListOption := client.MatchingLabels{clusterv1.ClusterLabelName: input.Cluster.Name}
-	Eventually(func() (bool, error) {
-		// Get a list of all the Machine resources that belong to the Cluster.
+	Eventually(func() (int, error) {
 		machineList := &clusterv1.MachineList{}
-		if err := mgmtClient.List(ctx, machineList, inClustersNamespaceListOption, matchClusterListOption); err != nil {
-			return false, err
-		}
-		for _, machine := range machineList.Items {
-			if machine.Status.Phase != string(clusterv1.MachinePhaseRunning) {
-				return false, errors.Errorf("machine %s is not running, it's %s", machine.Name, machine.Status.Phase)
-			}
+		if err := input.Lister.List(ctx, machineList, inClustersNamespaceListOption, matchClusterListOption); err != nil {
+			fmt.Println(err)
+			return 0, err
 		}
-		return true, nil
-	}, input.CreateTimeout, eventuallyInterval).Should(BeTrue())
-	// wait for the control plane to be ready
+		return len(machineList.Items), nil
+	}, intervals...).Should(Equal(int(*input.ControlPlane.Spec.Replicas)))
+}
+
+// WaitForControlPlaneToBeReadyInput is the input for WaitForControlPlaneToBeReady.
+type WaitForControlPlaneToBeReadyInput struct {
+	Getter       Getter
+	ControlPlane *controlplanev1.KubeadmControlPlane
+}
+
+// WaitForControlPlaneToBeReady will wait for a control plane to be ready.
+// TODO(chuckha): Once we implement control plane Ready, we should update this to actually wait for ready.
+// TODO(chuckha): In the meantime this uses initialized as a placeholder for Ready.
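+//
+// Illustrative call (mgmtClient, controlPlane and the intervals are
+// placeholders supplied by the calling test):
+//
+//	WaitForControlPlaneToBeReady(ctx, WaitForControlPlaneToBeReadyInput{
+//		Getter:       mgmtClient,
+//		ControlPlane: controlPlane,
+//	}, "10m", "10s")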
+func WaitForControlPlaneToBeReady(ctx context.Context, input WaitForControlPlaneToBeReadyInput, intervals ...interface{}) {
 	By("waiting for the control plane to be ready")
-	Eventually(func() bool {
+	Eventually(func() (bool, error) {
 		controlplane := &controlplanev1.KubeadmControlPlane{}
 		key := client.ObjectKey{
 			Namespace: input.ControlPlane.GetNamespace(),
 			Name:      input.ControlPlane.GetName(),
 		}
-		if err := mgmtClient.Get(ctx, key, controlplane); err != nil {
-			fmt.Println(err.Error())
-			return false
+		if err := input.Getter.Get(ctx, key, controlplane); err != nil {
+			return false, err
 		}
-		return controlplane.Status.Initialized
-	}, input.CreateTimeout, 10*time.Second).Should(BeTrue())
-}
-
-// CleanUpCoreArtifacts deletes the cluster and waits for everything to be gone.
-// Assertions:
-//   * Deletes Machines
-//   * Deletes MachineSets
-//   * Deletes MachineDeployments
-//   * Deletes KubeadmConfigs
-//   * Deletes Secrets
-func (input *ControlplaneClusterInput) CleanUpCoreArtifacts() {
-	input.SetDefaults()
-	ctx := context.Background()
-	mgmtClient, err := input.Management.GetClient()
-	Expect(err).NotTo(HaveOccurred(), "stack: %+v", err)
+		// TODO: Return status.Ready instead...
+		return controlplane.Status.Initialized, nil
+	}, intervals...).Should(BeTrue())
+}
+
+// DeleteClusterInput is the input for DeleteCluster.
+type DeleteClusterInput struct {
+	Deleter Deleter
+	Cluster *clusterv1.Cluster
+}
+
+// DeleteCluster deletes the cluster; use WaitForClusterDeleted to wait for everything the cluster owned to actually be gone.
+func DeleteCluster(ctx context.Context, input DeleteClusterInput) {
 	By(fmt.Sprintf("deleting cluster %s", input.Cluster.GetName()))
-	Expect(mgmtClient.Delete(ctx, input.Cluster)).To(Succeed())
+	Expect(input.Deleter.Delete(ctx, input.Cluster)).To(Succeed())
+}
+
+// WaitForClusterDeletedInput is the input for WaitForClusterDeleted.
+type WaitForClusterDeletedInput struct {
+	Getter  Getter
+	Cluster *clusterv1.Cluster
+}
+
+// WaitForClusterDeleted waits until the cluster object has been deleted.
+func WaitForClusterDeleted(ctx context.Context, input WaitForClusterDeletedInput, intervals ...interface{}) {
+	By(fmt.Sprintf("waiting for cluster %s to be deleted", input.Cluster.GetName()))
 	Eventually(func() bool {
-		clusters := clusterv1.ClusterList{}
-		if err := mgmtClient.List(ctx, &clusters); err != nil {
-			fmt.Println(err.Error())
-			return false
+		cluster := &clusterv1.Cluster{}
+		key := client.ObjectKey{
+			Namespace: input.Cluster.GetNamespace(),
+			Name:      input.Cluster.GetName(),
 		}
-		return len(clusters.Items) == 0
-	}, input.DeleteTimeout, eventuallyInterval).Should(BeTrue())
+		return apierrors.IsNotFound(input.Getter.Get(ctx, key, cluster))
+	}, intervals...).Should(BeTrue())
+}
+
+// AssertAllClusterAPIResourcesAreGoneInput is the input for AssertAllClusterAPIResourcesAreGone.
+type AssertAllClusterAPIResourcesAreGoneInput struct {
+	Lister  Lister
+	Cluster *clusterv1.Cluster
+}
+
+// AssertAllClusterAPIResourcesAreGone ensures that all known Cluster API resources have been removed.
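+// It is typically run after WaitForClusterDeleted, for example
+// (mgmtClient and cluster are placeholders from the calling test):
+//
+//	AssertAllClusterAPIResourcesAreGone(ctx, AssertAllClusterAPIResourcesAreGoneInput{
+//		Lister:  mgmtClient,
+//		Cluster: cluster,
+//	})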
+func AssertAllClusterAPIResourcesAreGone(ctx context.Context, input AssertAllClusterAPIResourcesAreGoneInput) { lbl, err := labels.Parse(fmt.Sprintf("%s=%s", clusterv1.ClusterLabelName, input.Cluster.GetClusterName())) Expect(err).ToNot(HaveOccurred()) - listOpts := &client.ListOptions{LabelSelector: lbl} + opt := &client.ListOptions{LabelSelector: lbl} By("ensuring all CAPI artifacts have been deleted") - ensureArtifactsDeleted(ctx, mgmtClient, listOpts) -} -func ensureArtifactsDeleted(ctx context.Context, mgmtClient client.Client, opt client.ListOption) { - // assertions ml := &clusterv1.MachineList{} - Expect(mgmtClient.List(ctx, ml, opt)).To(Succeed()) + Expect(input.Lister.List(ctx, ml, opt)).To(Succeed()) Expect(ml.Items).To(HaveLen(0)) msl := &clusterv1.MachineSetList{} - Expect(mgmtClient.List(ctx, msl, opt)).To(Succeed()) + Expect(input.Lister.List(ctx, msl, opt)).To(Succeed()) Expect(msl.Items).To(HaveLen(0)) mdl := &clusterv1.MachineDeploymentList{} - Expect(mgmtClient.List(ctx, mdl, opt)).To(Succeed()) + Expect(input.Lister.List(ctx, mdl, opt)).To(Succeed()) Expect(mdl.Items).To(HaveLen(0)) kcpl := &controlplanev1.KubeadmControlPlaneList{} - Expect(mgmtClient.List(ctx, kcpl, opt)).To(Succeed()) + Expect(input.Lister.List(ctx, kcpl, opt)).To(Succeed()) Expect(kcpl.Items).To(HaveLen(0)) kcl := &cabpkv1.KubeadmConfigList{} - Expect(mgmtClient.List(ctx, kcl, opt)).To(Succeed()) + Expect(input.Lister.List(ctx, kcl, opt)).To(Succeed()) Expect(kcl.Items).To(HaveLen(0)) sl := &corev1.SecretList{} - Expect(mgmtClient.List(ctx, sl, opt)).To(Succeed()) + Expect(input.Lister.List(ctx, sl, opt)).To(Succeed()) Expect(sl.Items).To(HaveLen(0)) } diff --git a/test/framework/deprecated.go b/test/framework/deprecated.go new file mode 100644 index 000000000000..a129d0c7a2ea --- /dev/null +++ b/test/framework/deprecated.go @@ -0,0 +1,270 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package framework + +import ( + "context" + "fmt" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/pkg/errors" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" + cabpkv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha3" + + "k8s.io/apimachinery/pkg/runtime" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" + controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha3" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + // eventuallyInterval is the polling interval used by gomega's Eventually + // Deprecated + eventuallyInterval = 10 * time.Second +) + +// MachineDeployment contains the objects needed to create a +// CAPI MachineDeployment resource and its associated template +// resources. +// Deprecated. Please use the individual create/assert methods. +type MachineDeployment struct { + MachineDeployment *clusterv1.MachineDeployment + BootstrapConfigTemplate runtime.Object + InfraMachineTemplate runtime.Object +} + +// Node contains all the pieces necessary to make a single node +// Deprecated. 
+type Node struct { + Machine *clusterv1.Machine + InfraMachine runtime.Object + BootstrapConfig runtime.Object +} + +// ControlplaneClusterInput defines the necessary dependencies to run a multi-node control plane cluster. +// Deprecated. +type ControlplaneClusterInput struct { + Management ManagementCluster + Cluster *clusterv1.Cluster + InfraCluster runtime.Object + Nodes []Node + MachineDeployment MachineDeployment + RelatedResources []runtime.Object + CreateTimeout time.Duration + DeleteTimeout time.Duration + + ControlPlane *controlplanev1.KubeadmControlPlane + MachineTemplate runtime.Object +} + +// SetDefaults defaults the struct fields if necessary. +// Deprecated. +func (input *ControlplaneClusterInput) SetDefaults() { + if input.CreateTimeout == 0 { + input.CreateTimeout = 10 * time.Minute + } + + if input.DeleteTimeout == 0 { + input.DeleteTimeout = 5 * time.Minute + } +} + +// ControlPlaneCluster creates an n node control plane cluster. +// Assertions: +// * The number of nodes in the created cluster will equal the number +// of control plane nodes plus the number of replicas in the machine +// deployment. +// Deprecated. Please use the supplied functions below to get the exact behavior desired. +func (input *ControlplaneClusterInput) ControlPlaneCluster() { + ctx := context.Background() + Expect(input.Management).ToNot(BeNil()) + + mgmtClient, err := input.Management.GetClient() + Expect(err).NotTo(HaveOccurred(), "stack: %+v", err) + + By("creating an InfrastructureCluster resource") + Expect(mgmtClient.Create(ctx, input.InfraCluster)).To(Succeed()) + + // This call happens in an eventually because of a race condition with the + // webhook server. If the latter isn't fully online then this call will + // fail. + By("creating a Cluster resource linked to the InfrastructureCluster resource") + Eventually(func() error { + if err := mgmtClient.Create(ctx, input.Cluster); err != nil { + fmt.Printf("%+v\n", err) + return err + } + return nil + }, input.CreateTimeout, eventuallyInterval).Should(BeNil()) + + By("creating related resources") + for i := range input.RelatedResources { + obj := input.RelatedResources[i] + By(fmt.Sprintf("creating a/an %s resource", obj.GetObjectKind().GroupVersionKind())) + Eventually(func() error { + return mgmtClient.Create(ctx, obj) + }, input.CreateTimeout, eventuallyInterval).Should(BeNil()) + } + + By("creating the machine template") + Expect(mgmtClient.Create(ctx, input.MachineTemplate)).To(Succeed()) + + By("creating a KubeadmControlPlane") + Eventually(func() error { + err := mgmtClient.Create(ctx, input.ControlPlane) + if err != nil { + fmt.Println(err) + } + return err + }, input.CreateTimeout, 10*time.Second).Should(BeNil()) + + By("waiting for cluster to enter the provisioned phase") + Eventually(func() (string, error) { + cluster := &clusterv1.Cluster{} + key := client.ObjectKey{ + Namespace: input.Cluster.GetNamespace(), + Name: input.Cluster.GetName(), + } + if err := mgmtClient.Get(ctx, key, cluster); err != nil { + return "", err + } + return cluster.Status.Phase, nil + }, input.CreateTimeout, eventuallyInterval).Should(Equal(string(clusterv1.ClusterPhaseProvisioned))) + + // Create the machine deployment if the replica count >0. 
+ if machineDeployment := input.MachineDeployment.MachineDeployment; machineDeployment != nil { + if replicas := machineDeployment.Spec.Replicas; replicas != nil && *replicas > 0 { + By("creating a core MachineDeployment resource") + Expect(mgmtClient.Create(ctx, machineDeployment)).To(Succeed()) + + By("creating a BootstrapConfigTemplate resource") + Expect(mgmtClient.Create(ctx, input.MachineDeployment.BootstrapConfigTemplate)).To(Succeed()) + + By("creating an InfrastructureMachineTemplate resource") + Expect(mgmtClient.Create(ctx, input.MachineDeployment.InfraMachineTemplate)).To(Succeed()) + } + + By("waiting for the workload nodes to exist") + Eventually(func() ([]v1.Node, error) { + workloadClient, err := input.Management.GetWorkloadClient(ctx, input.Cluster.Namespace, input.Cluster.Name) + if err != nil { + return nil, errors.Wrap(err, "failed to get workload client") + } + nodeList := v1.NodeList{} + if err := workloadClient.List(ctx, &nodeList); err != nil { + return nil, err + } + return nodeList.Items, nil + }, input.CreateTimeout, 10*time.Second).Should(HaveLen(int(*machineDeployment.Spec.Replicas))) + } + + By("waiting for all machines to be running") + inClustersNamespaceListOption := client.InNamespace(input.Cluster.Namespace) + matchClusterListOption := client.MatchingLabels{clusterv1.ClusterLabelName: input.Cluster.Name} + Eventually(func() (bool, error) { + // Get a list of all the Machine resources that belong to the Cluster. + machineList := &clusterv1.MachineList{} + if err := mgmtClient.List(ctx, machineList, inClustersNamespaceListOption, matchClusterListOption); err != nil { + return false, err + } + for _, machine := range machineList.Items { + if machine.Status.Phase != string(clusterv1.MachinePhaseRunning) { + return false, errors.Errorf("machine %s is not running, it's %s", machine.Name, machine.Status.Phase) + } + } + return true, nil + }, input.CreateTimeout, eventuallyInterval).Should(BeTrue()) + // wait for the control plane to be ready + By("waiting for the control plane to be ready") + Eventually(func() bool { + controlplane := &controlplanev1.KubeadmControlPlane{} + key := client.ObjectKey{ + Namespace: input.ControlPlane.GetNamespace(), + Name: input.ControlPlane.GetName(), + } + if err := mgmtClient.Get(ctx, key, controlplane); err != nil { + fmt.Println(err.Error()) + return false + } + return controlplane.Status.Initialized + }, input.CreateTimeout, 10*time.Second).Should(BeTrue()) +} + +// CleanUpCoreArtifacts deletes the cluster and waits for everything to be gone. 
+// Assertions made on objects owned by the Cluster: +// * All Machines are removed +// * All MachineSets are removed +// * All MachineDeployments are removed +// * All KubeadmConfigs are removed +// * All Secrets are removed +// Deprecated +func (input *ControlplaneClusterInput) CleanUpCoreArtifacts() { + input.SetDefaults() + ctx := context.Background() + mgmtClient, err := input.Management.GetClient() + Expect(err).NotTo(HaveOccurred(), "stack: %+v", err) + + By(fmt.Sprintf("deleting cluster %s", input.Cluster.GetName())) + Expect(mgmtClient.Delete(ctx, input.Cluster)).To(Succeed()) + + Eventually(func() bool { + clusters := clusterv1.ClusterList{} + if err := mgmtClient.List(ctx, &clusters); err != nil { + fmt.Println(err.Error()) + return false + } + return len(clusters.Items) == 0 + }, input.DeleteTimeout, eventuallyInterval).Should(BeTrue()) + + lbl, err := labels.Parse(fmt.Sprintf("%s=%s", clusterv1.ClusterLabelName, input.Cluster.GetClusterName())) + Expect(err).ToNot(HaveOccurred()) + listOpts := &client.ListOptions{LabelSelector: lbl} + + By("ensuring all CAPI artifacts have been deleted") + ensureArtifactsDeleted(ctx, mgmtClient, listOpts) +} + +// Deprecated +func ensureArtifactsDeleted(ctx context.Context, mgmtClient Lister, opt client.ListOption) { + // assertions + ml := &clusterv1.MachineList{} + Expect(mgmtClient.List(ctx, ml, opt)).To(Succeed()) + Expect(ml.Items).To(HaveLen(0)) + + msl := &clusterv1.MachineSetList{} + Expect(mgmtClient.List(ctx, msl, opt)).To(Succeed()) + Expect(msl.Items).To(HaveLen(0)) + + mdl := &clusterv1.MachineDeploymentList{} + Expect(mgmtClient.List(ctx, mdl, opt)).To(Succeed()) + Expect(mdl.Items).To(HaveLen(0)) + + kcpl := &controlplanev1.KubeadmControlPlaneList{} + Expect(mgmtClient.List(ctx, kcpl, opt)).To(Succeed()) + Expect(kcpl.Items).To(HaveLen(0)) + + kcl := &cabpkv1.KubeadmConfigList{} + Expect(mgmtClient.List(ctx, kcl, opt)).To(Succeed()) + Expect(kcl.Items).To(HaveLen(0)) + + sl := &v1.SecretList{} + Expect(mgmtClient.List(ctx, sl, opt)).To(Succeed()) + Expect(sl.Items).To(HaveLen(0)) +} diff --git a/test/framework/types.go b/test/framework/types.go index ec302eee94ff..a203972ec95e 100644 --- a/test/framework/types.go +++ b/test/framework/types.go @@ -18,29 +18,10 @@ package framework import ( "reflect" - - "k8s.io/apimachinery/pkg/runtime" - clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" ) -// Node contains all the pieces necessary to make a single node -type Node struct { - Machine *clusterv1.Machine - InfraMachine runtime.Object - BootstrapConfig runtime.Object -} - // TypeToKind returns the Kind without the package prefix. Pass in a pointer to a struct // This will panic if used incorrectly. func TypeToKind(i interface{}) string { return reflect.ValueOf(i).Elem().Type().Name() } - -// MachineDeployment contains the objects needed to create a -// CAPI MachineDeployment resource and its associated template -// resources. -type MachineDeployment struct { - MachineDeployment *clusterv1.MachineDeployment - BootstrapConfigTemplate runtime.Object - InfraMachineTemplate runtime.Object -} diff --git a/test/infrastructure/docker/e2e/custom_assertions.go b/test/infrastructure/docker/e2e/custom_assertions.go new file mode 100644 index 000000000000..f0b2c2e413ef --- /dev/null +++ b/test/infrastructure/docker/e2e/custom_assertions.go @@ -0,0 +1,62 @@ +// +build e2e + +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "context" + "fmt" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/labels" + + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" + "sigs.k8s.io/cluster-api/test/framework" + infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1alpha3" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ensureDockerArtifactsDeletedInput is an example of a provider specific assertion. +type ensureDockerArtifactsDeletedInput struct { + Lister framework.Lister + Cluster *clusterv1.Cluster +} + +// ensureDockerArtifactsDeleted ensure we have cleaned up provider specific objects. +func ensureDockerArtifactsDeleted(input ensureDockerArtifactsDeletedInput) { + By("Ensuring docker artifacts have been deleted") + ctx := context.Background() + + lbl, err := labels.Parse(fmt.Sprintf("%s=%s", clusterv1.ClusterLabelName, input.Cluster.GetClusterName())) + Expect(err).ToNot(HaveOccurred()) + opt := &client.ListOptions{LabelSelector: lbl} + + dcl := &infrav1.DockerClusterList{} + Expect(input.Lister.List(ctx, dcl, opt)).To(Succeed()) + Expect(dcl.Items).To(HaveLen(0)) + + dml := &infrav1.DockerMachineList{} + Expect(input.Lister.List(ctx, dml, opt)).To(Succeed()) + Expect(dml.Items).To(HaveLen(0)) + + dmtl := &infrav1.DockerMachineTemplateList{} + Expect(input.Lister.List(ctx, dmtl, opt)).To(Succeed()) + Expect(dmtl.Items).To(HaveLen(0)) + By("Succeeding in deleting all docker artifacts") +} diff --git a/test/infrastructure/docker/e2e/docker_suite_test.go b/test/infrastructure/docker/e2e/docker_suite_test.go index 6b068dfef748..c638836ad7be 100644 --- a/test/infrastructure/docker/e2e/docker_suite_test.go +++ b/test/infrastructure/docker/e2e/docker_suite_test.go @@ -37,9 +37,7 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" - clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" "sigs.k8s.io/cluster-api/test/framework" infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1alpha3" "sigs.k8s.io/cluster-api/util" @@ -117,30 +115,6 @@ var _ = AfterSuite(func() { mgmt.Teardown(ctx) }) -func ensureDockerArtifactsDeleted(input *framework.ControlplaneClusterInput) { - By("Ensuring docker artifacts have been deleted") - ctx := context.Background() - mgmtClient, err := input.Management.GetClient() - Expect(err).NotTo(HaveOccurred(), "stack: %+v", err) - - lbl, err := labels.Parse(fmt.Sprintf("%s=%s", clusterv1.ClusterLabelName, input.Cluster.GetClusterName())) - Expect(err).ToNot(HaveOccurred()) - opt := &client.ListOptions{LabelSelector: lbl} - - dcl := &infrav1.DockerClusterList{} - Expect(mgmtClient.List(ctx, dcl, opt)).To(Succeed()) - Expect(dcl.Items).To(HaveLen(0)) - - dml := &infrav1.DockerMachineList{} - Expect(mgmtClient.List(ctx, dml, opt)).To(Succeed()) - Expect(dml.Items).To(HaveLen(0)) - - dmtl := &infrav1.DockerMachineTemplateList{} - Expect(mgmtClient.List(ctx, dmtl, opt)).To(Succeed()) - Expect(dmtl.Items).To(HaveLen(0)) - By("Succeeding in deleting all docker artifacts") -} - func writeLogs(mgmt 
*CAPDCluster, namespace, deploymentName, logDir string) error { c, err := mgmt.GetClient() if err != nil { diff --git a/test/infrastructure/docker/e2e/docker_test.go b/test/infrastructure/docker/e2e/docker_test.go index f838b762d9a6..cb884810124a 100644 --- a/test/infrastructure/docker/e2e/docker_test.go +++ b/test/infrastructure/docker/e2e/docker_test.go @@ -24,38 +24,69 @@ import ( . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" - controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha3" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha3" "sigs.k8s.io/cluster-api/bootstrap/kubeadm/types/v1beta1" + controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha3" "sigs.k8s.io/cluster-api/test/framework" infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1alpha3" - "sigs.k8s.io/controller-runtime/pkg/client" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" ) var _ = Describe("Docker", func() { Describe("Cluster Creation", func() { var ( namespace string - input *framework.ControlplaneClusterInput + client ctrlclient.Client clusterGen = &ClusterGenerator{} + cluster *clusterv1.Cluster ) + SetDefaultEventuallyTimeout(3 * time.Minute) + SetDefaultEventuallyPollingInterval(10 * time.Second) BeforeEach(func() { namespace = "default" }) AfterEach(func() { - ensureDockerArtifactsDeleted(input) + deleteClusterInput := framework.DeleteClusterInput{ + Deleter: client, + Cluster: cluster, + } + framework.DeleteCluster(ctx, deleteClusterInput) + + waitForClusterDeletedInput := framework.WaitForClusterDeletedInput{ + Getter: client, + Cluster: cluster, + } + framework.WaitForClusterDeleted(ctx, waitForClusterDeletedInput) + + assertAllClusterAPIResourcesAreGoneInput := framework.AssertAllClusterAPIResourcesAreGoneInput{ + Lister: client, + Cluster: cluster, + } + framework.AssertAllClusterAPIResourcesAreGone(ctx, assertAllClusterAPIResourcesAreGoneInput) + + ensureDockerDeletedInput := ensureDockerArtifactsDeletedInput{ + Lister: client, + Cluster: cluster, + } + ensureDockerArtifactsDeleted(ensureDockerDeletedInput) }) Context("Multi-node controlplane cluster", func() { + It("should create a multi-node controlplane cluster", func() { replicas := 3 - cluster, infraCluster, controlPlane, template := clusterGen.GenerateCluster(namespace, int32(replicas)) + var ( + infraCluster *infrav1.DockerCluster + template *infrav1.DockerMachineTemplate + controlPlane *controlplanev1.KubeadmControlPlane + ) + cluster, infraCluster, controlPlane, template = clusterGen.GenerateCluster(namespace, int32(replicas)) // Set failure domains here infraCluster.Spec.FailureDomains = clusterv1.FailureDomains{ "domain-one": {ControlPlane: true}, @@ -65,25 +96,70 @@ var _ = Describe("Docker", func() { } md, infraTemplate, bootstrapTemplate := GenerateMachineDeployment(cluster, 1) - input = &framework.ControlplaneClusterInput{ - Management: mgmt, - Cluster: cluster, - InfraCluster: infraCluster, - CreateTimeout: 5 * time.Minute, - MachineDeployment: framework.MachineDeployment{ - MachineDeployment: md, - InfraMachineTemplate: infraTemplate, - BootstrapConfigTemplate: bootstrapTemplate, - }, + + // Set up the client to the management cluster + client, err := mgmt.GetClient() + Expect(err).NotTo(HaveOccurred()) + + // Set up the cluster object + createClusterInput := framework.CreateClusterInput{ + Creator: client, + Cluster: cluster, + 
InfraCluster: infraCluster, + } + framework.CreateCluster(ctx, createClusterInput) + + // Set up the KubeadmControlPlane + createKubeadmControlPlaneInput := framework.CreateKubeadmControlPlaneInput{ + Creator: client, ControlPlane: controlPlane, MachineTemplate: template, } - input.ControlPlaneCluster() + framework.CreateKubeadmControlPlane(ctx, createKubeadmControlPlaneInput) + + // Wait for the cluster to provision. + assertClusterProvisionsInput := framework.WaitForClusterToProvisionInput{ + Getter: client, + Cluster: cluster, + } + framework.WaitForClusterToProvision(ctx, assertClusterProvisionsInput) + + // Create the workload nodes + createMachineDeploymentinput := framework.CreateMachineDeploymentInput{ + Creator: client, + MachineDeployment: md, + BootstrapConfigTemplate: bootstrapTemplate, + InfraMachineTemplate: infraTemplate, + } + framework.CreateMachineDeployment(ctx, createMachineDeploymentinput) + + // Wait for the controlplane nodes to exist + assertKubeadmControlPlaneNodesExistInput := framework.WaitForKubeadmControlPlaneMachinesToExistInput{ + Lister: client, + Cluster: cluster, + ControlPlane: controlPlane, + } + framework.WaitForKubeadmControlPlaneMachinesToExist(ctx, assertKubeadmControlPlaneNodesExistInput) + + // Wait for the workload nodes to exist + waitForMachineDeploymentNodesToExistInput := framework.WaitForMachineDeploymentNodesToExistInput{ + Management: mgmt, + Cluster: cluster, + MachineDeployment: md, + } + framework.WaitForMachineDeploymentNodesToExist(ctx, waitForMachineDeploymentNodesToExistInput) + + // Wait for the control plane to be ready + waitForControlPlaneToBeReadyInput := framework.WaitForControlPlaneToBeReadyInput{ + Getter: client, + ControlPlane: controlPlane, + } + framework.WaitForControlPlaneToBeReady(ctx, waitForControlPlaneToBeReadyInput) // Custom expectations around Failure Domains By("waiting for all machines to be running") - inClustersNamespaceListOption := client.InNamespace(cluster.GetNamespace()) - matchClusterListOption := client.MatchingLabels{ + inClustersNamespaceListOption := ctrlclient.InNamespace(cluster.GetNamespace()) + matchClusterListOption := ctrlclient.MatchingLabels{ clusterv1.ClusterLabelName: cluster.GetName(), clusterv1.MachineControlPlaneLabelName: "", } @@ -115,7 +191,29 @@ var _ = Describe("Docker", func() { Expect(failureDomainCounts[id]).To(Equal(1), "each failure domain should have exactly one control plane: %v", failureDomainCounts) } - input.CleanUpCoreArtifacts() + deleteClusterInput := framework.DeleteClusterInput{ + Deleter: client, + Cluster: cluster, + } + framework.DeleteCluster(ctx, deleteClusterInput) + + waitForClusterDeletedInput := framework.WaitForClusterDeletedInput{ + Getter: client, + Cluster: cluster, + } + framework.WaitForClusterDeleted(ctx, waitForClusterDeletedInput) + + assertAllClusterAPIResourcesAreGoneInput := framework.AssertAllClusterAPIResourcesAreGoneInput{ + Lister: client, + Cluster: cluster, + } + framework.AssertAllClusterAPIResourcesAreGone(ctx, assertAllClusterAPIResourcesAreGoneInput) + + ensureDockerDeletedInput := ensureDockerArtifactsDeletedInput{ + Lister: client, + Cluster: cluster, + } + ensureDockerArtifactsDeleted(ensureDockerDeletedInput) }) }) })