diff --git a/test/infrastructure/docker/Makefile b/test/infrastructure/docker/Makefile
index 198535aab7b6..f217393ddd5b 100644
--- a/test/infrastructure/docker/Makefile
+++ b/test/infrastructure/docker/Makefile
@@ -92,7 +92,7 @@ test-e2e: ## Run the end-to-end tests
 E2E_CONF_FILE ?= e2e/local-e2e.conf
 SKIP_RESOURCE_CLEANUP ?= false
 run-e2e:
-	go test ./e2e -v -ginkgo.v -ginkgo.trace -count=1 -timeout=20m -tags=e2e -e2e.config="$(abspath $(E2E_CONF_FILE))" -skip-resource-cleanup=$(SKIP_RESOURCE_CLEANUP)
+	go test ./e2e -v -ginkgo.v -ginkgo.trace -count=1 -timeout=35m -tags=e2e -e2e.config="$(abspath $(E2E_CONF_FILE))" -skip-resource-cleanup=$(SKIP_RESOURCE_CLEANUP)
 
 ## --------------------------------------
 ## Binaries
diff --git a/test/infrastructure/docker/e2e/docker_test.go b/test/infrastructure/docker/e2e/docker_test.go
index ef1c241b7908..7cf27824cb08 100644
--- a/test/infrastructure/docker/e2e/docker_test.go
+++ b/test/infrastructure/docker/e2e/docker_test.go
@@ -19,7 +19,6 @@ limitations under the License.
 package e2e
 
 import (
-	"errors"
 	"fmt"
 	"net/http"
 	"time"
@@ -28,10 +27,8 @@ import (
 	. "github.com/onsi/gomega"
 	"k8s.io/apimachinery/pkg/runtime"
 
-	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime"
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
 	bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha3"
 	"sigs.k8s.io/cluster-api/bootstrap/kubeadm/types/v1beta1"
@@ -39,218 +36,166 @@ import (
 	"sigs.k8s.io/cluster-api/test/framework"
 	infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1alpha3"
 	"sigs.k8s.io/cluster-api/util"
-	"sigs.k8s.io/cluster-api/util/patch"
 	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
 )
 
-var _ = Describe("Docker", func() {
-	Describe("Cluster Creation", func() {
+var _ = Describe("Docker Create", func() {
+	var (
+		namespace  = "default"
+		clusterGen = newClusterGenerator("create")
+		mgmtClient ctrlclient.Client
+		cluster    *clusterv1.Cluster
+	)
+	SetDefaultEventuallyTimeout(10 * time.Minute)
+	SetDefaultEventuallyPollingInterval(10 * time.Second)
+
+	AfterEach(func() {
+		// Delete the workload cluster
+		deleteClusterInput := framework.DeleteClusterInput{
+			Deleter: mgmtClient,
+			Cluster: cluster,
+		}
+		framework.DeleteCluster(ctx, deleteClusterInput)
+
+		waitForClusterDeletedInput := framework.WaitForClusterDeletedInput{
+			Getter:  mgmtClient,
+			Cluster: cluster,
+		}
+		framework.WaitForClusterDeleted(ctx, waitForClusterDeletedInput)
+
+		assertAllClusterAPIResourcesAreGoneInput := framework.AssertAllClusterAPIResourcesAreGoneInput{
+			Lister:  mgmtClient,
+			Cluster: cluster,
+		}
+		framework.AssertAllClusterAPIResourcesAreGone(ctx, assertAllClusterAPIResourcesAreGoneInput)
+
+		ensureDockerDeletedInput := ensureDockerArtifactsDeletedInput{
+			Lister:  mgmtClient,
+			Cluster: cluster,
+		}
+		ensureDockerArtifactsDeleted(ensureDockerDeletedInput)
+
+		// Dump cluster API and docker related resources to artifacts before deleting them.
+		Expect(framework.DumpResources(mgmt, resourcesPath, GinkgoWriter)).To(Succeed())
+		resources := map[string]runtime.Object{
+			"DockerCluster":         &infrav1.DockerClusterList{},
+			"DockerMachine":         &infrav1.DockerMachineList{},
+			"DockerMachineTemplate": &infrav1.DockerMachineTemplateList{},
+		}
+		Expect(framework.DumpProviderResources(mgmt, resources, resourcesPath, GinkgoWriter)).To(Succeed())
+	})
+
+	Specify("multi-node cluster with failure domains", func() {
+		replicas := 3
 		var (
-			namespace      = "default"
-			clusterGen     = newClusterGenerator("")
-			workloadClient ctrlclient.Client
-			mgmtClient     ctrlclient.Client
-			cluster        *clusterv1.Cluster
+			infraCluster *infrav1.DockerCluster
+			template     *infrav1.DockerMachineTemplate
+			controlPlane *controlplanev1.KubeadmControlPlane
+			err          error
 		)
-		SetDefaultEventuallyTimeout(10 * time.Minute)
-		SetDefaultEventuallyPollingInterval(10 * time.Second)
-
-		AfterEach(func() {
-			// Delete the workload cluster
-			deleteClusterInput := framework.DeleteClusterInput{
-				Deleter: mgmtClient,
-				Cluster: cluster,
-			}
-			framework.DeleteCluster(ctx, deleteClusterInput)
-
-			waitForClusterDeletedInput := framework.WaitForClusterDeletedInput{
-				Getter:  mgmtClient,
-				Cluster: cluster,
-			}
-			framework.WaitForClusterDeleted(ctx, waitForClusterDeletedInput)
-
-			assertAllClusterAPIResourcesAreGoneInput := framework.AssertAllClusterAPIResourcesAreGoneInput{
-				Lister:  mgmtClient,
-				Cluster: cluster,
-			}
-			framework.AssertAllClusterAPIResourcesAreGone(ctx, assertAllClusterAPIResourcesAreGoneInput)
-
-			ensureDockerDeletedInput := ensureDockerArtifactsDeletedInput{
-				Lister:  mgmtClient,
-				Cluster: cluster,
-			}
-			ensureDockerArtifactsDeleted(ensureDockerDeletedInput)
-
-			// Dump cluster API and docker related resources to artifacts before deleting them.
-			Expect(framework.DumpResources(mgmt, resourcesPath, GinkgoWriter)).To(Succeed())
-			resources := map[string]runtime.Object{
-				"DockerCluster":         &infrav1.DockerClusterList{},
-				"DockerMachine":         &infrav1.DockerMachineList{},
-				"DockerMachineTemplate": &infrav1.DockerMachineTemplateList{},
-			}
-			Expect(framework.DumpProviderResources(mgmt, resources, resourcesPath, GinkgoWriter)).To(Succeed())
-		})
-
-		Describe("Multi-node controlplane cluster", func() {
-			var controlPlane *controlplanev1.KubeadmControlPlane
-
-			Specify("Basic create", func() {
-				replicas := 3
-				var (
-					infraCluster *infrav1.DockerCluster
-					template     *infrav1.DockerMachineTemplate
-					err          error
-				)
-				cluster, infraCluster, controlPlane, template = clusterGen.GenerateCluster(namespace, int32(replicas))
-				// Set failure domains here
-				infraCluster.Spec.FailureDomains = clusterv1.FailureDomains{
-					"domain-one":   {ControlPlane: true},
-					"domain-two":   {ControlPlane: true},
-					"domain-three": {ControlPlane: true},
-					"domain-four":  {ControlPlane: false},
-				}
-
-				md, infraTemplate, bootstrapTemplate := GenerateMachineDeployment(cluster, 1)
-
-				// Set up the client to the management cluster
-				mgmtClient, err = mgmt.GetClient()
-				Expect(err).NotTo(HaveOccurred())
-
-				// Set up the cluster object
-				createClusterInput := framework.CreateClusterInput{
-					Creator:      mgmtClient,
-					Cluster:      cluster,
-					InfraCluster: infraCluster,
-				}
-				framework.CreateCluster(ctx, createClusterInput)
-
-				// Set up the KubeadmControlPlane
-				createKubeadmControlPlaneInput := framework.CreateKubeadmControlPlaneInput{
-					Creator:         mgmtClient,
-					ControlPlane:    controlPlane,
-					MachineTemplate: template,
-				}
-				framework.CreateKubeadmControlPlane(ctx, createKubeadmControlPlaneInput)
-
-				// Wait for the cluster to provision.
-				assertClusterProvisionsInput := framework.WaitForClusterToProvisionInput{
-					Getter:  mgmtClient,
-					Cluster: cluster,
-				}
-				framework.WaitForClusterToProvision(ctx, assertClusterProvisionsInput)
-
-				// Wait for at least one control plane node to be ready
-				waitForOneKubeadmControlPlaneMachineToExistInput := framework.WaitForOneKubeadmControlPlaneMachineToExistInput{
-					Lister:       mgmtClient,
-					Cluster:      cluster,
-					ControlPlane: controlPlane,
-				}
-				framework.WaitForOneKubeadmControlPlaneMachineToExist(ctx, waitForOneKubeadmControlPlaneMachineToExistInput, "5m")
-
-				// Insatll a networking solution on the workload cluster
-				workloadClient, err = mgmt.GetWorkloadClient(ctx, cluster.Namespace, cluster.Name)
-				Expect(err).ToNot(HaveOccurred())
-				applyYAMLURLInput := framework.ApplyYAMLURLInput{
-					Client:        workloadClient,
-					HTTPGetter:    http.DefaultClient,
-					NetworkingURL: "https://docs.projectcalico.org/manifests/calico.yaml",
-					Scheme:        mgmt.Scheme,
-				}
-				framework.ApplyYAMLURL(ctx, applyYAMLURLInput)
-
-				// Wait for the controlplane nodes to exist
-				assertKubeadmControlPlaneNodesExistInput := framework.WaitForKubeadmControlPlaneMachinesToExistInput{
-					Lister:       mgmtClient,
-					Cluster:      cluster,
-					ControlPlane: controlPlane,
-				}
-				framework.WaitForKubeadmControlPlaneMachinesToExist(ctx, assertKubeadmControlPlaneNodesExistInput, "10m", "10s")
-
-				// Create the workload nodes
-				createMachineDeploymentinput := framework.CreateMachineDeploymentInput{
-					Creator:                 mgmtClient,
-					MachineDeployment:       md,
-					BootstrapConfigTemplate: bootstrapTemplate,
-					InfraMachineTemplate:    infraTemplate,
-				}
-				framework.CreateMachineDeployment(ctx, createMachineDeploymentinput)
-
-				// Wait for the workload nodes to exist
-				waitForMachineDeploymentNodesToExistInput := framework.WaitForMachineDeploymentNodesToExistInput{
-					Lister:            mgmtClient,
-					Cluster:           cluster,
-					MachineDeployment: md,
-				}
-				framework.WaitForMachineDeploymentNodesToExist(ctx, waitForMachineDeploymentNodesToExistInput)
-
-				// Wait for the control plane to be ready
-				waitForControlPlaneToBeReadyInput := framework.WaitForControlPlaneToBeReadyInput{
-					Getter:       mgmtClient,
-					ControlPlane: controlPlane,
-				}
-				framework.WaitForControlPlaneToBeReady(ctx, waitForControlPlaneToBeReadyInput)
-
-				// Assert failure domain is working as expected
-				assertControlPlaneFailureDomainInput := framework.AssertControlPlaneFailureDomainsInput{
-					GetLister:  mgmtClient,
-					ClusterKey: util.ObjectKey(cluster),
-					ExpectedFailureDomains: map[string]int{
-						"domain-one":   1,
-						"domain-two":   1,
-						"domain-three": 1,
-						"domain-four":  0,
-					},
-				}
-				framework.AssertControlPlaneFailureDomains(ctx, assertControlPlaneFailureDomainInput)
-			})
-
-			Specify("Full upgrade", func() {
-				By("upgrading the control plane object to a new version")
-				patchHelper, err := patch.NewHelper(controlPlane, mgmtClient)
-				Expect(err).ToNot(HaveOccurred())
-				controlPlane.Spec.Version = "v1.17.2"
-				Expect(patchHelper.Patch(ctx, controlPlane)).To(Succeed())
-				By("waiting for all control plane nodes to exist")
-				inClustersNamespaceListOption := ctrlclient.InNamespace(cluster.Namespace)
-				// ControlPlane labels
-				matchClusterListOption := ctrlclient.MatchingLabels{
-					clusterv1.MachineControlPlaneLabelName: "",
-					clusterv1.ClusterLabelName:             cluster.Name,
-				}
-
-				Eventually(func() (int, error) {
-					machineList := &clusterv1.MachineList{}
-					if err := mgmtClient.List(ctx, machineList, inClustersNamespaceListOption, matchClusterListOption); err != nil {
-						fmt.Println(err)
-						return 0, err
-					}
-					upgraded := 0
-					for _, machine := range machineList.Items {
-						if *machine.Spec.Version == controlPlane.Spec.Version {
-							upgraded++
-						}
-					}
-					if len(machineList.Items) > upgraded {
-						return 0, errors.New("old nodes remain")
-					}
-					return upgraded, nil
-				}, "10m", "30s").Should(Equal(int(*controlPlane.Spec.Replicas)))
-				Eventually(func() (bool, error) {
-					ds := &appsv1.DaemonSet{}
-
-					if err := workloadClient.Get(ctx, ctrlclient.ObjectKey{Name: "kube-proxy", Namespace: metav1.NamespaceSystem}, ds); err != nil {
-						return false, err
-					}
-					if ds.Spec.Template.Spec.Containers[0].Image == "k8s.gcr.io/kube-proxy:v1.17.2" {
-						return true, nil
-					}
-
-					return false, nil
-				}, "10m", "30s").Should(BeTrue())
-
-			})
-		})
+		cluster, infraCluster, controlPlane, template = clusterGen.GenerateCluster(namespace, int32(replicas))
+		// Set failure domains here
+		infraCluster.Spec.FailureDomains = clusterv1.FailureDomains{
+			"domain-one":   {ControlPlane: true},
+			"domain-two":   {ControlPlane: true},
+			"domain-three": {ControlPlane: true},
+			"domain-four":  {ControlPlane: false},
+		}
+
+		md, infraTemplate, bootstrapTemplate := GenerateMachineDeployment(cluster, 1)
+
+		// Set up the client to the management cluster
+		mgmtClient, err = mgmt.GetClient()
+		Expect(err).NotTo(HaveOccurred())
+
+		// Set up the cluster object
+		createClusterInput := framework.CreateClusterInput{
+			Creator:      mgmtClient,
+			Cluster:      cluster,
+			InfraCluster: infraCluster,
+		}
+		framework.CreateCluster(ctx, createClusterInput)
+
+		// Set up the KubeadmControlPlane
+		createKubeadmControlPlaneInput := framework.CreateKubeadmControlPlaneInput{
+			Creator:         mgmtClient,
+			ControlPlane:    controlPlane,
+			MachineTemplate: template,
+		}
+		framework.CreateKubeadmControlPlane(ctx, createKubeadmControlPlaneInput)
+
+		// Wait for the cluster to provision.
+		assertClusterProvisionsInput := framework.WaitForClusterToProvisionInput{
+			Getter:  mgmtClient,
+			Cluster: cluster,
+		}
+		framework.WaitForClusterToProvision(ctx, assertClusterProvisionsInput)
+
+		// Wait for at least one control plane node to be ready
+		waitForOneKubeadmControlPlaneMachineToExistInput := framework.WaitForOneKubeadmControlPlaneMachineToExistInput{
+			Lister:       mgmtClient,
+			Cluster:      cluster,
+			ControlPlane: controlPlane,
+		}
+		framework.WaitForOneKubeadmControlPlaneMachineToExist(ctx, waitForOneKubeadmControlPlaneMachineToExistInput, "5m")
+
+		// Install a networking solution on the workload cluster
+		workloadClient, err := mgmt.GetWorkloadClient(ctx, cluster.Namespace, cluster.Name)
+		Expect(err).ToNot(HaveOccurred())
+		applyYAMLURLInput := framework.ApplyYAMLURLInput{
+			Client:        workloadClient,
+			HTTPGetter:    http.DefaultClient,
+			NetworkingURL: "https://docs.projectcalico.org/manifests/calico.yaml",
+			Scheme:        mgmt.Scheme,
+		}
+		framework.ApplyYAMLURL(ctx, applyYAMLURLInput)
+
+		// Wait for the controlplane nodes to exist
+		assertKubeadmControlPlaneNodesExistInput := framework.WaitForKubeadmControlPlaneMachinesToExistInput{
+			Lister:       mgmtClient,
+			Cluster:      cluster,
+			ControlPlane: controlPlane,
+		}
+		framework.WaitForKubeadmControlPlaneMachinesToExist(ctx, assertKubeadmControlPlaneNodesExistInput, "15m", "10s")
+
+		// Create the workload nodes
+		createMachineDeploymentinput := framework.CreateMachineDeploymentInput{
+			Creator:                 mgmtClient,
+			MachineDeployment:       md,
+			BootstrapConfigTemplate: bootstrapTemplate,
+			InfraMachineTemplate:    infraTemplate,
+		}
+		framework.CreateMachineDeployment(ctx, createMachineDeploymentinput)
+
+		// Wait for the workload nodes to exist
+		waitForMachineDeploymentNodesToExistInput := framework.WaitForMachineDeploymentNodesToExistInput{
+			Lister:            mgmtClient,
+			Cluster:           cluster,
+			MachineDeployment: md,
+		}
+		framework.WaitForMachineDeploymentNodesToExist(ctx, waitForMachineDeploymentNodesToExistInput)
+
+		// Wait for the control plane to be ready
+		waitForControlPlaneToBeReadyInput := framework.WaitForControlPlaneToBeReadyInput{
+			Getter:       mgmtClient,
+			ControlPlane: controlPlane,
+		}
+		framework.WaitForControlPlaneToBeReady(ctx, waitForControlPlaneToBeReadyInput)
+
+		// Assert failure domain is working as expected
+		assertControlPlaneFailureDomainInput := framework.AssertControlPlaneFailureDomainsInput{
+			GetLister:  mgmtClient,
+			ClusterKey: util.ObjectKey(cluster),
+			ExpectedFailureDomains: map[string]int{
+				"domain-one":   1,
+				"domain-two":   1,
+				"domain-three": 1,
+				"domain-four":  0,
+			},
+		}
+		framework.AssertControlPlaneFailureDomains(ctx, assertControlPlaneFailureDomainInput)
 	})
+
 })
 
 func GenerateMachineDeployment(cluster *clusterv1.Cluster, replicas int32) (*clusterv1.MachineDeployment, *infrav1.DockerMachineTemplate, *bootstrapv1.KubeadmConfigTemplate) {
diff --git a/test/infrastructure/docker/e2e/docker_upgrade_test.go b/test/infrastructure/docker/e2e/docker_upgrade_test.go
index 33eada6b7442..c59dfc589193 100644
--- a/test/infrastructure/docker/e2e/docker_upgrade_test.go
+++ b/test/infrastructure/docker/e2e/docker_upgrade_test.go
@@ -19,11 +19,14 @@ limitations under the License.
 package e2e
 
 import (
+	"errors"
+	"fmt"
 	"net/http"
 	"time"
 
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
+	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
@@ -176,22 +179,11 @@ var _ = Describe("Docker Upgrade", func() {
 		Expect(framework.DumpProviderResources(mgmt, resources, resourcesPath, GinkgoWriter)).To(Succeed())
 	})
 
-	It("upgrades etcd", func() {
-		// Before patching ensure all pods are ready in workload
-		// cluster
-		workloadClient, err := mgmt.GetWorkloadClient(ctx, cluster.Namespace, cluster.Name)
-		Expect(err).ToNot(HaveOccurred())
-		By("waiting for workload cluster pods to be Running")
-		waitForPodListConditionInput := framework.WaitForPodListConditionInput{
-			Lister:      workloadClient,
-			ListOptions: &client.ListOptions{Namespace: metav1.NamespaceSystem},
-			Condition:   framework.PhasePodCondition(corev1.PodRunning),
-		}
-		framework.WaitForPodListCondition(ctx, waitForPodListConditionInput)
-
-		By("patching KubeadmConfigSpec etcd image tag in the kubeadmControlPlane")
+	It("upgrades kubernetes, kube-proxy and etcd", func() {
+		By("upgrading kubernetes version and etcd image tag")
 		patchHelper, err := patch.NewHelper(controlPlane, mgmtClient)
 		Expect(err).ToNot(HaveOccurred())
+		controlPlane.Spec.Version = "v1.17.2"
 		controlPlane.Spec.KubeadmConfigSpec.ClusterConfiguration.Etcd = v1beta1.Etcd{
 			Local: &v1beta1.LocalEtcd{
 				ImageMeta: v1beta1.ImageMeta{
@@ -206,7 +198,60 @@ var _ = Describe("Docker Upgrade", func() {
 		}
 		Expect(patchHelper.Patch(ctx, controlPlane)).To(Succeed())
 
-		By("waiting for etcd pods to have the expected image tag")
+		inClustersNamespaceListOption := ctrlclient.InNamespace(cluster.Namespace)
+		// ControlPlane labels
+		matchClusterListOption := ctrlclient.MatchingLabels{
+			clusterv1.MachineControlPlaneLabelName: "",
+			clusterv1.ClusterLabelName:             cluster.Name,
+		}
+
+		By("ensuring all machines have upgraded kubernetes")
+		Eventually(func() (int, error) {
+			machineList := &clusterv1.MachineList{}
+			if err := mgmtClient.List(ctx, machineList, inClustersNamespaceListOption, matchClusterListOption); err != nil {
+				fmt.Println(err)
+				return 0, err
+			}
+			upgraded := 0
+			for _, machine := range machineList.Items {
+				if *machine.Spec.Version == controlPlane.Spec.Version {
+					upgraded++
+				}
+			}
+			if len(machineList.Items) > upgraded {
+				return 0, errors.New("old nodes remain")
+			}
+			return upgraded, nil
+		}, "10m", "30s").Should(Equal(int(*controlPlane.Spec.Replicas)))
+
+		workloadClient, err := mgmt.GetWorkloadClient(ctx, cluster.Namespace, cluster.Name)
+		Expect(err).ToNot(HaveOccurred())
+
+		By("ensuring kube-proxy has the correct image")
+		Eventually(func() (bool, error) {
+			ds := &appsv1.DaemonSet{}
+
+			if err := workloadClient.Get(ctx, ctrlclient.ObjectKey{Name: "kube-proxy", Namespace: metav1.NamespaceSystem}, ds); err != nil {
+				return false, err
+			}
+			if ds.Spec.Template.Spec.Containers[0].Image == "k8s.gcr.io/kube-proxy:v1.17.2" {
+				return true, nil
+			}
+
+			return false, nil
+		}, "10m", "30s").Should(BeTrue())
+
+		// Ensure all pods are ready in the workload cluster after the upgrade.
+		// Might not need this step any more.
+		By("waiting for workload cluster pods to be Running")
+		waitForPodListConditionInput := framework.WaitForPodListConditionInput{
+			Lister:      workloadClient,
+			ListOptions: &client.ListOptions{Namespace: metav1.NamespaceSystem},
+			Condition:   framework.PhasePodCondition(corev1.PodRunning),
+		}
+		framework.WaitForPodListCondition(ctx, waitForPodListConditionInput)
+
+		By("ensuring etcd pods have the correct image tag")
 		lblSelector, err := labels.Parse("component=etcd")
 		Expect(err).ToNot(HaveOccurred())
 		opt := &client.ListOptions{LabelSelector: lblSelector}
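Note on the polling assertions added above: gomega's Eventually re-runs the supplied function every polling interval until it returns the expected value with a nil error, or the timeout expires; the "10m", "30s" arguments are the timeout and the interval. Below is a minimal, self-contained sketch of that same pattern, outside the diff; the countUpgraded helper is hypothetical and only stands in for the List-and-count logic used by the upgrade test, it is not part of this change.

package e2e

import (
	"errors"
	"time"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

// countUpgraded is a hypothetical stand-in for the List-and-count logic in
// the upgrade test: it reports how many entries already match the target
// version and returns an error while any old entry remains.
func countUpgraded(target string, versions []string) (int, error) {
	upgraded := 0
	for _, v := range versions {
		if v == target {
			upgraded++
		}
	}
	if upgraded < len(versions) {
		return 0, errors.New("old nodes remain")
	}
	return upgraded, nil
}

var _ = Describe("Eventually polling sketch", func() {
	It("retries until the poll function succeeds", func() {
		versions := []string{"v1.17.2", "v1.17.2", "v1.17.2"}

		// Gomega calls the function again every 30s until it returns the
		// expected count with a nil error, or gives up after 10m -- the same
		// shape as the "10m", "30s" Eventually calls in the diff above.
		Eventually(func() (int, error) {
			return countUpgraded("v1.17.2", versions)
		}, 10*time.Minute, 30*time.Second).Should(Equal(3))
	})
})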