From 40b331ac9779f7fe3aa4164671dd9e995d6dd57a Mon Sep 17 00:00:00 2001 From: Matt Boersma Date: Wed, 12 May 2021 14:27:57 -0600 Subject: [PATCH] =?UTF-8?q?=F0=9F=8C=B1=20Refactor=20tests=20to=20plain=20?= =?UTF-8?q?go=20in=20controllers?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- controllers/cluster_controller_test.go | 137 ++++++------ controllers/machine_controller_phases_test.go | 199 ++++++++++-------- .../machinedeployment_controller_test.go | 152 ++++++------- controllers/machineset_controller_test.go | 81 +++---- controllers/suite_test.go | 27 +-- controllers/suite_util_test.go | 38 ++-- 6 files changed, 327 insertions(+), 307 deletions(-) diff --git a/controllers/cluster_controller_test.go b/controllers/cluster_controller_test.go index be10f3da5ff7..453d10e27a2c 100644 --- a/controllers/cluster_controller_test.go +++ b/controllers/cluster_controller_test.go @@ -19,12 +19,8 @@ package controllers import ( "testing" - . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" - "sigs.k8s.io/cluster-api/util/conditions" - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/scheme" "k8s.io/utils/pointer" @@ -32,15 +28,17 @@ import ( expv1 "sigs.k8s.io/cluster-api/exp/api/v1alpha4" "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/patch" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" ) -var _ = Describe("Cluster Reconciler", func() { +func TestClusterReconciler(t *testing.T) { + t.Run("Should create a Cluster", func(t *testing.T) { + g := NewWithT(t) - It("Should create a Cluster", func() { instance := &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "test1-", @@ -50,15 +48,15 @@ var _ = Describe("Cluster Reconciler", func() { } // Create the Cluster object and expect the Reconcile and Deployment to be created - Expect(testEnv.Create(ctx, instance)).ToNot(HaveOccurred()) + g.Expect(testEnv.Create(ctx, instance)).To(Succeed()) key := client.ObjectKey{Namespace: instance.Namespace, Name: instance.Name} defer func() { err := testEnv.Delete(ctx, instance) - Expect(err).NotTo(HaveOccurred()) + g.Expect(err).NotTo(HaveOccurred()) }() // Make sure the Cluster exists. - Eventually(func() bool { + g.Eventually(func() bool { if err := testEnv.Get(ctx, key, instance); err != nil { return false } @@ -66,7 +64,9 @@ var _ = Describe("Cluster Reconciler", func() { }, timeout).Should(BeTrue()) }) - It("Should successfully patch a cluster object if the status diff is empty but the spec diff is not", func() { + t.Run("Should successfully patch a cluster object if the status diff is empty but the spec diff is not", func(t *testing.T) { + g := NewWithT(t) + // Setup cluster := &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ @@ -74,15 +74,15 @@ var _ = Describe("Cluster Reconciler", func() { Namespace: "default", }, } - Expect(testEnv.Create(ctx, cluster)).To(BeNil()) + g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) key := client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace} defer func() { err := testEnv.Delete(ctx, cluster) - Expect(err).NotTo(HaveOccurred()) + g.Expect(err).NotTo(HaveOccurred()) }() // Wait for reconciliation to happen. 
- Eventually(func() bool { + g.Eventually(func() bool { if err := testEnv.Get(ctx, key, cluster); err != nil { return false } @@ -90,17 +90,17 @@ var _ = Describe("Cluster Reconciler", func() { }, timeout).Should(BeTrue()) // Patch - Eventually(func() bool { + g.Eventually(func() bool { ph, err := patch.NewHelper(cluster, testEnv) - Expect(err).ShouldNot(HaveOccurred()) - cluster.Spec.InfrastructureRef = &v1.ObjectReference{Name: "test"} - cluster.Spec.ControlPlaneRef = &v1.ObjectReference{Name: "test-too"} - Expect(ph.Patch(ctx, cluster, patch.WithStatusObservedGeneration{})).ShouldNot(HaveOccurred()) + g.Expect(err).NotTo(HaveOccurred()) + cluster.Spec.InfrastructureRef = &corev1.ObjectReference{Name: "test"} + cluster.Spec.ControlPlaneRef = &corev1.ObjectReference{Name: "test-too"} + g.Expect(ph.Patch(ctx, cluster, patch.WithStatusObservedGeneration{})).To(Succeed()) return true }, timeout).Should(BeTrue()) // Assertions - Eventually(func() bool { + g.Eventually(func() bool { instance := &clusterv1.Cluster{} if err := testEnv.Get(ctx, key, instance); err != nil { return false @@ -110,7 +110,9 @@ var _ = Describe("Cluster Reconciler", func() { }, timeout).Should(BeTrue()) }) - It("Should successfully patch a cluster object if the spec diff is empty but the status diff is not", func() { + t.Run("Should successfully patch a cluster object if the spec diff is empty but the status diff is not", func(t *testing.T) { + g := NewWithT(t) + // Setup cluster := &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ @@ -118,15 +120,15 @@ var _ = Describe("Cluster Reconciler", func() { Namespace: "default", }, } - Expect(testEnv.Create(ctx, cluster)).To(BeNil()) + g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) key := client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace} defer func() { err := testEnv.Delete(ctx, cluster) - Expect(err).NotTo(HaveOccurred()) + g.Expect(err).NotTo(HaveOccurred()) }() // Wait for reconciliation to happen. 
- Eventually(func() bool { + g.Eventually(func() bool { if err := testEnv.Get(ctx, key, cluster); err != nil { return false } @@ -134,16 +136,16 @@ var _ = Describe("Cluster Reconciler", func() { }, timeout).Should(BeTrue()) // Patch - Eventually(func() bool { + g.Eventually(func() bool { ph, err := patch.NewHelper(cluster, testEnv) - Expect(err).ShouldNot(HaveOccurred()) + g.Expect(err).NotTo(HaveOccurred()) cluster.Status.InfrastructureReady = true - Expect(ph.Patch(ctx, cluster, patch.WithStatusObservedGeneration{})).ShouldNot(HaveOccurred()) + g.Expect(ph.Patch(ctx, cluster, patch.WithStatusObservedGeneration{})).To(Succeed()) return true }, timeout).Should(BeTrue()) // Assertions - Eventually(func() bool { + g.Eventually(func() bool { instance := &clusterv1.Cluster{} if err := testEnv.Get(ctx, key, instance); err != nil { return false @@ -152,7 +154,9 @@ var _ = Describe("Cluster Reconciler", func() { }, timeout).Should(BeTrue()) }) - It("Should successfully patch a cluster object if both the spec diff and status diff are non empty", func() { + t.Run("Should successfully patch a cluster object if both the spec diff and status diff are non empty", func(t *testing.T) { + g := NewWithT(t) + // Setup cluster := &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ @@ -160,15 +164,16 @@ var _ = Describe("Cluster Reconciler", func() { Namespace: "default", }, } - Expect(testEnv.Create(ctx, cluster)).To(BeNil()) + + g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) key := client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace} defer func() { err := testEnv.Delete(ctx, cluster) - Expect(err).NotTo(HaveOccurred()) + g.Expect(err).NotTo(HaveOccurred()) }() // Wait for reconciliation to happen. - Eventually(func() bool { + g.Eventually(func() bool { if err := testEnv.Get(ctx, key, cluster); err != nil { return false } @@ -176,17 +181,17 @@ var _ = Describe("Cluster Reconciler", func() { }, timeout).Should(BeTrue()) // Patch - Eventually(func() bool { + g.Eventually(func() bool { ph, err := patch.NewHelper(cluster, testEnv) - Expect(err).ShouldNot(HaveOccurred()) + g.Expect(err).NotTo(HaveOccurred()) cluster.Status.InfrastructureReady = true - cluster.Spec.InfrastructureRef = &v1.ObjectReference{Name: "test"} - Expect(ph.Patch(ctx, cluster, patch.WithStatusObservedGeneration{})).ShouldNot(HaveOccurred()) + cluster.Spec.InfrastructureRef = &corev1.ObjectReference{Name: "test"} + g.Expect(ph.Patch(ctx, cluster, patch.WithStatusObservedGeneration{})).To(Succeed()) return true }, timeout).Should(BeTrue()) // Assertions - Eventually(func() bool { + g.Eventually(func() bool { instance := &clusterv1.Cluster{} if err := testEnv.Get(ctx, key, instance); err != nil { return false @@ -197,7 +202,9 @@ var _ = Describe("Cluster Reconciler", func() { }, timeout).Should(BeTrue()) }) - It("Should re-apply finalizers if removed", func() { + t.Run("Should re-apply finalizers if removed", func(t *testing.T) { + g := NewWithT(t) + // Setup cluster := &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ @@ -205,15 +212,15 @@ var _ = Describe("Cluster Reconciler", func() { Namespace: "default", }, } - Expect(testEnv.Create(ctx, cluster)).To(BeNil()) + g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) key := client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace} defer func() { err := testEnv.Delete(ctx, cluster) - Expect(err).NotTo(HaveOccurred()) + g.Expect(err).NotTo(HaveOccurred()) }() // Wait for reconciliation to happen. 
- Eventually(func() bool { + g.Eventually(func() bool { if err := testEnv.Get(ctx, key, cluster); err != nil { return false } @@ -221,18 +228,18 @@ var _ = Describe("Cluster Reconciler", func() { }, timeout).Should(BeTrue()) // Remove finalizers - Eventually(func() bool { + g.Eventually(func() bool { ph, err := patch.NewHelper(cluster, testEnv) - Expect(err).ShouldNot(HaveOccurred()) + g.Expect(err).NotTo(HaveOccurred()) cluster.SetFinalizers([]string{}) - Expect(ph.Patch(ctx, cluster, patch.WithStatusObservedGeneration{})).ShouldNot(HaveOccurred()) + g.Expect(ph.Patch(ctx, cluster, patch.WithStatusObservedGeneration{})).To(Succeed()) return true }, timeout).Should(BeTrue()) - Expect(cluster.Finalizers).Should(BeEmpty()) + g.Expect(cluster.Finalizers).Should(BeEmpty()) // Check finalizers are re-applied - Eventually(func() []string { + g.Eventually(func() []string { instance := &clusterv1.Cluster{} if err := testEnv.Get(ctx, key, instance); err != nil { return []string{"not-empty"} @@ -241,24 +248,26 @@ var _ = Describe("Cluster Reconciler", func() { }, timeout).ShouldNot(BeEmpty()) }) - It("Should successfully set ControlPlaneInitialized on the cluster object if controlplane is ready", func() { + t.Run("Should successfully set ControlPlaneInitialized on the cluster object if controlplane is ready", func(t *testing.T) { + g := NewWithT(t) + cluster := &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "test6-", - Namespace: v1.NamespaceDefault, + Namespace: corev1.NamespaceDefault, }, } - Expect(testEnv.Create(ctx, cluster)).To(BeNil()) + g.Expect(testEnv.Create(ctx, cluster)).To(Succeed()) key := client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace} defer func() { err := testEnv.Delete(ctx, cluster) - Expect(err).NotTo(HaveOccurred()) + g.Expect(err).NotTo(HaveOccurred()) }() - Expect(testEnv.CreateKubeconfigSecret(ctx, cluster)).To(Succeed()) + g.Expect(testEnv.CreateKubeconfigSecret(ctx, cluster)).To(Succeed()) // Wait for reconciliation to happen. - Eventually(func() bool { + g.Eventually(func() bool { if err := testEnv.Get(ctx, key, cluster); err != nil { return false } @@ -267,21 +276,21 @@ var _ = Describe("Cluster Reconciler", func() { // Create a node so we can speed up reconciliation. Otherwise, the machine reconciler will requeue the machine // after 10 seconds, potentially slowing down this test. - node := &v1.Node{ + node := &corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "id-node-1", }, - Spec: v1.NodeSpec{ + Spec: corev1.NodeSpec{ ProviderID: "aws:///id-node-1", }, } - Expect(testEnv.Create(ctx, node)).To(Succeed()) + g.Expect(testEnv.Create(ctx, node)).To(Succeed()) machine := &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "test6-", - Namespace: v1.NamespaceDefault, + Namespace: corev1.NamespaceDefault, Labels: map[string]string{ clusterv1.MachineControlPlaneLabelName: "", }, @@ -295,11 +304,11 @@ var _ = Describe("Cluster Reconciler", func() { }, } machine.Spec.Bootstrap.DataSecretName = pointer.StringPtr("test6-bootstrapdata") - Expect(testEnv.Create(ctx, machine)).To(BeNil()) + g.Expect(testEnv.Create(ctx, machine)).To(Succeed()) key = client.ObjectKey{Name: machine.Name, Namespace: machine.Namespace} defer func() { err := testEnv.Delete(ctx, machine) - Expect(err).NotTo(HaveOccurred()) + g.Expect(err).NotTo(HaveOccurred()) }() // Wait for machine to be ready. 
@@ -309,7 +318,7 @@ var _ = Describe("Cluster Reconciler", func() { // timeout) for the machine reconciler to add the finalizer and for the change to be persisted to etcd. If // we continue to see test timeouts here, that will likely point to something else being the problem, but // I've yet to determine any other possibility for the test flakes. - Eventually(func() bool { + g.Eventually(func() bool { if err := testEnv.Get(ctx, key, machine); err != nil { return false } @@ -318,16 +327,16 @@ var _ = Describe("Cluster Reconciler", func() { // Assertion key = client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace} - Eventually(func() bool { + g.Eventually(func() bool { if err := testEnv.Get(ctx, key, cluster); err != nil { return false } return conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) }, timeout).Should(BeTrue()) }) -}) +} -func TestClusterReconciler(t *testing.T) { +func TestClusterReconcilerNodeRef(t *testing.T) { t.Run("machine to cluster", func(t *testing.T) { cluster := &clusterv1.Cluster{ TypeMeta: metav1.TypeMeta{ @@ -357,7 +366,7 @@ func TestClusterReconciler(t *testing.T) { ClusterName: "test-cluster", }, Status: clusterv1.MachineStatus{ - NodeRef: &v1.ObjectReference{ + NodeRef: &corev1.ObjectReference{ Kind: "Node", Namespace: "test-node", }, @@ -394,7 +403,7 @@ func TestClusterReconciler(t *testing.T) { ClusterName: "test-cluster", }, Status: clusterv1.MachineStatus{ - NodeRef: &v1.ObjectReference{ + NodeRef: &corev1.ObjectReference{ Kind: "Node", Namespace: "test-node", }, @@ -730,6 +739,6 @@ func TestReconcileControlPlaneInitializedControlPlaneRef(t *testing.T) { r := &ClusterReconciler{} res, err := r.reconcileControlPlaneInitialized(ctx, c) g.Expect(res.IsZero()).To(BeTrue()) - g.Expect(err).ToNot(HaveOccurred()) + g.Expect(err).NotTo(HaveOccurred()) g.Expect(conditions.Has(c, clusterv1.ControlPlaneInitializedCondition)).To(BeFalse()) } diff --git a/controllers/machine_controller_phases_test.go b/controllers/machine_controller_phases_test.go index 793d159a9feb..a1e51f4ee918 100644 --- a/controllers/machine_controller_phases_test.go +++ b/controllers/machine_controller_phases_test.go @@ -20,7 +20,6 @@ import ( "testing" "time" - . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" @@ -46,7 +45,7 @@ func init() { externalReadyWait = 1 * time.Second } -var _ = Describe("Reconcile Machine Phases", func() { +func TestReconcileMachinePhases(t *testing.T) { deletionTimestamp := metav1.Now() var defaultKubeconfigSecret *corev1.Secret @@ -108,11 +107,10 @@ var _ = Describe("Reconcile Machine Phases", func() { }, } - BeforeEach(func() { - defaultKubeconfigSecret = kubeconfig.GenerateSecret(defaultCluster, kubeconfig.FromEnvTestConfig(&rest.Config{}, defaultCluster)) - }) + t.Run("Should set OwnerReference and cluster name label on external objects", func(t *testing.T) { + g := NewWithT(t) - It("Should set OwnerReference and cluster name label on external objects", func() { + defaultKubeconfigSecret = kubeconfig.GenerateSecret(defaultCluster, kubeconfig.FromEnvTestConfig(&rest.Config{}, defaultCluster)) machine := defaultMachine.DeepCopy() bootstrapConfig := defaultBootstrap.DeepCopy() infraConfig := defaultInfra.DeepCopy() @@ -131,23 +129,26 @@ var _ = Describe("Reconcile Machine Phases", func() { } res, err := r.reconcile(ctx, defaultCluster, machine) - Expect(err).NotTo(HaveOccurred()) - Expect(res.RequeueAfter).To(Equal(externalReadyWait)) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(res.RequeueAfter).To(Equal(externalReadyWait)) r.reconcilePhase(ctx, machine) - Expect(r.Client.Get(ctx, types.NamespacedName{Name: bootstrapConfig.GetName(), Namespace: bootstrapConfig.GetNamespace()}, bootstrapConfig)).To(Succeed()) + g.Expect(r.Client.Get(ctx, types.NamespacedName{Name: bootstrapConfig.GetName(), Namespace: bootstrapConfig.GetNamespace()}, bootstrapConfig)).To(Succeed()) - Expect(bootstrapConfig.GetOwnerReferences()).To(HaveLen(1)) - Expect(bootstrapConfig.GetLabels()[clusterv1.ClusterLabelName]).To(BeEquivalentTo("test-cluster")) + g.Expect(bootstrapConfig.GetOwnerReferences()).To(HaveLen(1)) + g.Expect(bootstrapConfig.GetLabels()[clusterv1.ClusterLabelName]).To(BeEquivalentTo("test-cluster")) - Expect(r.Client.Get(ctx, types.NamespacedName{Name: infraConfig.GetName(), Namespace: infraConfig.GetNamespace()}, infraConfig)).To(Succeed()) + g.Expect(r.Client.Get(ctx, types.NamespacedName{Name: infraConfig.GetName(), Namespace: infraConfig.GetNamespace()}, infraConfig)).To(Succeed()) - Expect(infraConfig.GetOwnerReferences()).To(HaveLen(1)) - Expect(infraConfig.GetLabels()[clusterv1.ClusterLabelName]).To(BeEquivalentTo("test-cluster")) + g.Expect(infraConfig.GetOwnerReferences()).To(HaveLen(1)) + g.Expect(infraConfig.GetLabels()[clusterv1.ClusterLabelName]).To(BeEquivalentTo("test-cluster")) }) - It("Should set `Pending` with a new Machine", func() { + t.Run("Should set `Pending` with a new Machine", func(t *testing.T) { + g := NewWithT(t) + + defaultKubeconfigSecret = kubeconfig.GenerateSecret(defaultCluster, kubeconfig.FromEnvTestConfig(&rest.Config{}, defaultCluster)) machine := defaultMachine.DeepCopy() bootstrapConfig := defaultBootstrap.DeepCopy() infraConfig := defaultInfra.DeepCopy() @@ -166,27 +167,30 @@ var _ = Describe("Reconcile Machine Phases", func() { } res, err := r.reconcile(ctx, defaultCluster, machine) - Expect(err).NotTo(HaveOccurred()) - Expect(res.RequeueAfter).To(Equal(externalReadyWait)) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(res.RequeueAfter).To(Equal(externalReadyWait)) r.reconcilePhase(ctx, machine) - Expect(machine.Status.GetTypedPhase()).To(Equal(clusterv1.MachinePhasePending)) + g.Expect(machine.Status.GetTypedPhase()).To(Equal(clusterv1.MachinePhasePending)) // LastUpdated should 
be set as the phase changes - Expect(machine.Status.LastUpdated).ToNot(BeNil()) + g.Expect(machine.Status.LastUpdated).NotTo(BeNil()) }) - It("Should set `Provisioning` when bootstrap is ready", func() { + t.Run("Should set `Provisioning` when bootstrap is ready", func(t *testing.T) { + g := NewWithT(t) + + defaultKubeconfigSecret = kubeconfig.GenerateSecret(defaultCluster, kubeconfig.FromEnvTestConfig(&rest.Config{}, defaultCluster)) machine := defaultMachine.DeepCopy() bootstrapConfig := defaultBootstrap.DeepCopy() infraConfig := defaultInfra.DeepCopy() // Set bootstrap ready. err := unstructured.SetNestedField(bootstrapConfig.Object, true, "status", "ready") - Expect(err).NotTo(HaveOccurred()) + g.Expect(err).NotTo(HaveOccurred()) err = unstructured.SetNestedField(bootstrapConfig.Object, "secret-data", "status", "dataSecretName") - Expect(err).NotTo(HaveOccurred()) + g.Expect(err).NotTo(HaveOccurred()) // Set the LastUpdated to be able to verify it is updated when the phase changes lastUpdated := metav1.NewTime(time.Now().Add(-10 * time.Second)) @@ -206,38 +210,41 @@ var _ = Describe("Reconcile Machine Phases", func() { } res, err := r.reconcile(ctx, defaultCluster, machine) - Expect(err).NotTo(HaveOccurred()) - Expect(res.Requeue).To(BeFalse()) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(res.Requeue).To(BeFalse()) r.reconcilePhase(ctx, machine) - Expect(machine.Status.GetTypedPhase()).To(Equal(clusterv1.MachinePhaseProvisioning)) + g.Expect(machine.Status.GetTypedPhase()).To(Equal(clusterv1.MachinePhaseProvisioning)) // Verify that the LastUpdated timestamp was updated - Expect(machine.Status.LastUpdated).ToNot(BeNil()) - Expect(machine.Status.LastUpdated.After(lastUpdated.Time)).To(BeTrue()) + g.Expect(machine.Status.LastUpdated).NotTo(BeNil()) + g.Expect(machine.Status.LastUpdated.After(lastUpdated.Time)).To(BeTrue()) }) - It("Should set `Running` when bootstrap and infra is ready", func() { + t.Run("Should set `Running` when bootstrap and infra is ready", func(t *testing.T) { + g := NewWithT(t) + + defaultKubeconfigSecret = kubeconfig.GenerateSecret(defaultCluster, kubeconfig.FromEnvTestConfig(&rest.Config{}, defaultCluster)) machine := defaultMachine.DeepCopy() bootstrapConfig := defaultBootstrap.DeepCopy() infraConfig := defaultInfra.DeepCopy() // Set bootstrap ready. err := unstructured.SetNestedField(bootstrapConfig.Object, true, "status", "ready") - Expect(err).NotTo(HaveOccurred()) + g.Expect(err).NotTo(HaveOccurred()) err = unstructured.SetNestedField(bootstrapConfig.Object, "secret-data", "status", "dataSecretName") - Expect(err).NotTo(HaveOccurred()) + g.Expect(err).NotTo(HaveOccurred()) // Set infra ready. err = unstructured.SetNestedField(infraConfig.Object, true, "status", "ready") - Expect(err).NotTo(HaveOccurred()) + g.Expect(err).NotTo(HaveOccurred()) err = unstructured.SetNestedField(infraConfig.Object, "test://id-1", "spec", "providerID") - Expect(err).NotTo(HaveOccurred()) + g.Expect(err).NotTo(HaveOccurred()) err = unstructured.SetNestedField(infraConfig.Object, "us-east-2a", "spec", "failureDomain") - Expect(err).NotTo(HaveOccurred()) + g.Expect(err).NotTo(HaveOccurred()) err = unstructured.SetNestedField(infraConfig.Object, []interface{}{ map[string]interface{}{ @@ -249,7 +256,7 @@ var _ = Describe("Reconcile Machine Phases", func() { "address": "10.0.0.2", }, }, "status", "addresses") - Expect(err).NotTo(HaveOccurred()) + g.Expect(err).NotTo(HaveOccurred()) // Set NodeRef. 
machine.Status.NodeRef = &corev1.ObjectReference{Kind: "Node", Name: "machine-test-node"} @@ -282,37 +289,40 @@ var _ = Describe("Reconcile Machine Phases", func() { } res, err := r.reconcile(ctx, defaultCluster, machine) - Expect(err).NotTo(HaveOccurred()) - Expect(res.Requeue).To(BeFalse()) - Expect(machine.Status.Addresses).To(HaveLen(2)) - Expect(*machine.Spec.FailureDomain).To(Equal("us-east-2a")) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(res.Requeue).To(BeFalse()) + g.Expect(machine.Status.Addresses).To(HaveLen(2)) + g.Expect(*machine.Spec.FailureDomain).To(Equal("us-east-2a")) r.reconcilePhase(ctx, machine) - Expect(machine.Status.GetTypedPhase()).To(Equal(clusterv1.MachinePhaseRunning)) + g.Expect(machine.Status.GetTypedPhase()).To(Equal(clusterv1.MachinePhaseRunning)) // Verify that the LastUpdated timestamp was updated - Expect(machine.Status.LastUpdated).ToNot(BeNil()) - Expect(machine.Status.LastUpdated.After(lastUpdated.Time)).To(BeTrue()) + g.Expect(machine.Status.LastUpdated).NotTo(BeNil()) + g.Expect(machine.Status.LastUpdated.After(lastUpdated.Time)).To(BeTrue()) }) - It("Should set `Running` when bootstrap and infra is ready with no Status.Addresses", func() { + t.Run("Should set `Running` when bootstrap and infra is ready with no Status.Addresses", func(t *testing.T) { + g := NewWithT(t) + + defaultKubeconfigSecret = kubeconfig.GenerateSecret(defaultCluster, kubeconfig.FromEnvTestConfig(&rest.Config{}, defaultCluster)) machine := defaultMachine.DeepCopy() bootstrapConfig := defaultBootstrap.DeepCopy() infraConfig := defaultInfra.DeepCopy() // Set bootstrap ready. err := unstructured.SetNestedField(bootstrapConfig.Object, true, "status", "ready") - Expect(err).NotTo(HaveOccurred()) + g.Expect(err).NotTo(HaveOccurred()) err = unstructured.SetNestedField(bootstrapConfig.Object, "secret-data", "status", "dataSecretName") - Expect(err).NotTo(HaveOccurred()) + g.Expect(err).NotTo(HaveOccurred()) // Set infra ready. err = unstructured.SetNestedField(infraConfig.Object, true, "status", "ready") - Expect(err).NotTo(HaveOccurred()) + g.Expect(err).NotTo(HaveOccurred()) err = unstructured.SetNestedField(infraConfig.Object, "test://id-1", "spec", "providerID") - Expect(err).NotTo(HaveOccurred()) + g.Expect(err).NotTo(HaveOccurred()) // Set NodeRef. 
machine.Status.NodeRef = &corev1.ObjectReference{Kind: "Node", Name: "machine-test-node"} @@ -345,36 +355,39 @@ var _ = Describe("Reconcile Machine Phases", func() { } res, err := r.reconcile(ctx, defaultCluster, machine) - Expect(err).NotTo(HaveOccurred()) - Expect(res.Requeue).To(BeFalse()) - Expect(machine.Status.Addresses).To(HaveLen(0)) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(res.Requeue).To(BeFalse()) + g.Expect(machine.Status.Addresses).To(HaveLen(0)) r.reconcilePhase(ctx, machine) - Expect(machine.Status.GetTypedPhase()).To(Equal(clusterv1.MachinePhaseRunning)) + g.Expect(machine.Status.GetTypedPhase()).To(Equal(clusterv1.MachinePhaseRunning)) // Verify that the LastUpdated timestamp was updated - Expect(machine.Status.LastUpdated).ToNot(BeNil()) - Expect(machine.Status.LastUpdated.After(lastUpdated.Time)).To(BeTrue()) + g.Expect(machine.Status.LastUpdated).NotTo(BeNil()) + g.Expect(machine.Status.LastUpdated.After(lastUpdated.Time)).To(BeTrue()) }) - It("Should set `Running` when bootstrap, infra, and NodeRef is ready", func() { + t.Run("Should set `Running` when bootstrap, infra, and NodeRef is ready", func(t *testing.T) { + g := NewWithT(t) + + defaultKubeconfigSecret = kubeconfig.GenerateSecret(defaultCluster, kubeconfig.FromEnvTestConfig(&rest.Config{}, defaultCluster)) machine := defaultMachine.DeepCopy() bootstrapConfig := defaultBootstrap.DeepCopy() infraConfig := defaultInfra.DeepCopy() // Set bootstrap ready. err := unstructured.SetNestedField(bootstrapConfig.Object, true, "status", "ready") - Expect(err).NotTo(HaveOccurred()) + g.Expect(err).NotTo(HaveOccurred()) err = unstructured.SetNestedField(bootstrapConfig.Object, "secret-data", "status", "dataSecretName") - Expect(err).NotTo(HaveOccurred()) + g.Expect(err).NotTo(HaveOccurred()) // Set infra ready. err = unstructured.SetNestedField(infraConfig.Object, "test://id-1", "spec", "providerID") - Expect(err).NotTo(HaveOccurred()) + g.Expect(err).NotTo(HaveOccurred()) err = unstructured.SetNestedField(infraConfig.Object, true, "status", "ready") - Expect(err).NotTo(HaveOccurred()) + g.Expect(err).NotTo(HaveOccurred()) err = unstructured.SetNestedField(infraConfig.Object, []interface{}{ map[string]interface{}{ @@ -386,7 +399,7 @@ var _ = Describe("Reconcile Machine Phases", func() { "address": "10.0.0.2", }, }, "addresses") - Expect(err).NotTo(HaveOccurred()) + g.Expect(err).NotTo(HaveOccurred()) // Set NodeRef. 
machine.Status.NodeRef = &corev1.ObjectReference{Kind: "Node", Name: "machine-test-node"} @@ -418,28 +431,31 @@ var _ = Describe("Reconcile Machine Phases", func() { } res, err := r.reconcile(ctx, defaultCluster, machine) - Expect(err).NotTo(HaveOccurred()) - Expect(res.Requeue).To(BeFalse()) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(res.Requeue).To(BeFalse()) r.reconcilePhase(ctx, machine) - Expect(machine.Status.GetTypedPhase()).To(Equal(clusterv1.MachinePhaseRunning)) + g.Expect(machine.Status.GetTypedPhase()).To(Equal(clusterv1.MachinePhaseRunning)) // Verify that the LastUpdated timestamp was updated - Expect(machine.Status.LastUpdated).ToNot(BeNil()) - Expect(machine.Status.LastUpdated.After(lastUpdated.Time)).To(BeTrue()) + g.Expect(machine.Status.LastUpdated).NotTo(BeNil()) + g.Expect(machine.Status.LastUpdated.After(lastUpdated.Time)).To(BeTrue()) }) - It("Should set `Provisioned` when there is a NodeRef but infra is not ready ", func() { + t.Run("Should set `Provisioned` when there is a NodeRef but infra is not ready ", func(t *testing.T) { + g := NewWithT(t) + + defaultKubeconfigSecret = kubeconfig.GenerateSecret(defaultCluster, kubeconfig.FromEnvTestConfig(&rest.Config{}, defaultCluster)) machine := defaultMachine.DeepCopy() bootstrapConfig := defaultBootstrap.DeepCopy() infraConfig := defaultInfra.DeepCopy() // Set bootstrap ready. err := unstructured.SetNestedField(bootstrapConfig.Object, true, "status", "ready") - Expect(err).NotTo(HaveOccurred()) + g.Expect(err).NotTo(HaveOccurred()) err = unstructured.SetNestedField(bootstrapConfig.Object, "secret-data", "status", "dataSecretName") - Expect(err).NotTo(HaveOccurred()) + g.Expect(err).NotTo(HaveOccurred()) // Set NodeRef. machine.Status.NodeRef = &corev1.ObjectReference{Kind: "Node", Name: "machine-test-node"} @@ -462,18 +478,21 @@ var _ = Describe("Reconcile Machine Phases", func() { } res, err := r.reconcile(ctx, defaultCluster, machine) - Expect(err).NotTo(HaveOccurred()) - Expect(res.RequeueAfter).To(Equal(externalReadyWait)) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(res.RequeueAfter).To(Equal(externalReadyWait)) r.reconcilePhase(ctx, machine) - Expect(machine.Status.GetTypedPhase()).To(Equal(clusterv1.MachinePhaseProvisioned)) + g.Expect(machine.Status.GetTypedPhase()).To(Equal(clusterv1.MachinePhaseProvisioned)) // Verify that the LastUpdated timestamp was updated - Expect(machine.Status.LastUpdated).ToNot(BeNil()) - Expect(machine.Status.LastUpdated.After(lastUpdated.Time)).To(BeTrue()) + g.Expect(machine.Status.LastUpdated).NotTo(BeNil()) + g.Expect(machine.Status.LastUpdated.After(lastUpdated.Time)).To(BeTrue()) }) - It("Should set `Deleting` when Machine is being deleted", func() { + t.Run("Should set `Deleting` when Machine is being deleted", func(t *testing.T) { + g := NewWithT(t) + + defaultKubeconfigSecret = kubeconfig.GenerateSecret(defaultCluster, kubeconfig.FromEnvTestConfig(&rest.Config{}, defaultCluster)) machine := defaultMachine.DeepCopy() // Need the second Machine to allow deletion of one. machineSecond := defaultMachine.DeepCopy() @@ -483,17 +502,17 @@ var _ = Describe("Reconcile Machine Phases", func() { // Set bootstrap ready. err := unstructured.SetNestedField(bootstrapConfig.Object, true, "status", "ready") - Expect(err).NotTo(HaveOccurred()) + g.Expect(err).NotTo(HaveOccurred()) err = unstructured.SetNestedField(bootstrapConfig.Object, "secret-data", "status", "dataSecretName") - Expect(err).NotTo(HaveOccurred()) + g.Expect(err).NotTo(HaveOccurred()) // Set infra ready. 
err = unstructured.SetNestedField(infraConfig.Object, "test://id-1", "spec", "providerID") - Expect(err).NotTo(HaveOccurred()) + g.Expect(err).NotTo(HaveOccurred()) err = unstructured.SetNestedField(infraConfig.Object, true, "status", "ready") - Expect(err).NotTo(HaveOccurred()) + g.Expect(err).NotTo(HaveOccurred()) err = unstructured.SetNestedField(infraConfig.Object, []interface{}{ map[string]interface{}{ @@ -505,7 +524,7 @@ var _ = Describe("Reconcile Machine Phases", func() { "address": "10.0.0.2", }, }, "addresses") - Expect(err).NotTo(HaveOccurred()) + g.Expect(err).NotTo(HaveOccurred()) // Set Cluster label. machine.Labels[clusterv1.ClusterLabelName] = machine.Spec.ClusterName @@ -541,21 +560,21 @@ var _ = Describe("Reconcile Machine Phases", func() { } res, err := r.reconcileDelete(ctx, defaultCluster, machine) - Expect(err).NotTo(HaveOccurred()) - Expect(res.Requeue).To(BeFalse()) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(res.Requeue).To(BeFalse()) r.reconcilePhase(ctx, machine) - Expect(machine.Status.GetTypedPhase()).To(Equal(clusterv1.MachinePhaseDeleting)) + g.Expect(machine.Status.GetTypedPhase()).To(Equal(clusterv1.MachinePhaseDeleting)) nodeHealthyCondition := conditions.Get(machine, clusterv1.MachineNodeHealthyCondition) - Expect(nodeHealthyCondition.Status).To(Equal(corev1.ConditionFalse)) - Expect(nodeHealthyCondition.Reason).To(Equal(clusterv1.DeletingReason)) + g.Expect(nodeHealthyCondition.Status).To(Equal(corev1.ConditionFalse)) + g.Expect(nodeHealthyCondition.Reason).To(Equal(clusterv1.DeletingReason)) // Verify that the LastUpdated timestamp was updated - Expect(machine.Status.LastUpdated).ToNot(BeNil()) - Expect(machine.Status.LastUpdated.After(lastUpdated.Time)).To(BeTrue()) + g.Expect(machine.Status.LastUpdated).NotTo(BeNil()) + g.Expect(machine.Status.LastUpdated.After(lastUpdated.Time)).To(BeTrue()) }) -}) +} func TestReconcileBootstrap(t *testing.T) { defaultMachine := clusterv1.Machine{ @@ -611,7 +630,7 @@ func TestReconcileBootstrap(t *testing.T) { expectError: false, expected: func(g *WithT, m *clusterv1.Machine) { g.Expect(m.Status.BootstrapReady).To(BeTrue()) - g.Expect(m.Spec.Bootstrap.DataSecretName).ToNot(BeNil()) + g.Expect(m.Spec.Bootstrap.DataSecretName).NotTo(BeNil()) g.Expect(*m.Spec.Bootstrap.DataSecretName).To(ContainSubstring("secret-data")) }, }, @@ -848,7 +867,7 @@ func TestReconcileBootstrap(t *testing.T) { res, err := r.reconcileBootstrap(ctx, defaultCluster, tc.machine) g.Expect(res).To(Equal(tc.expectResult)) if tc.expectError { - g.Expect(err).ToNot(BeNil()) + g.Expect(err).NotTo(BeNil()) } else { g.Expect(err).To(BeNil()) } @@ -994,8 +1013,8 @@ func TestReconcileInfrastructure(t *testing.T) { expectError: true, expected: func(g *WithT, m *clusterv1.Machine) { g.Expect(m.Status.InfrastructureReady).To(BeTrue()) - g.Expect(m.Status.FailureMessage).ToNot(BeNil()) - g.Expect(m.Status.FailureReason).ToNot(BeNil()) + g.Expect(m.Status.FailureMessage).NotTo(BeNil()) + g.Expect(m.Status.FailureReason).NotTo(BeNil()) g.Expect(m.Status.GetTypedPhase()).To(Equal(clusterv1.MachinePhaseFailed)) }, }, @@ -1062,7 +1081,7 @@ func TestReconcileInfrastructure(t *testing.T) { r.reconcilePhase(ctx, tc.machine) g.Expect(result).To(Equal(tc.expectResult)) if tc.expectError { - g.Expect(err).ToNot(BeNil()) + g.Expect(err).NotTo(BeNil()) } else { g.Expect(err).To(BeNil()) } diff --git a/controllers/machinedeployment_controller_test.go b/controllers/machinedeployment_controller_test.go index 91a764eeff30..d2e4b1e941b1 100644 --- 
a/controllers/machinedeployment_controller_test.go +++ b/controllers/machinedeployment_controller_test.go @@ -19,7 +19,6 @@ package controllers import ( "testing" - . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" @@ -39,27 +38,31 @@ import ( var _ reconcile.Reconciler = &MachineDeploymentReconciler{} -var _ = Describe("MachineDeployment Reconciler", func() { +func TestMachineDeploymentReconciler(t *testing.T) { namespace := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "md-test"}} testCluster := &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{Namespace: namespace.Name, Name: "test-cluster"}} - BeforeEach(func() { - By("Creating the namespace") - Expect(testEnv.Create(ctx, namespace)).To(Succeed()) - By("Creating the Cluster") - Expect(testEnv.Create(ctx, testCluster)).To(Succeed()) - By("Creating the Cluster Kubeconfig Secret") - Expect(testEnv.CreateKubeconfigSecret(ctx, testCluster)).To(Succeed()) - }) + setup := func(t *testing.T, g *WithT) { + t.Log("Creating the namespace") + g.Expect(testEnv.Create(ctx, namespace)).To(Succeed()) + t.Log("Creating the Cluster") + g.Expect(testEnv.Create(ctx, testCluster)).To(Succeed()) + t.Log("Creating the Cluster Kubeconfig Secret") + g.Expect(testEnv.CreateKubeconfigSecret(ctx, testCluster)).To(Succeed()) + } - AfterEach(func() { - By("Deleting the Cluster") - Expect(testEnv.Delete(ctx, testCluster)).To(Succeed()) - By("Deleting the namespace") - Expect(testEnv.Delete(ctx, namespace)).To(Succeed()) - }) + teardown := func(t *testing.T, g *WithT) { + t.Log("Deleting the Cluster") + g.Expect(testEnv.Delete(ctx, testCluster)).To(Succeed()) + t.Log("Deleting the namespace") + g.Expect(testEnv.Delete(ctx, namespace)).To(Succeed()) + } + + t.Run("Should reconcile a MachineDeployment", func(t *testing.T) { + g := NewWithT(t) + setup(t, g) + defer teardown(t, g) - It("Should reconcile a MachineDeployment", func() { labels := map[string]string{ "foo": "bar", clusterv1.ClusterLabelName: testCluster.Name, @@ -135,19 +138,19 @@ var _ = Describe("MachineDeployment Reconciler", func() { infraTmpl.SetAPIVersion("infrastructure.cluster.x-k8s.io/v1alpha4") infraTmpl.SetName("md-template") infraTmpl.SetNamespace(namespace.Name) - By("Creating the infrastructure template") - Expect(testEnv.Create(ctx, infraTmpl)).To(Succeed()) + t.Log("Creating the infrastructure template") + g.Expect(testEnv.Create(ctx, infraTmpl)).To(Succeed()) // Create the MachineDeployment object and expect Reconcile to be called. - By("Creating the MachineDeployment") - Expect(testEnv.Create(ctx, deployment)).To(Succeed()) + t.Log("Creating the MachineDeployment") + g.Expect(testEnv.Create(ctx, deployment)).To(Succeed()) defer func() { - By("Deleting the MachineDeployment") - Expect(testEnv.Delete(ctx, deployment)).To(Succeed()) + t.Log("Deleting the MachineDeployment") + g.Expect(testEnv.Delete(ctx, deployment)).To(Succeed()) }() - By("Verifying the MachineDeployment has a cluster label and ownerRef") - Eventually(func() bool { + t.Log("Verifying the MachineDeployment has a cluster label and ownerRef") + g.Eventually(func() bool { key := client.ObjectKey{Name: deployment.Name, Namespace: deployment.Namespace} if err := testEnv.Get(ctx, key, deployment); err != nil { return false @@ -162,21 +165,20 @@ var _ = Describe("MachineDeployment Reconciler", func() { }, timeout).Should(BeTrue()) // Verify that the MachineSet was created. 
- By("Verifying the MachineSet was created") + t.Log("Verifying the MachineSet was created") machineSets := &clusterv1.MachineSetList{} - Eventually(func() int { + g.Eventually(func() int { if err := testEnv.List(ctx, machineSets, msListOpts...); err != nil { return -1 } return len(machineSets.Items) }, timeout).Should(BeEquivalentTo(1)) - By("Verifying that the deployment's deletePolicy was propagated to the machineset", func() { - Expect(machineSets.Items[0].Spec.DeletePolicy).Should(Equal("Oldest")) - }) + t.Log("Verifying that the deployment's deletePolicy was propagated to the machineset") + g.Expect(machineSets.Items[0].Spec.DeletePolicy).To(Equal("Oldest")) - By("Verifying the linked infrastructure template has a cluster owner reference") - Eventually(func() bool { + t.Log("Verifying the linked infrastructure template has a cluster owner reference") + g.Eventually(func() bool { obj, err := external.Get(ctx, testEnv, &deployment.Spec.Template.Spec.InfrastructureRef, deployment.Namespace) if err != nil { return false @@ -188,13 +190,12 @@ var _ = Describe("MachineDeployment Reconciler", func() { Name: testCluster.Name, UID: testCluster.UID, }) - }, timeout).Should(BeTrue()) // Verify that expected number of machines are created - By("Verify expected number of machines are created") + t.Log("Verify expected number of machines are created") machines := &clusterv1.MachineList{} - Eventually(func() int { + g.Eventually(func() int { if err := testEnv.List(ctx, machines, client.InNamespace(namespace.Name)); err != nil { return -1 } @@ -202,21 +203,21 @@ var _ = Describe("MachineDeployment Reconciler", func() { }, timeout).Should(BeEquivalentTo(*deployment.Spec.Replicas)) // Verify that machines has MachineSetLabelName and MachineDeploymentLabelName labels - By("Verify machines have expected MachineSetLabelName and MachineDeploymentLabelName") + t.Log("Verify machines have expected MachineSetLabelName and MachineDeploymentLabelName") for _, m := range machines.Items { - Expect(m.Labels[clusterv1.ClusterLabelName]).To(Equal(testCluster.Name)) + g.Expect(m.Labels[clusterv1.ClusterLabelName]).To(Equal(testCluster.Name)) } firstMachineSet := machineSets.Items[0] - Expect(*firstMachineSet.Spec.Replicas).To(BeEquivalentTo(2)) - Expect(*firstMachineSet.Spec.Template.Spec.Version).To(BeEquivalentTo("v1.10.3")) + g.Expect(*firstMachineSet.Spec.Replicas).To(BeEquivalentTo(2)) + g.Expect(*firstMachineSet.Spec.Template.Spec.Version).To(BeEquivalentTo("v1.10.3")) // // Delete firstMachineSet and expect Reconcile to be called to replace it. // - By("Deleting the initial MachineSet") - Expect(testEnv.Delete(ctx, &firstMachineSet)).To(Succeed()) - Eventually(func() bool { + t.Log("Deleting the initial MachineSet") + g.Expect(testEnv.Delete(ctx, &firstMachineSet)).To(Succeed()) + g.Eventually(func() bool { if err := testEnv.List(ctx, machineSets, msListOpts...); err != nil { return false } @@ -232,10 +233,10 @@ var _ = Describe("MachineDeployment Reconciler", func() { // Scale the MachineDeployment and expect Reconcile to be called. 
// secondMachineSet := machineSets.Items[0] - By("Scaling the MachineDeployment to 3 replicas") + t.Log("Scaling the MachineDeployment to 3 replicas") modifyFunc := func(d *clusterv1.MachineDeployment) { d.Spec.Replicas = pointer.Int32Ptr(3) } - Expect(updateMachineDeployment(ctx, testEnv, deployment, modifyFunc)).To(Succeed()) - Eventually(func() int { + g.Expect(updateMachineDeployment(ctx, testEnv, deployment, modifyFunc)).To(Succeed()) + g.Eventually(func() int { key := client.ObjectKey{Name: secondMachineSet.Name, Namespace: secondMachineSet.Namespace} if err := testEnv.Get(ctx, key, &secondMachineSet); err != nil { return -1 @@ -246,34 +247,34 @@ var _ = Describe("MachineDeployment Reconciler", func() { // // Update a MachineDeployment, expect Reconcile to be called and a new MachineSet to appear. // - By("Setting a label on the MachineDeployment") + t.Log("Setting a label on the MachineDeployment") modifyFunc = func(d *clusterv1.MachineDeployment) { d.Spec.Template.Labels["updated"] = "true" } - Expect(updateMachineDeployment(ctx, testEnv, deployment, modifyFunc)).To(Succeed()) - Eventually(func() int { + g.Expect(updateMachineDeployment(ctx, testEnv, deployment, modifyFunc)).To(Succeed()) + g.Eventually(func() int { if err := testEnv.List(ctx, machineSets, msListOpts...); err != nil { return -1 } return len(machineSets.Items) }, timeout).Should(BeEquivalentTo(2)) - By("Updating deletePolicy on the MachineDeployment") + t.Log("Updating deletePolicy on the MachineDeployment") modifyFunc = func(d *clusterv1.MachineDeployment) { d.Spec.Strategy.RollingUpdate.DeletePolicy = pointer.StringPtr("Newest") } - Expect(updateMachineDeployment(ctx, testEnv, deployment, modifyFunc)).To(Succeed()) - Eventually(func() string { + g.Expect(updateMachineDeployment(ctx, testEnv, deployment, modifyFunc)).To(Succeed()) + g.Eventually(func() string { if err := testEnv.List(ctx, machineSets, msListOpts...); err != nil { return "" } return machineSets.Items[0].Spec.DeletePolicy }, timeout).Should(Equal("Newest")) - //Verify that the old machine set retains its delete policy - Expect(machineSets.Items[1].Spec.DeletePolicy).Should(Equal("Oldest")) + // Verify that the old machine set retains its delete policy + g.Expect(machineSets.Items[1].Spec.DeletePolicy).To(Equal("Oldest")) // Verify that all the MachineSets have the expected OwnerRef. - By("Verifying MachineSet owner references") - Eventually(func() bool { + t.Log("Verifying MachineSet owner references") + g.Eventually(func() bool { if err := testEnv.List(ctx, machineSets, msListOpts...); err != nil { return false } @@ -286,7 +287,7 @@ var _ = Describe("MachineDeployment Reconciler", func() { return true }, timeout).Should(BeTrue()) - By("Locating the newest MachineSet") + t.Log("Locating the newest MachineSet") var thirdMachineSet *clusterv1.MachineSet for i := range machineSets.Items { ms := &machineSets.Items[i] @@ -295,14 +296,14 @@ var _ = Describe("MachineDeployment Reconciler", func() { break } } - Expect(thirdMachineSet).NotTo(BeNil()) + g.Expect(thirdMachineSet).NotTo(BeNil()) - By("Verifying the initial MachineSet is deleted") - Eventually(func() int { + t.Log("Verifying the initial MachineSet is deleted") + g.Eventually(func() int { // Set the all non-deleted machines as ready with a NodeRef, so the MachineSet controller can proceed // to properly set AvailableReplicas. 
foundMachines := &clusterv1.MachineList{} - Expect(testEnv.List(ctx, foundMachines, client.InNamespace(namespace.Name))).To(Succeed()) + g.Expect(testEnv.List(ctx, foundMachines, client.InNamespace(namespace.Name))).To(Succeed()) for i := 0; i < len(foundMachines.Items); i++ { m := foundMachines.Items[i] // Skip over deleted Machines @@ -313,8 +314,8 @@ var _ = Describe("MachineDeployment Reconciler", func() { if !metav1.IsControlledBy(&m, thirdMachineSet) { continue } - providerID := fakeInfrastructureRefReady(m.Spec.InfrastructureRef, infraResource) - fakeMachineNodeRef(&m, providerID) + providerID := fakeInfrastructureRefReady(m.Spec.InfrastructureRef, infraResource, g) + fakeMachineNodeRef(&m, providerID, g) } if err := testEnv.List(ctx, machineSets, msListOpts...); err != nil { @@ -336,15 +337,15 @@ var _ = Describe("MachineDeployment Reconciler", func() { clusterv1.ClusterLabelName: testCluster.Name, } - By("Updating MachineDeployment label") + t.Log("Updating MachineDeployment label") modifyFunc = func(d *clusterv1.MachineDeployment) { d.Spec.Selector.MatchLabels = newLabels d.Spec.Template.Labels = newLabels } - Expect(updateMachineDeployment(ctx, testEnv, deployment, modifyFunc)).To(Succeed()) + g.Expect(updateMachineDeployment(ctx, testEnv, deployment, modifyFunc)).To(Succeed()) - By("Verifying if a new MachineSet with updated labels are created") - Eventually(func() int { + t.Log("Verifying if a new MachineSet with updated labels are created") + g.Eventually(func() int { listOpts := client.MatchingLabels(newLabels) if err := testEnv.List(ctx, machineSets, listOpts); err != nil { return -1 @@ -353,12 +354,12 @@ var _ = Describe("MachineDeployment Reconciler", func() { }, timeout).Should(BeEquivalentTo(1)) newms := machineSets.Items[0] - By("Verifying new MachineSet has desired number of replicas") - Eventually(func() bool { + t.Log("Verifying new MachineSet has desired number of replicas") + g.Eventually(func() bool { // Set the all non-deleted machines as ready with a NodeRef, so the MachineSet controller can proceed // to properly set AvailableReplicas. foundMachines := &clusterv1.MachineList{} - Expect(testEnv.List(ctx, foundMachines, client.InNamespace(namespace.Name))).To(Succeed()) + g.Expect(testEnv.List(ctx, foundMachines, client.InNamespace(namespace.Name))).To(Succeed()) for i := 0; i < len(foundMachines.Items); i++ { m := foundMachines.Items[i] if !m.DeletionTimestamp.IsZero() { @@ -368,8 +369,8 @@ var _ = Describe("MachineDeployment Reconciler", func() { if !metav1.IsControlledBy(&m, &newms) { continue } - providerID := fakeInfrastructureRefReady(m.Spec.InfrastructureRef, infraResource) - fakeMachineNodeRef(&m, providerID) + providerID := fakeInfrastructureRefReady(m.Spec.InfrastructureRef, infraResource, g) + fakeMachineNodeRef(&m, providerID, g) } listOpts := client.MatchingLabels(newLabels) @@ -379,8 +380,8 @@ var _ = Describe("MachineDeployment Reconciler", func() { return machineSets.Items[0].Status.Replicas == *deployment.Spec.Replicas }, timeout*5).Should(BeTrue()) - By("Verifying MachineSets with old labels are deleted") - Eventually(func() int { + t.Log("Verifying MachineSets with old labels are deleted") + g.Eventually(func() int { listOpts := client.MatchingLabels(oldLabels) if err := testEnv.List(ctx, machineSets, listOpts); err != nil { return -1 @@ -390,9 +391,9 @@ var _ = Describe("MachineDeployment Reconciler", func() { }, timeout*5).Should(BeEquivalentTo(0)) // Validate that the controller set the cluster name label in selector. 
- Expect(deployment.Status.Selector).To(ContainSubstring(testCluster.Name)) + g.Expect(deployment.Status.Selector).To(ContainSubstring(testCluster.Name)) }) -}) +} func TestMachineSetToDeployments(t *testing.T) { g := NewWithT(t) @@ -713,7 +714,6 @@ func TestGetMachineSetsForDeployment(t *testing.T) { got, err := r.getMachineSetsForDeployment(ctx, &tc.machineDeployment) g.Expect(err).NotTo(HaveOccurred()) - g.Expect(got).To(HaveLen(len(tc.expected))) for idx, res := range got { diff --git a/controllers/machineset_controller_test.go b/controllers/machineset_controller_test.go index 44b58e907bf7..d5bef349bf0c 100644 --- a/controllers/machineset_controller_test.go +++ b/controllers/machineset_controller_test.go @@ -20,7 +20,6 @@ import ( "testing" "time" - . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" @@ -39,27 +38,31 @@ import ( var _ reconcile.Reconciler = &MachineSetReconciler{} -var _ = Describe("MachineSet Reconciler", func() { +func TestMachineSetReconciler(t *testing.T) { namespace := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "ms-test"}} testCluster := &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{Namespace: namespace.Name, Name: "test-cluster"}} - BeforeEach(func() { - By("Creating the namespace") - Expect(testEnv.Create(ctx, namespace)).To(Succeed()) - By("Creating the Cluster") - Expect(testEnv.Create(ctx, testCluster)).To(Succeed()) - By("Creating the Cluster Kubeconfig Secret") - Expect(testEnv.CreateKubeconfigSecret(ctx, testCluster)).To(Succeed()) - }) + setup := func(t *testing.T, g *WithT) { + t.Log("Creating the namespace") + g.Expect(testEnv.Create(ctx, namespace)).To(Succeed()) + t.Log("Creating the Cluster") + g.Expect(testEnv.Create(ctx, testCluster)).To(Succeed()) + t.Log("Creating the Cluster Kubeconfig Secret") + g.Expect(testEnv.CreateKubeconfigSecret(ctx, testCluster)).To(Succeed()) + } - AfterEach(func() { - By("Deleting the Cluster") - Expect(testEnv.Delete(ctx, testCluster)).To(Succeed()) - By("Deleting the namespace") - Expect(testEnv.Delete(ctx, namespace)).To(Succeed()) - }) + teardown := func(t *testing.T, g *WithT) { + t.Log("Deleting the Cluster") + g.Expect(testEnv.Delete(ctx, testCluster)).To(Succeed()) + t.Log("Deleting the namespace") + g.Expect(testEnv.Delete(ctx, namespace)).To(Succeed()) + } + + t.Run("Should reconcile a MachineSet", func(t *testing.T) { + g := NewWithT(t) + setup(t, g) + defer teardown(t, g) - It("Should reconcile a MachineSet", func() { replicas := int32(2) version := "v1.14.2" instance := &clusterv1.MachineSet{ @@ -121,7 +124,7 @@ var _ = Describe("MachineSet Reconciler", func() { bootstrapTmpl.SetAPIVersion("bootstrap.cluster.x-k8s.io/v1alpha4") bootstrapTmpl.SetName("ms-template") bootstrapTmpl.SetNamespace(namespace.Name) - Expect(testEnv.Create(ctx, bootstrapTmpl)).To(Succeed()) + g.Expect(testEnv.Create(ctx, bootstrapTmpl)).To(Succeed()) // Create infrastructure template resource. infraResource := map[string]interface{}{ @@ -143,16 +146,16 @@ var _ = Describe("MachineSet Reconciler", func() { infraTmpl.SetAPIVersion("infrastructure.cluster.x-k8s.io/v1alpha4") infraTmpl.SetName("ms-template") infraTmpl.SetNamespace(namespace.Name) - Expect(testEnv.Create(ctx, infraTmpl)).To(Succeed()) + g.Expect(testEnv.Create(ctx, infraTmpl)).To(Succeed()) // Create the MachineSet. 
- Expect(testEnv.Create(ctx, instance)).To(Succeed()) + g.Expect(testEnv.Create(ctx, instance)).To(Succeed()) defer func() { - Expect(testEnv.Delete(ctx, instance)).To(Succeed()) + g.Expect(testEnv.Delete(ctx, instance)).To(Succeed()) }() - By("Verifying the linked bootstrap template has a cluster owner reference") - Eventually(func() bool { + t.Log("Verifying the linked bootstrap template has a cluster owner reference") + g.Eventually(func() bool { obj, err := external.Get(ctx, testEnv, instance.Spec.Template.Spec.Bootstrap.ConfigRef, instance.Namespace) if err != nil { return false @@ -166,8 +169,8 @@ var _ = Describe("MachineSet Reconciler", func() { }) }, timeout).Should(BeTrue()) - By("Verifying the linked infrastructure template has a cluster owner reference") - Eventually(func() bool { + t.Log("Verifying the linked infrastructure template has a cluster owner reference") + g.Eventually(func() bool { obj, err := external.Get(ctx, testEnv, &instance.Spec.Template.Spec.InfrastructureRef, instance.Namespace) if err != nil { return false @@ -184,7 +187,7 @@ var _ = Describe("MachineSet Reconciler", func() { machines := &clusterv1.MachineList{} // Verify that we have 2 replicas. - Eventually(func() int { + g.Eventually(func() int { if err := testEnv.List(ctx, machines, client.InNamespace(namespace.Name)); err != nil { return -1 } @@ -193,16 +196,16 @@ var _ = Describe("MachineSet Reconciler", func() { // Set the infrastructure reference as ready. for _, m := range machines.Items { - fakeBootstrapRefReady(*m.Spec.Bootstrap.ConfigRef, bootstrapResource) - fakeInfrastructureRefReady(m.Spec.InfrastructureRef, infraResource) + fakeBootstrapRefReady(*m.Spec.Bootstrap.ConfigRef, bootstrapResource, g) + fakeInfrastructureRefReady(m.Spec.InfrastructureRef, infraResource, g) } // Try to delete 1 machine and check the MachineSet scales back up. machineToBeDeleted := machines.Items[0] - Expect(testEnv.Delete(ctx, &machineToBeDeleted)).To(Succeed()) + g.Expect(testEnv.Delete(ctx, &machineToBeDeleted)).To(Succeed()) // Verify that the Machine has been deleted. - Eventually(func() bool { + g.Eventually(func() bool { key := client.ObjectKey{Name: machineToBeDeleted.Name, Namespace: machineToBeDeleted.Namespace} if err := testEnv.Get(ctx, key, &machineToBeDeleted); apierrors.IsNotFound(err) || !machineToBeDeleted.DeletionTimestamp.IsZero() { return true @@ -211,7 +214,7 @@ var _ = Describe("MachineSet Reconciler", func() { }, timeout).Should(BeTrue()) // Verify that we have 2 replicas. - Eventually(func() (ready int) { + g.Eventually(func() (ready int) { if err := testEnv.List(ctx, machines, client.InNamespace(namespace.Name)); err != nil { return -1 } @@ -233,15 +236,15 @@ var _ = Describe("MachineSet Reconciler", func() { continue } - Expect(m.Spec.Version).ToNot(BeNil()) - Expect(*m.Spec.Version).To(BeEquivalentTo("v1.14.2")) - fakeBootstrapRefReady(*m.Spec.Bootstrap.ConfigRef, bootstrapResource) - providerID := fakeInfrastructureRefReady(m.Spec.InfrastructureRef, infraResource) - fakeMachineNodeRef(&m, providerID) + g.Expect(m.Spec.Version).ToNot(BeNil()) + g.Expect(*m.Spec.Version).To(BeEquivalentTo("v1.14.2")) + fakeBootstrapRefReady(*m.Spec.Bootstrap.ConfigRef, bootstrapResource, g) + providerID := fakeInfrastructureRefReady(m.Spec.InfrastructureRef, infraResource, g) + fakeMachineNodeRef(&m, providerID, g) } // Verify that all Machines are Ready. 
- Eventually(func() int32 { + g.Eventually(func() int32 { key := client.ObjectKey{Name: instance.Name, Namespace: instance.Namespace} if err := testEnv.Get(ctx, key, instance); err != nil { return -1 @@ -250,9 +253,9 @@ var _ = Describe("MachineSet Reconciler", func() { }, timeout).Should(BeEquivalentTo(replicas)) // Validate that the controller set the cluster name label in selector. - Expect(instance.Status.Selector).To(ContainSubstring(testCluster.Name)) + g.Expect(instance.Status.Selector).To(ContainSubstring(testCluster.Name)) }) -}) +} func TestMachineSetOwnerReference(t *testing.T) { testCluster := &clusterv1.Cluster{ diff --git a/controllers/suite_test.go b/controllers/suite_test.go index fdbbf6dc76b0..f86b0899f074 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -22,7 +22,6 @@ import ( "testing" "time" - . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" "github.com/onsi/gomega/types" "github.com/pkg/errors" @@ -33,7 +32,6 @@ import ( "sigs.k8s.io/cluster-api/test/helpers" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/envtest/printer" "sigs.k8s.io/controller-runtime/pkg/log" // +kubebuilder:scaffold:imports ) @@ -48,7 +46,7 @@ var ( ) func TestMain(m *testing.M) { - fmt.Println("Creating new test environment") + fmt.Println("Creating a new test environment") testEnv = helpers.NewTestEnvironment() // Set up a ClusterCacheTracker and ClusterCacheReconciler to provide to controllers @@ -102,36 +100,27 @@ func TestMain(m *testing.M) { } go func() { - fmt.Println("Starting the manager") + fmt.Println("Starting the test environment manager") if err := testEnv.StartManager(ctx); err != nil { - panic(fmt.Sprintf("Failed to start the envtest manager: %v", err)) + panic(fmt.Sprintf("Failed to start the test environment manager: %v", err)) } }() <-testEnv.Manager.Elected() testEnv.WaitForWebhooks() + SetDefaultEventuallyPollingInterval(100 * time.Millisecond) + SetDefaultEventuallyTimeout(timeout) + code := m.Run() - fmt.Println("Tearing down test suite") + fmt.Println("Stopping the test environment") if err := testEnv.Stop(); err != nil { - panic(fmt.Sprintf("Failed to stop envtest: %v", err)) + panic(fmt.Sprintf("Failed to stop the test environment: %v", err)) } os.Exit(code) } -// TestGinkgoSuite will run the ginkgo tests. -// This will run with the testEnv setup and teardown in TestMain. 
-func TestGinkgoSuite(t *testing.T) { - SetDefaultEventuallyPollingInterval(100 * time.Millisecond) - SetDefaultEventuallyTimeout(timeout) - RegisterFailHandler(Fail) - - RunSpecsWithDefaultAndCustomReporters(t, - "Controllers Suite", - []Reporter{printer.NewlineReporter{}}) -} - func ContainRefOfGroupKind(group, kind string) types.GomegaMatcher { return &refGroupKindMatcher{ kind: kind, diff --git a/controllers/suite_util_test.go b/controllers/suite_util_test.go index 11c0b673446c..f8cdc2d06de5 100644 --- a/controllers/suite_util_test.go +++ b/controllers/suite_util_test.go @@ -38,9 +38,9 @@ func intOrStrPtr(i int32) *intstr.IntOrString { return &res } -func fakeBootstrapRefReady(ref corev1.ObjectReference, base map[string]interface{}) { +func fakeBootstrapRefReady(ref corev1.ObjectReference, base map[string]interface{}, g *WithT) { bref := (&unstructured.Unstructured{Object: base}).DeepCopy() - Eventually(func() error { + g.Eventually(func() error { return testEnv.Get(ctx, client.ObjectKey{Name: ref.Name, Namespace: ref.Namespace}, bref) }).Should(Succeed()) @@ -53,33 +53,33 @@ func fakeBootstrapRefReady(ref corev1.ObjectReference, base map[string]interface "value": "data", }, } - Expect(testEnv.Create(ctx, bdataSecret)).To(Succeed()) + g.Expect(testEnv.Create(ctx, bdataSecret)).To(Succeed()) brefPatch := client.MergeFrom(bref.DeepCopy()) - Expect(unstructured.SetNestedField(bref.Object, true, "status", "ready")).To(Succeed()) - Expect(unstructured.SetNestedField(bref.Object, bdataSecret.Name, "status", "dataSecretName")).To(Succeed()) - Expect(testEnv.Status().Patch(ctx, bref, brefPatch)).To(Succeed()) + g.Expect(unstructured.SetNestedField(bref.Object, true, "status", "ready")).To(Succeed()) + g.Expect(unstructured.SetNestedField(bref.Object, bdataSecret.Name, "status", "dataSecretName")).To(Succeed()) + g.Expect(testEnv.Status().Patch(ctx, bref, brefPatch)).To(Succeed()) } -func fakeInfrastructureRefReady(ref corev1.ObjectReference, base map[string]interface{}) string { +func fakeInfrastructureRefReady(ref corev1.ObjectReference, base map[string]interface{}, g *WithT) string { iref := (&unstructured.Unstructured{Object: base}).DeepCopy() - Eventually(func() error { + g.Eventually(func() error { return testEnv.Get(ctx, client.ObjectKey{Name: ref.Name, Namespace: ref.Namespace}, iref) }).Should(Succeed()) irefPatch := client.MergeFrom(iref.DeepCopy()) providerID := fmt.Sprintf("test:////%v", uuid.NewUUID()) - Expect(unstructured.SetNestedField(iref.Object, providerID, "spec", "providerID")).To(Succeed()) - Expect(testEnv.Patch(ctx, iref, irefPatch)).To(Succeed()) + g.Expect(unstructured.SetNestedField(iref.Object, providerID, "spec", "providerID")).To(Succeed()) + g.Expect(testEnv.Patch(ctx, iref, irefPatch)).To(Succeed()) irefPatch = client.MergeFrom(iref.DeepCopy()) - Expect(unstructured.SetNestedField(iref.Object, true, "status", "ready")).To(Succeed()) - Expect(testEnv.Status().Patch(ctx, iref, irefPatch)).To(Succeed()) + g.Expect(unstructured.SetNestedField(iref.Object, true, "status", "ready")).To(Succeed()) + g.Expect(testEnv.Status().Patch(ctx, iref, irefPatch)).To(Succeed()) return providerID } -func fakeMachineNodeRef(m *clusterv1.Machine, pid string) { - Eventually(func() error { +func fakeMachineNodeRef(m *clusterv1.Machine, pid string, g *WithT) { + g.Eventually(func() error { key := client.ObjectKey{Name: m.Name, Namespace: m.Namespace} return testEnv.Get(ctx, key, &clusterv1.Machine{}) }).Should(Succeed()) @@ -97,9 +97,9 @@ func fakeMachineNodeRef(m *clusterv1.Machine, 
pid string) { ProviderID: pid, }, } - Expect(testEnv.Create(ctx, node)).To(Succeed()) + g.Expect(testEnv.Create(ctx, node)).To(Succeed()) - Eventually(func() error { + g.Eventually(func() error { key := client.ObjectKey{Name: node.Name, Namespace: node.Namespace} return testEnv.Get(ctx, key, &corev1.Node{}) }).Should(Succeed()) @@ -107,12 +107,12 @@ func fakeMachineNodeRef(m *clusterv1.Machine, pid string) { // Patch the node and make it look like ready. patchNode := client.MergeFrom(node.DeepCopy()) node.Status.Conditions = append(node.Status.Conditions, corev1.NodeCondition{Type: corev1.NodeReady, Status: corev1.ConditionTrue}) - Expect(testEnv.Status().Patch(ctx, node, patchNode)).To(Succeed()) + g.Expect(testEnv.Status().Patch(ctx, node, patchNode)).To(Succeed()) // Patch the Machine. patchMachine := client.MergeFrom(m.DeepCopy()) m.Spec.ProviderID = pointer.StringPtr(pid) - Expect(testEnv.Patch(ctx, m, patchMachine)).To(Succeed()) + g.Expect(testEnv.Patch(ctx, m, patchMachine)).To(Succeed()) patchMachine = client.MergeFrom(m.DeepCopy()) m.Status.NodeRef = &corev1.ObjectReference{ @@ -120,5 +120,5 @@ func fakeMachineNodeRef(m *clusterv1.Machine, pid string) { Kind: node.Kind, Name: node.Name, } - Expect(testEnv.Status().Patch(ctx, m, patchMachine)).To(Succeed()) + g.Expect(testEnv.Status().Patch(ctx, m, patchMachine)).To(Succeed()) }
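The diff above applies one mechanical conversion throughout: `var _ = Describe(...)` becomes a `func TestXxx(t *testing.T)`, each `It(...)` becomes a `t.Run(...)` subtest that binds Gomega to the subtest via `g := NewWithT(t)`, `BeforeEach`/`AfterEach` become explicit `setup`/`teardown` helpers, `By(...)` becomes `t.Log(...)`, and shared helpers such as `fakeMachineNodeRef` take the `*WithT` as an argument. A minimal sketch of that pattern follows; the `Widget` test name, the `doSomething` helper, and the log messages are placeholders for illustration, not code from this patch:

    package controllers_example

    import (
    	"testing"

    	. "github.com/onsi/gomega"
    )

    // doSomething stands in for a call such as testEnv.Create(ctx, obj),
    // which returns an error suitable for the Succeed() matcher.
    func doSomething() error { return nil }

    // Before (Ginkgo):
    //
    //	var _ = Describe("Widget Reconciler", func() {
    //		BeforeEach(func() { By("Creating fixtures") })
    //		AfterEach(func() { By("Deleting fixtures") })
    //		It("Should reconcile a Widget", func() {
    //			Expect(doSomething()).To(Succeed())
    //		})
    //	})
    //
    // After (plain go test):
    func TestWidgetReconciler(t *testing.T) {
    	setup := func(t *testing.T, g *WithT) {
    		t.Log("Creating fixtures") // replaces BeforeEach + By
    	}
    	teardown := func(t *testing.T, g *WithT) {
    		t.Log("Deleting fixtures") // replaces AfterEach + By
    	}

    	t.Run("Should reconcile a Widget", func(t *testing.T) {
    		g := NewWithT(t) // failed assertions now fail this subtest directly
    		setup(t, g)
    		defer teardown(t, g)

    		g.Expect(doSomething()).To(Succeed())
    	})
    }

One caveat of this conversion: unlike Ginkgo's BeforeEach/AfterEach, the explicit setup/teardown helpers run only where they are called, so every new t.Run subtest must remember to invoke them, as each converted subtest in this patch does.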