diff --git a/controlplane/kubeadm/api/v1alpha3/kubeadm_control_plane_types_test.go b/controlplane/kubeadm/api/v1alpha3/kubeadm_control_plane_types_test.go
new file mode 100644
index 000000000000..36e81284dff8
--- /dev/null
+++ b/controlplane/kubeadm/api/v1alpha3/kubeadm_control_plane_types_test.go
@@ -0,0 +1,112 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha3
+
+import (
+	"context"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/types"
+	cabpkv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha3"
+)
+
+// These tests are written in BDD style using the Ginkgo framework. Refer to
+// http://onsi.github.io/ginkgo to learn more.
+
+var _ = Describe("KubeadmControlPlane", func() {
+	var (
+		key              types.NamespacedName
+		created, fetched *KubeadmControlPlane
+		ctx              = context.TODO()
+	)
+
+	// Add tests for OpenAPI validation (or additional CRD features) specified in
+	// your API definition.
+	// Avoid adding tests for vanilla CRUD operations because they would
+	// test the Kubernetes API server, which isn't the goal here.
+	Context("Create API", func() {
+
+		It("should create an object successfully", func() {
+
+			key = types.NamespacedName{
+				Name:      "foo",
+				Namespace: "default",
+			}
+
+			// wrong version value
+			created = &KubeadmControlPlane{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "foo",
+					Namespace: "default",
+				},
+				Spec: KubeadmControlPlaneSpec{
+					InfrastructureTemplate: corev1.ObjectReference{},
+					Version:                "1",
+					KubeadmConfigSpec:      cabpkv1.KubeadmConfigSpec{},
+				},
+			}
+
+			By("creating an API obj with wrong version")
+			Expect(k8sClient.Create(ctx, created)).NotTo(Succeed())
+
+			// missing field
+			created2 := map[string]interface{}{
+				"kind":       "KubeadmControlPlane",
+				"apiVersion": "controlplane.cluster.x-k8s.io/v1alpha3",
+				"metadata": map[string]interface{}{
+					"name":      "foo",
+					"namespace": "default",
+				},
+				"spec": map[string]interface{}{
+					"version": "v1.1.1",
+				},
+			}
+			createdUnstructured := &unstructured.Unstructured{Object: created2}
+
+			By("creating an API obj with missing field")
+			Expect(k8sClient.Create(ctx, createdUnstructured)).NotTo(Succeed())
+
+			created = &KubeadmControlPlane{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "foo",
+					Namespace: "default",
+				},
+				Spec: KubeadmControlPlaneSpec{
+					InfrastructureTemplate: corev1.ObjectReference{},
+					Version:                "v1.1.1",
+					KubeadmConfigSpec:      cabpkv1.KubeadmConfigSpec{},
+				},
+			}
+
+			By("creating an API obj")
+			Expect(k8sClient.Create(ctx, created)).To(Succeed())
+
+			fetched = &KubeadmControlPlane{}
+			Expect(k8sClient.Get(ctx, key, fetched)).To(Succeed())
+			Expect(fetched).To(Equal(created))
+
+			By("deleting the created object")
+			Expect(k8sClient.Delete(ctx, created)).To(Succeed())
+			Expect(k8sClient.Get(ctx, key, created)).ToNot(Succeed())
+		})
+
+	})
+})
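The spec above drives OpenAPI validation through envtest. A hypothetical table-driven variant of the same version check is sketched below; it is not part of the PR, and it assumes Ginkgo v1's table extension (`github.com/onsi/ginkgo/extensions/table`) is dot-imported alongside the file's existing imports and that the suite-initialized `k8sClient` is available:

```go
// Hypothetical spec: the same "1" vs "v1.1.1" check expressed as a Ginkgo
// table. Object names get a per-entry suffix so the accepted entry does not
// collide with objects created by other specs.
var _ = Describe("KubeadmControlPlane version validation", func() {
	DescribeTable("creating objects",
		func(version string, shouldSucceed bool) {
			kcp := &KubeadmControlPlane{
				ObjectMeta: metav1.ObjectMeta{Name: "kcp-" + version, Namespace: "default"},
				Spec: KubeadmControlPlaneSpec{
					InfrastructureTemplate: corev1.ObjectReference{},
					Version:                version,
					KubeadmConfigSpec:      cabpkv1.KubeadmConfigSpec{},
				},
			}
			Expect(k8sClient.Create(context.TODO(), kcp) == nil).To(Equal(shouldSucceed))
		},
		Entry("rejects a version without the v prefix", "1", false),
		Entry("accepts a well-formed semver", "v1.1.1", true),
	)
})
```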
diff --git a/controlplane/kubeadm/api/v1alpha3/suite_test.go b/controlplane/kubeadm/api/v1alpha3/suite_test.go
index 97adc08a4fd6..2881e158e173 100644
--- a/controlplane/kubeadm/api/v1alpha3/suite_test.go
+++ b/controlplane/kubeadm/api/v1alpha3/suite_test.go
@@ -1,5 +1,5 @@
 /*
-Copyright 2019 The Kubernetes Authors.
+Copyright 2020 The Kubernetes Authors.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -58,8 +58,8 @@ var _ = BeforeSuite(func(done Done) {
 	By("bootstrapping test environment")
 	testEnv = &envtest.Environment{
 		CRDDirectoryPaths: []string{
-			filepath.Join("..", "..", "..", "..", "config", "bootstrap", "crd", "bases"),
-			filepath.Join("..", "..", "..", "..", "config", "controlplane", "crd", "bases"),
+			filepath.Join("..", "..", "config", "crd", "bases"),
+			filepath.Join("..", "..", "..", "..", "bootstrap", "kubeadm", "config", "crd", "bases"),
 			filepath.Join("..", "..", "..", "..", "config", "crd", "bases"),
 		},
 	}
diff --git a/controlplane/kubeadm/controllers/controller_test.go b/controlplane/kubeadm/controllers/controller_test.go
index f54cd77b08fc..7b4cdf21b5c8 100644
--- a/controlplane/kubeadm/controllers/controller_test.go
+++ b/controlplane/kubeadm/controllers/controller_test.go
@@ -21,6 +21,7 @@ import (
 	"fmt"
 	"testing"
 
+	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
 
 	appsv1 "k8s.io/api/apps/v1"
@@ -28,16 +29,10 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/kubernetes/scheme"
 	"k8s.io/client-go/tools/record"
 	"k8s.io/klog/klogr"
-	ctrl "sigs.k8s.io/controller-runtime"
-	"sigs.k8s.io/controller-runtime/pkg/client"
-	"sigs.k8s.io/controller-runtime/pkg/client/fake"
-	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
-	"sigs.k8s.io/controller-runtime/pkg/handler"
-	"sigs.k8s.io/controller-runtime/pkg/log"
-
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
 	bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha3"
 	kubeadmv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/types/v1beta1"
@@ -48,24 +43,78 @@
 	"sigs.k8s.io/cluster-api/util"
 	"sigs.k8s.io/cluster-api/util/kubeconfig"
 	"sigs.k8s.io/cluster-api/util/secret"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/client/fake"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+	"sigs.k8s.io/controller-runtime/pkg/handler"
+	"sigs.k8s.io/controller-runtime/pkg/log"
 )
 
+var _ = Describe("KubeadmControlPlaneReconciler", func() {
+	BeforeEach(func() {})
+	AfterEach(func() {})
+
+	Context("Reconcile a KubeadmControlPlane", func() {
+		It("should return error if owner cluster is missing", func() {
+			clusterName, clusterNamespace := "foo", "default"
+			cluster := newCluster(&types.NamespacedName{Name: clusterName, Namespace: clusterNamespace})
+
+			kcp := &controlplanev1.KubeadmControlPlane{
+				ObjectMeta: metav1.ObjectMeta{
+					Namespace: clusterNamespace,
+					Name:      clusterName,
+					OwnerReferences: []metav1.OwnerReference{
+						{
+							Kind:       "Cluster",
+							APIVersion: clusterv1.GroupVersion.String(),
+							Name:       clusterName,
+							UID:        "1",
+						},
+					},
+				},
+				Spec: controlplanev1.KubeadmControlPlaneSpec{
+					Version: "v1.16.6",
+				},
+			}
+
+			kcp.Default()
+
+			Expect(k8sClient.Create(context.Background(), kcp)).To(Succeed())
+			Expect(k8sClient.Create(context.Background(), cluster)).To(Succeed())
+
+			r := &KubeadmControlPlaneReconciler{
+				Client:   k8sClient,
+				Log:      log.Log,
+				recorder: record.NewFakeRecorder(32),
+			}
+
+			result, err := r.Reconcile(ctrl.Request{NamespacedName: util.ObjectKey(kcp)})
+			Expect(err).NotTo(HaveOccurred())
+			Expect(result).To(Equal(ctrl.Result{}))
+
+			By("deleting the owner cluster, so that a second reconcile returns an error")
+			Expect(k8sClient.Delete(context.Background(), cluster)).To(Succeed())
+
+			result, err = r.Reconcile(ctrl.Request{NamespacedName: util.ObjectKey(kcp)})
+
+			Expect(err).To(HaveOccurred())
+			Expect(result.Requeue).To(BeFalse())
+		})
+	})
+})
+
 func TestClusterToKubeadmControlPlane(t *testing.T) {
 	g := NewWithT(t)
 	fakeClient := newFakeClient(g)
 
-	cluster := &clusterv1.Cluster{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      "foo",
-			Namespace: "test",
-		},
-		Spec: clusterv1.ClusterSpec{
-			ControlPlaneRef: &corev1.ObjectReference{
-				Kind:       "KubeadmControlPlane",
-				Namespace:  "test",
-				Name:       "kcp-foo",
-				APIVersion: controlplanev1.GroupVersion.String(),
-			},
+	cluster := newCluster(&types.NamespacedName{Name: "foo", Namespace: "test"})
+	cluster.Spec = clusterv1.ClusterSpec{
+		ControlPlaneRef: &corev1.ObjectReference{
+			Kind:       "KubeadmControlPlane",
+			Namespace:  "test",
+			Name:       "kcp-foo",
+			APIVersion: controlplanev1.GroupVersion.String(),
 		},
 	}
 
@@ -96,13 +145,7 @@ func TestClusterToKubeadmControlPlaneNoControlPlane(t *testing.T) {
 	g := NewWithT(t)
 	fakeClient := newFakeClient(g)
 
-	cluster := &clusterv1.Cluster{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      "foo",
-			Namespace: "test",
-		},
-		Spec: clusterv1.ClusterSpec{},
-	}
+	cluster := newCluster(&types.NamespacedName{Name: "foo", Namespace: "test"})
 
 	r := &KubeadmControlPlaneReconciler{
 		Client: fakeClient,
@@ -123,18 +166,13 @@ func TestClusterToKubeadmControlPlaneOtherControlPlane(t *testing.T) {
 	g := NewWithT(t)
 	fakeClient := newFakeClient(g)
 
-	cluster := &clusterv1.Cluster{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      "foo",
-			Namespace: "test",
-		},
-		Spec: clusterv1.ClusterSpec{
-			ControlPlaneRef: &corev1.ObjectReference{
-				Kind:       "OtherControlPlane",
-				Namespace:  "test",
-				Name:       "other-foo",
-				APIVersion: controlplanev1.GroupVersion.String(),
-			},
+	cluster := newCluster(&types.NamespacedName{Name: "foo", Namespace: "test"})
+	cluster.Spec = clusterv1.ClusterSpec{
+		ControlPlaneRef: &corev1.ObjectReference{
+			Kind:       "OtherControlPlane",
+			Namespace:  "test",
+			Name:       "other-foo",
+			APIVersion: controlplanev1.GroupVersion.String(),
 		},
 	}
 
@@ -186,6 +224,32 @@ func TestReconcileNoClusterOwnerRef(t *testing.T) {
 	g.Expect(machineList.Items).To(BeEmpty())
 }
 
+func TestReconcileNoKCP(t *testing.T) {
+	g := NewWithT(t)
+
+	kcp := &controlplanev1.KubeadmControlPlane{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: "test",
+			Name:      "foo",
+		},
+		Spec: controlplanev1.KubeadmControlPlaneSpec{
+			Version: "v1.16.6",
+		},
+	}
+
+	fakeClient := newFakeClient(g)
+	log.SetLogger(klogr.New())
+
+	r := &KubeadmControlPlaneReconciler{
+		Client:   fakeClient,
+		Log:      log.Log,
+		recorder: record.NewFakeRecorder(32),
+	}
+
+	_, err := r.Reconcile(ctrl.Request{NamespacedName: util.ObjectKey(kcp)})
+	g.Expect(err).NotTo(HaveOccurred())
+}
+
 func TestReconcileNoCluster(t *testing.T) {
 	g := NewWithT(t)
 
@@ -225,18 +289,61 @@
 	g.Expect(machineList.Items).To(BeEmpty())
 }
 
-func TestReconcileClusterNoEndpoints(t *testing.T) {
+func TestReconcilePaused(t *testing.T) {
 	g := NewWithT(t)
 
-	cluster := &clusterv1.Cluster{
+	clusterName, clusterNamespace := "foo", "test"
+
+	// Test: cluster is paused and kcp is not
+	cluster := newCluster(&types.NamespacedName{Namespace: clusterNamespace, Name: clusterName})
+	cluster.Spec.Paused = true
+	kcp := &controlplanev1.KubeadmControlPlane{
 		ObjectMeta: metav1.ObjectMeta{
-			Name:      "foo",
-			Namespace: "test",
+			Namespace: clusterNamespace,
+			Name:      clusterName,
+			OwnerReferences: []metav1.OwnerReference{
+				{
+					Kind:       "Cluster",
+					APIVersion: clusterv1.GroupVersion.String(),
+					Name:       clusterName,
+				},
+			},
 		},
-		Status: clusterv1.ClusterStatus{
-			InfrastructureReady: true,
+		Spec: controlplanev1.KubeadmControlPlaneSpec{
+			Version: "v1.16.6",
 		},
 	}
+	kcp.Default()
+	g.Expect(kcp.ValidateCreate()).To(Succeed())
+	fakeClient := newFakeClient(g, kcp.DeepCopy(), cluster.DeepCopy())
+	log.SetLogger(klogr.New())
+
+	r := &KubeadmControlPlaneReconciler{
+		Client:   fakeClient,
+		Log:      log.Log,
+		recorder: record.NewFakeRecorder(32),
+	}
+
+	_, err := r.Reconcile(ctrl.Request{NamespacedName: util.ObjectKey(kcp)})
+	g.Expect(err).NotTo(HaveOccurred())
+
+	machineList := &clusterv1.MachineList{}
+	g.Expect(fakeClient.List(context.Background(), machineList, client.InNamespace(clusterNamespace))).To(Succeed())
+	g.Expect(machineList.Items).To(BeEmpty())
+
+	// Test: kcp is paused and cluster is not
+	cluster.Spec.Paused = false
+	kcp.ObjectMeta.Annotations = map[string]string{}
+	kcp.ObjectMeta.Annotations[clusterv1.PausedAnnotation] = "paused"
+	_, err = r.Reconcile(ctrl.Request{NamespacedName: util.ObjectKey(kcp)})
+	g.Expect(err).NotTo(HaveOccurred())
+}
+
+func TestReconcileClusterNoEndpoints(t *testing.T) {
+	g := NewWithT(t)
+
+	cluster := newCluster(&types.NamespacedName{Name: "foo", Namespace: "test"})
+	cluster.Status = clusterv1.ClusterStatus{InfrastructureReady: true}
 
 	kcp := &controlplanev1.KubeadmControlPlane{
 		ObjectMeta: metav1.ObjectMeta{
@@ -291,21 +398,14 @@ func TestReconcileClusterNoEndpoints(t *testing.T) {
 func TestReconcileInitializeControlPlane(t *testing.T) {
 	g := NewWithT(t)
 
-	cluster := &clusterv1.Cluster{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      "foo",
-			Namespace: "test",
-		},
-		Spec: clusterv1.ClusterSpec{
-			ControlPlaneEndpoint: clusterv1.APIEndpoint{
-				Host: "test.local",
-				Port: 9999,
-			},
-		},
-		Status: clusterv1.ClusterStatus{
-			InfrastructureReady: true,
+	cluster := newCluster(&types.NamespacedName{Name: "foo", Namespace: "test"})
+	cluster.Spec = clusterv1.ClusterSpec{
+		ControlPlaneEndpoint: clusterv1.APIEndpoint{
+			Host: "test.local",
+			Port: 9999,
 		},
 	}
+	cluster.Status = clusterv1.ClusterStatus{InfrastructureReady: true}
 
 	genericMachineTemplate := &unstructured.Unstructured{
 		Object: map[string]interface{}{
@@ -465,12 +565,7 @@ kubernetesVersion: v1.16.1`,
 func TestKubeadmControlPlaneReconciler_updateCoreDNS(t *testing.T) {
 	// TODO: (wfernandes) This test could use some refactor love.
 
-	cluster := &clusterv1.Cluster{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      "foo",
-			Namespace: "test",
-		},
-	}
+	cluster := newCluster(&types.NamespacedName{Name: "foo", Namespace: "default"})
 	kcp := &controlplanev1.KubeadmControlPlane{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: cluster.Namespace,
@@ -848,7 +943,8 @@ func TestKubeadmControlPlaneReconciler_scaleDownControlPlane(t *testing.T) {
 			ControlPlaneHealthy: true,
 		},
 	}
-	cluster := &clusterv1.Cluster{}
+	cluster := newCluster(&types.NamespacedName{Name: "foo", Namespace: "default"})
+
 	kcp := &controlplanev1.KubeadmControlPlane{}
 	controlPlane := &internal.ControlPlane{
 		KCP: kcp,
@@ -863,6 +959,8 @@ func TestKubeadmControlPlaneReconciler_scaleDownControlPlane(t *testing.T) {
 	g.Expect(err).To(HaveOccurred())
 }
 
+// test utils
+
 func newFakeClient(g *WithT, initObjs ...runtime.Object) client.Client {
 	g.Expect(clusterv1.AddToScheme(scheme.Scheme)).To(Succeed())
 	g.Expect(bootstrapv1.AddToScheme(scheme.Scheme)).To(Succeed())
@@ -871,18 +969,13 @@ func newFakeClient(g *WithT, initObjs ...runtime.Object) client.Client {
 }
 
 func createClusterWithControlPlane() (*clusterv1.Cluster, *controlplanev1.KubeadmControlPlane, *unstructured.Unstructured) {
-	cluster := &clusterv1.Cluster{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      "foo",
-			Namespace: "test",
-		},
-		Spec: clusterv1.ClusterSpec{
-			ControlPlaneRef: &corev1.ObjectReference{
-				Kind:       "KubeadmControlPlane",
-				Namespace:  "test",
-				Name:       "kcp-foo",
-				APIVersion: controlplanev1.GroupVersion.String(),
-			},
+	cluster := newCluster(&types.NamespacedName{Name: "foo", Namespace: "test"})
+	cluster.Spec = clusterv1.ClusterSpec{
+		ControlPlaneRef: &corev1.ObjectReference{
+			Kind:       "KubeadmControlPlane",
+			Namespace:  "test",
+			Name:       "kcp-foo",
+			APIVersion: controlplanev1.GroupVersion.String(),
 		},
 	}
@@ -960,3 +1053,17 @@ func createMachineNodePair(name string, cluster *clusterv1.Cluster, kcp *control
 
 	return machine, node
 }
+
+// newCluster returns a CAPI Cluster object
+func newCluster(namespacedName *types.NamespacedName) *clusterv1.Cluster {
+	return &clusterv1.Cluster{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "Cluster",
+			APIVersion: clusterv1.GroupVersion.String(),
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: namespacedName.Namespace,
+			Name:      namespacedName.Name,
+		},
+	}
+}
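TestReconcilePaused above checks both pause signals: the Cluster's `spec.paused` field and the paused annotation on the KubeadmControlPlane itself. A hypothetical helper (not part of the PR) that makes the guard it exercises explicit:

```go
package controllers

import (
	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
	controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha3"
)

// isPaused is a sketch of the condition TestReconcilePaused exercises:
// reconciliation must be a no-op when the owning Cluster is paused or when
// the KubeadmControlPlane carries the paused annotation.
func isPaused(cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane) bool {
	if cluster.Spec.Paused {
		return true
	}
	_, found := kcp.GetAnnotations()[clusterv1.PausedAnnotation]
	return found
}
```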
diff --git a/controlplane/kubeadm/controllers/scale_test.go b/controlplane/kubeadm/controllers/scale_test.go
index 07bf145b550e..9aa5bd3962ea 100644
--- a/controlplane/kubeadm/controllers/scale_test.go
+++ b/controlplane/kubeadm/controllers/scale_test.go
@@ -23,7 +23,6 @@ import (
 
 	. "github.com/onsi/gomega"
 
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/client-go/tools/record"
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
@@ -210,12 +209,3 @@ func TestKubeadmControlPlaneReconciler_scaleDownControlPlane_NoError(t *testing.
 	_, err := r.scaleDownControlPlane(context.Background(), cluster, kcp, machines, machines, controlPlane)
 	g.Expect(err).ToNot(HaveOccurred())
 }
-
-func machine(name string) *clusterv1.Machine {
-	return &clusterv1.Machine{
-		ObjectMeta: metav1.ObjectMeta{
-			Namespace: "default",
-			Name:      name,
-		},
-	}
-}
diff --git a/controlplane/kubeadm/controllers/suite_test.go b/controlplane/kubeadm/controllers/suite_test.go
new file mode 100644
index 000000000000..51f880d0d98f
--- /dev/null
+++ b/controlplane/kubeadm/controllers/suite_test.go
@@ -0,0 +1,87 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controllers
+
+import (
+	"path/filepath"
+	"testing"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+
+	"k8s.io/client-go/kubernetes/scheme"
+	"k8s.io/klog"
+	"k8s.io/klog/klogr"
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
+	controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha3"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/envtest"
+	"sigs.k8s.io/controller-runtime/pkg/envtest/printer"
+	logf "sigs.k8s.io/controller-runtime/pkg/log"
+	// +kubebuilder:scaffold:imports
+)
+
+// These tests use Ginkgo (a BDD-style Go testing framework). Refer to
+// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
+
+var k8sClient client.Client
+var testEnv *envtest.Environment
+
+func init() {
+	klog.InitFlags(nil)
+	klog.SetOutput(GinkgoWriter)
+	logf.SetLogger(klogr.New())
+}
+
+func TestAPIs(t *testing.T) {
+	RegisterFailHandler(Fail)
+
+	RunSpecsWithDefaultAndCustomReporters(t,
+		"Controller Suite",
+		[]Reporter{printer.NewlineReporter{}})
+}
+
+var _ = BeforeSuite(func(done Done) {
+	By("bootstrapping test environment")
+	testEnv = &envtest.Environment{
+		CRDDirectoryPaths: []string{
+			filepath.Join("..", "config", "crd", "bases"),
+			filepath.Join("..", "..", "..", "bootstrap", "kubeadm", "config", "crd", "bases"),
+			filepath.Join("..", "..", "..", "config", "crd", "bases"),
+		},
+	}
+
+	cfg, err := testEnv.Start()
+	Expect(err).ToNot(HaveOccurred())
+	Expect(cfg).ToNot(BeNil())
+
+	Expect(controlplanev1.AddToScheme(scheme.Scheme)).To(Succeed())
+	Expect(clusterv1.AddToScheme(scheme.Scheme)).To(Succeed())
+	// +kubebuilder:scaffold:scheme
+
+	k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
+	Expect(err).ToNot(HaveOccurred())
+	Expect(k8sClient).ToNot(BeNil())
+
+	close(done)
+}, 60)
+
+var _ = AfterSuite(func() {
+	By("tearing down the test environment")
+	err := testEnv.Stop()
+	Expect(err).ToNot(HaveOccurred())
+})
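A hypothetical spec (not in the PR) illustrating what the bootstrap above provides: once BeforeSuite has installed the CRDs from the three CRDDirectoryPaths entries, any spec in the package can use `k8sClient` against those APIs. The sketch assumes `"context"` is also imported:

```go
var _ = Describe("test environment", func() {
	It("serves the KubeadmControlPlane API", func() {
		// Fails fast if a CRDDirectoryPaths entry is wrong, because the list
		// call would then hit an unregistered API group.
		kcpList := &controlplanev1.KubeadmControlPlaneList{}
		Expect(k8sClient.List(context.Background(), kcpList)).To(Succeed())
	})
})
```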
"github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/record" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha3" @@ -76,5 +79,117 @@ func TestKubeadmControlPlaneReconciler_upgradeControlPlane(t *testing.T) { g.Expect(err).To(Equal(&capierrors.RequeueAfterError{RequeueAfter: healthCheckFailedRequeueAfter})) } -// TODO -func TestSelectMachineForUpgrade(t *testing.T) {} +func TestSelectMachineForUpgrade(t *testing.T) { + g := NewWithT(t) + + cluster, kcp, genericMachineTemplate := createClusterWithControlPlane() + kcp.Spec.KubeadmConfigSpec.ClusterConfiguration = nil + + m1 := machine("machine-1", withFailureDomain("one")) + m2 := machine("machine-2", withFailureDomain("two"), withTimestamp(metav1.Time{Time: time.Date(1, 0, 0, 0, 0, 0, 0, time.UTC)})) + m3 := machine("machine-3", withFailureDomain("two"), withTimestamp(metav1.Time{Time: time.Date(2, 0, 0, 0, 0, 0, 0, time.UTC)})) + + mc1 := internal.FilterableMachineCollection{ + "machine-1": m1, + "machine-2": m2, + "machine-3": m3, + } + fd1 := clusterv1.FailureDomains{ + "one": failureDomain(true), + "two": failureDomain(true), + "three": failureDomain(true), + "four": failureDomain(false), + } + + controlPlane := &internal.ControlPlane{ + KCP: &controlplanev1.KubeadmControlPlane{}, + Cluster: &clusterv1.Cluster{Status: clusterv1.ClusterStatus{FailureDomains: fd1}}, + Machines: mc1, + } + + fakeClient := newFakeClient( + g, + cluster.DeepCopy(), + kcp.DeepCopy(), + genericMachineTemplate.DeepCopy(), + m2.DeepCopy(), + ) + + r := &KubeadmControlPlaneReconciler{ + Client: fakeClient, + Log: log.Log, + recorder: record.NewFakeRecorder(32), + managementCluster: &fakeManagementCluster{ + Management: &internal.Management{Client: fakeClient}, + Workload: fakeWorkloadCluster{}, + }, + } + + testCases := []struct { + name string + upgradeMachines internal.FilterableMachineCollection + cpMachines internal.FilterableMachineCollection + expectErr bool + expectedMachine clusterv1.Machine + }{ + { + name: "matching controlplane machines and upgrade machines", + upgradeMachines: mc1, + cpMachines: controlPlane.Machines, + expectErr: false, + expectedMachine: clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "machine-2"}}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + + g.Expect(clusterv1.AddToScheme(scheme.Scheme)).To(Succeed()) + + selectedMachine, err := r.selectMachineForUpgrade(context.Background(), cluster, controlPlane.Machines, controlPlane) + + if tc.expectErr { + g.Expect(err).To(HaveOccurred()) + return + } + + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(selectedMachine.Name).To(Equal(tc.expectedMachine.Name)) + }) + } + +} + +func failureDomain(controlPlane bool) clusterv1.FailureDomainSpec { + return clusterv1.FailureDomainSpec{ + ControlPlane: controlPlane, + } +} + +type machineOpt func(*clusterv1.Machine) + +func withFailureDomain(fd string) machineOpt { + return func(m *clusterv1.Machine) { + m.Spec.FailureDomain = &fd + } +} + +func withTimestamp(t metav1.Time) machineOpt { + return func(m *clusterv1.Machine) { + m.CreationTimestamp = t + } +} + +func machine(name string, opts ...machineOpt) *clusterv1.Machine { + m := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: "default", + }, + } + for _, opt := range opts { + opt(m) + } + return m +}