diff --git a/controllers/topology/cluster_controller_test.go b/controllers/topology/cluster_controller_test.go index 8114442f4ddb..e615d0ff0300 100644 --- a/controllers/topology/cluster_controller_test.go +++ b/controllers/topology/cluster_controller_test.go @@ -1,14 +1,27 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package topology import ( - "context" - "log" "time" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime/schema" - v1 "k8s.io/api/core/v1" - "sigs.k8s.io/cluster-api/controllers/topology/internal/contract" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -21,7 +34,6 @@ import ( "testing" - "sigs.k8s.io/cluster-api/controllers/topology/internal/scope" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -73,235 +85,239 @@ func TestClusterReconciler_reconcile(t *testing.T) { Build()). Build() - type args struct { - ctx context.Context - s *scope.Scope + // Create a set of initObjects from the objects created above to add to the API server when the test environment starts. 
+ initObjs := []client.Object{ + clusterClass, + cluster, + infrastructureCluster, + infrastructureMachineTemplate, + bootstrapTemplate, + controlPlane, } - tests := []struct { - name string - args args - initObjs []client.Object - want []client.Object - wantErr bool - }{ - { - name: "getting things up and running", - initObjs: []client.Object{ - clusterClass, - cluster, - infrastructureCluster, - infrastructureMachineTemplate, - bootstrapTemplate, - controlPlane, - }, - want: []client.Object{ - cluster, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)() - for _, obj := range tt.initObjs { - err := env.Create(ctx, obj) - g.Expect(err).ToNot(HaveOccurred()) - } - actualCluster := &clusterv1.Cluster{} - // Get Cluster object and check that relevant fields are set as expected. - g.Eventually(func() bool { - key := client.ObjectKey{Name: clusterName, Namespace: ns.Name} - if err := env.Get(ctx, key, actualCluster); err != nil { - return false - } - // Check if relevant managed topology labels are present. - if ok := ensureClusterTopologyOwnedLabel(actualCluster); !ok { - return false - } - // Check if InfrastructureRef exists and is of the expected Kind and APIVersion. - if ok := referenceExistsWithCorrectKindAndAPIVersion(actualCluster.Spec.InfrastructureRef, - builder.GenericInfrastructureClusterKind, - builder.InfrastructureGroupVersion); !ok { - log.Printf("We failed on infraref %v", actualCluster) - return false - } + testName := "Cluster creation from barebones cluster object and clusterClass." + t.Run(testName, func(t *testing.T) { + defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)() - // Check if ControlPlaneRef exists is of the expected Kind and APIVersion. 
- if ok := referenceExistsWithCorrectKindAndAPIVersion(actualCluster.Spec.ControlPlaneRef, - builder.GenericControlPlaneKind, - builder.ControlPlaneGroupVersion); !ok { - log.Printf("We failed on controlPlane ref %v", actualCluster) - return false - } - log.Printf("Cluster passed with %v", actualCluster) - return true - }, timeout).Should(BeTrue()) + for _, obj := range initObjs { + err := env.Create(ctx, obj) + g.Expect(err).ToNot(HaveOccurred()) + } + actualCluster := &clusterv1.Cluster{} + // Get Cluster object and check that relevant fields are set as expected. + g.Eventually(func() bool { + return assertClusterReconcile(ns.Name, clusterName, actualCluster) + }, timeout).Should(BeTrue()) - // Check if InfrastructureCluster has been created and has the correct labels and annotations. - g.Eventually(func() bool { - _, ok := getAndEnsureLabelsAndAnnotations(*actualCluster.Spec.InfrastructureRef, clusterName) - return ok - }, timeout).Should(BeTrue()) + // Check if InfrastructureCluster has been created and has the correct labels and annotations. + g.Eventually(func() bool { + return assertInfrastructureClusterReconcile(actualCluster) + }, timeout).Should(BeTrue()) - // Check if ControlPlane has been created and has the correct version, replicas, labels and annotations. - cp := &unstructured.Unstructured{} - g.Eventually(func() bool { - var ok bool - if cp, ok = getAndEnsureLabelsAndAnnotations(*actualCluster.Spec.ControlPlaneRef, clusterName); !ok { - return false - } - // Check if the ControlPlane Version matches the version in the Cluster's managed topology spec. 
- version, err := contract.ControlPlane().Version().Get(cp) - if err != nil { - log.Printf("We failed on getting control plane version %v %v", err, cp) - } - if *version != cluster.Spec.Topology.Version { - log.Printf("We failed on comparing control plane version %v", cp) - return false - } - // Check for Control Plane replicase if it's set in the Cluster.Spec.Topology - if cluster.Spec.Topology.ControlPlane.Replicas != nil { - replicas, err := contract.ControlPlane().Replicas().Get(cp) - if err != nil { - log.Printf("We failed on getting control plane replicas %v", cp) - return false - } - // Check for Control Plane replicase if it's set in the Cluster.Spec.Topology - if int32(*replicas) != *cluster.Spec.Topology.ControlPlane.Replicas { - log.Printf("We failed on comparing control plane replicas %v", cp) - return false - } - } - log.Printf("ControlPlane passed with %v", cp) - return true - }, timeout).Should(BeTrue()) + // Check if ControlPlane has been created and has the correct version, replicas, labels and annotations. + g.Eventually(func() bool { + return assertControlPlaneReconcile(actualCluster, clusterClass) + }, timeout).Should(BeTrue()) + g.Eventually(func() bool { + return assertMachineDeploymentsReconcile(actualCluster) + }, timeout).Should(BeTrue()) + }) +} - // If the clusterClass defines an underlying InfrastructureTemplate for the control plane check that it exists and has the correct GVK. - // Check if the correct InfrastructureMachine object has been created. 
- if clusterClass.Spec.ControlPlane.MachineInfrastructure != nil && clusterClass.Spec.ControlPlane.MachineInfrastructure.Ref != nil { - g.Eventually(func() bool { - cpInfra, err := contract.ControlPlane().MachineTemplate().InfrastructureRef().Get(cp) - if err != nil { - log.Printf("We failed on getting control plane infrastructure %v, %v", err, cp) - } - if ok := referenceExistsWithCorrectKindAndAPIVersion(cpInfra, - builder.GenericInfrastructureMachineTemplateKind, - builder.InfrastructureGroupVersion); !ok { - log.Printf("Kind or version wrong from InfrastructureMachineTemplate %v", cp) - return false - } - got, ok := getAndEnsureLabelsAndAnnotations(*cpInfra, clusterName) - if ok { - log.Printf("ControlPlaneInfrastructure passed with %v", got) - } - return ok - }, timeout).Should(BeTrue()) - } - g.Eventually(func() bool { - // List all created machine deployments to ensure the expected numbers are created. - machineDeployments := &clusterv1.MachineDeploymentList{} - err := env.List(ctx, machineDeployments) - if err != nil { - log.Printf("machine deployments not lists %v", err) - return false - } - clusterMDs := make([]clusterv1.MachineDeployment, 0) - // Run through all machine deployments and add only those with the TopologyOwnedLable and the correct - // ClusterLabelName to the items for further testing. - for _, md := range machineDeployments.Items { - // If the machineDeployment doesn't have the ClusterTopologyOwnedLabel and the ClusterLabelName ignore. - if !(ensureClusterTopologyOwnedLabel(&md) && - ensureClusterLabelName(&md, clusterName)) { - continue - } - clusterMDs = append(clusterMDs, md) - } - // If the total number of machine deployments is not as expected return false. 
- if len(clusterMDs) != len(cluster.Spec.Topology.Workers.MachineDeployments) { - return false - } - for _, md := range clusterMDs { - for _, topologyMD := range cluster.Spec.Topology.Workers.MachineDeployments { - // use the ClusterTopologyMachineDeploymentLabel to get the specific machineDeployment to compare to. - if topologyMD.Name != md.GetLabels()[clusterv1.ClusterTopologyMachineDeploymentLabelName] { - continue - } - // Assert that the correct Finalizer has been added to the MachineDeployment. - for _, f := range md.Finalizers { - // Break as soon as we find a matching finalizer. - if f == clusterv1.MachineDeploymentTopologyFinalizer { - break - } - // False if the finalizer is not present on the MachineDeployment. - return false - } - if !(ensureClusterTopologyOwnedLabel(&md) && ensureClusterLabelName(&md, clusterName)) { - log.Printf("machine deployment does not have the correct labels.") - return false - } - // Check replicas for the MachineDeployment. - if *md.Spec.Replicas != *topologyMD.Replicas { - log.Printf("We failed on deployment replicas%v %v", md.Spec.Replicas, topologyMD.Replicas) - return false - } - if *md.Spec.Template.Spec.Version != cluster.Spec.Topology.Version { - log.Printf("We failed on deployment version%v", md) - return false - } +// assertClusterReconcile checks if the Cluster object: +// 1) Has its InfrastructureReference and ControlPlane reference added correctly. +// 2) InfrastructureReference and ControlPlaneRef have the expected Group, Version and Kind. +func assertClusterReconcile(namespace, clusterName string, cluster *clusterv1.Cluster) bool { + key := client.ObjectKey{Name: clusterName, Namespace: namespace} + if err := env.Get(ctx, key, cluster); err != nil { + return false + } + // Check if relevant managed topology labels are present. + if ok := ensureClusterTopologyOwnedLabel(cluster); !ok { + return false + } + // Check if InfrastructureRef exists and is of the expected Kind and APIVersion. 
+	if ok := referenceExistsWithCorrectKindAndAPIVersion(cluster.Spec.InfrastructureRef,
+		builder.GenericInfrastructureClusterKind,
+		builder.InfrastructureGroupVersion); !ok {
+		log.Infof("We failed on infraref %v", cluster)
+		return false
+	}
+
+	// Check if ControlPlaneRef exists and is of the expected Kind and APIVersion.
+	if ok := referenceExistsWithCorrectKindAndAPIVersion(cluster.Spec.ControlPlaneRef,
+		builder.GenericControlPlaneKind,
+		builder.ControlPlaneGroupVersion); !ok {
+		log.Infof("We failed on controlPlane ref %v", cluster)
+		return false
+	}
+	log.Infof("Cluster passed with %v", cluster)
+	return true
+}
+
+// assertInfrastructureClusterReconcile checks if the infrastructureCluster object:
+// 1) Is created correctly.
+// 2) Has the correct labels and annotations.
+func assertInfrastructureClusterReconcile(cluster *clusterv1.Cluster) bool {
+	_, ok := getAndEnsureLabelsAndAnnotations(*cluster.Spec.InfrastructureRef, cluster.Name)
+	return ok
+}

-			log.Print(md.Annotations)
-			log.Print(md.Labels)
-			// test here to ensure that labels an annotations are being properly propagated.
-			if ok := ensureMachineDeploymentLabelsAndAnnotations(md, clusterClass.Spec.Workers.MachineDeployments, topologyMD); !ok {
-				log.Printf("Labels and annotations do not match for %v", md)
-				return false
-			}

+// assertControlPlaneReconcile checks if the ControlPlane object:
+// 1) Is created correctly.
+// 2) Has the correct labels and annotations.
+// 3) If it requires ControlPlane Infrastructure and if so:
+// i) That the infrastructureMachineTemplate is created correctly.
+// ii) That the infrastructureMachineTemplate has the correct labels and annotations.
+func assertControlPlaneReconcile(cluster *clusterv1.Cluster, clusterClass *clusterv1.ClusterClass) bool {
+	cp, ok := getAndEnsureLabelsAndAnnotations(*cluster.Spec.ControlPlaneRef, cluster.Name)
+	if !ok {
+		return false
+	}
+	// Check if the ControlPlane Version matches the version in the Cluster's managed topology spec. 
+	version, err := contract.ControlPlane().Version().Get(cp)
+	if err != nil {
+		log.Infof("We failed on getting control plane version %v %v", err, cp)
+		return false
+	}
+	if *version != cluster.Spec.Topology.Version {
+		log.Infof("We failed on comparing control plane version %v", cp)
+		return false
+	}
+	// Check for Control Plane replicas if it's set in the Cluster.Spec.Topology
+	if cluster.Spec.Topology.ControlPlane.Replicas != nil {
+		replicas, err := contract.ControlPlane().Replicas().Get(cp)
+		if err != nil {
+			log.Infof("We failed on getting control plane replicas %v", cp)
+			return false
+		}
+		// Check for Control Plane replicas if it's set in the Cluster.Spec.Topology
+		if int32(*replicas) != *cluster.Spec.Topology.ControlPlane.Replicas {
+			log.Infof("We failed on comparing control plane replicas %v", cp)
+			return false
+		}
+	}
+	log.Infof("ControlPlane passed with %v", cp)
+	if clusterClass.Spec.ControlPlane.MachineInfrastructure != nil && clusterClass.Spec.ControlPlane.MachineInfrastructure.Ref != nil {
+		cpInfra, err := contract.ControlPlane().MachineTemplate().InfrastructureRef().Get(cp)
+		if err != nil {
+			log.Infof("We failed on getting control plane infrastructure %v, %v", err, cp)
+			return false
+		}
+		if ok := referenceExistsWithCorrectKindAndAPIVersion(cpInfra,
+			builder.GenericInfrastructureMachineTemplateKind,
+			builder.InfrastructureGroupVersion); !ok {
+			log.Infof("Kind or version wrong from InfrastructureMachineTemplate %v", cp)
+			return false
+		}
+		if _, ok := getAndEnsureLabelsAndAnnotations(*cpInfra, cluster.Name); !ok {
+			return false
+		}
+		log.Infof("ControlPlaneInfrastructure passed with %v", cpInfra)
+	}
+	return true
+}

-			// Check if the InfrastructureReference exists. 
- if ok := referenceExistsWithCorrectKindAndAPIVersion(&md.Spec.Template.Spec.InfrastructureRef, - builder.GenericInfrastructureMachineTemplateKind, - builder.InfrastructureGroupVersion); !ok { - log.Printf("We failed on infrastructure version and kind %v", md) - return false - } - // Check if the InfrastructureReference has the expected labels and annotations. - if _, ok := getAndEnsureLabelsAndAnnotations(md.Spec.Template.Spec.InfrastructureRef, clusterName); !ok { - log.Printf("We failed on infrastructure labels and annotations. %v", md) - return false - } - // Check if the Bootstrap reference has the expected Kind and APIVersion. - if ok := referenceExistsWithCorrectKindAndAPIVersion(md.Spec.Template.Spec.Bootstrap.ConfigRef, - builder.GenericBootstrapConfigTemplateKind, - builder.BootstrapGroupVersion); !ok { - log.Printf("We failed on bootstrap version and kind %v", md) - return false - } - // Check if the Bootstrap reference has the expected labels and annotations. - if _, ok := getAndEnsureLabelsAndAnnotations(*md.Spec.Template.Spec.Bootstrap.ConfigRef, clusterName); !ok { - log.Printf("We failed on bootstrap labels and annotations %v", md) - return false - } - } +// assertMachineDeploymentsReconcile checks if the MachineDeployments: +// 1) Are created in the correct number. +// 2) Have the correct labels (TopologyOwned, ClusterName, MachineDeploymentName). +// 3) Have the correct finalizer applied. +// 4) Have the correct replicas and version. +// 6) Have the correct Kind/APIVersion and Labels/Annotations for BoostrapRef and InfrastructureRef templates. +func assertMachineDeploymentsReconcile(cluster *clusterv1.Cluster) bool { + // List all created machine deployments to ensure the expected numbers are c*reated. 
+	machineDeployments := &clusterv1.MachineDeploymentList{}
+	if err := env.List(ctx, machineDeployments); err != nil {
+		log.Infof("machine deployments not listed %v", err)
+		return false
+	}
+	clusterMDs := make([]clusterv1.MachineDeployment, 0)
+	// Run through all machine deployments and add only those with the TopologyOwnedLabel and the correct
+	// ClusterLabelName to the items for further testing.
+	for _, m := range machineDeployments.Items {
+		// If the machineDeployment doesn't have the ClusterTopologyOwnedLabel and the ClusterLabelName ignore.
+		md := m.DeepCopy()
+		if !(ensureClusterTopologyOwnedLabel(md) &&
+			ensureClusterLabelName(md, cluster.Name)) {
+			continue
+		}
+		clusterMDs = append(clusterMDs, *md)
+	}
+	// If the total number of machine deployments is not as expected return false.
+	if len(clusterMDs) != len(cluster.Spec.Topology.Workers.MachineDeployments) {
+		return false
+	}
+	for _, m := range clusterMDs {
+		for _, topologyMD := range cluster.Spec.Topology.Workers.MachineDeployments {
+			md := m.DeepCopy()
+			// use the ClusterTopologyMachineDeploymentLabel to get the specific machineDeployment to compare to.
+			if topologyMD.Name != md.GetLabels()[clusterv1.ClusterTopologyMachineDeploymentLabelName] {
+				continue
+			}
+			// Assert that the correct Finalizer has been added to the MachineDeployment.
+			for _, f := range md.Finalizers {
+				// Break as soon as we find a matching finalizer.
+				if f == clusterv1.MachineDeploymentTopologyFinalizer {
+					break
+				}
+				// False if the finalizer is not present on the MachineDeployment.
+				return false
+			}
+			if !(ensureClusterTopologyOwnedLabel(md) && ensureClusterLabelName(md, cluster.Name)) {
+				log.Infof("machine deployment does not have the correct labels.")
+				return false
+			}
+			// Check replicas for the MachineDeployment. 
+ if *md.Spec.Replicas != *topologyMD.Replicas { + log.Infof("We failed on deployment replicas%v %v", md.Spec.Replicas, topologyMD.Replicas) + return false + } + if *md.Spec.Template.Spec.Version != cluster.Spec.Topology.Version { + log.Infof("We failed on deployment version%v", md) + return false + } + // Check if the InfrastructureReference exists. + if ok := referenceExistsWithCorrectKindAndAPIVersion(&md.Spec.Template.Spec.InfrastructureRef, + builder.GenericInfrastructureMachineTemplateKind, + builder.InfrastructureGroupVersion); !ok { + log.Infof("We failed on infrastructure version and kind %v", md) + return false + } + // Check if the InfrastructureReference has the expected labels and annotations. + if _, ok := getAndEnsureLabelsAndAnnotations(md.Spec.Template.Spec.InfrastructureRef, cluster.Name); !ok { + log.Infof("We failed on infrastructure labels and annotations. %v", md) + return false + } + // Check if the Bootstrap reference has the expected Kind and APIVersion. + if ok := referenceExistsWithCorrectKindAndAPIVersion(md.Spec.Template.Spec.Bootstrap.ConfigRef, + builder.GenericBootstrapConfigTemplateKind, + builder.BootstrapGroupVersion); !ok { + log.Infof("We failed on bootstrap version and kind %v", md) + return false + } + // Check if the Bootstrap reference has the expected labels and annotations. + if _, ok := getAndEnsureLabelsAndAnnotations(*md.Spec.Template.Spec.Bootstrap.ConfigRef, cluster.Name); !ok { + log.Infof("We failed on bootstrap labels and annotations %v", md) + return false + } + } + log.Infof("MachineDeployments passed with %v", m) } + return true } // getAndEnsureLabelsAndAnnotations pulls the template referenced in the ObjectReference from the API server, checks for: -// 1) The ClusterTopologyOwnedLabel -// 2) The correct ClusterLabelName -// 3) The annotation stating where the template was cloned from +// 1) The ClusterTopologyOwnedLabel. +// 2) The correct ClusterLabelName. 
+// 3) The annotation stating where the template was cloned from. // The function returns the unstructured object and a bool indicating if it passed all tests. -func getAndEnsureLabelsAndAnnotations(template v1.ObjectReference, clusterName string) (*unstructured.Unstructured, bool) { +func getAndEnsureLabelsAndAnnotations(template corev1.ObjectReference, clusterName string) (*unstructured.Unstructured, bool) { key := client.ObjectKey{Name: template.Name, Namespace: template.Namespace} got := &unstructured.Unstructured{} got.SetKind(template.Kind) got.SetAPIVersion(template.APIVersion) if err := env.Get(ctx, key, got); err != nil { - log.Printf("We failed on get %v %v", key, got) - log.Printf(err.Error()) + log.Infof("%v failed on get %v %v", err, key, got) return nil, false } if !ensureLabelsAndAnnotations(got, clusterName) { @@ -337,46 +353,9 @@ func ensureTemplateClonedFromNameAnnotation(got client.Object) bool { return ok } -func referenceExistsWithCorrectKindAndAPIVersion(reference *v1.ObjectReference, kind string, apiVersion schema.GroupVersion) bool { +// referenceExistsWithCorrectKindAndAPIVersion asserts that the passed ObjectReference is not nil and that it has the correct kind and apiVersion. 
+func referenceExistsWithCorrectKindAndAPIVersion(reference *corev1.ObjectReference, kind string, apiVersion schema.GroupVersion) bool { return reference != nil && reference.Kind == kind && reference.APIVersion == apiVersion.String() } - -func ensureMachineDeploymentLabelsAndAnnotations(got clusterv1.MachineDeployment, workerClasses []clusterv1.MachineDeploymentClass, workerTopology clusterv1.MachineDeploymentTopology) bool { - var workerClass clusterv1.MachineDeploymentClass - // Get the correct MachineDeploymentClass from the passed list - for _, md := range workerClasses { - if md.Class == workerTopology.Class { - workerClass = md - break - } - } - // Check labels and annotations are passed correctly from the clusterclass.spec.workers.machineDeploymentClass to the MachineDeployment. - for l, v := range workerClass.Template.Metadata.Labels { - if got.Labels[l] != v { - log.Printf("label %v not equal. Workerclass has %v. MachineDeployment has %v.", l, v, got.Labels[l]) - return false - } - } - for l, v := range workerClass.Template.Metadata.Annotations { - if got.Labels[l] != v { - log.Printf("annotation %v not equal. Workerclass has %v. MachineDeployment has %v.", l, v, got.Labels[l]) - return false - } - } - // Check labels and annotations are passed correctly from the cluster.spec.topology.workers.machineDeploymentClass to the MachineDeployment. - for l, v := range workerTopology.Metadata.Labels { - if got.Labels[l] != v { - log.Printf("label %v not equal. Workertopology has %v. MachineDeployment has %v.", l, v, got.Labels[l]) - return false - } - } - for l, v := range workerTopology.Metadata.Annotations { - if got.Labels[l] != v { - log.Printf("annotation %v not equal. Workertopology has %v. 
MachineDeployment has %v.", l, v, got.Labels[l]) - return false - } - } - return true -} diff --git a/controllers/topology/suite_test.go b/controllers/topology/suite_test.go index df32d6c92a76..ec178936cb11 100644 --- a/controllers/topology/suite_test.go +++ b/controllers/topology/suite_test.go @@ -23,6 +23,8 @@ import ( "testing" "time" + tlog "sigs.k8s.io/cluster-api/controllers/topology/internal/log" + . "github.com/onsi/gomega" "sigs.k8s.io/controller-runtime/pkg/client" @@ -42,6 +44,7 @@ var ( ctx = ctrl.SetupSignalHandler() fakeScheme = runtime.NewScheme() env *envtest.Environment + log tlog.Logger ) func init() { @@ -55,6 +58,7 @@ func TestMain(m *testing.M) { panic(fmt.Sprintf("unable to setup index: %v", err)) } } + log = tlog.LoggerFrom(ctx) setupReconcilers := func(ctx context.Context, mgr ctrl.Manager) { unstructuredCachingClient, err := client.NewDelegatingClient( client.NewDelegatingClientInput{ @@ -76,12 +80,9 @@ func TestMain(m *testing.M) { }).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: 5}); err != nil { os.Exit(1) } - } - SetDefaultEventuallyPollingInterval(100 * time.Millisecond) SetDefaultEventuallyTimeout(timeout) - os.Exit(envtest.Run(ctx, envtest.RunInput{ M: m, ManagerUncachedObjs: []client.Object{}, diff --git a/internal/builder/builders.go b/internal/builder/builders.go index 18dcd47c8cd5..d097c0d34625 100644 --- a/internal/builder/builders.go +++ b/internal/builder/builders.go @@ -63,6 +63,7 @@ func (c *ClusterBuilder) WithClusterClass(clusterClass string) *ClusterBuilder { return c } +// WithTopology adds the passed Topology object to the ClusterBuilder. func (c *ClusterBuilder) WithTopology(topology *clusterv1.Topology) *ClusterBuilder { c.topology = topology return c @@ -95,6 +96,7 @@ func (c *ClusterBuilder) Build() *clusterv1.Cluster { return obj } +// ClusterTopologyBuilder contains the fields needed to build a testable ClusterTopology. 
type ClusterTopologyBuilder struct { class string workers *clusterv1.WorkersTopology @@ -102,6 +104,7 @@ type ClusterTopologyBuilder struct { controlPlaneReplicas int32 } +// ClusterTopology returns a ClusterTopologyBuilder with the passed ClusterClass name. func ClusterTopology(clusterClass string) *ClusterTopologyBuilder { return &ClusterTopologyBuilder{ class: clusterClass, @@ -111,21 +114,25 @@ func ClusterTopology(clusterClass string) *ClusterTopologyBuilder { } } +// WithVersion adds the passed version to the ClusterTopologyBuilder. func (c *ClusterTopologyBuilder) WithVersion(version string) *ClusterTopologyBuilder { c.version = version return c } +// WithControlPlaneReplicas adds the passed replicas value to the ClusterTopologyBuilder. func (c *ClusterTopologyBuilder) WithControlPlaneReplicas(replicas int32) *ClusterTopologyBuilder { c.controlPlaneReplicas = replicas return c } +// WithWorkerTopology passes the full MachineDeploymentTopology and adds it to an existing list in the ClusterTopologyBuilder. func (c *ClusterTopologyBuilder) WithWorkerTopology(mdc clusterv1.MachineDeploymentTopology) *ClusterTopologyBuilder { c.workers.MachineDeployments = append(c.workers.MachineDeployments, mdc) return c } +// Build returns a testable cluster Topology object with any values passed to the builder. 
func (c *ClusterTopologyBuilder) Build() *clusterv1.Topology { return &clusterv1.Topology{ Class: c.class, @@ -137,49 +144,33 @@ func (c *ClusterTopologyBuilder) Build() *clusterv1.Topology { } } -type WorkerTopologyBuilder struct { - machineDeploymentTopologies []clusterv1.MachineDeploymentTopology -} - -func WorkerTopology() *clusterv1.WorkersTopology { - return &clusterv1.WorkersTopology{ - MachineDeployments: []clusterv1.MachineDeploymentTopology{}, - } -} - -func (w *WorkerTopologyBuilder) WithMachineDeploymentTopology(mdt clusterv1.MachineDeploymentTopology) *WorkerTopologyBuilder { - w.machineDeploymentTopologies = append(w.machineDeploymentTopologies, mdt) - return w -} - -func (w *WorkerTopologyBuilder) Build() *clusterv1.WorkersTopology { - return &clusterv1.WorkersTopology{ - MachineDeployments: w.machineDeploymentTopologies, - } -} - +// MachineDeploymentTopologyBuilder holds the values needed to create a testable MachineDeploymentTopology. type MachineDeploymentTopologyBuilder struct { class string name string replicas *int32 } +// MachineDeploymentTopology returns a builder used to create a testable MachineDeploymentTopology. func MachineDeploymentTopology(class string) *MachineDeploymentTopologyBuilder { return &MachineDeploymentTopologyBuilder{ class: class, } } +// WithName adds a name string used as the MachineDeploymentTopology name. func (m *MachineDeploymentTopologyBuilder) WithName(name string) *MachineDeploymentTopologyBuilder { m.name = name return m } +// WithReplicas adds a replicas value used as the MachineDeploymentTopology replicas value. func (m *MachineDeploymentTopologyBuilder) WithReplicas(replicas int32) *MachineDeploymentTopologyBuilder { m.replicas = &replicas return m } +// Build returns a testable MachineDeploymentTopology with any values passed to the builder. func (m *MachineDeploymentTopologyBuilder) Build() clusterv1.MachineDeploymentTopology { return clusterv1.MachineDeploymentTopology{ Class: m.class,