From 40ea63e0b1a7f920965b021223b3b8c463ffac67 Mon Sep 17 00:00:00 2001
From: Chuck Ha
Date: Fri, 28 Feb 2020 18:28:40 -0500
Subject: [PATCH] Wire up kubeadm control plane Ready status

Signed-off-by: Chuck Ha
---
 .../kubeadm_control_plane_controller.go       |  75 +++----
 .../kubeadm_control_plane_controller_test.go  | 186 ++++++++----------
 .../kubeadm/internal/workload_cluster.go      |  41 ++++
 test/framework/control_plane.go               |  36 +++-
 test/infrastructure/docker/e2e/docker_test.go |  32 +--
 5 files changed, 197 insertions(+), 173 deletions(-)

diff --git a/controlplane/kubeadm/controllers/kubeadm_control_plane_controller.go b/controlplane/kubeadm/controllers/kubeadm_control_plane_controller.go
index 79a95501bc45..fd75ea1ab6cb 100644
--- a/controlplane/kubeadm/controllers/kubeadm_control_plane_controller.go
+++ b/controlplane/kubeadm/controllers/kubeadm_control_plane_controller.go
@@ -44,7 +44,6 @@ import (
 	bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha3"
 	kubeadmv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/types/v1beta1"
 	"sigs.k8s.io/cluster-api/controllers/external"
-	"sigs.k8s.io/cluster-api/controllers/remote"
 	controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha3"
 	"sigs.k8s.io/cluster-api/controlplane/kubeadm/internal"
 	"sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/hash"
@@ -93,8 +92,6 @@ type KubeadmControlPlaneReconciler struct {
 	controller controller.Controller
 	recorder   record.EventRecorder
 
-	remoteClientGetter remote.ClusterClientGetter
-
 	managementCluster managementCluster
 }
 
@@ -116,9 +113,9 @@ func (r *KubeadmControlPlaneReconciler) SetupWithManager(mgr ctrl.Manager, optio
 	r.scheme = mgr.GetScheme()
 	r.controller = c
 	r.recorder = mgr.GetEventRecorderFor("kubeadm-control-plane-controller")
-	r.managementCluster = &internal.ManagementCluster{Client: r.Client}
-	if r.remoteClientGetter == nil {
-		r.remoteClientGetter = remote.NewClusterClient
+
+	if r.managementCluster == nil {
+		r.managementCluster = &internal.ManagementCluster{Client: r.Client}
 	}
 
 	return nil
@@ -305,36 +302,32 @@ func (r *KubeadmControlPlaneReconciler) updateStatus(ctx context.Context, kcp *c
 	kcp.Status.UpdatedReplicas = int32(len(currentMachines))
 
 	replicas := int32(len(ownedMachines))
+
+	// set basic data that does not require interacting with the workload cluster
 	kcp.Status.Replicas = replicas
-	readyMachines := int32(0)
+	kcp.Status.ReadyReplicas = 0
+	kcp.Status.UnavailableReplicas = replicas
 
-	remoteClient, err := r.remoteClientGetter(ctx, r.Client, util.ObjectKey(cluster), r.scheme)
+	workloadCluster, err := r.managementCluster.GetWorkloadCluster(ctx, util.ObjectKey(cluster))
 	if err != nil {
-		if cause := errors.Cause(err); !apierrors.IsNotFound(cause) && !apierrors.IsTimeout(cause) {
-			return errors.Wrap(err, "failed to create remote cluster client")
-		}
-	} else {
-		for i := range ownedMachines {
-			node, err := getMachineNode(ctx, remoteClient, ownedMachines[i])
-			if err != nil {
-				return errors.Wrap(err, "failed to get referenced Node")
-			}
-			if node == nil {
-				continue
-			}
-			if node.Spec.ProviderID != "" {
-				readyMachines++
-			}
-		}
+		return errors.Wrap(err, "failed to create remote cluster client")
+	}
+	status, err := workloadCluster.ClusterStatus(ctx)
+	if err != nil {
+		return err
 	}
-	kcp.Status.ReadyReplicas = readyMachines
-	kcp.Status.UnavailableReplicas = replicas - readyMachines
+	kcp.Status.ReadyReplicas = status.ReadyNodes
+	kcp.Status.UnavailableReplicas = replicas - status.ReadyNodes
 
-	if !kcp.Status.Initialized {
-		if kcp.Status.ReadyReplicas > 0 {
-			kcp.Status.Initialized = true
-		}
+	// This only gets initialized once and does not change if the kubeadm config map goes away.
+	if status.HasKubeadmConfig {
+		kcp.Status.Initialized = true
+	}
+
+	if kcp.Status.ReadyReplicas > 0 {
+		kcp.Status.Ready = true
 	}
+
 	return nil
 }
 
@@ -852,25 +845,3 @@ func (r *KubeadmControlPlaneReconciler) ClusterToKubeadmControlPlane(o handler.M
 
 	return nil
 }
-
-func getMachineNode(ctx context.Context, crClient client.Client, machine *clusterv1.Machine) (*corev1.Node, error) {
-	nodeRef := machine.Status.NodeRef
-	if nodeRef == nil {
-		return nil, nil
-	}
-
-	node := &corev1.Node{}
-	err := crClient.Get(
-		ctx,
-		client.ObjectKey{Name: nodeRef.Name},
-		node,
-	)
-	if err != nil {
-		if apierrors.IsNotFound(errors.Cause(err)) {
-			return nil, nil
-		}
-		return nil, err
-	}
-
-	return node, nil
-}
diff --git a/controlplane/kubeadm/controllers/kubeadm_control_plane_controller_test.go b/controlplane/kubeadm/controllers/kubeadm_control_plane_controller_test.go
index 9da2f4c6f6a8..b7ff759407aa 100644
--- a/controlplane/kubeadm/controllers/kubeadm_control_plane_controller_test.go
+++ b/controlplane/kubeadm/controllers/kubeadm_control_plane_controller_test.go
@@ -42,7 +42,6 @@ import (
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
 	bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha3"
 	kubeadmv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/types/v1beta1"
-	fakeremote "sigs.k8s.io/cluster-api/controllers/remote/fake"
 	controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha3"
 	"sigs.k8s.io/cluster-api/controlplane/kubeadm/internal"
 	"sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/hash"
@@ -439,11 +438,13 @@ func TestReconcileClusterNoEndpoints(t *testing.T) {
 	log.SetLogger(klogr.New())
 
 	r := &KubeadmControlPlaneReconciler{
-		Client:             fakeClient,
-		Log:                log.Log,
-		remoteClientGetter: fakeremote.NewClusterClient,
-		recorder:           record.NewFakeRecorder(32),
-		managementCluster:  &internal.ManagementCluster{Client: fakeClient},
+		Client:   fakeClient,
+		Log:      log.Log,
+		recorder: record.NewFakeRecorder(32),
+		managementCluster: &fakeManagementCluster{
+			ManagementCluster: &internal.ManagementCluster{Client: fakeClient},
+			Cluster:           &internal.Cluster{Client: fakeClient},
+		},
 	}
 
 	result, err := r.Reconcile(ctrl.Request{NamespacedName: util.ObjectKey(kcp)})
@@ -532,14 +533,15 @@ func TestReconcileInitializeControlPlane(t *testing.T) {
 	log.SetLogger(klogr.New())
 
 	expectedLabels := map[string]string{clusterv1.ClusterLabelName: "foo"}
-
 	r := &KubeadmControlPlaneReconciler{
-		Client:             fakeClient,
-		Log:                log.Log,
-		remoteClientGetter: fakeremote.NewClusterClient,
-		scheme:             scheme.Scheme,
-		recorder:           record.NewFakeRecorder(32),
-		managementCluster:  &internal.ManagementCluster{Client: fakeClient},
+		Client:   fakeClient,
+		Log:      log.Log,
+		scheme:   scheme.Scheme,
+		recorder: record.NewFakeRecorder(32),
+		managementCluster: &fakeManagementCluster{
+			ManagementCluster: &internal.ManagementCluster{Client: fakeClient},
+			Cluster:           &internal.Cluster{Client: fakeClient},
+		},
 	}
 
 	result, err := r.Reconcile(ctrl.Request{NamespacedName: util.ObjectKey(kcp)})
@@ -692,60 +694,6 @@ func TestKubeadmControlPlaneReconciler_generateKubeadmConfig(t *testing.T) {
 	g.Expect(bootstrapConfig.Spec).To(Equal(spec))
 }
 
-func Test_getMachineNodeNoNodeRef(t *testing.T) {
-	g := NewWithT(t)
-
-	fakeClient := newFakeClient(g)
-
-	m := &clusterv1.Machine{}
-	node, err := getMachineNode(context.Background(), fakeClient, m)
-	g.Expect(err).NotTo(HaveOccurred())
-	g.Expect(node).To(BeNil())
-}
-
-func Test_getMachineNodeNotFound(t *testing.T) {
-	g := NewWithT(t)
-
-	fakeClient := newFakeClient(g)
-
-	m := &clusterv1.Machine{
-		Status: clusterv1.MachineStatus{
-			NodeRef: &corev1.ObjectReference{
-				Kind:       "Node",
-				APIVersion: corev1.SchemeGroupVersion.String(),
-				Name:       "notfound",
-			},
-		},
-	}
-	node, err := getMachineNode(context.Background(), fakeClient, m)
-	g.Expect(err).NotTo(HaveOccurred())
-	g.Expect(node).To(BeNil())
-}
-
-func Test_getMachineNodeFound(t *testing.T) {
-	g := NewWithT(t)
-
-	node := &corev1.Node{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: "testNode",
-		},
-	}
-	fakeClient := newFakeClient(g, node.DeepCopy())
-
-	m := &clusterv1.Machine{
-		Status: clusterv1.MachineStatus{
-			NodeRef: &corev1.ObjectReference{
-				Kind:       "Node",
-				APIVersion: corev1.SchemeGroupVersion.String(),
-				Name:       "testNode",
-			},
-		},
-	}
-	node, err := getMachineNode(context.Background(), fakeClient, m)
-	g.Expect(err).NotTo(HaveOccurred())
-	g.Expect(node).To(Equal(node))
-}
-
 func TestKubeadmControlPlaneReconciler_updateStatusNoMachines(t *testing.T) {
 	g := NewWithT(t)
 
@@ -769,12 +717,14 @@ func TestKubeadmControlPlaneReconciler_updateStatusNoMachines(t *testing.T) {
 	log.SetLogger(klogr.New())
 
 	r := &KubeadmControlPlaneReconciler{
-		Client:             fakeClient,
-		Log:                log.Log,
-		remoteClientGetter: fakeremote.NewClusterClient,
-		scheme:             scheme.Scheme,
-		managementCluster:  &internal.ManagementCluster{Client: fakeClient},
-		recorder:           record.NewFakeRecorder(32),
+		Client: fakeClient,
+		Log:    log.Log,
+		scheme: scheme.Scheme,
+		managementCluster: &fakeManagementCluster{
+			Machines: map[string]*clusterv1.Machine{},
+			Cluster:  &internal.Cluster{Client: fakeClient},
+		},
+		recorder: record.NewFakeRecorder(32),
 	}
 
 	g.Expect(r.updateStatus(context.Background(), kcp, cluster)).To(Succeed())
@@ -809,7 +759,8 @@ func createMachineNodePair(name string, cluster *clusterv1.Cluster, kcp *control
 
 	node := &corev1.Node{
 		ObjectMeta: metav1.ObjectMeta{
-			Name: name,
+			Name:   name,
+			Labels: map[string]string{"node-role.kubernetes.io/master": ""},
 		},
 	}
 
@@ -845,23 +796,27 @@ func TestKubeadmControlPlaneReconciler_updateStatusAllMachinesNotReady(t *testin
 	kcp.Default()
 	g.Expect(kcp.ValidateCreate()).To(Succeed())
 
+	machines := map[string]*clusterv1.Machine{}
 	objs := []runtime.Object{cluster.DeepCopy(), kcp.DeepCopy()}
 	for i := 0; i < 3; i++ {
 		name := fmt.Sprintf("test-%d", i)
 		m, n := createMachineNodePair(name, cluster, kcp, false)
-		objs = append(objs, m, n)
+		objs = append(objs, n)
+		machines[m.Name] = m
 	}
 	fakeClient := newFakeClient(g, objs...)
 
 	log.SetLogger(klogr.New())
 
 	r := &KubeadmControlPlaneReconciler{
-		Client:             fakeClient,
-		Log:                log.Log,
-		remoteClientGetter: fakeremote.NewClusterClient,
-		scheme:             scheme.Scheme,
-		managementCluster:  &internal.ManagementCluster{Client: fakeClient},
-		recorder:           record.NewFakeRecorder(32),
+		Client: fakeClient,
+		Log:    log.Log,
+		scheme: scheme.Scheme,
+		managementCluster: &fakeManagementCluster{
+			Machines: machines,
+			Cluster:  &internal.Cluster{Client: fakeClient},
+		},
+		recorder: record.NewFakeRecorder(32),
 	}
 
 	g.Expect(r.updateStatus(context.Background(), kcp, cluster)).To(Succeed())
@@ -875,6 +830,15 @@ func TestKubeadmControlPlaneReconciler_updateStatusAllMachinesNotReady(t *testin
 	g.Expect(kcp.Status.Ready).To(BeFalse())
 }
 
+func kubeadmConfigMap() *corev1.ConfigMap {
+	return &corev1.ConfigMap{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "kubeadm-config",
+			Namespace: metav1.NamespaceSystem,
+		},
+	}
+}
+
 func TestKubeadmControlPlaneReconciler_updateStatusAllMachinesReady(t *testing.T) {
 	g := NewWithT(t)
 
@@ -894,23 +858,27 @@ func TestKubeadmControlPlaneReconciler_updateStatusAllMachinesReady(t *testing.T
 	kcp.Default()
 	g.Expect(kcp.ValidateCreate()).To(Succeed())
 
-	objs := []runtime.Object{cluster.DeepCopy(), kcp.DeepCopy()}
+	objs := []runtime.Object{cluster.DeepCopy(), kcp.DeepCopy(), kubeadmConfigMap()}
+	machines := map[string]*clusterv1.Machine{}
 	for i := 0; i < 3; i++ {
 		name := fmt.Sprintf("test-%d", i)
 		m, n := createMachineNodePair(name, cluster, kcp, true)
-		objs = append(objs, m, n)
+		objs = append(objs, n)
+		machines[m.Name] = m
 	}
 	fakeClient := newFakeClient(g, objs...)
 
 	log.SetLogger(klogr.New())
 
 	r := &KubeadmControlPlaneReconciler{
-		Client:             fakeClient,
-		Log:                log.Log,
-		remoteClientGetter: fakeremote.NewClusterClient,
-		scheme:             scheme.Scheme,
-		managementCluster:  &internal.ManagementCluster{Client: fakeClient},
-		recorder:           record.NewFakeRecorder(32),
+		Client: fakeClient,
+		Log:    log.Log,
+		scheme: scheme.Scheme,
+		managementCluster: &fakeManagementCluster{
+			Machines: machines,
+			Cluster:  &internal.Cluster{Client: fakeClient},
+		},
+		recorder: record.NewFakeRecorder(32),
 	}
 
 	g.Expect(r.updateStatus(context.Background(), kcp, cluster)).To(Succeed())
@@ -921,9 +889,7 @@ func TestKubeadmControlPlaneReconciler_updateStatusAllMachinesReady(t *testing.T
 	g.Expect(kcp.Status.FailureMessage).To(BeNil())
 	g.Expect(kcp.Status.FailureReason).To(BeEquivalentTo(""))
 	g.Expect(kcp.Status.Initialized).To(BeTrue())
-
-	// TODO: will need to be updated once we start handling Ready
-	g.Expect(kcp.Status.Ready).To(BeFalse())
+	g.Expect(kcp.Status.Ready).To(BeTrue())
 }
 
 func TestKubeadmControlPlaneReconciler_updateStatusMachinesReadyMixed(t *testing.T) {
@@ -944,26 +910,29 @@ func TestKubeadmControlPlaneReconciler_updateStatusMachinesReadyMixed(t *testing
 	}
 	kcp.Default()
 	g.Expect(kcp.ValidateCreate()).To(Succeed())
-
+	machines := map[string]*clusterv1.Machine{}
 	objs := []runtime.Object{cluster.DeepCopy(), kcp.DeepCopy()}
 	for i := 0; i < 4; i++ {
 		name := fmt.Sprintf("test-%d", i)
 		m, n := createMachineNodePair(name, cluster, kcp, false)
-		objs = append(objs, m, n)
+		machines[m.Name] = m
+		objs = append(objs, n)
 	}
 	m, n := createMachineNodePair("testReady", cluster, kcp, true)
-	objs = append(objs, m, n)
-
+	objs = append(objs, n, kubeadmConfigMap())
+	machines[m.Name] = m
 	fakeClient := newFakeClient(g, objs...)
 	log.SetLogger(klogr.New())
 
 	r := &KubeadmControlPlaneReconciler{
-		Client:             fakeClient,
-		Log:                log.Log,
-		remoteClientGetter: fakeremote.NewClusterClient,
-		scheme:             scheme.Scheme,
-		managementCluster:  &internal.ManagementCluster{Client: fakeClient},
-		recorder:           record.NewFakeRecorder(32),
+		Client: fakeClient,
+		Log:    log.Log,
+		scheme: scheme.Scheme,
+		managementCluster: &fakeManagementCluster{
+			Machines: machines,
+			Cluster:  &internal.Cluster{Client: fakeClient},
+		},
+		recorder: record.NewFakeRecorder(32),
 	}
 
 	g.Expect(r.updateStatus(context.Background(), kcp, cluster)).To(Succeed())
@@ -974,9 +943,7 @@ func TestKubeadmControlPlaneReconciler_updateStatusMachinesReadyMixed(t *testing
 	g.Expect(kcp.Status.FailureMessage).To(BeNil())
 	g.Expect(kcp.Status.FailureReason).To(BeEquivalentTo(""))
 	g.Expect(kcp.Status.Initialized).To(BeTrue())
-
-	// TODO: will need to be updated once we start handling Ready
-	g.Expect(kcp.Status.Ready).To(BeFalse())
+	g.Expect(kcp.Status.Ready).To(BeTrue())
 }
 
 func TestCloneConfigsAndGenerateMachine(t *testing.T) {
@@ -1221,16 +1188,21 @@ func TestKubeadmControlPlaneReconciler_reconcileDelete(t *testing.T) {
 }
 
 type fakeManagementCluster struct {
+	*internal.ManagementCluster
 	ControlPlaneHealthy bool
 	EtcdHealthy         bool
 	Machines            internal.FilterableMachineCollection
+	Cluster             *internal.Cluster
 }
 
 func (f *fakeManagementCluster) GetWorkloadCluster(_ context.Context, _ client.ObjectKey) (*internal.Cluster, error) {
-	return nil, nil
+	return f.Cluster, nil
 }
 
-func (f *fakeManagementCluster) GetMachinesForCluster(_ context.Context, _ client.ObjectKey, _ ...internal.MachineFilter) (internal.FilterableMachineCollection, error) {
+func (f *fakeManagementCluster) GetMachinesForCluster(c context.Context, n client.ObjectKey, filters ...internal.MachineFilter) (internal.FilterableMachineCollection, error) {
+	if f.Machines == nil {
+		return f.ManagementCluster.GetMachinesForCluster(c, n, filters...)
+	}
 	return f.Machines, nil
 }
 
diff --git a/controlplane/kubeadm/internal/workload_cluster.go b/controlplane/kubeadm/internal/workload_cluster.go
index 5df5b2c0f7c9..07d4a71f7450 100644
--- a/controlplane/kubeadm/internal/workload_cluster.go
+++ b/controlplane/kubeadm/internal/workload_cluster.go
@@ -36,6 +36,7 @@ import (
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
 	"sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/etcd"
 	etcdutil "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/etcd/util"
+	"sigs.k8s.io/cluster-api/util"
 	"sigs.k8s.io/cluster-api/util/certs"
 	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
 )
@@ -394,6 +395,46 @@ func (c *Cluster) ReconcileKubeletRBACRole(ctx context.Context, version semver.V
 	return nil
 }
 
+// ClusterStatus holds status information about the cluster.
+type ClusterStatus struct {
+	// Nodes is the total number of control plane nodes.
+	Nodes int32
+	// ReadyNodes is the number of control plane nodes that are reporting ready.
+	ReadyNodes int32
+	// HasKubeadmConfig will be true if the kubeadm config map has been uploaded, false otherwise.
+	HasKubeadmConfig bool
+}
+
+// ClusterStatus returns the status of the cluster.
+func (c *Cluster) ClusterStatus(ctx context.Context) (ClusterStatus, error) {
+	status := ClusterStatus{}
+
+	// count the control plane nodes
+	nodes, err := c.getControlPlaneNodes(ctx)
+	if err != nil {
+		return status, err
+	}
+
+	for _, node := range nodes.Items {
+		nodeCopy := node
+		status.Nodes++
+		if util.IsNodeReady(&nodeCopy) {
+			status.ReadyNodes++
+		}
+	}
+
+	// find the kubeadm config
+	kubeadmConfigKey := ctrlclient.ObjectKey{
+		Name:      "kubeadm-config",
+		Namespace: metav1.NamespaceSystem,
+	}
+	err = c.Client.Get(ctx, kubeadmConfigKey, &corev1.ConfigMap{})
+	// TODO: Consider if this should only return false if the error is IsNotFound.
+	// TODO: Consider adding a third state of 'unknown' when there is an error retrieving the config map.
+	status.HasKubeadmConfig = err == nil
+	return status, nil
+}
+
 func generateClientCert(caCertEncoded, caKeyEncoded []byte) (tls.Certificate, error) {
 	privKey, err := certs.NewPrivateKey()
 	if err != nil {
diff --git a/test/framework/control_plane.go b/test/framework/control_plane.go
index c9fcb11beec5..b2d7285024d7 100644
--- a/test/framework/control_plane.go
+++ b/test/framework/control_plane.go
@@ -244,6 +244,39 @@ func WaitForKubeadmControlPlaneMachinesToExist(ctx context.Context, input WaitFo
 	}, intervals...).Should(Equal(int(*input.ControlPlane.Spec.Replicas)))
 }
 
+// WaitForOneKubeadmControlPlaneMachineToExistInput is the input for WaitForOneKubeadmControlPlaneMachineToExist.
+type WaitForOneKubeadmControlPlaneMachineToExistInput struct {
+	Lister       Lister
+	Cluster      *clusterv1.Cluster
+	ControlPlane *controlplanev1.KubeadmControlPlane
+}
+
+// WaitForOneKubeadmControlPlaneMachineToExist will wait until at least one control plane machine has a node ref.
+func WaitForOneKubeadmControlPlaneMachineToExist(ctx context.Context, input WaitForOneKubeadmControlPlaneMachineToExistInput, intervals ...interface{}) {
+	By("waiting for one control plane node to exist")
+	inClustersNamespaceListOption := client.InNamespace(input.Cluster.Namespace)
+	// ControlPlane labels
+	matchClusterListOption := client.MatchingLabels{
+		clusterv1.MachineControlPlaneLabelName: "",
+		clusterv1.ClusterLabelName:             input.Cluster.Name,
+	}
+
+	Eventually(func() (bool, error) {
+		machineList := &clusterv1.MachineList{}
+		if err := input.Lister.List(ctx, machineList, inClustersNamespaceListOption, matchClusterListOption); err != nil {
+			fmt.Println(err)
+			return false, err
+		}
+		count := 0
+		for _, machine := range machineList.Items {
+			if machine.Status.NodeRef != nil {
+				count++
+			}
+		}
+		return count > 0, nil
+	}, intervals...).Should(BeTrue())
+}
+
 // WaitForControlPlaneToBeReadyInput is the input for WaitForControlPlaneToBeReady.
 type WaitForControlPlaneToBeReadyInput struct {
 	Getter       Getter
@@ -264,8 +297,7 @@ func WaitForControlPlaneToBeReady(ctx context.Context, input WaitForControlPlane
 		if err := input.Getter.Get(ctx, key, controlplane); err != nil {
 			return false, err
 		}
-		// TODO: Return status.Ready instead...
-		return controlplane.Status.Initialized, nil
+		return controlplane.Status.Ready, nil
 	}, intervals...).Should(BeTrue())
 }
 
diff --git a/test/infrastructure/docker/e2e/docker_test.go b/test/infrastructure/docker/e2e/docker_test.go
index e97ca6fcfdc3..ef281e870eee 100644
--- a/test/infrastructure/docker/e2e/docker_test.go
+++ b/test/infrastructure/docker/e2e/docker_test.go
@@ -129,22 +129,13 @@ var _ = Describe("Docker", func() {
 		}
 		framework.WaitForClusterToProvision(ctx, assertClusterProvisionsInput)
 
-		// Create the workload nodes
-		createMachineDeploymentinput := framework.CreateMachineDeploymentInput{
-			Creator:                 client,
-			MachineDeployment:       md,
-			BootstrapConfigTemplate: bootstrapTemplate,
-			InfraMachineTemplate:    infraTemplate,
-		}
-		framework.CreateMachineDeployment(ctx, createMachineDeploymentinput)
-
-		// Wait for the controlplane nodes to exist
-		assertKubeadmControlPlaneNodesExistInput := framework.WaitForKubeadmControlPlaneMachinesToExistInput{
+		// Wait for at least one control plane node to exist
+		waitForOneKubeadmControlPlaneMachineToExistInput := framework.WaitForOneKubeadmControlPlaneMachineToExistInput{
 			Lister:       client,
 			Cluster:      cluster,
 			ControlPlane: controlPlane,
 		}
-		framework.WaitForKubeadmControlPlaneMachinesToExist(ctx, assertKubeadmControlPlaneNodesExistInput, "10m", "10s")
+		framework.WaitForOneKubeadmControlPlaneMachineToExist(ctx, waitForOneKubeadmControlPlaneMachineToExistInput)
 
 		// Install a networking solution on the workload cluster
 		workloadClient, err := mgmt.GetWorkloadClient(ctx, cluster.Namespace, cluster.Name)
@@ -157,6 +148,23 @@ var _ = Describe("Docker", func() {
 		}
 		framework.ApplyYAMLURL(ctx, applyYAMLURLInput)
 
+		// Wait for the controlplane nodes to exist
+		assertKubeadmControlPlaneNodesExistInput := framework.WaitForKubeadmControlPlaneMachinesToExistInput{
+			Lister:       client,
+			Cluster:      cluster,
+			ControlPlane: controlPlane,
+		}
+		framework.WaitForKubeadmControlPlaneMachinesToExist(ctx, assertKubeadmControlPlaneNodesExistInput, "10m", "10s")
+
+		// Create the workload nodes
+		createMachineDeploymentinput := framework.CreateMachineDeploymentInput{
+			Creator:                 client,
+			MachineDeployment:       md,
+			BootstrapConfigTemplate: bootstrapTemplate,
+			InfraMachineTemplate:    infraTemplate,
+		}
+		framework.CreateMachineDeployment(ctx, createMachineDeploymentinput)
+
 		// Wait for the workload nodes to exist
 		waitForMachineDeploymentNodesToExistInput := framework.WaitForMachineDeploymentNodesToExistInput{
 			Lister: client,
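Illustrative usage (a sketch, not part of the patch): with Status.Ready now populated, an e2e spec can sequence the new framework helpers roughly as below. The ctx, client, cluster, and controlPlane variables are assumed to be the same ones docker_test.go sets up above; the interval arguments and the ControlPlane field on WaitForControlPlaneToBeReadyInput are inferred from context rather than shown in this diff.

	// Block until at least one control plane Machine has a NodeRef, so the
	// workload cluster's API server is reachable and a CNI can be applied.
	framework.WaitForOneKubeadmControlPlaneMachineToExist(ctx, framework.WaitForOneKubeadmControlPlaneMachineToExistInput{
		Lister:       client,
		Cluster:      cluster,
		ControlPlane: controlPlane,
	}, "10m", "10s")

	// Once networking is applied and the remaining replicas join, Status.Ready
	// (set by updateStatus when ReadyReplicas > 0) gates the rest of the test.
	framework.WaitForControlPlaneToBeReady(ctx, framework.WaitForControlPlaneToBeReadyInput{
		Getter:       client,
		ControlPlane: controlPlane,
	}, "10m", "10s")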