diff --git a/controlplane/kubeadm/controllers/controller.go b/controlplane/kubeadm/controllers/controller.go
index 3a27a7c77da3..8670644649a9 100644
--- a/controlplane/kubeadm/controllers/controller.go
+++ b/controlplane/kubeadm/controllers/controller.go
@@ -19,6 +19,8 @@ package controllers
 import (
 	"context"
 	"fmt"
+	expv1 "sigs.k8s.io/cluster-api/exp/api/v1alpha4"
+	"sigs.k8s.io/cluster-api/feature"
 	"time"
 
 	"sigs.k8s.io/cluster-api/util/collections"
@@ -423,8 +425,16 @@ func (r *KubeadmControlPlaneReconciler) reconcileDelete(ctx context.Context, clu
 	// all the machines are deleted in parallel.
 	conditions.SetAggregate(kcp, controlplanev1.MachinesReadyCondition, ownedMachines.ConditionGetters(), conditions.AddSourceRef(), conditions.WithStepCounterIf(false))
 
+	allMachinePools := &expv1.MachinePoolList{}
+	// Get all machine pools.
+	if feature.Gates.Enabled(feature.MachinePool) {
+		allMachinePools, err = r.managementCluster.GetMachinePoolsForCluster(ctx, cluster)
+		if err != nil {
+			return ctrl.Result{}, err
+		}
+	}
 	// Verify that only control plane machines remain
-	if len(allMachines) != len(ownedMachines) {
+	if len(allMachines) != len(ownedMachines) || len(allMachinePools.Items) != 0 {
 		log.Info("Waiting for worker nodes to be deleted first")
 		conditions.MarkFalse(kcp, controlplanev1.ResizedCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "Waiting for worker nodes to be deleted first")
 		return ctrl.Result{RequeueAfter: deleteRequeueAfter}, nil
diff --git a/controlplane/kubeadm/controllers/controller_test.go b/controlplane/kubeadm/controllers/controller_test.go
index 1558c2a6a0e3..8f41e4427963 100644
--- a/controlplane/kubeadm/controllers/controller_test.go
+++ b/controlplane/kubeadm/controllers/controller_test.go
@@ -19,6 +19,8 @@ package controllers
 import (
 	"context"
 	"fmt"
+	expv1 "sigs.k8s.io/cluster-api/exp/api/v1alpha4"
+	"sigs.k8s.io/cluster-api/feature"
 	"sync"
 	"testing"
 	"time"
@@ -1232,6 +1234,55 @@ func TestKubeadmControlPlaneReconciler_reconcileDelete(t *testing.T) {
 		g.Expect(controlPlaneMachines.Items).To(HaveLen(3))
 	})
 
+	t.Run("does not remove any control plane Machines if MachinePools exist", func(t *testing.T) {
+		_ = feature.MutableGates.Set("MachinePool=true")
+		g := NewWithT(t)
+
+		cluster, kcp, _ := createClusterWithControlPlane()
+		controllerutil.AddFinalizer(kcp, controlplanev1.KubeadmControlPlaneFinalizer)
+
+		workerMachinePool := &expv1.MachinePool{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "worker",
+				Namespace: cluster.Namespace,
+				Labels: map[string]string{
+					clusterv1.ClusterLabelName: cluster.Name,
+				},
+			},
+		}
+
+		initObjs := []client.Object{cluster.DeepCopy(), kcp.DeepCopy(), workerMachinePool.DeepCopy()}
+
+		for i := 0; i < 3; i++ {
+			m, _ := createMachineNodePair(fmt.Sprintf("test-%d", i), cluster, kcp, true)
+			initObjs = append(initObjs, m)
+		}
+
+		fakeClient := newFakeClient(g, initObjs...)
+
+		r := &KubeadmControlPlaneReconciler{
+			Client: fakeClient,
+			managementCluster: &fakeManagementCluster{
+				Management: &internal.Management{Client: fakeClient},
+				Workload:   fakeWorkloadCluster{},
+			},
+			recorder: record.NewFakeRecorder(32),
+		}
+
+		result, err := r.reconcileDelete(ctx, cluster, kcp)
+		g.Expect(result).To(Equal(ctrl.Result{RequeueAfter: deleteRequeueAfter}))
+		g.Expect(err).To(BeNil())
+
+		g.Expect(kcp.Finalizers).To(ContainElement(controlplanev1.KubeadmControlPlaneFinalizer))
+
+		controlPlaneMachines := clusterv1.MachineList{}
+		labels := map[string]string{
+			clusterv1.MachineControlPlaneLabelName: "",
+		}
+		g.Expect(fakeClient.List(ctx, &controlPlaneMachines, client.MatchingLabels(labels))).To(Succeed())
+		g.Expect(controlPlaneMachines.Items).To(HaveLen(3))
+	})
+
 	t.Run("removes the finalizer if no control plane Machines exist", func(t *testing.T) {
 		g := NewWithT(t)
 
diff --git a/controlplane/kubeadm/controllers/fakes_test.go b/controlplane/kubeadm/controllers/fakes_test.go
index a42f3fce2ee1..980ed366f474 100644
--- a/controlplane/kubeadm/controllers/fakes_test.go
+++ b/controlplane/kubeadm/controllers/fakes_test.go
@@ -21,16 +21,18 @@ import (
 	"github.com/blang/semver"
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4"
 	"sigs.k8s.io/cluster-api/controlplane/kubeadm/internal"
+	expv1 "sigs.k8s.io/cluster-api/exp/api/v1alpha4"
 	"sigs.k8s.io/cluster-api/util/collections"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 )
 
 type fakeManagementCluster struct {
 	// TODO: once all client interactions are moved to the Management cluster this can go away
-	Management *internal.Management
-	Machines   collections.Machines
-	Workload   fakeWorkloadCluster
-	Reader     client.Reader
+	Management   *internal.Management
+	Machines     collections.Machines
+	MachinePools *expv1.MachinePoolList
+	Workload     fakeWorkloadCluster
+	Reader       client.Reader
 }
 
 func (f *fakeManagementCluster) Get(ctx context.Context, key client.ObjectKey, obj client.Object) error {
@@ -52,6 +54,13 @@ func (f *fakeManagementCluster) GetMachinesForCluster(c context.Context, cluster
 	return f.Machines, nil
 }
 
+func (f *fakeManagementCluster) GetMachinePoolsForCluster(c context.Context, cluster *clusterv1.Cluster) (*expv1.MachinePoolList, error) {
+	if f.Management != nil {
+		return f.Management.GetMachinePoolsForCluster(c, cluster)
+	}
+	return f.MachinePools, nil
+}
+
 type fakeWorkloadCluster struct {
 	*internal.Workload
 	Status internal.ClusterStatus
diff --git a/controlplane/kubeadm/internal/cluster.go b/controlplane/kubeadm/internal/cluster.go
index a99f1dd9d023..419bea8f9ceb 100644
--- a/controlplane/kubeadm/internal/cluster.go
+++ b/controlplane/kubeadm/internal/cluster.go
@@ -21,6 +21,7 @@ import (
 	"crypto/tls"
 	"crypto/x509"
 	"fmt"
+	expv1 "sigs.k8s.io/cluster-api/exp/api/v1alpha4"
 	"time"
 
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4"
@@ -44,6 +45,7 @@ type ManagementCluster interface {
 	ctrlclient.Reader
 
 	GetMachinesForCluster(ctx context.Context, cluster *clusterv1.Cluster, filters ...collections.Func) (collections.Machines, error)
+	GetMachinePoolsForCluster(ctx context.Context, cluster *clusterv1.Cluster) (*expv1.MachinePoolList, error)
 	GetWorkloadCluster(ctx context.Context, clusterKey client.ObjectKey) (WorkloadCluster, error)
 }
 
@@ -78,6 +80,19 @@ func (m *Management) GetMachinesForCluster(ctx context.Context, cluster *cluster
 	return collections.GetFilteredMachinesForCluster(ctx, m.Client, cluster, filters...)
 }
 
+// GetMachinePoolsForCluster returns a list of machine pools owned by the cluster.
+func (m *Management) GetMachinePoolsForCluster(ctx context.Context, cluster *clusterv1.Cluster) (*expv1.MachinePoolList, error) {
+	selectors := []client.ListOption{
+		client.InNamespace(cluster.GetNamespace()),
+		client.MatchingLabels{
+			clusterv1.ClusterLabelName: cluster.GetName(),
+		},
+	}
+	machinePoolList := &expv1.MachinePoolList{}
+	err := m.Client.List(ctx, machinePoolList, selectors...)
+	return machinePoolList, err
+}
+
 // GetWorkloadCluster builds a cluster object.
 // The cluster comes with an etcd client generator to connect to any etcd pod living on a managed machine.
 func (m *Management) GetWorkloadCluster(ctx context.Context, clusterKey client.ObjectKey) (WorkloadCluster, error) {
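Note: the following is a minimal sketch, not part of the patch above, of how the new Management.GetMachinePoolsForCluster could be exercised directly against a controller-runtime fake client. The test name, scheme wiring, and fake-client setup are assumptions for illustration only; they are not helpers taken from this repository.

package internal

import (
	"context"
	"testing"

	. "github.com/onsi/gomega"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4"
	expv1 "sigs.k8s.io/cluster-api/exp/api/v1alpha4"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

// Sketch (hypothetical test): GetMachinePoolsForCluster should return only the
// MachinePools in the cluster's namespace that carry the cluster-name label.
func TestGetMachinePoolsForClusterSketch(t *testing.T) {
	g := NewWithT(t)

	// Register both the core and experimental API types with the fake client's scheme.
	scheme := runtime.NewScheme()
	g.Expect(clusterv1.AddToScheme(scheme)).To(Succeed())
	g.Expect(expv1.AddToScheme(scheme)).To(Succeed())

	cluster := &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "my-cluster", Namespace: "default"}}
	pool := &expv1.MachinePool{ObjectMeta: metav1.ObjectMeta{
		Name:      "worker",
		Namespace: "default",
		Labels:    map[string]string{clusterv1.ClusterLabelName: "my-cluster"},
	}}

	m := &Management{Client: fake.NewClientBuilder().WithScheme(scheme).WithObjects(cluster, pool).Build()}

	// The labeled MachinePool should be returned for the owning cluster.
	pools, err := m.GetMachinePoolsForCluster(context.TODO(), cluster)
	g.Expect(err).NotTo(HaveOccurred())
	g.Expect(pools.Items).To(HaveLen(1))
}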