🐛 Wait for MachinePools to be deleted before deleting KCP Machines #4646

Merged

12 changes: 11 additions & 1 deletion controlplane/kubeadm/controllers/controller.go
@@ -19,6 +19,8 @@ package controllers
 import (
 	"context"
 	"fmt"
+	expv1 "sigs.k8s.io/cluster-api/exp/api/v1alpha4"
+	"sigs.k8s.io/cluster-api/feature"
 	"time"
 
 	"sigs.k8s.io/cluster-api/util/collections"
@@ -423,8 +425,16 @@ func (r *KubeadmControlPlaneReconciler) reconcileDelete(ctx context.Context, clu
 	// all the machines are deleted in parallel.
 	conditions.SetAggregate(kcp, controlplanev1.MachinesReadyCondition, ownedMachines.ConditionGetters(), conditions.AddSourceRef(), conditions.WithStepCounterIf(false))
 
+	allMachinePools := &expv1.MachinePoolList{}
+	// Get all machine pools.
+	if feature.Gates.Enabled(feature.MachinePool) {
+		allMachinePools, err = r.managementCluster.GetMachinePoolsForCluster(ctx, cluster)
+		if err != nil {
+			return ctrl.Result{}, err
+		}
+	}
 	// Verify that only control plane machines remain
-	if len(allMachines) != len(ownedMachines) {
+	if len(allMachines) != len(ownedMachines) || len(allMachinePools.Items) != 0 {
Member commented:

Should we sum all the dependents in a single integer? If later on we add more classes, it might be easier to reason about.

 		log.Info("Waiting for worker nodes to be deleted first")
 		conditions.MarkFalse(kcp, controlplanev1.ResizedCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "Waiting for worker nodes to be deleted first")
 		return ctrl.Result{RequeueAfter: deleteRequeueAfter}, nil
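
A minimal sketch of the reviewer's suggestion, not part of this PR: fold every class of dependent workload object into a single counter, so adding a new class later only extends the sum (numDependents is a hypothetical name).

	// Hypothetical alternative to the two-clause condition above: sum all
	// dependent workload objects and gate deletion on the total.
	numDependents := len(allMachines) - len(ownedMachines) // worker Machines
	if feature.Gates.Enabled(feature.MachinePool) {
		numDependents += len(allMachinePools.Items)
	}
	if numDependents > 0 {
		log.Info("Waiting for worker nodes to be deleted first")
		conditions.MarkFalse(kcp, controlplanev1.ResizedCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "Waiting for worker nodes to be deleted first")
		return ctrl.Result{RequeueAfter: deleteRequeueAfter}, nil
	}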
51 changes: 51 additions & 0 deletions controlplane/kubeadm/controllers/controller_test.go
@@ -19,6 +19,8 @@ package controllers
 import (
 	"context"
 	"fmt"
+	expv1 "sigs.k8s.io/cluster-api/exp/api/v1alpha4"
+	"sigs.k8s.io/cluster-api/feature"
 	"sync"
 	"testing"
 	"time"
@@ -1232,6 +1234,55 @@ func TestKubeadmControlPlaneReconciler_reconcileDelete(t *testing.T) {
 		g.Expect(controlPlaneMachines.Items).To(HaveLen(3))
 	})
 
+	t.Run("does not remove any control plane Machines if MachinePools exist", func(t *testing.T) {
+		_ = feature.MutableGates.Set("MachinePool=true")
+		g := NewWithT(t)
+
+		cluster, kcp, _ := createClusterWithControlPlane()
+		controllerutil.AddFinalizer(kcp, controlplanev1.KubeadmControlPlaneFinalizer)
+
+		workerMachinePool := &expv1.MachinePool{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "worker",
+				Namespace: cluster.Namespace,
+				Labels: map[string]string{
+					clusterv1.ClusterLabelName: cluster.Name,
+				},
+			},
+		}
+
+		initObjs := []client.Object{cluster.DeepCopy(), kcp.DeepCopy(), workerMachinePool.DeepCopy()}
Member commented:

nit: This DeepCopy is fine, though a bit redundant. This calls newFakeClient, which calls NewFakeClientWithScheme, which already deep-copies the objects:

objsWithResourceVersion := initObjs[i].DeepCopyObject().(client.Object)

The fakeClient tracker itself deep-copies as well:

https://github.com/kubernetes/client-go/blob/master/testing/fixture.go#L373-L376
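
A sketch of the simplification the reviewer is pointing at (assuming the deep-copy behavior described above; not applied in this PR):

	// The fake client deep-copies objects on insert, so the explicit
	// DeepCopy calls above are redundant and the objects can be passed directly:
	initObjs := []client.Object{cluster, kcp, workerMachinePool}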


+		for i := 0; i < 3; i++ {
+			m, _ := createMachineNodePair(fmt.Sprintf("test-%d", i), cluster, kcp, true)
+			initObjs = append(initObjs, m)
+		}
+
+		fakeClient := newFakeClient(g, initObjs...)
+
+		r := &KubeadmControlPlaneReconciler{
+			Client: fakeClient,
+			managementCluster: &fakeManagementCluster{
+				Management: &internal.Management{Client: fakeClient},
+				Workload:   fakeWorkloadCluster{},
+			},
+			recorder: record.NewFakeRecorder(32),
+		}
+
+		result, err := r.reconcileDelete(ctx, cluster, kcp)
+		g.Expect(result).To(Equal(ctrl.Result{RequeueAfter: deleteRequeueAfter}))
+		g.Expect(err).To(BeNil())
+
+		g.Expect(kcp.Finalizers).To(ContainElement(controlplanev1.KubeadmControlPlaneFinalizer))
+
+		controlPlaneMachines := clusterv1.MachineList{}
+		labels := map[string]string{
+			clusterv1.MachineControlPlaneLabelName: "",
+		}
+		g.Expect(fakeClient.List(ctx, &controlPlaneMachines, client.MatchingLabels(labels))).To(Succeed())
+		g.Expect(controlPlaneMachines.Items).To(HaveLen(3))
+	})

t.Run("removes the finalizer if no control plane Machines exist", func(t *testing.T) {
g := NewWithT(t)

17 changes: 13 additions & 4 deletions controlplane/kubeadm/controllers/fakes_test.go
@@ -21,16 +21,18 @@ import (
 	"github.com/blang/semver"
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4"
 	"sigs.k8s.io/cluster-api/controlplane/kubeadm/internal"
+	expv1 "sigs.k8s.io/cluster-api/exp/api/v1alpha4"
 	"sigs.k8s.io/cluster-api/util/collections"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 )
 
 type fakeManagementCluster struct {
 	// TODO: once all client interactions are moved to the Management cluster this can go away
-	Management *internal.Management
-	Machines   collections.Machines
-	Workload   fakeWorkloadCluster
-	Reader     client.Reader
+	Management   *internal.Management
+	Machines     collections.Machines
+	MachinePools *expv1.MachinePoolList
+	Workload     fakeWorkloadCluster
+	Reader       client.Reader
 }

 func (f *fakeManagementCluster) Get(ctx context.Context, key client.ObjectKey, obj client.Object) error {
@@ -52,6 +54,13 @@ func (f *fakeManagementCluster) GetMachinesForCluster(c context.Context, cluster
 	return f.Machines, nil
 }
 
+func (f *fakeManagementCluster) GetMachinePoolsForCluster(c context.Context, cluster *clusterv1.Cluster) (*expv1.MachinePoolList, error) {
+	if f.Management != nil {
+		return f.Management.GetMachinePoolsForCluster(c, cluster)
+	}
+	return f.MachinePools, nil
+}

 type fakeWorkloadCluster struct {
 	*internal.Workload
 	Status internal.ClusterStatus
15 changes: 15 additions & 0 deletions controlplane/kubeadm/internal/cluster.go
@@ -21,6 +21,7 @@ import (
 	"crypto/tls"
 	"crypto/x509"
 	"fmt"
+	expv1 "sigs.k8s.io/cluster-api/exp/api/v1alpha4"
 	"time"
 
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4"
@@ -44,6 +45,7 @@ type ManagementCluster interface {
 	ctrlclient.Reader
 
 	GetMachinesForCluster(ctx context.Context, cluster *clusterv1.Cluster, filters ...collections.Func) (collections.Machines, error)
+	GetMachinePoolsForCluster(ctx context.Context, cluster *clusterv1.Cluster) (*expv1.MachinePoolList, error)
 	GetWorkloadCluster(ctx context.Context, clusterKey client.ObjectKey) (WorkloadCluster, error)
 }

@@ -78,6 +80,19 @@ func (m *Management) GetMachinesForCluster(ctx context.Context, cluster *cluster
 	return collections.GetFilteredMachinesForCluster(ctx, m.Client, cluster, filters...)
 }
 
+// GetMachinePoolsForCluster returns a list of machine pools owned by the cluster.
+func (m *Management) GetMachinePoolsForCluster(ctx context.Context, cluster *clusterv1.Cluster) (*expv1.MachinePoolList, error) {
+	selectors := []client.ListOption{
+		client.InNamespace(cluster.GetNamespace()),
+		client.MatchingLabels{
+			clusterv1.ClusterLabelName: cluster.GetName(),
+		},
+	}
+	machinePoolList := &expv1.MachinePoolList{}
+	err := m.Client.List(ctx, machinePoolList, selectors...)
+	return machinePoolList, err
+}
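
For illustration, a sketch of a call site (hypothetical variable names): the helper selects MachinePools purely by namespace plus the cluster-name label, so anything labeled for the cluster counts as a dependent.

	// Hypothetical usage: KCP deletion requeues while any MachinePool
	// labeled for this cluster still exists.
	pools, err := management.GetMachinePoolsForCluster(ctx, cluster)
	if err != nil {
		return err
	}
	if len(pools.Items) != 0 {
		// wait; requeue until the pools are deleted
	}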

 // GetWorkloadCluster builds a cluster object.
 // The cluster comes with an etcd client generator to connect to any etcd pod living on a managed machine.
 func (m *Management) GetWorkloadCluster(ctx context.Context, clusterKey client.ObjectKey) (WorkloadCluster, error) {
Expand Down