diff --git a/controllers/machine_controller.go b/controllers/machine_controller.go
index ccd94b3475c2..a6940ee188ec 100644
--- a/controllers/machine_controller.go
+++ b/controllers/machine_controller.go
@@ -81,6 +81,7 @@ type MachineReconciler struct {
 	// WatchFilterValue is the label value used to filter events prior to reconciliation.
 	WatchFilterValue string
 
+	// WaitForNodeDeletion causes the reconciler to error when a node deletion fails, instead of ignoring it.
 	WaitForNodeDeletion bool
 
 	controller controller.Controller
diff --git a/controllers/machine_controller_test.go b/controllers/machine_controller_test.go
index 645be1fcb516..3b06fd5ab38c 100644
--- a/controllers/machine_controller_test.go
+++ b/controllers/machine_controller_test.go
@@ -17,6 +17,7 @@ limitations under the License.
 package controllers
 
 import (
+	"context"
 	"testing"
 	"time"
 
@@ -1853,6 +1854,115 @@ func TestNodeToMachine(t *testing.T) {
 	}
 }
 
+// TODO: We should add a test for the --wait-for-node-deletion flag. This requires error injection
+// in the fakeclient, which isn't supported yet. There is a very recent issue to track this:
+// https://github.com/kubernetes-sigs/controller-runtime/issues/1702
+func TestNodeDeletion(t *testing.T) {
+	g := NewWithT(t)
+
+	time := metav1.Now()
+
+	testCluster := clusterv1.Cluster{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "test-cluster",
+			Namespace: metav1.NamespaceDefault,
+		},
+	}
+
+	node := &corev1.Node{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "test",
+		},
+		Spec: corev1.NodeSpec{ProviderID: "test://id-1"},
+	}
+
+	testMachine := clusterv1.Machine{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "test",
+			Namespace: metav1.NamespaceDefault,
+			Labels: map[string]string{
+				clusterv1.MachineControlPlaneLabelName: "",
+			},
+			Annotations: map[string]string{
+				"machine.cluster.x-k8s.io/exclude-node-draining": "",
+			},
+			Finalizers:        []string{clusterv1.MachineFinalizer},
+			DeletionTimestamp: &time,
+		},
+		Spec: clusterv1.MachineSpec{
+			ClusterName: "test-cluster",
+			InfrastructureRef: corev1.ObjectReference{
+				APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
+				Kind:       "GenericInfrastructureMachine",
+				Name:       "infra-config1",
+			},
+			Bootstrap: clusterv1.Bootstrap{DataSecretName: pointer.StringPtr("data")},
+		},
+		Status: clusterv1.MachineStatus{
+			NodeRef: &corev1.ObjectReference{
+				Name: "test",
+			},
+		},
+	}
+
+	cpmachine1 := &clusterv1.Machine{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "cp1",
+			Namespace: metav1.NamespaceDefault,
+			Labels: map[string]string{
+				clusterv1.ClusterLabelName:             "test-cluster",
+				clusterv1.MachineControlPlaneLabelName: "",
+			},
+			Finalizers: []string{clusterv1.MachineFinalizer},
+		},
+		Spec: clusterv1.MachineSpec{
+			ClusterName:       "test-cluster",
+			InfrastructureRef: corev1.ObjectReference{},
+			Bootstrap:         clusterv1.Bootstrap{DataSecretName: pointer.StringPtr("data")},
+		},
+		Status: clusterv1.MachineStatus{
+			NodeRef: &corev1.ObjectReference{
+				Name: "cp1",
+			},
+		},
+	}
+
+	testCases := []struct {
+		waitForDeletion bool
+		resultErr       bool
+	}{
+		{
+			waitForDeletion: false,
+			resultErr:       false,
+		},
+	}
+
+	for _, tc := range testCases {
+		m := testMachine.DeepCopy()
+
+		fc := fake.NewClientBuilder().
+			WithObjects(node, m, cpmachine1).
+			Build()
+		tracker := remote.NewTestClusterCacheTracker(log.NullLogger{}, fc, fakeScheme, client.ObjectKeyFromObject(&testCluster))
+
+		r := &MachineReconciler{
+			WaitForNodeDeletion: tc.waitForDeletion,
+			Client:              fc,
+			Tracker:             tracker,
+		}
+
+		_, err := r.reconcileDelete(context.Background(), &testCluster, m)
+
+		if tc.resultErr {
+			g.Expect(err).To(HaveOccurred())
+		} else {
+			g.Expect(err).NotTo(HaveOccurred())
+			n := &corev1.Node{}
+			g.Expect(fc.Get(context.Background(), client.ObjectKeyFromObject(node), n)).NotTo(Succeed())
+		}
+	}
+}
+
 // adds a condition list to an external object.
 func addConditionsToExternal(u *unstructured.Unstructured, newConditions clusterv1.Conditions) {
 	existingConditions := clusterv1.Conditions{}