From c21b898de92f16a514835d5d05e4d2a3a98c11aa Mon Sep 17 00:00:00 2001
From: fabriziopandini
Date: Fri, 31 Mar 2023 18:18:25 +0200
Subject: [PATCH] fix node label propagation

---
 api/v1beta1/common_types.go                   |   3 +
 .../src/reference/labels_and_annotations.md   |   1 +
 .../controllers/machine/machine_controller.go |   4 -
 .../machine/machine_controller_noderef.go     | 101 +++----
 .../machine_controller_noderef_test.go        | 272 +++++++++++++++---
 5 files changed, 291 insertions(+), 90 deletions(-)

diff --git a/api/v1beta1/common_types.go b/api/v1beta1/common_types.go
index 5fa401dcb67e..db826ba0c1f6 100644
--- a/api/v1beta1/common_types.go
+++ b/api/v1beta1/common_types.go
@@ -74,6 +74,9 @@ const (
 	// OwnerKindAnnotation is the annotation set on nodes identifying the owner kind.
 	OwnerKindAnnotation = "cluster.x-k8s.io/owner-kind"
 
+	// LabelsFromMachineAnnotation is the annotation set on nodes to track the labels originating from Machines.
+	LabelsFromMachineAnnotation = "cluster.x-k8s.io/labels-from-machine"
+
 	// OwnerNameAnnotation is the annotation set on nodes identifying the owner name.
 	OwnerNameAnnotation = "cluster.x-k8s.io/owner-name"
 
diff --git a/docs/book/src/reference/labels_and_annotations.md b/docs/book/src/reference/labels_and_annotations.md
index ed3a67582b77..cdbb764b3f7c 100644
--- a/docs/book/src/reference/labels_and_annotations.md
+++ b/docs/book/src/reference/labels_and_annotations.md
@@ -25,6 +25,7 @@
 | unsafe.topology.cluster.x-k8s.io/disable-update-class-name-check | It can be used to disable the webhook check on update that disallows a pre-existing Cluster to be populated with Topology information and Class. |
 | cluster.x-k8s.io/cluster-name | It is set on nodes identifying the name of the cluster the node belongs to. |
 | cluster.x-k8s.io/cluster-namespace | It is set on nodes identifying the namespace of the cluster the node belongs to. |
+| cluster.x-k8s.io/labels-from-machine | It is set on nodes to track the labels originating from Machines. |
 | cluster.x-k8s.io/machine | It is set on nodes identifying the machine the node belongs to. |
 | cluster.x-k8s.io/owner-kind | It is set on nodes identifying the owner kind. |
 | cluster.x-k8s.io/owner-name | It is set on nodes identifying the owner name. |
diff --git a/internal/controllers/machine/machine_controller.go b/internal/controllers/machine/machine_controller.go
index 640401b6128b..4335bac6c2d4 100644
--- a/internal/controllers/machine/machine_controller.go
+++ b/internal/controllers/machine/machine_controller.go
@@ -60,10 +60,6 @@ import (
 const (
 	// controllerName defines the controller used when creating clients.
 	controllerName = "machine-controller"
-
-	// machineManagerName is the manager name used for Server-Side-Apply (SSA) operations
-	// in the Machine controller.
- machineManagerName = "capi-machine" ) var ( diff --git a/internal/controllers/machine/machine_controller_noderef.go b/internal/controllers/machine/machine_controller_noderef.go index 89ac0a2dfdde..796b6c764791 100644 --- a/internal/controllers/machine/machine_controller_noderef.go +++ b/internal/controllers/machine/machine_controller_noderef.go @@ -24,8 +24,6 @@ import ( "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/types" "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -33,12 +31,10 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/api/v1beta1/index" "sigs.k8s.io/cluster-api/controllers/noderefutil" - "sigs.k8s.io/cluster-api/internal/util/ssa" "sigs.k8s.io/cluster-api/internal/util/taints" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" "sigs.k8s.io/cluster-api/util/conditions" - "sigs.k8s.io/cluster-api/util/patch" ) var ( @@ -105,40 +101,26 @@ func (r *Reconciler) reconcileNode(ctx context.Context, cluster *clusterv1.Clust // Set the NodeSystemInfo. machine.Status.NodeInfo = &node.Status.NodeInfo - // Reconcile node annotations. - patchHelper, err := patch.NewHelper(node, remoteClient) - if err != nil { - return ctrl.Result{}, err - } - desired := map[string]string{ + // Compute all the annotations that CAPI is setting on nodes; + // CAPI only enforces some annotations and never changes or removes them. + nodeAnnotations := map[string]string{ clusterv1.ClusterNameAnnotation: machine.Spec.ClusterName, clusterv1.ClusterNamespaceAnnotation: machine.GetNamespace(), clusterv1.MachineAnnotation: machine.Name, } if owner := metav1.GetControllerOfNoCopy(machine); owner != nil { - desired[clusterv1.OwnerKindAnnotation] = owner.Kind - desired[clusterv1.OwnerNameAnnotation] = owner.Name - } - if annotations.AddAnnotations(node, desired) { - if err := patchHelper.Patch(ctx, node); err != nil { - log.V(2).Info("Failed patch node to set annotations", "err", err, "node name", node.Name) - return ctrl.Result{}, err - } + nodeAnnotations[clusterv1.OwnerKindAnnotation] = owner.Kind + nodeAnnotations[clusterv1.OwnerNameAnnotation] = owner.Name } - updatedNode := unstructuredNode(node.Name, node.UID, getManagedLabels(machine.Labels)) - err = ssa.Patch(ctx, remoteClient, machineManagerName, updatedNode, ssa.WithCachingProxy{Cache: r.ssaCache, Original: node}) - if err != nil { - return ctrl.Result{}, errors.Wrap(err, "failed to apply labels to Node") - } - // Update `node` with the new version of the object. - if err := r.Client.Scheme().Convert(updatedNode, node, nil); err != nil { - return ctrl.Result{}, errors.Wrapf(err, "failed to convert node to structured object %s", klog.KObj(node)) - } + // Compute labels to be propagated from Machines to nodes. + // NOTE: CAPI should manage only a subset of node labels, everything else should be preserved. + // NOTE: Once we reconcile node labels for the first time, the NodeUninitializedTaint is removed from the node. 
+	nodeLabels := getManagedLabels(machine.Labels)
 
-	// Reconcile node taints
-	if err := r.reconcileNodeTaints(ctx, remoteClient, node); err != nil {
-		return ctrl.Result{}, errors.Wrapf(err, "failed to reconcile taints on Node %s", klog.KObj(node))
+	// Reconcile node annotations, labels, and taints.
+	if err := r.patchNode(ctx, remoteClient, node, nodeLabels, nodeAnnotations); err != nil {
+		return ctrl.Result{}, errors.Wrapf(err, "failed to reconcile Node %s", klog.KObj(node))
 	}
 
 	// Do the remaining node health checks, then set the node health to true if all checks pass.
@@ -156,17 +138,6 @@ func (r *Reconciler) reconcileNode(ctx context.Context, cluster *clusterv1.Clust
 	return ctrl.Result{}, nil
 }
 
-// unstructuredNode returns a raw unstructured from Node input.
-func unstructuredNode(name string, uid types.UID, labels map[string]string) *unstructured.Unstructured {
-	obj := &unstructured.Unstructured{}
-	obj.SetAPIVersion("v1")
-	obj.SetKind("Node")
-	obj.SetName(name)
-	obj.SetUID(uid)
-	obj.SetLabels(labels)
-	return obj
-}
-
 // getManagedLabels gets a map[string]string and returns another map[string]string
 // filtering out labels not managed by CAPI.
 func getManagedLabels(labels map[string]string) map[string]string {
@@ -269,16 +240,48 @@ func (r *Reconciler) getNode(ctx context.Context, c client.Reader, providerID *n
 	return &nodeList.Items[0], nil
 }
 
-func (r *Reconciler) reconcileNodeTaints(ctx context.Context, remoteClient client.Client, node *corev1.Node) error {
-	patchHelper, err := patch.NewHelper(node, remoteClient)
-	if err != nil {
-		return errors.Wrapf(err, "failed to create patch helper for Node %s", klog.KObj(node))
+// patchNode is required to work around an issue on Node.Status.Addresses, which is incorrectly annotated as patchStrategy=merge;
+// this causes SSA patches to fail when there are two addresses with the same key. See https://github.com/kubernetes-sigs/cluster-api/issues/8417.
+func (r *Reconciler) patchNode(ctx context.Context, remoteClient client.Client, node *corev1.Node, newLabels, newAnnotations map[string]string) error {
+	newNode := node.DeepCopy()
+
+	// Adds the annotations CAPI sets on the node.
+	hasAnnotationChanges := annotations.AddAnnotations(newNode, newAnnotations)
+
+	// Adds the labels from the Machine.
+	// NOTE: in order to handle deletion we are tracking the labels set from the Machine in an annotation.
+	// At the next reconcile we are going to use this annotation for deleting labels previously set by the
+	// Machine but not present anymore. Labels not set from the Machine should always be preserved.
+	if newNode.Labels == nil {
+		newNode.Labels = make(map[string]string)
+	}
+	hasLabelChanges := false
+	labelsFromPreviousReconcile := strings.Split(newNode.Annotations[clusterv1.LabelsFromMachineAnnotation], ",")
+	if len(labelsFromPreviousReconcile) == 1 && labelsFromPreviousReconcile[0] == "" {
+		labelsFromPreviousReconcile = []string{}
+	}
+	labelsFromCurrentReconcile := []string{}
+	for k, v := range newLabels {
+		if cur, ok := newNode.Labels[k]; !ok || cur != v {
+			newNode.Labels[k] = v
+			hasLabelChanges = true
+		}
+		labelsFromCurrentReconcile = append(labelsFromCurrentReconcile, k)
 	}
-	// Drop the NodeUninitializedTaint taint on the node.
-	if taints.RemoveNodeTaint(node, clusterv1.NodeUninitializedTaint) {
-		if err := patchHelper.Patch(ctx, node); err != nil {
-			return errors.Wrapf(err, "failed to patch Node %s to modify taints", klog.KObj(node))
+	for _, k := range labelsFromPreviousReconcile {
+		if _, ok := newLabels[k]; !ok {
+			delete(newNode.Labels, k)
+			hasLabelChanges = true
 		}
 	}
-	return nil
+	annotations.AddAnnotations(newNode, map[string]string{clusterv1.LabelsFromMachineAnnotation: strings.Join(labelsFromCurrentReconcile, ",")})
+
+	// Drop the NodeUninitializedTaint from the node, given that we are reconciling labels.
+	hasTaintChanges := taints.RemoveNodeTaint(newNode, clusterv1.NodeUninitializedTaint)
+
+	if !hasAnnotationChanges && !hasLabelChanges && !hasTaintChanges {
+		return nil
+	}
+
+	return remoteClient.Patch(ctx, newNode, client.StrategicMergeFrom(node))
 }
diff --git a/internal/controllers/machine/machine_controller_noderef_test.go b/internal/controllers/machine/machine_controller_noderef_test.go
index f71d109724b4..3c0a72f53169 100644
--- a/internal/controllers/machine/machine_controller_noderef_test.go
+++ b/internal/controllers/machine/machine_controller_noderef_test.go
@@ -27,7 +27,6 @@ import (
 	"k8s.io/utils/pointer"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
-	"sigs.k8s.io/controller-runtime/pkg/client/fake"
 	"sigs.k8s.io/controller-runtime/pkg/handler"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 
@@ -258,6 +257,18 @@ func TestNodeLabelSync(t *testing.T) {
 			GenerateName: "machine-test-node-",
 		},
 		Spec: corev1.NodeSpec{ProviderID: nodeProviderID},
+		Status: corev1.NodeStatus{
+			Addresses: []corev1.NodeAddress{
+				{
+					Type:    corev1.NodeInternalIP,
+					Address: "1.1.1.1",
+				},
+				{
+					Type:    corev1.NodeInternalIP,
+					Address: "2.2.2.2",
+				},
+			},
+		},
 	}
 
 	// Set Node labels
@@ -461,29 +472,209 @@ func TestGetManagedLabels(t *testing.T) {
 	g.Expect(got).To(BeEquivalentTo(managedLabels))
 }
 
-func TestReconcileNodeTaints(t *testing.T) {
-	tests := []struct {
-		name string
-		node *corev1.Node
+func TestPatchNode(t *testing.T) {
+	testCases := []struct {
+		name                string
+		oldNode             *corev1.Node
+		newLabels           map[string]string
+		newAnnotations      map[string]string
+		expectedLabels      map[string]string
+		expectedAnnotations map[string]string
+		expectedTaints      []corev1.Taint
 	}{
 		{
-			name: "if the node has the uninitialized taint it should be dropped from the node",
-			node: &corev1.Node{
+			name: "Check that patch works even if there are Status.Addresses with the same key",
+			oldNode: &corev1.Node{
 				ObjectMeta: metav1.ObjectMeta{
-					Name: "test-node-1",
+					Name: fmt.Sprintf("node-%s", util.RandomString(6)),
 				},
-				Spec: corev1.NodeSpec{
-					Taints: []corev1.Taint{
-						clusterv1.NodeUninitializedTaint,
+				Status: corev1.NodeStatus{
+					Addresses: []corev1.NodeAddress{
+						{
+							Type:    corev1.NodeInternalIP,
+							Address: "1.1.1.1",
+						},
+						{
+							Type:    corev1.NodeInternalIP,
+							Address: "2.2.2.2",
+						},
 					},
 				},
 			},
+			newLabels:      map[string]string{"foo": "bar"},
+			expectedLabels: map[string]string{"foo": "bar"},
+			expectedAnnotations: map[string]string{
+				clusterv1.LabelsFromMachineAnnotation: "foo",
+			},
+			expectedTaints: []corev1.Taint{
+				{Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server
+			},
 		},
+		// Labels (CAPI owns a subset of labels; everything else should be preserved)
 		{
-			name: "if the node is a control plane and has the uninitialized taint it should be dropped from the node",
-			node: &corev1.Node{
+			name: "Existing labels should be preserved if there are no labels from machines",
+			oldNode: &corev1.Node{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: fmt.Sprintf("node-%s", util.RandomString(6)),
+					Labels: map[string]string{
+						"not-managed-by-capi": "foo",
+					},
+				},
+			},
+			expectedLabels: map[string]string{
+				"not-managed-by-capi": "foo",
+			},
+			expectedTaints: []corev1.Taint{
+				{Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server
+			},
+		},
+		{
+			name: "Add label must preserve existing labels",
+			oldNode: &corev1.Node{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: fmt.Sprintf("node-%s", util.RandomString(6)),
+					Labels: map[string]string{
+						"not-managed-by-capi": "foo",
+					},
+				},
+			},
+			newLabels: map[string]string{
+				"label-from-machine": "foo",
+			},
+			expectedLabels: map[string]string{
+				"not-managed-by-capi": "foo",
+				"label-from-machine":  "foo",
+			},
+			expectedAnnotations: map[string]string{
+				clusterv1.LabelsFromMachineAnnotation: "label-from-machine",
+			},
+			expectedTaints: []corev1.Taint{
+				{Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server
+			},
+		},
+		{
+			name: "CAPI takes ownership of existing labels if they are set from machines",
+			oldNode: &corev1.Node{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: fmt.Sprintf("node-%s", util.RandomString(6)),
+					Labels: map[string]string{
+						clusterv1.NodeRoleLabelPrefix: "foo",
+					},
+				},
+			},
+			newLabels: map[string]string{
+				clusterv1.NodeRoleLabelPrefix: "control-plane",
+			},
+			expectedLabels: map[string]string{
+				clusterv1.NodeRoleLabelPrefix: "control-plane",
+			},
+			expectedAnnotations: map[string]string{
+				clusterv1.LabelsFromMachineAnnotation: clusterv1.NodeRoleLabelPrefix,
+			},
+			expectedTaints: []corev1.Taint{
+				{Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server
+			},
+		},
+		{
+			name: "Change a label previously set from machines",
+			oldNode: &corev1.Node{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: fmt.Sprintf("node-%s", util.RandomString(6)),
+					Labels: map[string]string{
+						clusterv1.NodeRoleLabelPrefix: "foo",
+					},
+					Annotations: map[string]string{
+						clusterv1.LabelsFromMachineAnnotation: clusterv1.NodeRoleLabelPrefix,
+					},
+				},
+			},
+			newLabels: map[string]string{
+				clusterv1.NodeRoleLabelPrefix: "control-plane",
+			},
+			expectedLabels: map[string]string{
+				clusterv1.NodeRoleLabelPrefix: "control-plane",
+			},
+			expectedAnnotations: map[string]string{
+				clusterv1.LabelsFromMachineAnnotation: clusterv1.NodeRoleLabelPrefix,
+			},
+			expectedTaints: []corev1.Taint{
+				{Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server
+			},
+		},
+		{
+			name: "Delete a label previously set from machines",
+			oldNode: &corev1.Node{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: fmt.Sprintf("node-%s", util.RandomString(6)),
+					Labels: map[string]string{
+						clusterv1.NodeRoleLabelPrefix: "foo",
+						"not-managed-by-capi":         "foo",
+					},
+					Annotations: map[string]string{
+						clusterv1.LabelsFromMachineAnnotation: clusterv1.NodeRoleLabelPrefix,
+					},
+				},
+			},
+			expectedLabels: map[string]string{
+				"not-managed-by-capi": "foo",
+			},
+			expectedAnnotations: map[string]string{
+				clusterv1.LabelsFromMachineAnnotation: "",
+			},
+			expectedTaints: []corev1.Taint{
+				{Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server
+			},
+		},
+		{
+			name: "Label previously set from machine, already removed out of band, annotation should be cleaned up",
+			oldNode: &corev1.Node{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: fmt.Sprintf("node-%s", util.RandomString(6)),
+					Annotations: map[string]string{
+						clusterv1.LabelsFromMachineAnnotation: clusterv1.NodeRoleLabelPrefix,
+					},
+				},
+			},
+			expectedAnnotations: map[string]string{
+				clusterv1.LabelsFromMachineAnnotation: "",
+			},
+			expectedTaints: []corev1.Taint{
+				{Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server
+			},
+		},
+		// Annotations (CAPI only enforces its own annotations; it never changes or removes annotations set by others)
+		{
+			name: "Add CAPI annotations",
+			oldNode: &corev1.Node{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: fmt.Sprintf("node-%s", util.RandomString(6)),
+					Annotations: map[string]string{
+						"not-managed-by-capi": "foo",
+					},
+				},
+			},
+			newAnnotations: map[string]string{
+				clusterv1.ClusterNameAnnotation:      "foo",
+				clusterv1.ClusterNamespaceAnnotation: "bar",
+				clusterv1.MachineAnnotation:          "baz",
+			},
+			expectedAnnotations: map[string]string{
+				clusterv1.ClusterNameAnnotation:       "foo",
+				clusterv1.ClusterNamespaceAnnotation:  "bar",
+				clusterv1.MachineAnnotation:           "baz",
+				"not-managed-by-capi":                 "foo",
+				clusterv1.LabelsFromMachineAnnotation: "",
+			},
+			expectedTaints: []corev1.Taint{
+				{Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server
+			},
+		},
+		// Taints (CAPI only removes the uninitialized taint if it exists; other taints should be preserved)
+		{
+			name: "Removes NodeUninitializedTaint if present",
+			oldNode: &corev1.Node{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: fmt.Sprintf("node-%s", util.RandomString(6)),
 				},
 				Spec: corev1.NodeSpec{
 					Taints: []corev1.Taint{
@@ -495,35 +686,42 @@ func TestReconcileNodeTaints(t *testing.T) {
 					},
 				},
 			},
-		},
-		{
-			name: "if the node does not have the uninitialized taint it should remain absent from the node",
-			node: &corev1.Node{
-				ObjectMeta: metav1.ObjectMeta{
-					Name: "test-node-1",
-				},
-				Spec: corev1.NodeSpec{
-					Taints: []corev1.Taint{},
+			expectedAnnotations: map[string]string{
+				clusterv1.LabelsFromMachineAnnotation: "",
+			},
+			expectedTaints: []corev1.Taint{
+				{
+					Key:    "node-role.kubernetes.io/control-plane",
+					Effect: corev1.TaintEffectNoSchedule,
 				},
+				{Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server
 			},
 		},
 	}
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
+	r := Reconciler{Client: env}
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
 			g := NewWithT(t)
-			fakeClient := fake.NewClientBuilder().WithObjects(tt.node).WithScheme(fakeScheme).Build()
-			nodeBefore := tt.node.DeepCopy()
-			r := &Reconciler{}
-			g.Expect(r.reconcileNodeTaints(ctx, fakeClient, tt.node)).Should(Succeed())
-			// Verify the NodeUninitializedTaint is dropped.
-			g.Expect(tt.node.Spec.Taints).ShouldNot(ContainElement(clusterv1.NodeUninitializedTaint))
-			// Verify all other taints are same.
-			for _, taint := range nodeBefore.Spec.Taints {
-				if !taint.MatchTaint(&clusterv1.NodeUninitializedTaint) {
-					g.Expect(tt.node.Spec.Taints).Should(ContainElement(taint))
-				}
-			}
+			oldNode := tc.oldNode.DeepCopy()
+
+			g.Expect(env.Create(ctx, oldNode)).To(Succeed())
+			t.Cleanup(func() {
+				_ = env.Cleanup(ctx, oldNode)
+			})
+
+			err := r.patchNode(ctx, env, oldNode, tc.newLabels, tc.newAnnotations)
+			g.Expect(err).ToNot(HaveOccurred())
+
+			g.Eventually(func(g Gomega) {
+				gotNode := &corev1.Node{}
+				err = env.Get(ctx, client.ObjectKeyFromObject(oldNode), gotNode)
+				g.Expect(err).ToNot(HaveOccurred())
+
+				g.Expect(gotNode.Labels).To(BeComparableTo(tc.expectedLabels))
+				g.Expect(gotNode.Annotations).To(BeComparableTo(tc.expectedAnnotations))
+				g.Expect(gotNode.Spec.Taints).To(BeComparableTo(tc.expectedTaints))
+			}, 10*time.Second).Should(Succeed())
 		})
 	}
 }
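
Note for reviewers: the heart of this change is the round-trip bookkeeping on the cluster.x-k8s.io/labels-from-machine annotation, which is what lets patchNode delete labels the Machine no longer requests while leaving labels owned by other actors untouched. The self-contained Go sketch below distills just that logic, with no envtest or client; applyMachineLabels is a hypothetical helper introduced for illustration (the real logic lives inline in patchNode above), and the sort is added here only so the example prints a deterministic annotation value.

package main

import (
	"fmt"
	"sort"
	"strings"
)

// labelsFromMachineAnnotation mirrors clusterv1.LabelsFromMachineAnnotation.
const labelsFromMachineAnnotation = "cluster.x-k8s.io/labels-from-machine"

// applyMachineLabels mutates nodeLabels in place and returns the new value of
// the tracking annotation. prevAnnotation is the annotation value written by
// the previous reconcile (empty on the first reconcile).
func applyMachineLabels(nodeLabels, machineLabels map[string]string, prevAnnotation string) string {
	// Keys the Machine set on the previous reconcile.
	previous := []string{}
	if prevAnnotation != "" {
		previous = strings.Split(prevAnnotation, ",")
	}

	// Add or update every label currently requested by the Machine.
	current := make([]string, 0, len(machineLabels))
	for k, v := range machineLabels {
		nodeLabels[k] = v
		current = append(current, k)
	}

	// Delete labels the Machine set previously but no longer requests;
	// labels owned by other actors are never touched.
	for _, k := range previous {
		if _, ok := machineLabels[k]; !ok {
			delete(nodeLabels, k)
		}
	}

	sort.Strings(current) // deterministic output for this example only
	return strings.Join(current, ",")
}

func main() {
	nodeLabels := map[string]string{
		"user-label": "keep-me",               // not managed by CAPI, must survive
		"stale":      "from-old-machine-spec", // set by the Machine on a previous reconcile
	}
	next := applyMachineLabels(nodeLabels, map[string]string{"fresh": "from-machine"}, "stale")
	fmt.Println(nodeLabels, labelsFromMachineAnnotation+"="+next)
	// Prints: map[fresh:from-machine user-label:keep-me] cluster.x-k8s.io/labels-from-machine=fresh
}

Running the same flow with an empty machineLabels map reproduces the "Delete a label previously set from machines" test case above: the tracked label is removed from the node and the annotation value collapses to the empty string.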