From 0a100002ae881478ef21fb2838bfa99aef700a38 Mon Sep 17 00:00:00 2001 From: Hiromu Asahina Date: Fri, 7 Apr 2023 22:27:41 +0900 Subject: [PATCH] Add Taint during rolling update --- api/v1beta1/common_types.go | 8 + .../controllers/machine/machine_controller.go | 16 ++ .../machine/machine_controller_noderef.go | 67 ++++- .../machine_controller_noderef_test.go | 230 +++++++++++++++++- internal/test/builder/builders.go | 1 + internal/util/taints/taints.go | 10 + util/util.go | 131 +++++++++- util/util_test.go | 151 ++++++++++++ 8 files changed, 609 insertions(+), 5 deletions(-) diff --git a/api/v1beta1/common_types.go b/api/v1beta1/common_types.go index b25985f1eccb..1834d6e30804 100644 --- a/api/v1beta1/common_types.go +++ b/api/v1beta1/common_types.go @@ -228,6 +228,14 @@ const ( MachineSetPreflightCheckControlPlaneIsStable MachineSetPreflightCheck = "ControlPlaneIsStable" ) +// NodeOutdatedRevisionTaint can be added to Nodes during rolling updates, in general triggered by updating a MachineDeployment. +// This taint is used to prevent unnecessary pod churn, i.e., as the first node is drained, pods previously running on +// that node are scheduled onto nodes that have yet to be replaced, but will be torn down soon. +var NodeOutdatedRevisionTaint = corev1.Taint{ + Key: "node.cluster.x-k8s.io/outdated-revision", + Effect: corev1.TaintEffectPreferNoSchedule, +} + // NodeUninitializedTaint can be added to Nodes at creation by the bootstrap provider, e.g. the // KubeadmBootstrap provider will add the taint. // This taint is used to prevent workloads to be scheduled on Nodes before the node is initialized by Cluster API. diff --git a/internal/controllers/machine/machine_controller.go b/internal/controllers/machine/machine_controller.go index f720438ee317..fb2668c5632a 100644 --- a/internal/controllers/machine/machine_controller.go +++ b/internal/controllers/machine/machine_controller.go @@ -100,6 +100,14 @@ func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, opt if err != nil { return err } + msToMachines, err := util.MachineSetToObjectsMapper(mgr.GetClient(), &clusterv1.MachineList{}, mgr.GetScheme()) + if err != nil { + return err + } + mdToMachines, err := util.MachineDeploymentToObjectsMapper(mgr.GetClient(), &clusterv1.MachineList{}, mgr.GetScheme()) + if err != nil { + return err + } if r.nodeDeletionRetryTimeout.Nanoseconds() == 0 { r.nodeDeletionRetryTimeout = 10 * time.Second @@ -122,6 +130,14 @@ func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, opt predicates.ResourceHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue), ), )). + Watches( + &clusterv1.MachineSet{}, + handler.EnqueueRequestsFromMapFunc(msToMachines), + ). + Watches( + &clusterv1.MachineDeployment{}, + handler.EnqueueRequestsFromMapFunc(mdToMachines), + ).
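The two Watches registered above make the Machine controller re-reconcile its Machines whenever an owning MachineSet or MachineDeployment changes (for example when a rollout bumps the revision annotation), so the outdated-revision taint is applied or cleared promptly. Below is a minimal sketch of what the mdToMachines mapper amounts to for Machines; the function name is hypothetical and the snippet assumes the imports already present in machine_controller.go, while the real, generic implementation is MachineDeploymentToObjectsMapper in util/util.go later in this patch.

func mdToMachineRequestsSketch(ctx context.Context, c client.Client, md *clusterv1.MachineDeployment) []ctrl.Request {
	// List every Machine in the same namespace that carries the MachineDeployment's name label.
	machines := &clusterv1.MachineList{}
	if err := c.List(ctx, machines,
		client.InNamespace(md.Namespace),
		client.MatchingLabels{clusterv1.MachineDeploymentNameLabel: md.Name},
	); err != nil {
		return nil
	}
	// Enqueue one reconcile request per Machine so reconcileNode can re-evaluate the taint.
	requests := make([]ctrl.Request, 0, len(machines.Items))
	for i := range machines.Items {
		requests = append(requests, ctrl.Request{
			NamespacedName: client.ObjectKeyFromObject(&machines.Items[i]),
		})
	}
	return requests
}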
Build(r) if err != nil { return errors.Wrap(err, "failed setting up with a controller manager") diff --git a/internal/controllers/machine/machine_controller_noderef.go b/internal/controllers/machine/machine_controller_noderef.go index 975c50742385..719e7d41138e 100644 --- a/internal/controllers/machine/machine_controller_noderef.go +++ b/internal/controllers/machine/machine_controller_noderef.go @@ -25,12 +25,14 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/api/v1beta1/index" + "sigs.k8s.io/cluster-api/internal/controllers/machinedeployment/mdutil" "sigs.k8s.io/cluster-api/internal/util/taints" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" @@ -133,7 +135,7 @@ func (r *Reconciler) reconcileNode(ctx context.Context, s *scope) (ctrl.Result, _, nodeHadInterruptibleLabel := node.Labels[clusterv1.InterruptibleLabel] // Reconcile node taints - if err := r.patchNode(ctx, remoteClient, node, nodeLabels, nodeAnnotations); err != nil { + if err := r.patchNode(ctx, remoteClient, node, nodeLabels, nodeAnnotations, machine); err != nil { return ctrl.Result{}, errors.Wrapf(err, "failed to reconcile Node %s", klog.KObj(node)) } if !nodeHadInterruptibleLabel && interruptible { @@ -255,7 +257,7 @@ func (r *Reconciler) getNode(ctx context.Context, c client.Reader, providerID st // PatchNode is required to workaround an issue on Node.Status.Address which is incorrectly annotated as patchStrategy=merge // and this causes SSA patch to fail in case there are two addresses with the same key https://github.com/kubernetes-sigs/cluster-api/issues/8417 -func (r *Reconciler) patchNode(ctx context.Context, remoteClient client.Client, node *corev1.Node, newLabels, newAnnotations map[string]string) error { +func (r *Reconciler) patchNode(ctx context.Context, remoteClient client.Client, node *corev1.Node, newLabels, newAnnotations map[string]string, m *clusterv1.Machine) error { newNode := node.DeepCopy() // Adds the annotations CAPI sets on the node. @@ -292,9 +294,70 @@ func (r *Reconciler) patchNode(ctx context.Context, remoteClient client.Client, // Drop the NodeUninitializedTaint taint on the node given that we are reconciling labels. 
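// Note: just below, the patch extends this pattern to the new NodeOutdatedRevisionTaint: shouldNodeHaveOutdatedTaint compares the owning MachineSet's revision annotation against its MachineDeployment's, and the taint is ensured while the MachineSet is behind and removed once it is current.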
hasTaintChanges := taints.RemoveNodeTaint(newNode, clusterv1.NodeUninitializedTaint) + // Set Taint to a node in an old MachineSet and unset Taint from a node in a new MachineSet + isOutdated, err := shouldNodeHaveOutdatedTaint(ctx, r.Client, m) + if err != nil { + return errors.Wrapf(err, "failed to check if Node %s is outdated", klog.KRef("", node.Name)) + } + if isOutdated { + hasTaintChanges = taints.EnsureNodeTaint(newNode, clusterv1.NodeOutdatedRevisionTaint) || hasTaintChanges + } else { + hasTaintChanges = taints.RemoveNodeTaint(newNode, clusterv1.NodeOutdatedRevisionTaint) || hasTaintChanges + } + if !hasAnnotationChanges && !hasLabelChanges && !hasTaintChanges { return nil } return remoteClient.Patch(ctx, newNode, client.StrategicMergeFrom(node)) } + +func shouldNodeHaveOutdatedTaint(ctx context.Context, c client.Client, m *clusterv1.Machine) (bool, error) { + if _, hasLabel := m.Labels[clusterv1.MachineDeploymentNameLabel]; !hasLabel { + return false, nil + } + + // Resolve the MachineSet name via owner references because the label value + // could also be a hash. + objKey, err := getOwnerMachineSetObjectKey(m.ObjectMeta) + if err != nil { + return false, err + } + ms := &clusterv1.MachineSet{} + if err := c.Get(ctx, *objKey, ms); err != nil { + return false, err + } + md := &clusterv1.MachineDeployment{} + objKey = &client.ObjectKey{ + Namespace: m.ObjectMeta.Namespace, + Name: m.Labels[clusterv1.MachineDeploymentNameLabel], + } + if err := c.Get(ctx, *objKey, md); err != nil { + return false, err + } + msRev, err := mdutil.Revision(ms) + if err != nil { + return false, err + } + mdRev, err := mdutil.Revision(md) + if err != nil { + return false, err + } + if msRev < mdRev { + return true, nil + } + return false, nil +} + +func getOwnerMachineSetObjectKey(obj metav1.ObjectMeta) (*client.ObjectKey, error) { + for _, ref := range obj.GetOwnerReferences() { + gv, err := schema.ParseGroupVersion(ref.APIVersion) + if err != nil { + return nil, err + } + if ref.Kind == "MachineSet" && gv.Group == clusterv1.GroupVersion.Group { + return &client.ObjectKey{Namespace: obj.Namespace, Name: ref.Name}, nil + } + } + return nil, errors.Errorf("failed to find MachineSet owner reference for Machine %s", klog.KRef(obj.GetNamespace(), obj.GetName())) +} diff --git a/internal/controllers/machine/machine_controller_noderef_test.go b/internal/controllers/machine/machine_controller_noderef_test.go index 70a9d0853f75..a805e57201aa 100644 --- a/internal/controllers/machine/machine_controller_noderef_test.go +++ b/internal/controllers/machine/machine_controller_noderef_test.go @@ -515,6 +515,8 @@ func TestGetManagedLabels(t *testing.T) { } func TestPatchNode(t *testing.T) { + clusterName := "test-cluster" + testCases := []struct { name string oldNode *corev1.Node @@ -523,6 +525,9 @@ func TestPatchNode(t *testing.T) { expectedLabels map[string]string expectedAnnotations map[string]string expectedTaints []corev1.Taint + machine *clusterv1.Machine + ms *clusterv1.MachineSet + md *clusterv1.MachineDeployment }{ { name: "Check that patch works even if there are Status.Addresses with the same key", @@ -551,6 +556,9 @@ func TestPatchNode(t *testing.T) { expectedTaints: []corev1.Taint{ {Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server }, + machine: newFakeMachine(metav1.NamespaceDefault, clusterName), + ms: newFakeMachineSet(metav1.NamespaceDefault, clusterName), + md: newFakeMachineDeployment(metav1.NamespaceDefault, clusterName), }, // Labels (CAPI owns a subset of 
labels, everything else should be preserved) { @@ -569,6 +577,9 @@ func TestPatchNode(t *testing.T) { expectedTaints: []corev1.Taint{ {Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server }, + machine: newFakeMachine(metav1.NamespaceDefault, clusterName), + ms: newFakeMachineSet(metav1.NamespaceDefault, clusterName), + md: newFakeMachineDeployment(metav1.NamespaceDefault, clusterName), }, { name: "Add label must preserve existing labels", @@ -593,6 +604,9 @@ func TestPatchNode(t *testing.T) { expectedTaints: []corev1.Taint{ {Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server }, + machine: newFakeMachine(metav1.NamespaceDefault, clusterName), + ms: newFakeMachineSet(metav1.NamespaceDefault, clusterName), + md: newFakeMachineDeployment(metav1.NamespaceDefault, clusterName), }, { name: "CAPI takes ownership of existing labels if they are set from machines", @@ -616,6 +630,9 @@ func TestPatchNode(t *testing.T) { expectedTaints: []corev1.Taint{ {Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server }, + machine: newFakeMachine(metav1.NamespaceDefault, clusterName), + ms: newFakeMachineSet(metav1.NamespaceDefault, clusterName), + md: newFakeMachineDeployment(metav1.NamespaceDefault, clusterName), }, { name: "change a label previously set from machines", @@ -642,6 +659,9 @@ func TestPatchNode(t *testing.T) { expectedTaints: []corev1.Taint{ {Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server }, + machine: newFakeMachine(metav1.NamespaceDefault, clusterName), + ms: newFakeMachineSet(metav1.NamespaceDefault, clusterName), + md: newFakeMachineDeployment(metav1.NamespaceDefault, clusterName), }, { name: "Delete a label previously set from machines", @@ -666,6 +686,9 @@ func TestPatchNode(t *testing.T) { expectedTaints: []corev1.Taint{ {Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server }, + machine: newFakeMachine(metav1.NamespaceDefault, clusterName), + ms: newFakeMachineSet(metav1.NamespaceDefault, clusterName), + md: newFakeMachineDeployment(metav1.NamespaceDefault, clusterName), }, { name: "Label previously set from machine, already removed out of band, annotation should be cleaned up", @@ -683,6 +706,9 @@ func TestPatchNode(t *testing.T) { expectedTaints: []corev1.Taint{ {Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server }, + machine: newFakeMachine(metav1.NamespaceDefault, clusterName), + ms: newFakeMachineSet(metav1.NamespaceDefault, clusterName), + md: newFakeMachineDeployment(metav1.NamespaceDefault, clusterName), }, // Add annotations (CAPI only enforces some annotations and never changes or removes them) { @@ -710,6 +736,9 @@ func TestPatchNode(t *testing.T) { expectedTaints: []corev1.Taint{ {Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server }, + machine: newFakeMachine(metav1.NamespaceDefault, clusterName), + ms: newFakeMachineSet(metav1.NamespaceDefault, clusterName), + md: newFakeMachineDeployment(metav1.NamespaceDefault, clusterName), }, // Taint (CAPI only remove one taint if it exists, other taints should be preserved) { @@ -738,6 +767,137 @@ func TestPatchNode(t *testing.T) { }, {Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server }, + machine: newFakeMachine(metav1.NamespaceDefault, clusterName), + ms: newFakeMachineSet(metav1.NamespaceDefault, clusterName), + md: 
newFakeMachineDeployment(metav1.NamespaceDefault, clusterName), + }, + { + name: "Ensure NodeOutdatedRevisionTaint to be set if a node is associated to an outdated machineset", + oldNode: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("node-%s", util.RandomString(6)), + }, + }, + expectedAnnotations: map[string]string{ + clusterv1.LabelsFromMachineAnnotation: "", + }, + expectedTaints: []corev1.Taint{ + {Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server + clusterv1.NodeOutdatedRevisionTaint, + }, + machine: &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("ma-%s", util.RandomString(6)), + Namespace: metav1.NamespaceDefault, + Labels: map[string]string{ + clusterv1.MachineSetNameLabel: "test-ms-outdated", + clusterv1.MachineDeploymentNameLabel: "test-md-outdated", + }, + OwnerReferences: []metav1.OwnerReference{{ + Kind: "MachineSet", + Name: "test-ms-outdated", + APIVersion: clusterv1.GroupVersion.String(), + UID: "uid", + }}, + }, + Spec: newFakeMachineSpec(metav1.NamespaceDefault, clusterName), + }, + ms: &clusterv1.MachineSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ms-outdated", + Namespace: metav1.NamespaceDefault, + Annotations: map[string]string{ + clusterv1.RevisionAnnotation: "1", + }, + }, + Spec: clusterv1.MachineSetSpec{ + ClusterName: clusterName, + Template: clusterv1.MachineTemplateSpec{ + Spec: newFakeMachineSpec(metav1.NamespaceDefault, clusterName), + }, + }, + }, + md: &clusterv1.MachineDeployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-md-outdated", + Namespace: metav1.NamespaceDefault, + Annotations: map[string]string{ + clusterv1.RevisionAnnotation: "2", + }, + }, + Spec: clusterv1.MachineDeploymentSpec{ + ClusterName: clusterName, + Template: clusterv1.MachineTemplateSpec{ + Spec: newFakeMachineSpec(metav1.NamespaceDefault, clusterName), + }, + }, + }, + }, + { + name: "Removes NodeOutdatedRevisionTaint if a node is associated to a non-outdated machineset", + oldNode: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("node-%s", util.RandomString(6)), + }, + Spec: corev1.NodeSpec{ + Taints: []corev1.Taint{ + clusterv1.NodeOutdatedRevisionTaint, + }, + }, + }, + expectedAnnotations: map[string]string{ + clusterv1.LabelsFromMachineAnnotation: "", + }, + expectedTaints: []corev1.Taint{ + {Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server + }, + machine: &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("ma-%s", util.RandomString(6)), + Namespace: metav1.NamespaceDefault, + Labels: map[string]string{ + clusterv1.MachineSetNameLabel: "test-ms-not-outdated", + clusterv1.MachineDeploymentNameLabel: "test-md-not-outdated", + }, + OwnerReferences: []metav1.OwnerReference{{ + Kind: "MachineSet", + Name: "test-ms-not-outdated", + APIVersion: clusterv1.GroupVersion.String(), + UID: "uid", + }}, + }, + Spec: newFakeMachineSpec(metav1.NamespaceDefault, clusterName), + }, + ms: &clusterv1.MachineSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ms-not-outdated", + Namespace: metav1.NamespaceDefault, + Annotations: map[string]string{ + clusterv1.RevisionAnnotation: "3", + }, + }, + Spec: clusterv1.MachineSetSpec{ + ClusterName: clusterName, + Template: clusterv1.MachineTemplateSpec{ + Spec: newFakeMachineSpec(metav1.NamespaceDefault, clusterName), + }, + }, + }, + md: &clusterv1.MachineDeployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-md-not-outdated", + Namespace: metav1.NamespaceDefault, + 
Annotations: map[string]string{ + clusterv1.RevisionAnnotation: "2", + }, + }, + Spec: clusterv1.MachineDeploymentSpec{ + ClusterName: clusterName, + Template: clusterv1.MachineTemplateSpec{ + Spec: newFakeMachineSpec(metav1.NamespaceDefault, clusterName), + }, + }, + }, }, } @@ -749,13 +909,19 @@ func TestPatchNode(t *testing.T) { t.Run(tc.name, func(t *testing.T) { g := NewWithT(t) oldNode := tc.oldNode.DeepCopy() + machine := tc.machine.DeepCopy() + ms := tc.ms.DeepCopy() + md := tc.md.DeepCopy() g.Expect(env.Create(ctx, oldNode)).To(Succeed()) + g.Expect(env.Create(ctx, machine)).To(Succeed()) + g.Expect(env.Create(ctx, ms)).To(Succeed()) + g.Expect(env.Create(ctx, md)).To(Succeed()) t.Cleanup(func() { - _ = env.Cleanup(ctx, oldNode) + _ = env.Cleanup(ctx, oldNode, machine, ms, md) }) - err := r.patchNode(ctx, env, oldNode, tc.newLabels, tc.newAnnotations) + err := r.patchNode(ctx, env, oldNode, tc.newLabels, tc.newAnnotations, tc.machine) g.Expect(err).ToNot(HaveOccurred()) g.Eventually(func(g Gomega) { @@ -770,3 +936,63 @@ func TestPatchNode(t *testing.T) { }) } } + +func newFakeMachineSpec(namespace, clusterName string) clusterv1.MachineSpec { + return clusterv1.MachineSpec{ + ClusterName: clusterName, + Bootstrap: clusterv1.Bootstrap{ + ConfigRef: &corev1.ObjectReference{ + APIVersion: "bootstrap.cluster.x-k8s.io/v1alpha3", + Kind: "KubeadmConfigTemplate", + Name: fmt.Sprintf("%s-md-0", clusterName), + Namespace: namespace, + }, + }, + InfrastructureRef: corev1.ObjectReference{ + APIVersion: "infrastructure.cluster.x-k8s.io/v1alpha3", + Kind: "FakeMachineTemplate", + Name: fmt.Sprintf("%s-md-0", clusterName), + Namespace: namespace, + }, + } +} + +func newFakeMachine(namespace, clusterName string) *clusterv1.Machine { + return &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("ma-%s", util.RandomString(6)), + Namespace: namespace, + }, + Spec: newFakeMachineSpec(namespace, clusterName), + } +} + +func newFakeMachineSet(namespace, clusterName string) *clusterv1.MachineSet { + return &clusterv1.MachineSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("ms-%s", util.RandomString(6)), + Namespace: namespace, + }, + Spec: clusterv1.MachineSetSpec{ + ClusterName: clusterName, + Template: clusterv1.MachineTemplateSpec{ + Spec: newFakeMachineSpec(namespace, clusterName), + }, + }, + } +} + +func newFakeMachineDeployment(namespace, clusterName string) *clusterv1.MachineDeployment { + return &clusterv1.MachineDeployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("md-%s", util.RandomString(6)), + Namespace: namespace, + }, + Spec: clusterv1.MachineDeploymentSpec{ + ClusterName: clusterName, + Template: clusterv1.MachineTemplateSpec{ + Spec: newFakeMachineSpec(namespace, clusterName), + }, + }, + } +} diff --git a/internal/test/builder/builders.go b/internal/test/builder/builders.go index 0b9cb6567e6b..b2a14f77c5bd 100644 --- a/internal/test/builder/builders.go +++ b/internal/test/builder/builders.go @@ -1776,6 +1776,7 @@ func (m *MachineSetBuilder) Build() *clusterv1.MachineSet { }, } obj.Spec.ClusterName = m.clusterName + obj.Spec.Template.Spec.ClusterName = m.clusterName obj.Spec.Replicas = m.replicas if m.bootstrapTemplate != nil { obj.Spec.Template.Spec.Bootstrap.ConfigRef = objToRef(m.bootstrapTemplate) diff --git a/internal/util/taints/taints.go b/internal/util/taints/taints.go index 0e88cfdb046a..3bda0e5ec4b9 100644 --- a/internal/util/taints/taints.go +++ b/internal/util/taints/taints.go @@ -46,3 +46,13 @@ func HasTaint(taints []corev1.Taint, 
targetTaint corev1.Taint) bool { } return false } + +// EnsureNodeTaint makes sure the node has the Taint. +// It returns true if the taints are modified, false otherwise. +func EnsureNodeTaint(node *corev1.Node, taint corev1.Taint) bool { + if !HasTaint(node.Spec.Taints, taint) { + node.Spec.Taints = append(node.Spec.Taints, taint) + return true + } + return false +} diff --git a/util/util.go b/util/util.go index d72947ecfd89..1648cb3d1256 100644 --- a/util/util.go +++ b/util/util.go @@ -48,6 +48,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util/annotations" "sigs.k8s.io/cluster-api/util/contract" + "sigs.k8s.io/cluster-api/util/labels/format" ) const ( @@ -486,7 +487,7 @@ func ClusterToTypedObjectsMapper(c client.Client, ro client.ObjectList, scheme * } objectList, ok := obj.(client.ObjectList) if !ok { - return nil, errors.Errorf("expected objject to be a client.ObjectList, is actually %T", obj) + return nil, errors.Errorf("expected object to be a client.ObjectList, is actually %T", obj) } isNamespaced, err := isAPINamespaced(gvk, c.RESTMapper()) @@ -533,6 +534,134 @@ func ClusterToTypedObjectsMapper(c client.Client, ro client.ObjectList, scheme * }, nil } +// MachineDeploymentToObjectsMapper returns a mapper function that gets a machinedeployment +// and lists all objects for the object passed in and returns a list of requests. +// NB: The objects are required to have `clusterv1.MachineDeploymentNameLabel` applied. +func MachineDeploymentToObjectsMapper(c client.Client, ro client.ObjectList, scheme *runtime.Scheme) (handler.MapFunc, error) { + gvk, err := apiutil.GVKForObject(ro, scheme) + if err != nil { + return nil, err + } + + // Note: we create the typed ObjectList once here, so we don't have to use + // reflection in every execution of the actual event handler. + obj, err := scheme.New(gvk) + if err != nil { + return nil, errors.Wrapf(err, "failed to construct object of type %s", gvk) + } + objectList, ok := obj.(client.ObjectList) + if !ok { + return nil, errors.Errorf("expected object to be a client.ObjectList, is actually %T", obj) + } + + isNamespaced, err := isAPINamespaced(gvk, c.RESTMapper()) + if err != nil { + return nil, err + } + + return func(ctx context.Context, o client.Object) []ctrl.Request { + md, ok := o.(*clusterv1.MachineDeployment) + if !ok { + return nil + } + + listOpts := []client.ListOption{ + client.MatchingLabels{ + clusterv1.MachineDeploymentNameLabel: md.Name, + }, + } + + if isNamespaced { + listOpts = append(listOpts, client.InNamespace(md.Namespace)) + } + + objectList = objectList.DeepCopyObject().(client.ObjectList) + if err := c.List(ctx, objectList, listOpts...); err != nil { + return nil + } + + objects, err := meta.ExtractList(objectList) + if err != nil { + return nil + } + + results := []ctrl.Request{} + for _, obj := range objects { + // Note: We don't check if the type cast succeeds as all items in an client.ObjectList + // are client.Objects. + o := obj.(client.Object) + results = append(results, ctrl.Request{ + NamespacedName: client.ObjectKey{Namespace: o.GetNamespace(), Name: o.GetName()}, + }) + } + return results + }, nil +} + +// MachineSetToObjectsMapper returns a mapper function that gets a machineset +// and lists all objects for the object passed in and returns a list of requests. +// NB: The objects are required to have `clusterv1.MachineSetNameLabel` applied. 
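+// Note: for MachineSets with long names the label value is a hash rather than the raw name, which is why the mapper below matches on format.MustFormatValue(ms.Name); this is exercised by the "hashed in the label" case in TestMachineSetToObjectsMapper.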
+func MachineSetToObjectsMapper(c client.Client, ro client.ObjectList, scheme *runtime.Scheme) (handler.MapFunc, error) { + gvk, err := apiutil.GVKForObject(ro, scheme) + if err != nil { + return nil, err + } + + // Note: we create the typed ObjectList once here, so we don't have to use + // reflection in every execution of the actual event handler. + obj, err := scheme.New(gvk) + if err != nil { + return nil, errors.Wrapf(err, "failed to construct object of type %s", gvk) + } + objectList, ok := obj.(client.ObjectList) + if !ok { + return nil, errors.Errorf("expected object to be a client.ObjectList, is actually %T", obj) + } + + isNamespaced, err := isAPINamespaced(gvk, c.RESTMapper()) + if err != nil { + return nil, err + } + + return func(ctx context.Context, o client.Object) []ctrl.Request { + ms, ok := o.(*clusterv1.MachineSet) + if !ok { + return nil + } + + listOpts := []client.ListOption{ + client.MatchingLabels{ + clusterv1.MachineSetNameLabel: format.MustFormatValue(ms.Name), + }, + } + + if isNamespaced { + listOpts = append(listOpts, client.InNamespace(ms.Namespace)) + } + + objectList = objectList.DeepCopyObject().(client.ObjectList) + if err := c.List(ctx, objectList, listOpts...); err != nil { + return nil + } + + objects, err := meta.ExtractList(objectList) + if err != nil { + return nil + } + + results := []ctrl.Request{} + for _, obj := range objects { + // Note: We don't check if the type cast succeeds as all items in an client.ObjectList + // are client.Objects. + o := obj.(client.Object) + results = append(results, ctrl.Request{ + NamespacedName: client.ObjectKey{Namespace: o.GetNamespace(), Name: o.GetName()}, + }) + } + return results + }, nil +} + // isAPINamespaced detects if a GroupVersionKind is namespaced. func isAPINamespaced(gk schema.GroupVersionKind, restmapper meta.RESTMapper) (bool, error) { restMapping, err := restmapper.RESTMapping(schema.GroupKind{Group: gk.Group, Kind: gk.Kind}) diff --git a/util/util_test.go b/util/util_test.go index f37e60284ebd..cf2616e97ed7 100644 --- a/util/util_test.go +++ b/util/util_test.go @@ -36,6 +36,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/util/labels/format" ) func TestMachineToInfrastructureMapFunc(t *testing.T) { @@ -745,6 +746,156 @@ func TestClusterToObjectsMapper(t *testing.T) { } } +func TestMachineDeploymentToObjectsMapper(t *testing.T) { + g := NewWithT(t) + + machineDeployment := &clusterv1.MachineDeployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster-md-0", + }, + } + + table := []struct { + name string + objects []client.Object + output []ctrl.Request + expectError bool + }{ + { + name: "should return a list of requests with labelled machines", + objects: []client.Object{ + &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine1", + Labels: map[string]string{ + clusterv1.MachineDeploymentNameLabel: machineDeployment.GetName(), + }, + }, + }, + &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine2", + Labels: map[string]string{ + clusterv1.MachineDeploymentNameLabel: machineDeployment.GetName(), + }, + }, + }, + }, + output: []ctrl.Request{ + {NamespacedName: client.ObjectKey{Name: "machine1"}}, + {NamespacedName: client.ObjectKey{Name: "machine2"}}, + }, + }, + } + + for _, tc := range table { + tc.objects = append(tc.objects, machineDeployment) + + scheme := runtime.NewScheme() + _ = clusterv1.AddToScheme(scheme) + + restMapper := 
meta.NewDefaultRESTMapper([]schema.GroupVersion{clusterv1.GroupVersion}) + + // Add tc.input gvk to the restMapper. + gvk, err := apiutil.GVKForObject(&clusterv1.MachineList{}, scheme) + g.Expect(err).ToNot(HaveOccurred()) + restMapper.Add(gvk, meta.RESTScopeNamespace) + + client := fake.NewClientBuilder().WithObjects(tc.objects...).WithRESTMapper(restMapper).Build() + f, err := MachineDeploymentToObjectsMapper(client, &clusterv1.MachineList{}, scheme) + g.Expect(err != nil, err).To(Equal(tc.expectError)) + g.Expect(f(ctx, machineDeployment)).To(ConsistOf(tc.output)) + } +} + +func TestMachineSetToObjectsMapper(t *testing.T) { + g := NewWithT(t) + + table := []struct { + name string + machineSet *clusterv1.MachineSet + objects []client.Object + output []ctrl.Request + expectError bool + }{ + { + name: "should return a list of requests with labelled machines", + machineSet: &clusterv1.MachineSet{ObjectMeta: metav1.ObjectMeta{ + Name: "cluster-ms-0", + }}, + objects: []client.Object{ + &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine1", + Labels: map[string]string{ + clusterv1.MachineSetNameLabel: format.MustFormatValue("cluster-ms-0"), + }, + }, + }, + &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine2", + Labels: map[string]string{ + clusterv1.MachineSetNameLabel: format.MustFormatValue("cluster-ms-0"), + }, + }, + }, + }, + output: []ctrl.Request{ + {NamespacedName: client.ObjectKey{Name: "machine1"}}, + {NamespacedName: client.ObjectKey{Name: "machine2"}}, + }, + }, + { + name: "should return a list of requests with labelled machines when the machineset name is hashed in the label", + machineSet: &clusterv1.MachineSet{ObjectMeta: metav1.ObjectMeta{ + Name: "cluster-ms-0-looooooooooooooooooooooooooooooooooooooooooooong-name", + }}, + objects: []client.Object{ + &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine1", + Labels: map[string]string{ + clusterv1.MachineSetNameLabel: format.MustFormatValue("cluster-ms-0-looooooooooooooooooooooooooooooooooooooooooooong-name"), + }, + }, + }, + &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine2", + Labels: map[string]string{ + clusterv1.MachineSetNameLabel: format.MustFormatValue("cluster-ms-0-looooooooooooooooooooooooooooooooooooooooooooong-name"), + }, + }, + }, + }, + output: []ctrl.Request{ + {NamespacedName: client.ObjectKey{Name: "machine1"}}, + {NamespacedName: client.ObjectKey{Name: "machine2"}}, + }, + }, + } + + for _, tc := range table { + tc.objects = append(tc.objects, tc.machineSet) + + scheme := runtime.NewScheme() + _ = clusterv1.AddToScheme(scheme) + + restMapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{clusterv1.GroupVersion}) + + // Add tc.input gvk to the restMapper. + gvk, err := apiutil.GVKForObject(&clusterv1.MachineList{}, scheme) + g.Expect(err).ToNot(HaveOccurred()) + restMapper.Add(gvk, meta.RESTScopeNamespace) + + client := fake.NewClientBuilder().WithObjects(tc.objects...).WithRESTMapper(restMapper).Build() + f, err := MachineSetToObjectsMapper(client, &clusterv1.MachineList{}, scheme) + g.Expect(err != nil, err).To(Equal(tc.expectError)) + g.Expect(f(ctx, tc.machineSet)).To(ConsistOf(tc.output)) + } +} + func TestOrdinalize(t *testing.T) { tests := []struct { input int