From c98092e3a4ddcfe613349e4b298582b8dfc4be30 Mon Sep 17 00:00:00 2001 From: Christian Schlotter Date: Thu, 29 Feb 2024 14:14:52 +0100 Subject: [PATCH] review fixes --- .../controllers/machine/machine_controller.go | 8 + .../machine/machine_controller_noderef.go | 45 ++++-- .../machine_controller_noderef_test.go | 12 ++ internal/test/builder/builders.go | 1 + test/e2e/data/test-extension/deployment.yaml | 138 ------------------ util/util.go | 102 +++++++++++-- util/util_test.go | 109 ++++++++++++-- 7 files changed, 244 insertions(+), 171 deletions(-) delete mode 100644 test/e2e/data/test-extension/deployment.yaml diff --git a/internal/controllers/machine/machine_controller.go b/internal/controllers/machine/machine_controller.go index cbe464a26e42..f4edc6c0531d 100644 --- a/internal/controllers/machine/machine_controller.go +++ b/internal/controllers/machine/machine_controller.go @@ -104,6 +104,10 @@ func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, opt if err != nil { return err } + mdToMachines, err := util.MachineDeploymentToObjectsMapper(mgr.GetClient(), &clusterv1.MachineList{}, mgr.GetScheme()) + if err != nil { + return err + } if r.nodeDeletionRetryTimeout.Nanoseconds() == 0 { r.nodeDeletionRetryTimeout = 10 * time.Second @@ -130,6 +134,10 @@ func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, opt &clusterv1.MachineSet{}, handler.EnqueueRequestsFromMapFunc(msToMachines), ). + Watches( + &clusterv1.MachineDeployment{}, + handler.EnqueueRequestsFromMapFunc(mdToMachines), + ). 
Build(r) if err != nil { return errors.Wrap(err, "failed setting up with a controller manager") diff --git a/internal/controllers/machine/machine_controller_noderef.go b/internal/controllers/machine/machine_controller_noderef.go index 27b2742f574d..e75c04de5ce5 100644 --- a/internal/controllers/machine/machine_controller_noderef.go +++ b/internal/controllers/machine/machine_controller_noderef.go @@ -25,6 +25,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -257,8 +258,6 @@ func (r *Reconciler) getNode(ctx context.Context, c client.Reader, providerID st // PatchNode is required to workaround an issue on Node.Status.Address which is incorrectly annotated as patchStrategy=merge // and this causes SSA patch to fail in case there are two addresses with the same key https://github.com/kubernetes-sigs/cluster-api/issues/8417 func (r *Reconciler) patchNode(ctx context.Context, remoteClient client.Client, node *corev1.Node, newLabels, newAnnotations map[string]string, m *clusterv1.Machine) error { - log := ctrl.LoggerFrom(ctx) - newNode := node.DeepCopy() // Adds the annotations CAPI sets on the node. @@ -298,9 +297,9 @@ func (r *Reconciler) patchNode(ctx context.Context, remoteClient client.Client, // Set Taint to a node in an old machineDeployment and unset Taint from a node in a new machineDeployment // Ignore errors as it's not critical for the reconcile. // To avoid an unnecessary Taint remaining due to the error remove Taint when errors occur. 
- isOutdated, err := isNodeOutdated(ctx, r.Client, m) + isOutdated, err := shouldNodeHaveOutdatedTaint(ctx, r.Client, m) if err != nil { - log.V(2).Info("Failed to check if Node %s is outdated", "err", err, "node name", node.Name) + return errors.Wrapf(err, "failed to check if Node %s is outdated", klog.KRef("", node.Name)) } if isOutdated { hasTaintChanges = taints.EnsureNodeTaint(newNode, clusterv1.NodeOutdatedRevisionTaint) || hasTaintChanges @@ -315,21 +314,30 @@ func (r *Reconciler) patchNode(ctx context.Context, remoteClient client.Client, return remoteClient.Patch(ctx, newNode, client.StrategicMergeFrom(node)) } -func isNodeOutdated(ctx context.Context, c client.Client, m *clusterv1.Machine) (bool, error) { - ms := &clusterv1.MachineSet{} - objKey := client.ObjectKey{ - Namespace: m.ObjectMeta.Namespace, - Name: m.Labels[clusterv1.MachineSetNameLabel], +func shouldNodeHaveOutdatedTaint(ctx context.Context, c client.Client, m *clusterv1.Machine) (bool, error) { + if _, hasLabel := m.Labels[clusterv1.MachineSetNameLabel]; !hasLabel { + return false, nil + } + if _, hasLabel := m.Labels[clusterv1.MachineDeploymentNameLabel]; !hasLabel { + return false, nil + } + + // Resolve the MachineSet name via owner references because the label value + // could also be a hash. 
+ objKey, err := getOwnerMachineSetObjectKey(m.ObjectMeta) + if err != nil { + return false, err } - if err := c.Get(ctx, objKey, ms); err != nil { + ms := &clusterv1.MachineSet{} + if err := c.Get(ctx, *objKey, ms); err != nil { return false, err } md := &clusterv1.MachineDeployment{} - objKey = client.ObjectKey{ + objKey = &client.ObjectKey{ Namespace: m.ObjectMeta.Namespace, Name: m.Labels[clusterv1.MachineDeploymentNameLabel], } - if err := c.Get(ctx, objKey, md); err != nil { + if err := c.Get(ctx, *objKey, md); err != nil { return false, err } msRev, err := mdutil.Revision(ms) @@ -345,3 +353,16 @@ func isNodeOutdated(ctx context.Context, c client.Client, m *clusterv1.Machine) } return false, nil } + +func getOwnerMachineSetObjectKey(obj metav1.ObjectMeta) (*client.ObjectKey, error) { + for _, ref := range obj.GetOwnerReferences() { + gv, err := schema.ParseGroupVersion(ref.APIVersion) + if err != nil { + return nil, err + } + if ref.Kind == "MachineSet" && gv.Group == clusterv1.GroupVersion.Group { + return &client.ObjectKey{Namespace: obj.Namespace, Name: ref.Name}, nil + } + } + return nil, errors.Errorf("failed to find MachineSet owner reference for Machine %s", klog.KRef(obj.GetNamespace(), obj.GetName())) +} diff --git a/internal/controllers/machine/machine_controller_noderef_test.go b/internal/controllers/machine/machine_controller_noderef_test.go index 451389e3cd48..a805e57201aa 100644 --- a/internal/controllers/machine/machine_controller_noderef_test.go +++ b/internal/controllers/machine/machine_controller_noderef_test.go @@ -793,6 +793,12 @@ func TestPatchNode(t *testing.T) { clusterv1.MachineSetNameLabel: "test-ms-outdated", clusterv1.MachineDeploymentNameLabel: "test-md-outdated", }, + OwnerReferences: []metav1.OwnerReference{{ + Kind: "MachineSet", + Name: "test-ms-outdated", + APIVersion: clusterv1.GroupVersion.String(), + UID: "uid", + }}, }, Spec: newFakeMachineSpec(metav1.NamespaceDefault, clusterName), }, @@ -853,6 +859,12 @@ func 
TestPatchNode(t *testing.T) { clusterv1.MachineSetNameLabel: "test-ms-not-outdated", clusterv1.MachineDeploymentNameLabel: "test-md-not-outdated", }, + OwnerReferences: []metav1.OwnerReference{{ + Kind: "MachineSet", + Name: "test-ms-not-outdated", + APIVersion: clusterv1.GroupVersion.String(), + UID: "uid", + }}, }, Spec: newFakeMachineSpec(metav1.NamespaceDefault, clusterName), }, diff --git a/internal/test/builder/builders.go b/internal/test/builder/builders.go index 0b9cb6567e6b..b2a14f77c5bd 100644 --- a/internal/test/builder/builders.go +++ b/internal/test/builder/builders.go @@ -1776,6 +1776,7 @@ func (m *MachineSetBuilder) Build() *clusterv1.MachineSet { }, } obj.Spec.ClusterName = m.clusterName + obj.Spec.Template.Spec.ClusterName = m.clusterName obj.Spec.Replicas = m.replicas if m.bootstrapTemplate != nil { obj.Spec.Template.Spec.Bootstrap.ConfigRef = objToRef(m.bootstrapTemplate) diff --git a/test/e2e/data/test-extension/deployment.yaml b/test/e2e/data/test-extension/deployment.yaml deleted file mode 100644 index 8c4846a53d51..000000000000 --- a/test/e2e/data/test-extension/deployment.yaml +++ /dev/null @@ -1,138 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - labels: - cluster.x-k8s.io/provider: runtime-extension-test - name: test-extension-system ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - cluster.x-k8s.io/provider: runtime-extension-test - name: test-extension-manager - namespace: test-extension-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - cluster.x-k8s.io/provider: runtime-extension-test - name: test-extension-manager-role -rules: -- apiGroups: - - "" - resources: - - configmaps - verbs: - - get - - list - - watch - - patch - - update - - create ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - labels: - cluster.x-k8s.io/provider: runtime-extension-test - name: test-extension-manager-rolebinding -roleRef: - apiGroup: 
rbac.authorization.k8s.io - kind: ClusterRole - name: test-extension-manager-role -subjects: -- kind: ServiceAccount - name: test-extension-manager - namespace: test-extension-system ---- -apiVersion: v1 -kind: Service -metadata: - labels: - cluster.x-k8s.io/provider: runtime-extension-test - name: test-extension-webhook-service - namespace: test-extension-system -spec: - ports: - - port: 443 - targetPort: webhook-server - selector: - app: test-extension-manager - cluster.x-k8s.io/provider: runtime-extension-test ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - cluster.x-k8s.io/provider: runtime-extension-test - name: test-extension-manager - namespace: test-extension-system -spec: - replicas: 1 - selector: - matchLabels: - app: test-extension-manager - cluster.x-k8s.io/provider: runtime-extension-test - template: - metadata: - labels: - app: test-extension-manager - cluster.x-k8s.io/provider: runtime-extension-test - spec: - containers: - - command: - - /manager - image: gcr.io/k8s-staging-cluster-api/test-extension-amd64:dev - imagePullPolicy: IfNotPresent - name: manager - ports: - - containerPort: 9443 - name: webhook-server - protocol: TCP - volumeMounts: - - mountPath: /tmp/k8s-webhook-server/serving-certs - name: cert - readOnly: true - serviceAccountName: test-extension-manager - terminationGracePeriodSeconds: 10 - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/master - - effect: NoSchedule - key: node-role.kubernetes.io/control-plane - volumes: - - name: cert - secret: - secretName: test-extension-webhook-service-cert ---- -apiVersion: cert-manager.io/v1 -kind: Certificate -metadata: - labels: - cluster.x-k8s.io/provider: runtime-extension-test - name: test-extension-serving-cert - namespace: test-extension-system -spec: - dnsNames: - - test-extension-webhook-service.test-extension-system.svc - - test-extension-webhook-service.test-extension-system.svc.cluster.local - - localhost - issuerRef: - kind: Issuer - name: 
test-extension-selfsigned-issuer - secretName: test-extension-webhook-service-cert - subject: - organizations: - - k8s-sig-cluster-lifecycle ---- -apiVersion: cert-manager.io/v1 -kind: Issuer -metadata: - labels: - cluster.x-k8s.io/provider: runtime-extension-test - name: test-extension-selfsigned-issuer - namespace: test-extension-system -spec: - selfSigned: {} diff --git a/util/util.go b/util/util.go index 7860e03e20e5..5dea6f8dfd8b 100644 --- a/util/util.go +++ b/util/util.go @@ -48,6 +48,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util/annotations" "sigs.k8s.io/cluster-api/util/contract" + "sigs.k8s.io/cluster-api/util/labels/format" ) const ( @@ -486,7 +487,7 @@ func ClusterToTypedObjectsMapper(c client.Client, ro client.ObjectList, scheme * } objectList, ok := obj.(client.ObjectList) if !ok { - return nil, errors.Errorf("expected objject to be a client.ObjectList, is actually %T", obj) + return nil, errors.Errorf("expected object to be a client.ObjectList, is actually %T", obj) } isNamespaced, err := isAPINamespaced(gvk, c.RESTMapper()) @@ -533,20 +534,94 @@ func ClusterToTypedObjectsMapper(c client.Client, ro client.ObjectList, scheme * }, nil } -// MachineSetToObjectsMapper returns a mapper function that gets a machineset and lists all objects for the object passed in -// and returns a list of requests. +// MachineDeploymentToObjectsMapper returns a mapper function that gets a machinedeployment +// and lists all objects for the object passed in and returns a list of requests. +func MachineDeploymentToObjectsMapper(c client.Client, ro client.ObjectList, scheme *runtime.Scheme) (handler.MapFunc, error) { + gvk, err := apiutil.GVKForObject(ro, scheme) + if err != nil { + return nil, err + } + + // Note: we create the typed ObjectList once here, so we don't have to use + // reflection in every execution of the actual event handler. 
+ obj, err := scheme.New(gvk) + if err != nil { + return nil, errors.Wrapf(err, "failed to construct object of type %s", gvk) + } + objectList, ok := obj.(client.ObjectList) + if !ok { + return nil, errors.Errorf("expected object to be a client.ObjectList, is actually %T", obj) + } + + isNamespaced, err := isAPINamespaced(gvk, c.RESTMapper()) + if err != nil { + return nil, err + } + + return func(ctx context.Context, o client.Object) []ctrl.Request { + md, ok := o.(*clusterv1.MachineDeployment) + if !ok { + return nil + } + + listOpts := []client.ListOption{ + client.MatchingLabels{ + clusterv1.MachineDeploymentNameLabel: md.Name, + }, + } + + if isNamespaced { + listOpts = append(listOpts, client.InNamespace(md.Namespace)) + } + + objectList = objectList.DeepCopyObject().(client.ObjectList) + if err := c.List(ctx, objectList, listOpts...); err != nil { + return nil + } + + objects, err := meta.ExtractList(objectList) + if err != nil { + return nil + } + + results := []ctrl.Request{} + for _, obj := range objects { + // Note: We don't check if the type cast succeeds as all items in an client.ObjectList + // are client.Objects. + o := obj.(client.Object) + results = append(results, ctrl.Request{ + NamespacedName: client.ObjectKey{Namespace: o.GetNamespace(), Name: o.GetName()}, + }) + } + return results + }, nil +} + +// MachineSetToObjectsMapper returns a mapper function that gets a machineset +// and lists all objects for the object passed in and returns a list of requests. func MachineSetToObjectsMapper(c client.Client, ro client.ObjectList, scheme *runtime.Scheme) (handler.MapFunc, error) { gvk, err := apiutil.GVKForObject(ro, scheme) if err != nil { return nil, err } + // Note: we create the typed ObjectList once here, so we don't have to use + // reflection in every execution of the actual event handler. 
+ obj, err := scheme.New(gvk) + if err != nil { + return nil, errors.Wrapf(err, "failed to construct object of type %s", gvk) + } + objectList, ok := obj.(client.ObjectList) + if !ok { + return nil, errors.Errorf("expected object to be a client.ObjectList, is actually %T", obj) + } + isNamespaced, err := isAPINamespaced(gvk, c.RESTMapper()) if err != nil { return nil, err } - return func(_ context.Context, o client.Object) []ctrl.Request { + return func(ctx context.Context, o client.Object) []ctrl.Request { ms, ok := o.(*clusterv1.MachineSet) if !ok { return nil @@ -554,7 +629,7 @@ func MachineSetToObjectsMapper(c client.Client, ro client.ObjectList, scheme *ru listOpts := []client.ListOption{ client.MatchingLabels{ - clusterv1.MachineSetNameLabel: ms.Name, + clusterv1.MachineSetNameLabel: format.MustFormatValue(ms.Name), }, } @@ -562,16 +637,23 @@ func MachineSetToObjectsMapper(c client.Client, ro client.ObjectList, scheme *ru listOpts = append(listOpts, client.InNamespace(ms.Namespace)) } - list := &unstructured.UnstructuredList{} - list.SetGroupVersionKind(gvk) - if err := c.List(context.TODO(), list, listOpts...); err != nil { + objectList = objectList.DeepCopyObject().(client.ObjectList) + if err := c.List(ctx, objectList, listOpts...); err != nil { + return nil + } + + objects, err := meta.ExtractList(objectList) + if err != nil { return nil } results := []ctrl.Request{} - for _, obj := range list.Items { + for _, obj := range objects { + // Note: We don't check if the type cast succeeds as all items in an client.ObjectList + // are client.Objects. 
+ o := obj.(client.Object) results = append(results, ctrl.Request{ - NamespacedName: client.ObjectKey{Namespace: obj.GetNamespace(), Name: obj.GetName()}, + NamespacedName: client.ObjectKey{Namespace: o.GetNamespace(), Name: o.GetName()}, }) } return results diff --git a/util/util_test.go b/util/util_test.go index bb5ea49d3aff..cf2616e97ed7 100644 --- a/util/util_test.go +++ b/util/util_test.go @@ -36,6 +36,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/util/labels/format" ) func TestMachineToInfrastructureMapFunc(t *testing.T) { @@ -748,28 +749,26 @@ func TestClusterToObjectsMapper(t *testing.T) { func TestMachineDeploymentToObjectsMapper(t *testing.T) { g := NewWithT(t) - cluster := &clusterv1.MachineSet{ + machineDeployment := &clusterv1.MachineDeployment{ ObjectMeta: metav1.ObjectMeta{ - Name: "cluster-md-0-jgbsb-5684b789c9", + Name: "cluster-md-0", }, } table := []struct { name string objects []client.Object - input client.ObjectList output []ctrl.Request expectError bool }{ { - name: "should return a list of requests with labelled machines", - input: &clusterv1.MachineList{}, + name: "should return a list of requests with labelled machines", objects: []client.Object{ &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "machine1", Labels: map[string]string{ - clusterv1.MachineSetNameLabel: "cluster-md-0-jgbsb-5684b789c9", + clusterv1.MachineDeploymentNameLabel: machineDeployment.GetName(), }, }, }, @@ -777,7 +776,7 @@ func TestMachineDeploymentToObjectsMapper(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "machine2", Labels: map[string]string{ - clusterv1.MachineSetNameLabel: "cluster-md-0-jgbsb-5684b789c9", + clusterv1.MachineDeploymentNameLabel: machineDeployment.GetName(), }, }, }, @@ -790,7 +789,7 @@ func TestMachineDeploymentToObjectsMapper(t *testing.T) { } for _, tc := range table { - tc.objects = append(tc.objects, cluster) + tc.objects = 
append(tc.objects, machineDeployment) scheme := runtime.NewScheme() _ = clusterv1.AddToScheme(scheme) @@ -798,14 +797,102 @@ func TestMachineDeploymentToObjectsMapper(t *testing.T) { restMapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{clusterv1.GroupVersion}) // Add tc.input gvk to the restMapper. - gvk, err := apiutil.GVKForObject(tc.input, scheme) + gvk, err := apiutil.GVKForObject(&clusterv1.MachineList{}, scheme) g.Expect(err).ToNot(HaveOccurred()) restMapper.Add(gvk, meta.RESTScopeNamespace) client := fake.NewClientBuilder().WithObjects(tc.objects...).WithRESTMapper(restMapper).Build() - f, err := MachineSetToObjectsMapper(client, tc.input, scheme) + f, err := MachineDeploymentToObjectsMapper(client, &clusterv1.MachineList{}, scheme) g.Expect(err != nil, err).To(Equal(tc.expectError)) - g.Expect(f(ctx, cluster)).To(ConsistOf(tc.output)) + g.Expect(f(ctx, machineDeployment)).To(ConsistOf(tc.output)) + } +} + +func TestMachineSetToObjectsMapper(t *testing.T) { + g := NewWithT(t) + + table := []struct { + name string + machineSet *clusterv1.MachineSet + objects []client.Object + output []ctrl.Request + expectError bool + }{ + { + name: "should return a list of requests with labelled machines", + machineSet: &clusterv1.MachineSet{ObjectMeta: metav1.ObjectMeta{ + Name: "cluster-ms-0", + }}, + objects: []client.Object{ + &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine1", + Labels: map[string]string{ + clusterv1.MachineSetNameLabel: format.MustFormatValue("cluster-ms-0"), + }, + }, + }, + &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine2", + Labels: map[string]string{ + clusterv1.MachineSetNameLabel: format.MustFormatValue("cluster-ms-0"), + }, + }, + }, + }, + output: []ctrl.Request{ + {NamespacedName: client.ObjectKey{Name: "machine1"}}, + {NamespacedName: client.ObjectKey{Name: "machine2"}}, + }, + }, + { + name: "should return a list of requests with labelled machines when the machineset name is hashed in the 
label", + machineSet: &clusterv1.MachineSet{ObjectMeta: metav1.ObjectMeta{ + Name: "cluster-ms-0-looooooooooooooooooooooooooooooooooooooooooooong-name", + }}, + objects: []client.Object{ + &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine1", + Labels: map[string]string{ + clusterv1.MachineSetNameLabel: format.MustFormatValue("cluster-ms-0-looooooooooooooooooooooooooooooooooooooooooooong-name"), + }, + }, + }, + &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine2", + Labels: map[string]string{ + clusterv1.MachineSetNameLabel: format.MustFormatValue("cluster-ms-0-looooooooooooooooooooooooooooooooooooooooooooong-name"), + }, + }, + }, + }, + output: []ctrl.Request{ + {NamespacedName: client.ObjectKey{Name: "machine1"}}, + {NamespacedName: client.ObjectKey{Name: "machine2"}}, + }, + }, + } + + for _, tc := range table { + tc.objects = append(tc.objects, tc.machineSet) + + scheme := runtime.NewScheme() + _ = clusterv1.AddToScheme(scheme) + + restMapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{clusterv1.GroupVersion}) + + // Add tc.input gvk to the restMapper. + gvk, err := apiutil.GVKForObject(&clusterv1.MachineList{}, scheme) + g.Expect(err).ToNot(HaveOccurred()) + restMapper.Add(gvk, meta.RESTScopeNamespace) + + client := fake.NewClientBuilder().WithObjects(tc.objects...).WithRESTMapper(restMapper).Build() + f, err := MachineSetToObjectsMapper(client, &clusterv1.MachineList{}, scheme) + g.Expect(err != nil, err).To(Equal(tc.expectError)) + g.Expect(f(ctx, tc.machineSet)).To(ConsistOf(tc.output)) } }