From 968edf8b80efa4d8e0bb3456746b5aec512bc4a3 Mon Sep 17 00:00:00 2001
From: Stefan Bueringer <buringerst@vmware.com>
Date: Fri, 11 Nov 2022 15:33:52 +0100
Subject: [PATCH] logging: Avoid adding multiple objects to the same logger in
 for loops
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Stefan Büringer buringerst@vmware.com
---
 controlplane/kubeadm/internal/controllers/controller.go  | 2 +-
 internal/controllers/machine/machine_controller.go       | 5 +----
 .../machinedeployment_rollout_ondelete.go                | 4 ++--
 internal/controllers/machineset/machineset_controller.go | 8 +++++---
 internal/controllers/topology/cluster/patches/engine.go  | 6 +++---
 test/extension/handlers/topologymutation/handler.go      | 2 ++
 6 files changed, 14 insertions(+), 13 deletions(-)

diff --git a/controlplane/kubeadm/internal/controllers/controller.go b/controlplane/kubeadm/internal/controllers/controller.go
index 1d9d1df14bf0..a2cb30e99109 100644
--- a/controlplane/kubeadm/internal/controllers/controller.go
+++ b/controlplane/kubeadm/internal/controllers/controller.go
@@ -600,7 +600,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileCertificateExpiries(ctx context
 	}
 
 	for _, m := range machines {
-		log = log.WithValues("Machine", klog.KObj(m))
+		log := log.WithValues("Machine", klog.KObj(m))
 
 		kubeadmConfig, ok := controlPlane.GetKubeadmConfig(m.Name)
 		if !ok {
diff --git a/internal/controllers/machine/machine_controller.go b/internal/controllers/machine/machine_controller.go
index 52cdbd763745..b4f66c0472ee 100644
--- a/internal/controllers/machine/machine_controller.go
+++ b/internal/controllers/machine/machine_controller.go
@@ -253,11 +253,8 @@ func patchMachine(ctx context.Context, patchHelper *patch.Helper, machine *clust
 }
 
 func (r *Reconciler) reconcile(ctx context.Context, cluster *clusterv1.Cluster, m *clusterv1.Machine) (ctrl.Result, error) {
-	log := ctrl.LoggerFrom(ctx)
-
 	if err := r.watchClusterNodes(ctx, cluster); err != nil {
-		log.Error(err, "error watching nodes on target cluster")
-		return ctrl.Result{}, err
+		return ctrl.Result{}, errors.Wrapf(err, "error watching nodes on target cluster")
 	}
 
 	// If the Machine belongs to a cluster, add an owner reference.
diff --git a/internal/controllers/machinedeployment/machinedeployment_rollout_ondelete.go b/internal/controllers/machinedeployment/machinedeployment_rollout_ondelete.go
index 7f3db87750be..55be548ad7b4 100644
--- a/internal/controllers/machinedeployment/machinedeployment_rollout_ondelete.go
+++ b/internal/controllers/machinedeployment/machinedeployment_rollout_ondelete.go
@@ -85,7 +85,7 @@ func (r *Reconciler) reconcileOldMachineSetsOnDelete(ctx context.Context, oldMSs
 	totalReplicas := mdutil.GetReplicaCountForMachineSets(allMSs)
 	scaleDownAmount := totalReplicas - *deployment.Spec.Replicas
 	for _, oldMS := range oldMSs {
-		log = log.WithValues("MachineSet", klog.KObj(oldMS))
+		log := log.WithValues("MachineSet", klog.KObj(oldMS))
 		if oldMS.Spec.Replicas == nil || *oldMS.Spec.Replicas <= 0 {
 			log.V(4).Info("fully scaled down")
 			continue
@@ -138,7 +138,7 @@ func (r *Reconciler) reconcileOldMachineSetsOnDelete(ctx context.Context, oldMSs
 	}
 	log.V(4).Info("Finished reconcile of Old MachineSets to account for deleted machines. Now analyzing if there's more potential to scale down")
 	for _, oldMS := range oldMSs {
-		log = log.WithValues("MachineSet", klog.KObj(oldMS))
+		log := log.WithValues("MachineSet", klog.KObj(oldMS))
 		if scaleDownAmount <= 0 {
 			break
 		}
diff --git a/internal/controllers/machineset/machineset_controller.go b/internal/controllers/machineset/machineset_controller.go
index 88dc22958459..5998452ec922 100644
--- a/internal/controllers/machineset/machineset_controller.go
+++ b/internal/controllers/machineset/machineset_controller.go
@@ -262,7 +262,7 @@ func (r *Reconciler) reconcile(ctx context.Context, cluster *clusterv1.Cluster,
 	filteredMachines := make([]*clusterv1.Machine, 0, len(allMachines.Items))
 	for idx := range allMachines.Items {
 		machine := &allMachines.Items[idx]
-		log = log.WithValues("Machine", klog.KObj(machine))
+		log := log.WithValues("Machine", klog.KObj(machine))
 		if shouldExcludeMachine(machineSet, machine) {
 			continue
 		}
@@ -283,7 +283,7 @@ func (r *Reconciler) reconcile(ctx context.Context, cluster *clusterv1.Cluster,
 
 	var errs []error
 	for _, machine := range filteredMachines {
-		log = log.WithValues("Machine", klog.KObj(machine))
+		log := log.WithValues("Machine", klog.KObj(machine))
 		// filteredMachines contains machines in deleting status to calculate correct status.
 		// skip remediation for those in deleting status.
 		if !machine.DeletionTimestamp.IsZero() {
@@ -369,6 +369,8 @@ func (r *Reconciler) syncReplicas(ctx context.Context, ms *clusterv1.MachineSet,
 		)
 
 		for i := 0; i < diff; i++ {
+			// Create a new logger so the global logger is not modified.
+			log := log
 			machine := r.getNewMachine(ms)
 
 			// Clone and set the infrastructure and bootstrap references.
@@ -665,7 +667,7 @@ func (r *Reconciler) updateStatus(ctx context.Context, cluster *clusterv1.Cluste
 	templateLabel := labels.Set(ms.Spec.Template.Labels).AsSelectorPreValidated()
 
 	for _, machine := range filteredMachines {
-		log = log.WithValues("Machine", klog.KObj(machine))
+		log := log.WithValues("Machine", klog.KObj(machine))
 
 		if templateLabel.Matches(labels.Set(machine.Labels)) {
 			fullyLabeledReplicasCount++
diff --git a/internal/controllers/topology/cluster/patches/engine.go b/internal/controllers/topology/cluster/patches/engine.go
index 0b29481bf8a8..586f37dad6a7 100644
--- a/internal/controllers/topology/cluster/patches/engine.go
+++ b/internal/controllers/topology/cluster/patches/engine.go
@@ -78,7 +78,7 @@ func (e *engine) Apply(ctx context.Context, blueprint *scope.ClusterBlueprint, d
 	// respecting the order in which they are defined.
 	for i := range blueprint.ClusterClass.Spec.Patches {
 		clusterClassPatch := blueprint.ClusterClass.Spec.Patches[i]
-		ctx, log = log.WithValues("patch", clusterClassPatch.Name).Into(ctx)
+		ctx, log := log.WithValues("patch", clusterClassPatch.Name).Into(ctx)
 
 		log.V(5).Infof("Applying patch to templates")
 
@@ -115,7 +115,7 @@ func (e *engine) Apply(ctx context.Context, blueprint *scope.ClusterBlueprint, d
 			continue
 		}
 
-		ctx, log = log.WithValues("patch", clusterClassPatch.Name).Into(ctx)
+		ctx, log := log.WithValues("patch", clusterClassPatch.Name).Into(ctx)
 
 		log.V(5).Infof("Validating topology")
 
@@ -283,7 +283,7 @@ func applyPatchesToRequest(ctx context.Context, req *runtimehooksv1.GeneratePatc
 	log := tlog.LoggerFrom(ctx)
 
 	for _, patch := range resp.Items {
-		log = log.WithValues("uid", patch.UID)
+		log := log.WithValues("uid", patch.UID)
 
 		// Get the request item the patch belongs to.
 		requestItem := getRequestItemByUID(req, patch.UID)
diff --git a/test/extension/handlers/topologymutation/handler.go b/test/extension/handlers/topologymutation/handler.go
index 842b6b5f955d..e2bd7e0600ab 100644
--- a/test/extension/handlers/topologymutation/handler.go
+++ b/test/extension/handlers/topologymutation/handler.go
@@ -81,6 +81,8 @@ func (h *ExtensionHandlers) GeneratePatches(ctx context.Context, req *runtimehoo
 	// easier to read and less error prone than using unstructured or working with raw json/yaml.
 	// IMPORTANT: by unit testing this func/nested func properly, it is possible to prevent unexpected rollouts when patches are modified.
 	topologymutation.WalkTemplates(ctx, h.decoder, req, resp, func(ctx context.Context, obj runtime.Object, variables map[string]apiextensionsv1.JSON, holderRef runtimehooksv1.HolderReference) error {
+		log := ctrl.LoggerFrom(ctx)
+
 		switch obj := obj.(type) {
 		case *infrav1.DockerClusterTemplate:
			if err := patchDockerClusterTemplate(ctx, obj, variables); err != nil {
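
Reviewer note (not part of the patch): logr loggers are immutable values, so `log = log.WithValues("Machine", ...)` inside a loop rebinds the outer variable and stacks one extra "Machine" key/value pair per iteration, also leaking the last one past the loop; `log := log.WithValues(...)` instead shadows the variable per iteration. Below is a minimal, self-contained sketch of both patterns; it uses go-logr's funcr package for a printable logger and is illustrative only, not code from this repository.

// logger_loop_sketch.go — illustrative sketch, assumes go-logr/logr is available.
package main

import (
	"fmt"

	"github.com/go-logr/logr"
	"github.com/go-logr/logr/funcr"
)

// newPrintLogger returns a logr.Logger that prints each log line to stdout.
func newPrintLogger() logr.Logger {
	return funcr.New(func(prefix, args string) {
		fmt.Println(args)
	}, funcr.Options{})
}

func main() {
	machines := []string{"machine-a", "machine-b", "machine-c"}

	// Buggy pattern (what the patch removes): `=` rebinds the outer logger,
	// so each iteration appends another "Machine" key to the same logger.
	// The third line printed carries Machine=machine-a, Machine=machine-b
	// AND Machine=machine-c.
	log := newPrintLogger()
	for _, name := range machines {
		log = log.WithValues("Machine", name)
		log.Info("reconciling")
	}

	// Fixed pattern (what the patch introduces): `:=` declares a new,
	// loop-local logger derived from the unmodified outer one, so every
	// line carries exactly one "Machine" key. The RHS `log` still refers
	// to the outer variable because the new declaration is not in scope
	// until after the statement.
	log = newPrintLogger()
	for _, name := range machines {
		log := log.WithValues("Machine", name)
		log.Info("reconciling")
	}
}

The duplicated keys printed by the first loop are the pollution the commit subject refers to; the bare `log := log` added in syncReplicas serves the same purpose for a logger that is extended with WithValues further down in the loop body.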