🐛 logging: Avoid adding multiple objects to the same logger in for loops #7534

Merged 1 commit on Nov 11, 2022
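Why this is a bug: logr loggers are immutable, and WithValues returns a new logger carrying the extra key/value pairs. Reassigning the loop-outer variable (log = log.WithValues(...)) therefore accumulates one "Machine" (or "MachineSet", "patch", "uid") pair per iteration, so later lines end up logging every object seen so far. Shadowing with := scopes the pair to a single iteration. A minimal standalone sketch of the before/after (not code from this PR; it assumes go-logr/stdr as a concrete sink):

package main

import (
	stdlog "log"
	"os"

	"github.com/go-logr/logr"
	"github.com/go-logr/stdr"
)

// buggy reassigns the outer logger: by the last iteration the log line
// carries Machine=machine-a, Machine=machine-b, and Machine=machine-c.
func buggy(log logr.Logger, machines []string) {
	for _, m := range machines {
		log = log.WithValues("Machine", m)
		log.Info("reconciling")
	}
}

// fixed shadows the logger with :=, so every line carries exactly one
// Machine pair and the outer logger is never modified.
func fixed(log logr.Logger, machines []string) {
	for _, m := range machines {
		log := log.WithValues("Machine", m)
		log.Info("reconciling")
	}
}

func main() {
	log := stdr.New(stdlog.New(os.Stderr, "", 0))
	machines := []string{"machine-a", "machine-b", "machine-c"}
	buggy(log, machines)
	fixed(log, machines)
}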
2 changes: 1 addition & 1 deletion controlplane/kubeadm/internal/controllers/controller.go
@@ -600,7 +600,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileCertificateExpiries(ctx context
 	}
 
 	for _, m := range machines {
-		log = log.WithValues("Machine", klog.KObj(m))
+		log := log.WithValues("Machine", klog.KObj(m))
 
 		kubeadmConfig, ok := controlPlane.GetKubeadmConfig(m.Name)
 		if !ok {
5 changes: 1 addition & 4 deletions internal/controllers/machine/machine_controller.go
@@ -253,11 +253,8 @@ func patchMachine(ctx context.Context, patchHelper *patch.Helper, machine *clust
 }
 
 func (r *Reconciler) reconcile(ctx context.Context, cluster *clusterv1.Cluster, m *clusterv1.Machine) (ctrl.Result, error) {
-	log := ctrl.LoggerFrom(ctx)
-
 	if err := r.watchClusterNodes(ctx, cluster); err != nil {
-		log.Error(err, "error watching nodes on target cluster")
-		return ctrl.Result{}, err
+		return ctrl.Result{}, errors.Wrapf(err, "error watching nodes on target cluster")
 	}
 
 	// If the Machine belongs to a cluster, add an owner reference.
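machine_controller.go also swaps log-then-return for wrap-then-return: logging the error here and then returning it meant the same failure was reported twice, since the caller logs returned reconcile errors as well. A hedged sketch of the resulting pattern, assuming github.com/pkg/errors as used in the diff:

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// watchClusterNodes is a hypothetical stand-in that always fails.
func watchClusterNodes() error {
	return errors.New("connection refused")
}

// reconcile wraps the failure with context exactly once and returns it;
// the caller is the single place that logs.
func reconcile() error {
	if err := watchClusterNodes(); err != nil {
		return errors.Wrapf(err, "error watching nodes on target cluster")
	}
	return nil
}

func main() {
	// Prints: error watching nodes on target cluster: connection refused
	if err := reconcile(); err != nil {
		fmt.Println(err)
	}
}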
@@ -85,7 +85,7 @@ func (r *Reconciler) reconcileOldMachineSetsOnDelete(ctx context.Context, oldMSs
 	totalReplicas := mdutil.GetReplicaCountForMachineSets(allMSs)
 	scaleDownAmount := totalReplicas - *deployment.Spec.Replicas
 	for _, oldMS := range oldMSs {
-		log = log.WithValues("MachineSet", klog.KObj(oldMS))
+		log := log.WithValues("MachineSet", klog.KObj(oldMS))
 		if oldMS.Spec.Replicas == nil || *oldMS.Spec.Replicas <= 0 {
 			log.V(4).Info("fully scaled down")
 			continue
@@ -138,7 +138,7 @@ func (r *Reconciler) reconcileOldMachineSetsOnDelete(ctx context.Context, oldMSs
 	}
 	log.V(4).Info("Finished reconcile of Old MachineSets to account for deleted machines. Now analyzing if there's more potential to scale down")
 	for _, oldMS := range oldMSs {
-		log = log.WithValues("MachineSet", klog.KObj(oldMS))
+		log := log.WithValues("MachineSet", klog.KObj(oldMS))
 		if scaleDownAmount <= 0 {
 			break
 		}
8 changes: 5 additions & 3 deletions internal/controllers/machineset/machineset_controller.go
@@ -262,7 +262,7 @@ func (r *Reconciler) reconcile(ctx context.Context, cluster *clusterv1.Cluster,
 	filteredMachines := make([]*clusterv1.Machine, 0, len(allMachines.Items))
 	for idx := range allMachines.Items {
 		machine := &allMachines.Items[idx]
-		log = log.WithValues("Machine", klog.KObj(machine))
+		log := log.WithValues("Machine", klog.KObj(machine))
 		if shouldExcludeMachine(machineSet, machine) {
 			continue
 		}
@@ -283,7 +283,7 @@ func (r *Reconciler) reconcile(ctx context.Context, cluster *clusterv1.Cluster,

 	var errs []error
 	for _, machine := range filteredMachines {
-		log = log.WithValues("Machine", klog.KObj(machine))
+		log := log.WithValues("Machine", klog.KObj(machine))
 		// filteredMachines contains machines in deleting status to calculate correct status.
 		// skip remediation for those in deleting status.
 		if !machine.DeletionTimestamp.IsZero() {
@@ -369,6 +369,8 @@ func (r *Reconciler) syncReplicas(ctx context.Context, ms *clusterv1.MachineSet,
 	)
 
 	for i := 0; i < diff; i++ {
+		// Create a new logger so the global logger is not modified.
+		log := log
 		machine := r.getNewMachine(ms)
 
 		// Clone and set the infrastructure and bootstrap references.
@@ -665,7 +667,7 @@ func (r *Reconciler) updateStatus(ctx context.Context, cluster *clusterv1.Cluste
 	templateLabel := labels.Set(ms.Spec.Template.Labels).AsSelectorPreValidated()
 
 	for _, machine := range filteredMachines {
-		log = log.WithValues("Machine", klog.KObj(machine))
+		log := log.WithValues("Machine", klog.KObj(machine))
 
 		if templateLabel.Matches(labels.Set(machine.Labels)) {
 			fullyLabeledReplicasCount++
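syncReplicas needs a slightly different fix because values are attached further down the loop body: the loop first takes a plain copy (log := log), and later reassignments inside the body only touch that copy, never the outer logger. A standalone sketch of the idiom (stdr again assumed as the sink; the conditional WithValues stands in for the attachment happening later in the real loop):

package main

import (
	stdlog "log"
	"os"

	"github.com/go-logr/stdr"
)

func main() {
	log := stdr.New(stdlog.New(os.Stderr, "", 0))

	for i := 0; i < 3; i++ {
		// Loop-scoped copy: reassignments below modify only the copy,
		// never the outer logger.
		log := log
		if i%2 == 0 {
			log = log.WithValues("iteration", i)
		}
		log.Info("creating machine")
	}

	// The outer logger still carries no "iteration" pair.
	log.Info("done")
}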
6 changes: 3 additions & 3 deletions internal/controllers/topology/cluster/patches/engine.go
@@ -78,7 +78,7 @@ func (e *engine) Apply(ctx context.Context, blueprint *scope.ClusterBlueprint, d
 	// respecting the order in which they are defined.
 	for i := range blueprint.ClusterClass.Spec.Patches {
 		clusterClassPatch := blueprint.ClusterClass.Spec.Patches[i]
-		ctx, log = log.WithValues("patch", clusterClassPatch.Name).Into(ctx)
+		ctx, log := log.WithValues("patch", clusterClassPatch.Name).Into(ctx)
 
 		log.V(5).Infof("Applying patch to templates")
 
@@ -115,7 +115,7 @@ func (e *engine) Apply(ctx context.Context, blueprint *scope.ClusterBlueprint, d
 			continue
 		}
 
-		ctx, log = log.WithValues("patch", clusterClassPatch.Name).Into(ctx)
+		ctx, log := log.WithValues("patch", clusterClassPatch.Name).Into(ctx)
 
 		log.V(5).Infof("Validating topology")
 
@@ -283,7 +283,7 @@ func applyPatchesToRequest(ctx context.Context, req *runtimehooksv1.GeneratePatc
 	log := tlog.LoggerFrom(ctx)
 
 	for _, patch := range resp.Items {
-		log = log.WithValues("uid", patch.UID)
+		log := log.WithValues("uid", patch.UID)
 
 		// Get the request item the patch belongs to.
 		requestItem := getRequestItemByUID(req, patch.UID)
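engine.go shadows two variables at once because the internal tlog wrapper's Into returns both the logger and an updated context, so nested calls that pull the logger back out of ctx see the per-iteration values too. Roughly the same shape with plain controller-runtime helpers (a sketch, not the tlog API; LoggerFrom/LoggerInto are controller-runtime's aliases for log.FromContext/log.IntoContext):

package main

import (
	"context"
	stdlog "log"
	"os"

	"github.com/go-logr/stdr"
	ctrl "sigs.k8s.io/controller-runtime"
)

// applyPatch pulls the logger back out of the context, as nested code in
// the real engine does, and sees exactly one "patch" pair.
func applyPatch(ctx context.Context) {
	ctrl.LoggerFrom(ctx).Info("applying patch")
}

func main() {
	base := stdr.New(stdlog.New(os.Stderr, "", 0))
	ctx := ctrl.LoggerInto(context.Background(), base)

	for _, name := range []string{"cpu-patch", "memory-patch"} {
		// Shadow both log and ctx per iteration, mirroring the diff's
		// `ctx, log := log.WithValues(...).Into(ctx)`.
		log := ctrl.LoggerFrom(ctx).WithValues("patch", name)
		ctx := ctrl.LoggerInto(ctx, log)
		applyPatch(ctx)
	}
}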
2 changes: 2 additions & 0 deletions test/extension/handlers/topologymutation/handler.go
@@ -81,6 +81,8 @@ func (h *ExtensionHandlers) GeneratePatches(ctx context.Context, req *runtimehoo
 	// easier to read and less error prone than using unstructured or working with raw json/yaml.
 	// IMPORTANT: by unit testing this func/nested func properly, it is possible to prevent unexpected rollouts when patches are modified.
 	topologymutation.WalkTemplates(ctx, h.decoder, req, resp, func(ctx context.Context, obj runtime.Object, variables map[string]apiextensionsv1.JSON, holderRef runtimehooksv1.HolderReference) error {
+		log := ctrl.LoggerFrom(ctx)
+
 		switch obj := obj.(type) {
 		case *infrav1.DockerClusterTemplate:
 			if err := patchDockerClusterTemplate(ctx, obj, variables); err != nil {
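The test-extension change is the complementary read side: inside the WalkTemplates callback the logger is now derived from the callback's ctx argument instead of being captured from an enclosing scope, so values attached to the context upstream travel with it. A standalone sketch of the pattern (walk and the key names are illustrative, not the cluster-api API):

package main

import (
	"context"
	stdlog "log"
	"os"

	"github.com/go-logr/stdr"
	ctrl "sigs.k8s.io/controller-runtime"
)

// walk is a stand-in for topologymutation.WalkTemplates: it calls fn once
// per object with a context that already carries a per-object logger.
func walk(ctx context.Context, objs []string, fn func(ctx context.Context, obj string) error) {
	for _, obj := range objs {
		ctx := ctrl.LoggerInto(ctx, ctrl.LoggerFrom(ctx).WithValues("object", obj))
		_ = fn(ctx, obj)
	}
}

func main() {
	base := stdr.New(stdlog.New(os.Stderr, "", 0))
	ctx := ctrl.LoggerInto(context.Background(), base)

	walk(ctx, []string{"DockerClusterTemplate", "DockerMachineTemplate"}, func(ctx context.Context, obj string) error {
		// Derive the logger from the callback's ctx, not the enclosing
		// scope, so the per-object pairs attached by walk are included.
		log := ctrl.LoggerFrom(ctx)
		log.Info("patching")
		return nil
	})
}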