Fix up additional log lines
Signed-off-by: killianmuldoon <[email protected]>
killianmuldoon committed Jul 27, 2022
1 parent 35d4dd7 commit de2a5e1
Showing 25 changed files with 74 additions and 62 deletions.
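The touched lines all apply the same structured-logging conventions: log keys become lower camelCase ("cluster", "machine", "node"), object references are logged through klog.KObj/klog.KRef instead of raw name strings, and the enriched logger is stored back into the context with ctrl.LoggerInto so nested calls pick it up again via ctrl.LoggerFrom. The sketch below illustrates the resulting pattern; ExampleReconciler and its helper are hypothetical names used only for illustration and are not part of the diff.

```go
package controllers

import (
	"context"

	"k8s.io/klog/v2"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)

// ExampleReconciler is a hypothetical reconciler used only to illustrate the
// logging convention applied by this commit.
type ExampleReconciler struct {
	Client client.Client
}

func (r *ExampleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	log := ctrl.LoggerFrom(ctx)

	cluster := &clusterv1.Cluster{}
	if err := r.Client.Get(ctx, req.NamespacedName, cluster); err != nil {
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}

	// Lower-camel-case key plus klog.KObj, which renders as "namespace/name",
	// instead of the previous log.WithValues("Cluster", cluster.Name).
	log = log.WithValues("cluster", klog.KObj(cluster))
	// Store the enriched logger back into the context so helpers receiving this
	// ctx see the same key/value pairs.
	ctx = ctrl.LoggerInto(ctx, log)

	return r.reconcileNormal(ctx, cluster)
}

func (r *ExampleReconciler) reconcileNormal(ctx context.Context, cluster *clusterv1.Cluster) (ctrl.Result, error) {
	// The logger retrieved from the context already carries the "cluster" key.
	log := ctrl.LoggerFrom(ctx)
	log.Info("Reconciling")
	return ctrl.Result{}, nil
}
```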
@@ -191,8 +191,7 @@ func (r *KubeadmConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reques
return ctrl.Result{}, err
}

log = log.WithValues("Cluster", klog.KObj(cluster))
ctx = ctrl.LoggerInto(ctx, log)
ctx = ctrl.LoggerInto(ctx, log.WithValues("cluster", klog.KObj(cluster)))

if annotations.IsPaused(cluster, config) {
log.Info("Reconciliation is paused for this object")
2 changes: 1 addition & 1 deletion cmd/clusterctl/client/cluster/mover.go
@@ -540,7 +540,7 @@ func setClusterPause(proxy Proxy, clusters []*node, value bool, dryRun bool) err
setClusterPauseBackoff := newWriteBackoff()
for i := range clusters {
cluster := clusters[i]
log.V(5).Info("Set Cluster.Spec.Paused", "Paused", value, "Cluster", cluster.identity.Name, "Namespace", cluster.identity.Namespace)
log.V(5).Info("Set Cluster.Spec.Paused", "paused", value, "cluster", cluster.identity.Name, "namespace", cluster.identity.Namespace)

// Nb. The operation is wrapped in a retry loop to make setClusterPause more resilient to unexpected conditions.
if err := retryWithExponentialBackoff(setClusterPauseBackoff, func() error {
12 changes: 6 additions & 6 deletions controllers/remote/cluster_cache.go
@@ -236,11 +236,11 @@ func (t *ClusterCacheTracker) deleteAccessor(cluster client.ObjectKey) {
return
}

t.log.V(2).Info("Deleting clusterAccessor", "Cluster", cluster.String())

t.log.V(4).Info("Stopping cache", "Cluster", cluster.String())
t.log.WithValues("cluster", klog.KRef(cluster.Namespace, cluster.Name))
t.log.V(2).Info("Deleting clusterAccessor")
t.log.V(4).Info("Stopping cache")
a.cache.Stop()
t.log.V(4).Info("Cache stopped", "Cluster", cluster.String())
t.log.V(4).Info("Cache stopped")

delete(t.clusterAccessors, cluster)
}
@@ -287,7 +287,7 @@ func (t *ClusterCacheTracker) Watch(ctx context.Context, input WatchInput) error
}

if a.watches.Has(input.Name) {
t.log.V(6).Info("Watch already exists", "Namespace", klog.KRef(input.Cluster.Namespace, ""), "Cluster", klog.KRef(input.Cluster.Namespace, input.Cluster.Name), "Name", input.Name)
t.log.V(6).Info("Watch already exists", "namespace", klog.KRef(input.Cluster.Namespace, ""), "cluster", klog.KRef(input.Cluster.Namespace, input.Cluster.Name), "Name", input.Name)
return nil
}

@@ -392,7 +392,7 @@ func (t *ClusterCacheTracker) healthCheckCluster(ctx context.Context, in *health
// NB. we are ignoring ErrWaitTimeout because this error happens when the channel is close, that in this case
// happens when the cache is explicitly stopped.
if err != nil && err != wait.ErrWaitTimeout {
t.log.Error(err, "Error health checking cluster", "Cluster", klog.KRef(in.cluster.Namespace, in.cluster.Name))
t.log.Error(err, "Error health checking cluster", "cluster", klog.KRef(in.cluster.Namespace, in.cluster.Name))
t.deleteAccessor(in.cluster)
}
}
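In ClusterCacheTracker the code only holds a client.ObjectKey for the cluster rather than the object itself, which is why these hunks use klog.KRef(namespace, name) while the reconcilers use klog.KObj(obj). A short sketch of the two helpers follows; the function and variable names are illustrative only.

```go
package controllers

import (
	"context"

	"k8s.io/klog/v2"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)

// logObjectRefs is a hypothetical helper showing where KObj and KRef each apply.
func logObjectRefs(ctx context.Context, cluster *clusterv1.Cluster, key client.ObjectKey, nodeName string) {
	log := ctrl.LoggerFrom(ctx)

	// klog.KObj takes the object itself and renders it as "namespace/name".
	log.Info("Reconciling", "cluster", klog.KObj(cluster))

	// klog.KRef builds the same reference from plain strings, useful when only
	// a key is held in memory, as in ClusterCacheTracker.deleteAccessor above.
	log.V(2).Info("Deleting clusterAccessor", "cluster", klog.KRef(key.Namespace, key.Name))

	// An empty namespace renders as just the name, which suits cluster-scoped
	// objects such as Nodes.
	log.Info("Deleting node", "node", klog.KRef("", nodeName))
}
```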
4 changes: 2 additions & 2 deletions controlplane/kubeadm/internal/controllers/controller.go
@@ -143,7 +143,7 @@ func (r *KubeadmControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.
log.Info("Cluster Controller has not yet set OwnerRef")
return ctrl.Result{}, nil
}
log = log.WithValues("Cluster", klog.KObj(cluster))
log = log.WithValues("cluster", klog.KObj(cluster))
ctx = ctrl.LoggerInto(ctx, log)

if annotations.IsPaused(cluster, kcp) {
@@ -463,7 +463,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileDelete(ctx context.Context, clu
var errs []error
for i := range machinesToDelete {
m := machinesToDelete[i]
logger := log.WithValues("Machine", klog.KObj(m))
logger := log.WithValues("machine", klog.KObj(m))
if err := r.Client.Delete(ctx, machinesToDelete[i]); err != nil && !apierrors.IsNotFound(err) {
logger.Error(err, "Failed to cleanup owned machine")
errs = append(errs, err)
2 changes: 1 addition & 1 deletion controlplane/kubeadm/internal/controllers/remediation.go
@@ -97,7 +97,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C
if err := patchHelper.Patch(ctx, machineToBeRemediated, patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{
clusterv1.MachineOwnerRemediatedCondition,
}}); err != nil {
log.Error(err, "Failed to patch control plane Machine", "Machine", machineToBeRemediated.Name)
log.Error(err, "Failed to patch control plane Machine", "machine", machineToBeRemediated.Name)
if retErr == nil {
retErr = errors.Wrapf(err, "failed to patch control plane Machine %s", machineToBeRemediated.Name)
}
4 changes: 2 additions & 2 deletions controlplane/kubeadm/internal/controllers/scale.go
@@ -141,7 +141,7 @@ func (r *KubeadmControlPlaneReconciler) scaleDownControlPlane(
return ctrl.Result{}, err
}

logger = logger.WithValues("Machine", klog.KObj(machineToDelete))
logger = logger.WithValues("machine", klog.KObj(machineToDelete))
if err := r.Client.Delete(ctx, machineToDelete); err != nil && !apierrors.IsNotFound(err) {
logger.Error(err, "Failed to delete control plane machine")
r.recorder.Eventf(kcp, corev1.EventTypeWarning, "FailedScaleDown",
@@ -201,7 +201,7 @@ loopmachines:
}

for _, condition := range allMachineHealthConditions {
if err := preflightCheckCondition("Machine", machine, condition); err != nil {
if err := preflightCheckCondition("machine", machine, condition); err != nil {
machineErrors = append(machineErrors, err)
}
}
3 changes: 2 additions & 1 deletion controlplane/kubeadm/internal/controllers/status.go
@@ -20,6 +20,7 @@ import (
"context"

"github.com/pkg/errors"
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"

clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
@@ -33,7 +34,7 @@ import (
// updateStatus is called after every reconcilitation loop in a defer statement to always make sure we have the
// resource status subresourcs up-to-date.
func (r *KubeadmControlPlaneReconciler) updateStatus(ctx context.Context, kcp *controlplanev1.KubeadmControlPlane, cluster *clusterv1.Cluster) error {
log := ctrl.LoggerFrom(ctx, "Cluster", cluster.Name)
log := ctrl.LoggerFrom(ctx, "cluster", klog.KObj(cluster))

selector := collections.ControlPlaneSelectorForCluster(cluster.Name)
// Copy label selector to its status counterpart in string format.
@@ -236,7 +236,7 @@ func (r *ClusterResourceSetReconciler) getClustersByClusterResourceSetSelector(c
// It applies resources best effort and continue on scenarios like: unsupported resource types, failure during creation, missing resources.
// TODO: If a resource already exists in the cluster but not applied by ClusterResourceSet, the resource will be updated ?
func (r *ClusterResourceSetReconciler) ApplyClusterResourceSet(ctx context.Context, cluster *clusterv1.Cluster, clusterResourceSet *addonsv1.ClusterResourceSet) error {
log := ctrl.LoggerFrom(ctx, "Cluster", cluster.Name)
log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name)

remoteClient, err := r.Tracker.GetClient(ctx, util.ObjectKey(cluster))
if err != nil {
5 changes: 3 additions & 2 deletions exp/internal/controllers/machinepool_controller_noderef.go
@@ -23,6 +23,7 @@ import (

"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"

@@ -47,7 +48,7 @@ type getNodeReferencesResult struct {
}

func (r *MachinePoolReconciler) reconcileNodeRefs(ctx context.Context, cluster *clusterv1.Cluster, mp *expv1.MachinePool) (ctrl.Result, error) {
log := ctrl.LoggerFrom(ctx, "Cluster", cluster.Name)
log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name)
// Check that the MachinePool hasn't been deleted or in the process.
if !mp.DeletionTimestamp.IsZero() {
return ctrl.Result{}, nil
@@ -65,7 +66,7 @@ func (r *MachinePoolReconciler) reconcileNodeRefs(ctx context.Context, cluster *
return ctrl.Result{}, nil
}

log = log.WithValues("Cluster", cluster.Name)
log = log.WithValues("cluster", klog.KObj(cluster))

// Check that the MachinePool has valid ProviderIDList.
if len(mp.Spec.ProviderIDList) == 0 && (mp.Spec.Replicas == nil || *mp.Spec.Replicas != 0) {
5 changes: 3 additions & 2 deletions exp/internal/controllers/machinepool_controller_phases.go
@@ -26,6 +26,7 @@ import (
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/klog/v2"
"k8s.io/utils/pointer"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
@@ -166,7 +167,7 @@ func (r *MachinePoolReconciler) reconcileExternal(ctx context.Context, cluster *

// reconcileBootstrap reconciles the Spec.Bootstrap.ConfigRef object on a MachinePool.
func (r *MachinePoolReconciler) reconcileBootstrap(ctx context.Context, cluster *clusterv1.Cluster, m *expv1.MachinePool) (ctrl.Result, error) {
log := ctrl.LoggerFrom(ctx, "Cluster", cluster.Name)
log := ctrl.LoggerFrom(ctx, "cluster", klog.KObj(cluster))

// Call generic external reconciler if we have an external reference.
var bootstrapConfig *unstructured.Unstructured
@@ -226,7 +227,7 @@ func (r *MachinePoolReconciler) reconcileBootstrap(ctx context.Context, cluster

// reconcileInfrastructure reconciles the Spec.InfrastructureRef object on a MachinePool.
func (r *MachinePoolReconciler) reconcileInfrastructure(ctx context.Context, cluster *clusterv1.Cluster, mp *expv1.MachinePool) (ctrl.Result, error) {
log := ctrl.LoggerFrom(ctx, "Cluster", cluster.Name)
log := ctrl.LoggerFrom(ctx, "cluster", klog.KObj(cluster))

// Call generic external reconciler.
infraReconcileResult, err := r.reconcileExternal(ctx, cluster, mp, &mp.Spec.Template.Spec.InfrastructureRef)
3 changes: 3 additions & 0 deletions internal/controllers/cluster/cluster_controller.go
@@ -29,6 +29,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
kerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
@@ -113,6 +114,8 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re
return ctrl.Result{}, err
}

ctrl.LoggerInto(ctx, log.WithValues("cluster", klog.KObj(cluster)))

// Return early if the object or Cluster is paused.
if annotations.IsPaused(cluster, cluster) {
log.Info("Reconciliation is paused for this object")
14 changes: 7 additions & 7 deletions internal/controllers/machine/machine_controller.go
@@ -158,7 +158,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re
m.Spec.ClusterName, m.Name, m.Namespace)
}

log = log.WithValues("Cluster", klog.KObj(cluster))
log = log.WithValues("cluster", klog.KObj(cluster))
ctx = ctrl.LoggerInto(ctx, log)

// Return early if the object or Cluster is paused.
@@ -355,7 +355,7 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Clu
r.recorder.Eventf(m, corev1.EventTypeWarning, "FailedWaitForVolumeDetach", "error wait for volume detach, node %q: %v", m.Status.NodeRef.Name, err)
return ctrl.Result{}, err
}
log.Info("Waiting for node volumes to be detached", "Node", klog.KRef("", m.Status.NodeRef.Name))
log.Info("Waiting for node volumes to be detached", "node", klog.KRef("", m.Status.NodeRef.Name))
return ctrl.Result{}, nil
}
conditions.MarkTrue(m, clusterv1.VolumeDetachSucceededCondition)
@@ -395,7 +395,7 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Clu
// We only delete the node after the underlying infrastructure is gone.
// https://github.com/kubernetes-sigs/cluster-api/issues/2565
if isDeleteNodeAllowed {
log.Info("Deleting node", "Node", klog.KRef("", m.Status.NodeRef.Name))
log.Info("Deleting node", "node", klog.KRef("", m.Status.NodeRef.Name))

var deleteNodeErr error
waitErr := wait.PollImmediate(2*time.Second, r.nodeDeletionRetryTimeout, func() (bool, error) {
@@ -405,7 +405,7 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Clu
return true, nil
})
if waitErr != nil {
log.Error(deleteNodeErr, "Timed out deleting node", "Node", klog.KRef("", m.Status.NodeRef.Name))
log.Error(deleteNodeErr, "Timed out deleting node", "node", klog.KRef("", m.Status.NodeRef.Name))
conditions.MarkFalse(m, clusterv1.MachineNodeHealthyCondition, clusterv1.DeletionFailedReason, clusterv1.ConditionSeverityWarning, "")
r.recorder.Eventf(m, corev1.EventTypeWarning, "FailedDeleteNode", "error deleting Machine's node: %v", deleteNodeErr)

@@ -513,7 +513,7 @@ func (r *Reconciler) isDeleteNodeAllowed(ctx context.Context, cluster *clusterv1
}

func (r *Reconciler) drainNode(ctx context.Context, cluster *clusterv1.Cluster, nodeName string) (ctrl.Result, error) {
log := ctrl.LoggerFrom(ctx, "Node", klog.KRef("", nodeName))
log := ctrl.LoggerFrom(ctx, "node", klog.KRef("", nodeName))

restConfig, err := remote.RESTConfig(ctx, controllerName, r.Client, util.ObjectKey(cluster))
if err != nil {
@@ -552,7 +552,7 @@ func (r *Reconciler) drainNode(ctx context.Context, cluster *clusterv1.Cluster,
verbStr = "Evicted"
}
log.Info(fmt.Sprintf("%s pod from Node", verbStr),
"Pod", klog.KObj(pod))
"pod", klog.KObj(pod))
},
Out: writer{log.Info},
ErrOut: writer{func(msg string, keysAndValues ...interface{}) {
@@ -587,7 +587,7 @@ func (r *Reconciler) drainNode(ctx context.Context, cluster *clusterv1.Cluster,
// because if the node is deleted before detach success, then the underline VMDK will be deleted together with the Machine
// so after node draining we need to check if all volumes are detached before deleting the node.
func (r *Reconciler) shouldWaitForNodeVolumes(ctx context.Context, cluster *clusterv1.Cluster, nodeName string) (bool, error) {
log := ctrl.LoggerFrom(ctx, "Node", klog.KRef("", nodeName))
log := ctrl.LoggerFrom(ctx, "node", klog.KRef("", nodeName))

remoteClient, err := r.Tracker.GetClient(ctx, util.ObjectKey(cluster))
if err != nil {
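The drain hunks above pass writer{log.Info} as the drain helper's output streams. That adapter turns a structured log function into an io.Writer; the rough sketch below shows how such an adapter can look (logWriter is a hypothetical name, not the exact type from the repository).

```go
package controllers

import "strings"

// logWriter adapts a structured log function to io.Writer so that helpers
// expecting a writer (such as the drain helper's Out/ErrOut) emit through logr.
type logWriter struct {
	logFunc func(msg string, keysAndValues ...interface{})
}

// Write forwards each chunk written by the drain helper as one log message.
func (w logWriter) Write(p []byte) (int, error) {
	w.logFunc(strings.TrimSpace(string(p)))
	return len(p), nil
}
```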
2 changes: 1 addition & 1 deletion internal/controllers/machine/machine_controller_noderef.go
@@ -189,7 +189,7 @@ func (r *Reconciler) getNode(ctx context.Context, c client.Reader, providerID *n
for key, node := range nl.Items {
nodeProviderID, err := noderefutil.NewProviderID(node.Spec.ProviderID)
if err != nil {
log.Error(err, "Failed to parse ProviderID", "Node", klog.KRef("", nl.Items[key].GetName()))
log.Error(err, "Failed to parse ProviderID", "node", klog.KRef("", nl.Items[key].GetName()))
continue
}

2 changes: 1 addition & 1 deletion internal/controllers/machine/machine_controller_phases.go
@@ -89,7 +89,7 @@ func (r *Reconciler) reconcilePhase(_ context.Context, m *clusterv1.Machine) {

// reconcileExternal handles generic unstructured objects referenced by a Machine.
func (r *Reconciler) reconcileExternal(ctx context.Context, cluster *clusterv1.Cluster, m *clusterv1.Machine, ref *corev1.ObjectReference) (external.ReconcileOutput, error) {
log := ctrl.LoggerFrom(ctx, "Cluster", klog.KObj(cluster))
log := ctrl.LoggerFrom(ctx, "cluster", klog.KObj(cluster))

if err := utilconversion.UpdateReferenceAPIContract(ctx, r.Client, r.APIReader, ref); err != nil {
return external.ReconcileOutput{}, err
@@ -119,15 +119,15 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re
return ctrl.Result{}, err
}

log = log.WithValues("MachineDeployment", klog.KObj(deployment))
log = log.WithValues("machineDeployment", klog.KObj(deployment))
ctx = ctrl.LoggerInto(ctx, log)

cluster, err := util.GetClusterByName(ctx, r.Client, deployment.Namespace, deployment.Spec.ClusterName)
if err != nil {
return ctrl.Result{}, err
}

log = log.WithValues("Cluster", klog.KObj(cluster))
log = log.WithValues("cluster", klog.KObj(cluster))
ctx = ctrl.LoggerInto(ctx, log)

// Return early if the object or Cluster is paused.
@@ -264,33 +264,33 @@ func (r *Reconciler) getMachineSetsForDeployment(ctx context.Context, d *cluster
filtered := make([]*clusterv1.MachineSet, 0, len(machineSets.Items))
for idx := range machineSets.Items {
ms := &machineSets.Items[idx]

log.WithValues("machineSet", klog.KObj(ms))
selector, err := metav1.LabelSelectorAsSelector(&d.Spec.Selector)
if err != nil {
log.Error(err, "Skipping MachineSet, failed to get label selector from spec selector", "MachineSet", klog.KObj(ms))
log.Error(err, "Skipping MachineSet, failed to get label selector from spec selector")
continue
}

// If a MachineDeployment with a nil or empty selector creeps in, it should match nothing, not everything.
if selector.Empty() {
log.Info("Skipping MachineSet as the selector is empty", "MachineSet", klog.KObj(ms))
log.Info("Skipping MachineSet as the selector is empty")
continue
}

// Skip this MachineSet unless either selector matches or it has a controller ref pointing to this MachineDeployment
if !selector.Matches(labels.Set(ms.Labels)) && !metav1.IsControlledBy(ms, d) {
log.V(4).Info("Skipping MachineSet, label mismatch", "MachineSet", klog.KObj(ms))
log.V(4).Info("Skipping MachineSet, label mismatch")
continue
}

// Attempt to adopt machine if it meets previous conditions and it has no controller references.
if metav1.GetControllerOf(ms) == nil {
if err := r.adoptOrphan(ctx, d, ms); err != nil {
log.Error(err, "Failed to adopt MachineSet into MachineDeployment", "machineset", ms.Name)
log.Error(err, "Failed to adopt MachineSet into MachineDeployment")
r.recorder.Eventf(d, corev1.EventTypeWarning, "FailedAdopt", "Failed to adopt MachineSet %q: %v", ms.Name, err)
continue
}
log.Info("Adopted MachineSet into MachineDeployment", "MachineSet", klog.KObj(ms))
log.Info("Adopted MachineSet into MachineDeployment")
r.recorder.Eventf(d, corev1.EventTypeNormal, "SuccessfulAdopt", "Adopted MachineSet %q", ms.Name)
}

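Where a key/value pair should accompany every message about one item in a loop, as in the MachineSet hunk above, the usual pattern is to derive a scoped logger per iteration; logr loggers are immutable, so WithValues returns a new logger that has to be captured. A hedged sketch of that pattern, with illustrative names:

```go
package controllers

import (
	"github.com/go-logr/logr"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog/v2"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)

// logMachineSets is a hypothetical helper illustrating a per-item logger.
func logMachineSets(log logr.Logger, machineSets *clusterv1.MachineSetList) {
	for idx := range machineSets.Items {
		ms := &machineSets.Items[idx]

		// WithValues returns a new logger; capture it so every message about
		// this MachineSet automatically carries the "machineSet" key.
		msLog := log.WithValues("machineSet", klog.KObj(ms))

		if metav1.GetControllerOf(ms) == nil {
			msLog.Info("MachineSet has no controller reference")
			continue
		}
		msLog.V(4).Info("Processing MachineSet")
	}
}
```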
2 changes: 1 addition & 1 deletion internal/controllers/machinedeployment/mdutil/util.go
@@ -169,7 +169,7 @@ func getMaxReplicasAnnotation(ms *clusterv1.MachineSet, logger logr.Logger) (int
}

func getIntFromAnnotation(ms *clusterv1.MachineSet, annotationKey string, logger logr.Logger) (int32, bool) {
logger = logger.WithValues("MachineSet", klog.KObj(ms))
logger = logger.WithValues("machineSet", klog.KObj(ms))

annotationValue, ok := ms.Annotations[annotationKey]
if !ok {
@@ -133,7 +133,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re
return ctrl.Result{}, err
}

log = log.WithValues("Cluster", klog.KRef(m.Namespace, m.Spec.ClusterName))
log = log.WithValues("cluster", klog.KRef(m.Namespace, m.Spec.ClusterName))
ctx = ctrl.LoggerInto(ctx, log)

cluster, err := util.GetClusterByName(ctx, r.Client, m.Namespace, m.Spec.ClusterName)
@@ -357,7 +357,7 @@ func (r *Reconciler) patchHealthyTargets(ctx context.Context, logger logr.Logger
}

if err := t.patchHelper.Patch(ctx, t.Machine); err != nil {
logger.Error(err, "failed to patch healthy machine status for machine", "Machine", t.Machine.GetName())
logger.Error(err, "failed to patch healthy machine status for machine", "machine", t.Machine.GetName())
errList = append(errList, errors.Wrapf(err, "failed to patch healthy machine status for machine: %s/%s", t.Machine.Namespace, t.Machine.Name))
}
}
@@ -27,6 +27,7 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"

clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
@@ -199,9 +200,10 @@ func (r *Reconciler) getTargetsFromMHC(ctx context.Context, logger logr.Logger,

targets := []healthCheckTarget{}
for k := range machines {
logger.WithValues("machine", klog.KObj(&machines[k]))
skip, reason := shouldSkipRemediation(&machines[k])
if skip {
logger.Info("skipping remediation", "Machine", machines[k].Name, "Reason", reason)
logger.Info("skipping remediation", "reason", reason)
continue
}
