From 35d4dd7a52bac995795598869ee9d7e4d8b4e366 Mon Sep 17 00:00:00 2001 From: Stefan Bueringer Date: Tue, 22 Feb 2022 11:14:34 +0100 Subject: [PATCH] logging: cleanup k/v pairs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Stefan Büringer buringerst@vmware.com --- .gitignore | 1 + .../controllers/kubeadmconfig_controller.go | 6 +++- controllers/remote/cluster_cache.go | 11 +++--- .../kubeadm/internal/control_plane.go | 6 ---- .../internal/controllers/controller.go | 12 ++++--- .../internal/controllers/remediation.go | 2 +- .../kubeadm/internal/controllers/scale.go | 15 ++++---- .../kubeadm/internal/controllers/status.go | 2 +- .../kubeadm/internal/controllers/upgrade.go | 2 +- .../clusterresourceset_controller.go | 2 +- .../machinepool_controller_noderef.go | 4 +-- .../machinepool_controller_phases.go | 4 +-- hack/observability/promtail/values.yaml | 14 ++++++++ .../controllers/cluster/cluster_controller.go | 2 +- .../clusterclass/clusterclass_controller.go | 2 +- .../controllers/machine/machine_controller.go | 36 +++++++++++-------- .../machine/machine_controller_noderef.go | 6 ++-- .../machine/machine_controller_phases.go | 7 ++-- .../machinedeployment_controller.go | 15 +++++--- .../machinedeployment/mdutil/util.go | 7 ++-- .../machinehealthcheck_controller.go | 5 +-- .../machinehealthcheck_targets.go | 2 +- .../machineset/machineset_controller.go | 29 +++++++++------ .../topology/cluster/cluster_controller.go | 4 +++ .../dockermachinepool_controller.go | 2 +- .../controllers/dockercluster_controller.go | 4 ++- .../controllers/dockermachine_controller.go | 9 ++--- .../docker/internal/docker/loadbalancer.go | 2 +- util/predicates/cluster_predicates.go | 12 +++---- 29 files changed, 137 insertions(+), 88 deletions(-) diff --git a/.gitignore b/.gitignore index ee8689ed58c8..5fa4decc8123 100644 --- a/.gitignore +++ b/.gitignore @@ -67,6 +67,7 @@ tilt-settings.json tilt-settings.yaml tilt_modules .tiltbuild +tilt_modules # User-supplied clusterctl hacks settings clusterctl-settings.json diff --git a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go index 0a5fd52b2cdb..fa423fc4f854 100644 --- a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go +++ b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go @@ -31,6 +31,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/klog/v2" "k8s.io/utils/pointer" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -172,7 +173,7 @@ func (r *KubeadmConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reques if configOwner == nil { return ctrl.Result{}, nil } - log = log.WithValues("kind", configOwner.GetKind(), "version", configOwner.GetResourceVersion(), "name", configOwner.GetName()) + log = log.WithValues(configOwner.GetKind(), klog.KRef(configOwner.GetNamespace(), configOwner.GetName()), "resourceVersion", configOwner.GetResourceVersion()) // Lookup the cluster the config owner is associated with cluster, err := util.GetClusterByName(ctx, r.Client, configOwner.GetNamespace(), configOwner.ClusterName()) @@ -190,6 +191,9 @@ func (r *KubeadmConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reques return ctrl.Result{}, err } + log = log.WithValues("Cluster", klog.KObj(cluster)) + ctx = ctrl.LoggerInto(ctx, log) + if annotations.IsPaused(cluster, config) { 
log.Info("Reconciliation is paused for this object") return ctrl.Result{}, nil diff --git a/controllers/remote/cluster_cache.go b/controllers/remote/cluster_cache.go index 92f141855e54..221369127406 100644 --- a/controllers/remote/cluster_cache.go +++ b/controllers/remote/cluster_cache.go @@ -32,6 +32,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" + "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" @@ -235,11 +236,11 @@ func (t *ClusterCacheTracker) deleteAccessor(cluster client.ObjectKey) { return } - t.log.V(2).Info("Deleting clusterAccessor", "cluster", cluster.String()) + t.log.V(2).Info("Deleting clusterAccessor", "Cluster", cluster.String()) - t.log.V(4).Info("Stopping cache", "cluster", cluster.String()) + t.log.V(4).Info("Stopping cache", "Cluster", cluster.String()) a.cache.Stop() - t.log.V(4).Info("Cache stopped", "cluster", cluster.String()) + t.log.V(4).Info("Cache stopped", "Cluster", cluster.String()) delete(t.clusterAccessors, cluster) } @@ -286,7 +287,7 @@ func (t *ClusterCacheTracker) Watch(ctx context.Context, input WatchInput) error } if a.watches.Has(input.Name) { - t.log.V(6).Info("Watch already exists", "namespace", input.Cluster.Namespace, "cluster", input.Cluster.Name, "name", input.Name) + t.log.V(6).Info("Watch already exists", "Namespace", klog.KRef(input.Cluster.Namespace, ""), "Cluster", klog.KRef(input.Cluster.Namespace, input.Cluster.Name), "Name", input.Name) return nil } @@ -391,7 +392,7 @@ func (t *ClusterCacheTracker) healthCheckCluster(ctx context.Context, in *health // NB. we are ignoring ErrWaitTimeout because this error happens when the channel is close, that in this case // happens when the cache is explicitly stopped. if err != nil && err != wait.ErrWaitTimeout { - t.log.Error(err, "Error health checking cluster", "cluster", in.cluster.String()) + t.log.Error(err, "Error health checking cluster", "Cluster", klog.KRef(in.cluster.Namespace, in.cluster.Name)) t.deleteAccessor(in.cluster) } } diff --git a/controlplane/kubeadm/internal/control_plane.go b/controlplane/kubeadm/internal/control_plane.go index 7293a5074ed6..e649959c9925 100644 --- a/controlplane/kubeadm/internal/control_plane.go +++ b/controlplane/kubeadm/internal/control_plane.go @@ -19,7 +19,6 @@ package internal import ( "context" - "github.com/go-logr/logr" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -90,11 +89,6 @@ func NewControlPlane(ctx context.Context, client client.Client, cluster *cluster }, nil } -// Logger returns a logger with useful context. -func (c *ControlPlane) Logger() logr.Logger { - return Log.WithValues("namespace", c.KCP.Namespace, "name", c.KCP.Name, "cluster-name", c.Cluster.Name) -} - // FailureDomains returns a slice of failure domain objects synced from the infrastructure provider into Cluster.Status. 
func (c *ControlPlane) FailureDomains() clusterv1.FailureDomains { if c.Cluster.Status.FailureDomains == nil { diff --git a/controlplane/kubeadm/internal/controllers/controller.go b/controlplane/kubeadm/internal/controllers/controller.go index 081c57de3026..f186a90db096 100644 --- a/controlplane/kubeadm/internal/controllers/controller.go +++ b/controlplane/kubeadm/internal/controllers/controller.go @@ -28,6 +28,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/tools/record" + "k8s.io/klog/v2" "k8s.io/utils/pointer" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -142,7 +143,8 @@ func (r *KubeadmControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl. log.Info("Cluster Controller has not yet set OwnerRef") return ctrl.Result{}, nil } - log = log.WithValues("cluster", cluster.Name) + log = log.WithValues("Cluster", klog.KObj(cluster)) + ctx = ctrl.LoggerInto(ctx, log) if annotations.IsPaused(cluster, kcp) { log.Info("Reconciliation is paused for this object") @@ -241,7 +243,7 @@ func patchKubeadmControlPlane(ctx context.Context, patchHelper *patch.Helper, kc // reconcile handles KubeadmControlPlane reconciliation. func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane) (res ctrl.Result, reterr error) { - log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name) + log := ctrl.LoggerFrom(ctx) log.Info("Reconcile KubeadmControlPlane") // Make sure to reconcile the external infrastructure reference. @@ -407,7 +409,7 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, cluster * // The implementation does not take non-control plane workloads into consideration. This may or may not change in the future. // Please see https://github.com/kubernetes-sigs/cluster-api/issues/2064. func (r *KubeadmControlPlaneReconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane) (ctrl.Result, error) { - log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name) + log := ctrl.LoggerFrom(ctx) log.Info("Reconcile KubeadmControlPlane deletion") // Gets all machines, not just control plane machines. @@ -461,7 +463,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileDelete(ctx context.Context, clu var errs []error for i := range machinesToDelete { m := machinesToDelete[i] - logger := log.WithValues("machine", m) + logger := log.WithValues("Machine", klog.KObj(m)) if err := r.Client.Delete(ctx, machinesToDelete[i]); err != nil && !apierrors.IsNotFound(err) { logger.Error(err, "Failed to cleanup owned machine") errs = append(errs, err) @@ -525,7 +527,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileControlPlaneConditions(ctx cont // // NOTE: this func uses KCP conditions, it is required to call reconcileControlPlaneConditions before this. func (r *KubeadmControlPlaneReconciler) reconcileEtcdMembers(ctx context.Context, controlPlane *internal.ControlPlane) (ctrl.Result, error) { - log := ctrl.LoggerFrom(ctx, "cluster", controlPlane.Cluster.Name) + log := ctrl.LoggerFrom(ctx) // If etcd is not managed by KCP this is a no-op. 
if !controlPlane.IsEtcdManaged() { diff --git a/controlplane/kubeadm/internal/controllers/remediation.go b/controlplane/kubeadm/internal/controllers/remediation.go index 38f1e23091c3..b7a8946596b4 100644 --- a/controlplane/kubeadm/internal/controllers/remediation.go +++ b/controlplane/kubeadm/internal/controllers/remediation.go @@ -97,7 +97,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C if err := patchHelper.Patch(ctx, machineToBeRemediated, patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ clusterv1.MachineOwnerRemediatedCondition, }}); err != nil { - log.Error(err, "Failed to patch control plane Machine", "machine", machineToBeRemediated.Name) + log.Error(err, "Failed to patch control plane Machine", "Machine", machineToBeRemediated.Name) if retErr == nil { retErr = errors.Wrapf(err, "failed to patch control plane Machine %s", machineToBeRemediated.Name) } diff --git a/controlplane/kubeadm/internal/controllers/scale.go b/controlplane/kubeadm/internal/controllers/scale.go index 0fcf1d022d38..6d30f0938622 100644 --- a/controlplane/kubeadm/internal/controllers/scale.go +++ b/controlplane/kubeadm/internal/controllers/scale.go @@ -25,6 +25,7 @@ import ( corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -36,7 +37,7 @@ import ( ) func (r *KubeadmControlPlaneReconciler) initializeControlPlane(ctx context.Context, cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane, controlPlane *internal.ControlPlane) (ctrl.Result, error) { - logger := controlPlane.Logger() + logger := ctrl.LoggerFrom(ctx) // Perform an uncached read of all the owned machines. This check is in place to make sure // that the controller cache is not misbehaving and we end up initializing the cluster more than once. @@ -65,7 +66,7 @@ func (r *KubeadmControlPlaneReconciler) initializeControlPlane(ctx context.Conte } func (r *KubeadmControlPlaneReconciler) scaleUpControlPlane(ctx context.Context, cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane, controlPlane *internal.ControlPlane) (ctrl.Result, error) { - logger := controlPlane.Logger() + logger := ctrl.LoggerFrom(ctx) // Run preflight checks to ensure that the control plane is stable before proceeding with a scale up/scale down operation; if not, wait. if result, err := r.preflightChecks(ctx, controlPlane); err != nil || !result.IsZero() { @@ -92,7 +93,7 @@ func (r *KubeadmControlPlaneReconciler) scaleDownControlPlane( controlPlane *internal.ControlPlane, outdatedMachines collections.Machines, ) (ctrl.Result, error) { - logger := controlPlane.Logger() + logger := ctrl.LoggerFrom(ctx) // Pick the Machine that we should scale down. machineToDelete, err := selectMachineForScaleDown(controlPlane, outdatedMachines) @@ -140,7 +141,7 @@ func (r *KubeadmControlPlaneReconciler) scaleDownControlPlane( return ctrl.Result{}, err } - logger = logger.WithValues("machine", machineToDelete.Name) + logger = logger.WithValues("Machine", klog.KObj(machineToDelete)) if err := r.Client.Delete(ctx, machineToDelete); err != nil && !apierrors.IsNotFound(err) { logger.Error(err, "Failed to delete control plane machine") r.recorder.Eventf(kcp, corev1.EventTypeWarning, "FailedScaleDown", @@ -160,8 +161,8 @@ func (r *KubeadmControlPlaneReconciler) scaleDownControlPlane( // If the control plane is not passing preflight checks, it requeue. 
// // NOTE: this func uses KCP conditions, it is required to call reconcileControlPlaneConditions before this. -func (r *KubeadmControlPlaneReconciler) preflightChecks(_ context.Context, controlPlane *internal.ControlPlane, excludeFor ...*clusterv1.Machine) (ctrl.Result, error) { //nolint:unparam - logger := controlPlane.Logger() +func (r *KubeadmControlPlaneReconciler) preflightChecks(ctx context.Context, controlPlane *internal.ControlPlane, excludeFor ...*clusterv1.Machine) (ctrl.Result, error) { //nolint:unparam + logger := ctrl.LoggerFrom(ctx) // If there is no KCP-owned control-plane machines, then control-plane has not been initialized yet, // so it is considered ok to proceed. @@ -200,7 +201,7 @@ loopmachines: } for _, condition := range allMachineHealthConditions { - if err := preflightCheckCondition("machine", machine, condition); err != nil { + if err := preflightCheckCondition("Machine", machine, condition); err != nil { machineErrors = append(machineErrors, err) } } diff --git a/controlplane/kubeadm/internal/controllers/status.go b/controlplane/kubeadm/internal/controllers/status.go index 1617827ff3e5..c57b1e629486 100644 --- a/controlplane/kubeadm/internal/controllers/status.go +++ b/controlplane/kubeadm/internal/controllers/status.go @@ -33,7 +33,7 @@ import ( // updateStatus is called after every reconcilitation loop in a defer statement to always make sure we have the // resource status subresourcs up-to-date. func (r *KubeadmControlPlaneReconciler) updateStatus(ctx context.Context, kcp *controlplanev1.KubeadmControlPlane, cluster *clusterv1.Cluster) error { - log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name) + log := ctrl.LoggerFrom(ctx, "Cluster", cluster.Name) selector := collections.ControlPlaneSelectorForCluster(cluster.Name) // Copy label selector to its status counterpart in string format. diff --git a/controlplane/kubeadm/internal/controllers/upgrade.go b/controlplane/kubeadm/internal/controllers/upgrade.go index 9913bb6ad576..df268b5b9fee 100644 --- a/controlplane/kubeadm/internal/controllers/upgrade.go +++ b/controlplane/kubeadm/internal/controllers/upgrade.go @@ -37,7 +37,7 @@ func (r *KubeadmControlPlaneReconciler) upgradeControlPlane( controlPlane *internal.ControlPlane, machinesRequireUpgrade collections.Machines, ) (ctrl.Result, error) { - logger := controlPlane.Logger() + logger := ctrl.LoggerFrom(ctx) if kcp.Spec.RolloutStrategy == nil || kcp.Spec.RolloutStrategy.RollingUpdate == nil { return ctrl.Result{}, errors.New("rolloutStrategy is not set") diff --git a/exp/addons/internal/controllers/clusterresourceset_controller.go b/exp/addons/internal/controllers/clusterresourceset_controller.go index 0e1f542cf273..4d1c73271047 100644 --- a/exp/addons/internal/controllers/clusterresourceset_controller.go +++ b/exp/addons/internal/controllers/clusterresourceset_controller.go @@ -236,7 +236,7 @@ func (r *ClusterResourceSetReconciler) getClustersByClusterResourceSetSelector(c // It applies resources best effort and continue on scenarios like: unsupported resource types, failure during creation, missing resources. // TODO: If a resource already exists in the cluster but not applied by ClusterResourceSet, the resource will be updated ? 
func (r *ClusterResourceSetReconciler) ApplyClusterResourceSet(ctx context.Context, cluster *clusterv1.Cluster, clusterResourceSet *addonsv1.ClusterResourceSet) error { - log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name) + log := ctrl.LoggerFrom(ctx, "Cluster", cluster.Name) remoteClient, err := r.Tracker.GetClient(ctx, util.ObjectKey(cluster)) if err != nil { diff --git a/exp/internal/controllers/machinepool_controller_noderef.go b/exp/internal/controllers/machinepool_controller_noderef.go index 0ee7260a6dba..630ddf9bca94 100644 --- a/exp/internal/controllers/machinepool_controller_noderef.go +++ b/exp/internal/controllers/machinepool_controller_noderef.go @@ -47,7 +47,7 @@ type getNodeReferencesResult struct { } func (r *MachinePoolReconciler) reconcileNodeRefs(ctx context.Context, cluster *clusterv1.Cluster, mp *expv1.MachinePool) (ctrl.Result, error) { - log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name) + log := ctrl.LoggerFrom(ctx, "Cluster", cluster.Name) // Check that the MachinePool hasn't been deleted or in the process. if !mp.DeletionTimestamp.IsZero() { return ctrl.Result{}, nil @@ -65,7 +65,7 @@ func (r *MachinePoolReconciler) reconcileNodeRefs(ctx context.Context, cluster * return ctrl.Result{}, nil } - log = log.WithValues("cluster", cluster.Name) + log = log.WithValues("Cluster", cluster.Name) // Check that the MachinePool has valid ProviderIDList. if len(mp.Spec.ProviderIDList) == 0 && (mp.Spec.Replicas == nil || *mp.Spec.Replicas != 0) { diff --git a/exp/internal/controllers/machinepool_controller_phases.go b/exp/internal/controllers/machinepool_controller_phases.go index ea0748d8cc50..33c42ea003bc 100644 --- a/exp/internal/controllers/machinepool_controller_phases.go +++ b/exp/internal/controllers/machinepool_controller_phases.go @@ -166,7 +166,7 @@ func (r *MachinePoolReconciler) reconcileExternal(ctx context.Context, cluster * // reconcileBootstrap reconciles the Spec.Bootstrap.ConfigRef object on a MachinePool. func (r *MachinePoolReconciler) reconcileBootstrap(ctx context.Context, cluster *clusterv1.Cluster, m *expv1.MachinePool) (ctrl.Result, error) { - log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name) + log := ctrl.LoggerFrom(ctx, "Cluster", cluster.Name) // Call generic external reconciler if we have an external reference. var bootstrapConfig *unstructured.Unstructured @@ -226,7 +226,7 @@ func (r *MachinePoolReconciler) reconcileBootstrap(ctx context.Context, cluster // reconcileInfrastructure reconciles the Spec.InfrastructureRef object on a MachinePool. func (r *MachinePoolReconciler) reconcileInfrastructure(ctx context.Context, cluster *clusterv1.Cluster, mp *expv1.MachinePool) (ctrl.Result, error) { - log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name) + log := ctrl.LoggerFrom(ctx, "Cluster", cluster.Name) // Call generic external reconciler. infraReconcileResult, err := r.reconcileExternal(ctx, cluster, mp, &mp.Spec.Template.Spec.InfrastructureRef) diff --git a/hack/observability/promtail/values.yaml b/hack/observability/promtail/values.yaml index cdc51bf0c0f7..38761da53323 100644 --- a/hack/observability/promtail/values.yaml +++ b/hack/observability/promtail/values.yaml @@ -3,3 +3,17 @@ config: # publish data to loki lokiAddress: http://loki:3100/loki/api/v1/push + + snippets: + pipelineStages: + # Parse cluster and machine to make them available as labels. 
+ - cri: { } + - json: + expressions: + controller: + cluster: join('/',[cluster.namespace,cluster.name]) + machine: join('/',[machine.namespace,machine.name]) + - labels: + controller: + cluster: + machine: diff --git a/internal/controllers/cluster/cluster_controller.go b/internal/controllers/cluster/cluster_controller.go index 856a63b719a5..fb70a9ee0d62 100644 --- a/internal/controllers/cluster/cluster_controller.go +++ b/internal/controllers/cluster/cluster_controller.go @@ -179,7 +179,7 @@ func patchCluster(ctx context.Context, patchHelper *patch.Helper, cluster *clust // reconcile handles cluster reconciliation. func (r *Reconciler) reconcile(ctx context.Context, cluster *clusterv1.Cluster) (ctrl.Result, error) { - log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name) + log := ctrl.LoggerFrom(ctx) if cluster.Spec.Topology != nil { if cluster.Spec.ControlPlaneRef == nil || cluster.Spec.InfrastructureRef == nil { diff --git a/internal/controllers/clusterclass/clusterclass_controller.go b/internal/controllers/clusterclass/clusterclass_controller.go index b9798e4e7592..2a865f4ed984 100644 --- a/internal/controllers/clusterclass/clusterclass_controller.go +++ b/internal/controllers/clusterclass/clusterclass_controller.go @@ -59,7 +59,7 @@ type Reconciler struct { func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { err := ctrl.NewControllerManagedBy(mgr). For(&clusterv1.ClusterClass{}). - Named("topology/clusterclass"). + Named("clusterclass"). WithOptions(options). WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)). Complete(r) diff --git a/internal/controllers/machine/machine_controller.go b/internal/controllers/machine/machine_controller.go index 0ed6d12c5f8b..78da82018e86 100644 --- a/internal/controllers/machine/machine_controller.go +++ b/internal/controllers/machine/machine_controller.go @@ -31,6 +31,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/record" + "k8s.io/klog/v2" kubedrain "k8s.io/kubectl/pkg/drain" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -157,6 +158,9 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re m.Spec.ClusterName, m.Name, m.Namespace) } + log = log.WithValues("Cluster", klog.KObj(cluster)) + ctx = ctrl.LoggerInto(ctx, log) + // Return early if the object or Cluster is paused. 
if annotations.IsPaused(cluster, m) { log.Info("Reconciliation is paused for this object") @@ -285,15 +289,19 @@ func (r *Reconciler) reconcile(ctx context.Context, cluster *clusterv1.Cluster, return res, kerrors.NewAggregate(errs) } -func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Cluster, m *clusterv1.Machine) (ctrl.Result, error) { - log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name) +func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Cluster, m *clusterv1.Machine) (ctrl.Result, error) { //nolint:gocyclo + log := ctrl.LoggerFrom(ctx) err := r.isDeleteNodeAllowed(ctx, cluster, m) isDeleteNodeAllowed := err == nil if err != nil { switch err { case errNoControlPlaneNodes, errLastControlPlaneNode, errNilNodeRef, errClusterIsBeingDeleted, errControlPlaneIsBeingDeleted: - log.Info("Deleting Kubernetes Node associated with Machine is not allowed", "node", m.Status.NodeRef, "cause", err.Error()) + var nodeName = "" + if m.Status.NodeRef != nil { + nodeName = m.Status.NodeRef.Name + } + log.Info("Deleting Kubernetes Node associated with Machine is not allowed", "node", klog.KRef("", nodeName), "cause", err.Error()) default: return ctrl.Result{}, errors.Wrapf(err, "failed to check if Kubernetes Node deletion is allowed") } @@ -315,7 +323,7 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Clu return ctrl.Result{}, err } - log.Info("Draining node", "node", m.Status.NodeRef.Name) + log.Info("Draining node", "node", klog.KRef("", m.Status.NodeRef.Name)) // The DrainingSucceededCondition never exists before the node is drained for the first time, // so its transition time can be used to record the first time draining. // This `if` condition prevents the transition time to be changed more than once. @@ -342,12 +350,12 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Clu if conditions.Get(m, clusterv1.VolumeDetachSucceededCondition) == nil { conditions.MarkFalse(m, clusterv1.VolumeDetachSucceededCondition, clusterv1.WaitingForVolumeDetachReason, clusterv1.ConditionSeverityInfo, "Waiting for node volumes to be detached") } - if ok, err := r.shouldWaitForNodeVolumes(ctx, cluster, m.Status.NodeRef.Name, m.Name); ok || err != nil { + if ok, err := r.shouldWaitForNodeVolumes(ctx, cluster, m.Status.NodeRef.Name); ok || err != nil { if err != nil { r.recorder.Eventf(m, corev1.EventTypeWarning, "FailedWaitForVolumeDetach", "error wait for volume detach, node %q: %v", m.Status.NodeRef.Name, err) return ctrl.Result{}, err } - log.Info("Waiting for node volumes to be detached", "node", m.Status.NodeRef.Name) + log.Info("Waiting for node volumes to be detached", "Node", klog.KRef("", m.Status.NodeRef.Name)) return ctrl.Result{}, nil } conditions.MarkTrue(m, clusterv1.VolumeDetachSucceededCondition) @@ -387,7 +395,7 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Clu // We only delete the node after the underlying infrastructure is gone. 
// https://github.com/kubernetes-sigs/cluster-api/issues/2565 if isDeleteNodeAllowed { - log.Info("Deleting node", "node", m.Status.NodeRef.Name) + log.Info("Deleting node", "Node", klog.KRef("", m.Status.NodeRef.Name)) var deleteNodeErr error waitErr := wait.PollImmediate(2*time.Second, r.nodeDeletionRetryTimeout, func() (bool, error) { @@ -397,7 +405,7 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Clu return true, nil }) if waitErr != nil { - log.Error(deleteNodeErr, "Timed out deleting node", "node", m.Status.NodeRef.Name) + log.Error(deleteNodeErr, "Timed out deleting node", "Node", klog.KRef("", m.Status.NodeRef.Name)) conditions.MarkFalse(m, clusterv1.MachineNodeHealthyCondition, clusterv1.DeletionFailedReason, clusterv1.ConditionSeverityWarning, "") r.recorder.Eventf(m, corev1.EventTypeWarning, "FailedDeleteNode", "error deleting Machine's node: %v", deleteNodeErr) @@ -445,7 +453,7 @@ func (r *Reconciler) nodeDrainTimeoutExceeded(machine *clusterv1.Machine) bool { // isDeleteNodeAllowed returns nil only if the Machine's NodeRef is not nil // and if the Machine is not the last control plane node in the cluster. func (r *Reconciler) isDeleteNodeAllowed(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine) error { - log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name) + log := ctrl.LoggerFrom(ctx) // Return early if the cluster is being deleted. if !cluster.DeletionTimestamp.IsZero() { return errClusterIsBeingDeleted @@ -505,7 +513,7 @@ func (r *Reconciler) isDeleteNodeAllowed(ctx context.Context, cluster *clusterv1 } func (r *Reconciler) drainNode(ctx context.Context, cluster *clusterv1.Cluster, nodeName string) (ctrl.Result, error) { - log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name, "node", nodeName) + log := ctrl.LoggerFrom(ctx, "Node", klog.KRef("", nodeName)) restConfig, err := remote.RESTConfig(ctx, controllerName, r.Client, util.ObjectKey(cluster)) if err != nil { @@ -544,7 +552,7 @@ func (r *Reconciler) drainNode(ctx context.Context, cluster *clusterv1.Cluster, verbStr = "Evicted" } log.Info(fmt.Sprintf("%s pod from Node", verbStr), - "pod", fmt.Sprintf("%s/%s", pod.Name, pod.Namespace)) + "Pod", klog.KObj(pod)) }, Out: writer{log.Info}, ErrOut: writer{func(msg string, keysAndValues ...interface{}) { @@ -578,8 +586,8 @@ func (r *Reconciler) drainNode(ctx context.Context, cluster *clusterv1.Cluster, // this could cause issue for some storage provisioner, for example, vsphere-volume this is problematic // because if the node is deleted before detach success, then the underline VMDK will be deleted together with the Machine // so after node draining we need to check if all volumes are detached before deleting the node. 
-func (r *Reconciler) shouldWaitForNodeVolumes(ctx context.Context, cluster *clusterv1.Cluster, nodeName string, machineName string) (bool, error) { - log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name, "node", nodeName, "machine", machineName) +func (r *Reconciler) shouldWaitForNodeVolumes(ctx context.Context, cluster *clusterv1.Cluster, nodeName string) (bool, error) { + log := ctrl.LoggerFrom(ctx, "Node", klog.KRef("", nodeName)) remoteClient, err := r.Tracker.GetClient(ctx, util.ObjectKey(cluster)) if err != nil { @@ -599,7 +607,7 @@ func (r *Reconciler) shouldWaitForNodeVolumes(ctx context.Context, cluster *clus } func (r *Reconciler) deleteNode(ctx context.Context, cluster *clusterv1.Cluster, name string) error { - log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name) + log := ctrl.LoggerFrom(ctx) remoteClient, err := r.Tracker.GetClient(ctx, util.ObjectKey(cluster)) if err != nil { diff --git a/internal/controllers/machine/machine_controller_noderef.go b/internal/controllers/machine/machine_controller_noderef.go index 3641258a385f..65a4563f9ee0 100644 --- a/internal/controllers/machine/machine_controller_noderef.go +++ b/internal/controllers/machine/machine_controller_noderef.go @@ -23,6 +23,7 @@ import ( "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -41,8 +42,7 @@ var ( ) func (r *Reconciler) reconcileNode(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine) (ctrl.Result, error) { - log := ctrl.LoggerFrom(ctx, "machine", machine.Name, "namespace", machine.Namespace) - log = log.WithValues("cluster", cluster.Name) + log := ctrl.LoggerFrom(ctx) // Check that the Machine has a valid ProviderID. if machine.Spec.ProviderID == nil || *machine.Spec.ProviderID == "" { @@ -189,7 +189,7 @@ func (r *Reconciler) getNode(ctx context.Context, c client.Reader, providerID *n for key, node := range nl.Items { nodeProviderID, err := noderefutil.NewProviderID(node.Spec.ProviderID) if err != nil { - log.Error(err, "Failed to parse ProviderID", "node", client.ObjectKeyFromObject(&nl.Items[key]).String()) + log.Error(err, "Failed to parse ProviderID", "Node", klog.KRef("", nl.Items[key].GetName())) continue } diff --git a/internal/controllers/machine/machine_controller_phases.go b/internal/controllers/machine/machine_controller_phases.go index 8ffe77e9a2cc..79dfad632b4b 100644 --- a/internal/controllers/machine/machine_controller_phases.go +++ b/internal/controllers/machine/machine_controller_phases.go @@ -27,6 +27,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/klog/v2" "k8s.io/utils/pointer" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -88,7 +89,7 @@ func (r *Reconciler) reconcilePhase(_ context.Context, m *clusterv1.Machine) { // reconcileExternal handles generic unstructured objects referenced by a Machine. 
func (r *Reconciler) reconcileExternal(ctx context.Context, cluster *clusterv1.Cluster, m *clusterv1.Machine, ref *corev1.ObjectReference) (external.ReconcileOutput, error) { - log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name) + log := ctrl.LoggerFrom(ctx, "Cluster", klog.KObj(cluster)) if err := utilconversion.UpdateReferenceAPIContract(ctx, r.Client, r.APIReader, ref); err != nil { return external.ReconcileOutput{}, err @@ -172,7 +173,7 @@ func (r *Reconciler) reconcileExternal(ctx context.Context, cluster *clusterv1.C // reconcileBootstrap reconciles the Spec.Bootstrap.ConfigRef object on a Machine. func (r *Reconciler) reconcileBootstrap(ctx context.Context, cluster *clusterv1.Cluster, m *clusterv1.Machine) (ctrl.Result, error) { - log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name) + log := ctrl.LoggerFrom(ctx) // If the bootstrap data is populated, set ready and return. if m.Spec.Bootstrap.DataSecretName != nil { @@ -237,7 +238,7 @@ func (r *Reconciler) reconcileBootstrap(ctx context.Context, cluster *clusterv1. // reconcileInfrastructure reconciles the Spec.InfrastructureRef object on a Machine. func (r *Reconciler) reconcileInfrastructure(ctx context.Context, cluster *clusterv1.Cluster, m *clusterv1.Machine) (ctrl.Result, error) { - log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name) + log := ctrl.LoggerFrom(ctx) // Call generic external reconciler. infraReconcileResult, err := r.reconcileExternal(ctx, cluster, m, &m.Spec.InfrastructureRef) diff --git a/internal/controllers/machinedeployment/machinedeployment_controller.go b/internal/controllers/machinedeployment/machinedeployment_controller.go index 2ba6ee903ea1..23579ffffece 100644 --- a/internal/controllers/machinedeployment/machinedeployment_controller.go +++ b/internal/controllers/machinedeployment/machinedeployment_controller.go @@ -28,6 +28,7 @@ import ( "k8s.io/apimachinery/pkg/labels" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/tools/record" + "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -118,11 +119,17 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re return ctrl.Result{}, err } + log = log.WithValues("MachineDeployment", klog.KObj(deployment)) + ctx = ctrl.LoggerInto(ctx, log) + cluster, err := util.GetClusterByName(ctx, r.Client, deployment.Namespace, deployment.Spec.ClusterName) if err != nil { return ctrl.Result{}, err } + log = log.WithValues("Cluster", klog.KObj(cluster)) + ctx = ctrl.LoggerInto(ctx, log) + // Return early if the object or Cluster is paused. if annotations.IsPaused(cluster, deployment) { log.Info("Reconciliation is paused for this object") @@ -260,19 +267,19 @@ func (r *Reconciler) getMachineSetsForDeployment(ctx context.Context, d *cluster selector, err := metav1.LabelSelectorAsSelector(&d.Spec.Selector) if err != nil { - log.Error(err, "Skipping MachineSet, failed to get label selector from spec selector", "machineset", ms.Name) + log.Error(err, "Skipping MachineSet, failed to get label selector from spec selector", "MachineSet", klog.KObj(ms)) continue } // If a MachineDeployment with a nil or empty selector creeps in, it should match nothing, not everything. 
if selector.Empty() { - log.Info("Skipping MachineSet as the selector is empty", "machineset", ms.Name) + log.Info("Skipping MachineSet as the selector is empty", "MachineSet", klog.KObj(ms)) continue } // Skip this MachineSet unless either selector matches or it has a controller ref pointing to this MachineDeployment if !selector.Matches(labels.Set(ms.Labels)) && !metav1.IsControlledBy(ms, d) { - log.V(4).Info("Skipping MachineSet, label mismatch", "machineset", ms.Name) + log.V(4).Info("Skipping MachineSet, label mismatch", "MachineSet", klog.KObj(ms)) continue } @@ -283,7 +290,7 @@ func (r *Reconciler) getMachineSetsForDeployment(ctx context.Context, d *cluster r.recorder.Eventf(d, corev1.EventTypeWarning, "FailedAdopt", "Failed to adopt MachineSet %q: %v", ms.Name, err) continue } - log.Info("Adopted MachineSet into MachineDeployment", "machineset", ms.Name) + log.Info("Adopted MachineSet into MachineDeployment", "MachineSet", klog.KObj(ms)) r.recorder.Eventf(d, corev1.EventTypeNormal, "SuccessfulAdopt", "Adopted MachineSet %q", ms.Name) } diff --git a/internal/controllers/machinedeployment/mdutil/util.go b/internal/controllers/machinedeployment/mdutil/util.go index 751ad34c0656..837d20d643a8 100644 --- a/internal/controllers/machinedeployment/mdutil/util.go +++ b/internal/controllers/machinedeployment/mdutil/util.go @@ -33,6 +33,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" intstrutil "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/klog/v2" "k8s.io/utils/integer" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -168,7 +169,7 @@ func getMaxReplicasAnnotation(ms *clusterv1.MachineSet, logger logr.Logger) (int } func getIntFromAnnotation(ms *clusterv1.MachineSet, annotationKey string, logger logr.Logger) (int32, bool) { - logger = logger.WithValues("machineset", ms.Name, "annotationKey", annotationKey) + logger = logger.WithValues("MachineSet", klog.KObj(ms)) annotationValue, ok := ms.Annotations[annotationKey] if !ok { @@ -176,7 +177,7 @@ func getIntFromAnnotation(ms *clusterv1.MachineSet, annotationKey string, logger } intValue, err := strconv.ParseInt(annotationValue, 10, 32) if err != nil { - logger.V(2).Info("Cannot convert the value to integer", "annotationValue", annotationValue) + logger.V(2).Info(fmt.Sprintf("Cannot convert annotation %q with value %q to integer", annotationKey, annotationValue)) return int32(0), false } return int32(intValue), true @@ -185,7 +186,7 @@ func getIntFromAnnotation(ms *clusterv1.MachineSet, annotationKey string, logger // SetNewMachineSetAnnotations sets new machine set's annotations appropriately by updating its revision and // copying required deployment annotations to it; it returns true if machine set's annotation is changed. 
func SetNewMachineSetAnnotations(deployment *clusterv1.MachineDeployment, newMS *clusterv1.MachineSet, newRevision string, exists bool, logger logr.Logger) bool { - logger = logger.WithValues("machineset", newMS.Name) + logger = logger.WithValues("MachineSet", klog.KObj(newMS)) // First, copy deployment's annotations (except for apply and revision annotations) annotationChanged := copyDeploymentAnnotationsToMachineSet(deployment, newMS) diff --git a/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go b/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go index be39ea2cc101..101d95267307 100644 --- a/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go +++ b/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go @@ -34,6 +34,7 @@ import ( kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/tools/record" + "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -132,7 +133,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re return ctrl.Result{}, err } - log = log.WithValues("cluster", m.Spec.ClusterName) + log = log.WithValues("Cluster", klog.KRef(m.Namespace, m.Spec.ClusterName)) ctx = ctrl.LoggerInto(ctx, log) cluster, err := util.GetClusterByName(ctx, r.Client, m.Namespace, m.Spec.ClusterName) @@ -356,7 +357,7 @@ func (r *Reconciler) patchHealthyTargets(ctx context.Context, logger logr.Logger } if err := t.patchHelper.Patch(ctx, t.Machine); err != nil { - logger.Error(err, "failed to patch healthy machine status for machine", "machine", t.Machine.GetName()) + logger.Error(err, "failed to patch healthy machine status for machine", "Machine", t.Machine.GetName()) errList = append(errList, errors.Wrapf(err, "failed to patch healthy machine status for machine: %s/%s", t.Machine.Namespace, t.Machine.Name)) } } diff --git a/internal/controllers/machinehealthcheck/machinehealthcheck_targets.go b/internal/controllers/machinehealthcheck/machinehealthcheck_targets.go index bbe5a54d760e..d052881352eb 100644 --- a/internal/controllers/machinehealthcheck/machinehealthcheck_targets.go +++ b/internal/controllers/machinehealthcheck/machinehealthcheck_targets.go @@ -201,7 +201,7 @@ func (r *Reconciler) getTargetsFromMHC(ctx context.Context, logger logr.Logger, for k := range machines { skip, reason := shouldSkipRemediation(&machines[k]) if skip { - logger.Info("skipping remediation", "machine", machines[k].Name, "reason", reason) + logger.Info("skipping remediation", "Machine", machines[k].Name, "Reason", reason) continue } diff --git a/internal/controllers/machineset/machineset_controller.go b/internal/controllers/machineset/machineset_controller.go index 6a4a5a7b1dad..508f719a043b 100644 --- a/internal/controllers/machineset/machineset_controller.go +++ b/internal/controllers/machineset/machineset_controller.go @@ -29,6 +29,7 @@ import ( "k8s.io/apimachinery/pkg/labels" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/tools/record" + "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -135,6 +136,9 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re return ctrl.Result{}, err } + log = log.WithValues("Cluster", klog.KObj(cluster)) + ctx = ctrl.LoggerInto(ctx, log) + // Return early if the object or Cluster is 
paused. if annotations.IsPaused(cluster, machineSet) { log.Info("Reconciliation is paused for this object") @@ -260,11 +264,11 @@ func (r *Reconciler) reconcile(ctx context.Context, cluster *clusterv1.Cluster, // Attempt to adopt machine if it meets previous conditions and it has no controller references. if metav1.GetControllerOf(machine) == nil { if err := r.adoptOrphan(ctx, machineSet, machine); err != nil { - log.Error(err, "Failed to adopt Machine", "machine", machine.Name) + log.Error(err, "Failed to adopt Machine", "Machine", klog.KObj(machine)) r.recorder.Eventf(machineSet, corev1.EventTypeWarning, "FailedAdopt", "Failed to adopt Machine %q: %v", machine.Name, err) continue } - log.Info("Adopted Machine", "machine", machine.Name) + log.Info("Adopted Machine", "Machine", klog.KObj(machine)) r.recorder.Eventf(machineSet, corev1.EventTypeNormal, "SuccessfulAdopt", "Adopted Machine %q", machine.Name) } @@ -279,7 +283,7 @@ func (r *Reconciler) reconcile(ctx context.Context, cluster *clusterv1.Cluster, continue } if conditions.IsFalse(machine, clusterv1.MachineOwnerRemediatedCondition) { - log.Info("Deleting unhealthy machine", "machine", machine.GetName()) + log.Info("Deleting unhealthy machine", "Machine", klog.KObj(machine)) patch := client.MergeFrom(machine.DeepCopy()) if err := r.Client.Delete(ctx, machine); err != nil { errs = append(errs, errors.Wrap(err, "failed to delete")) @@ -413,7 +417,7 @@ func (r *Reconciler) syncReplicas(ctx context.Context, ms *clusterv1.MachineSet, machine.Spec.InfrastructureRef = *infraRef if err := r.Client.Create(ctx, machine); err != nil { - log.Error(err, "Unable to create Machine", "machine", machine.Name) + log.Error(err, "Unable to create Machine", "Machine", machine.Name) r.recorder.Eventf(ms, corev1.EventTypeWarning, "FailedCreate", "Failed to create machine %q: %v", machine.Name, err) errs = append(errs, err) conditions.MarkFalse(ms, clusterv1.MachinesCreatedCondition, clusterv1.MachineCreationFailedReason, @@ -453,12 +457,12 @@ func (r *Reconciler) syncReplicas(ctx context.Context, ms *clusterv1.MachineSet, machinesToDelete := getMachinesToDeletePrioritized(machines, diff, deletePriorityFunc) for _, machine := range machinesToDelete { if err := r.Client.Delete(ctx, machine); err != nil { - log.Error(err, "Unable to delete Machine", "machine", machine.Name) + log.Error(err, "Unable to delete Machine", "Machine", machine.Name) r.recorder.Eventf(ms, corev1.EventTypeWarning, "FailedDelete", "Failed to delete machine %q: %v", machine.Name, err) errs = append(errs, err) continue } - log.Info("Deleted machine", "machine", machine.Name) + log.Info("Deleted machine", "Machine", machine.Name) r.recorder.Eventf(ms, corev1.EventTypeNormal, "SuccessfulDelete", "Deleted machine %q", machine.Name) } @@ -565,9 +569,6 @@ func (r *Reconciler) waitForMachineDeletion(ctx context.Context, machineList []* // MachineToMachineSets is a handler.ToRequestsFunc to be used to enqueue requests for reconciliation // for MachineSets that might adopt an orphaned Machine. 
func (r *Reconciler) MachineToMachineSets(o client.Object) []ctrl.Request { - ctx := context.Background() - // This won't log unless the global logger is set - log := ctrl.LoggerFrom(ctx, "object", client.ObjectKeyFromObject(o)) result := []ctrl.Request{} m, ok := o.(*clusterv1.Machine) @@ -575,6 +576,10 @@ func (r *Reconciler) MachineToMachineSets(o client.Object) []ctrl.Request { panic(fmt.Sprintf("Expected a Machine but got a %T", o)) } + // This won't log unless the global logger is set + ctx := context.Background() + log := ctrl.LoggerFrom(ctx, "Machine", klog.KObj(m)) + // Check if the controller reference is already set and // return an empty result when one is found. for _, ref := range m.ObjectMeta.OwnerReferences { @@ -651,12 +656,14 @@ func (r *Reconciler) updateStatus(ctx context.Context, cluster *clusterv1.Cluste templateLabel := labels.Set(ms.Spec.Template.Labels).AsSelectorPreValidated() for _, machine := range filteredMachines { + log = log.WithValues("Machine", klog.KObj(machine)) + if templateLabel.Matches(labels.Set(machine.Labels)) { fullyLabeledReplicasCount++ } if machine.Status.NodeRef == nil { - log.V(2).Info("Unable to retrieve Node status, missing NodeRef", "machine", machine.Name) + log.V(2).Info("Unable to retrieve Node status, missing NodeRef") continue } @@ -690,7 +697,7 @@ func (r *Reconciler) updateStatus(ctx context.Context, cluster *clusterv1.Cluste newStatus.ObservedGeneration = ms.Generation newStatus.DeepCopyInto(&ms.Status) - log.V(4).Info(fmt.Sprintf("Updating status for %v: %s/%s, ", ms.Kind, ms.Namespace, ms.Name) + + log.V(4).Info("Updating status: " + fmt.Sprintf("replicas %d->%d (need %d), ", ms.Status.Replicas, newStatus.Replicas, desiredReplicas) + fmt.Sprintf("fullyLabeledReplicas %d->%d, ", ms.Status.FullyLabeledReplicas, newStatus.FullyLabeledReplicas) + fmt.Sprintf("readyReplicas %d->%d, ", ms.Status.ReadyReplicas, newStatus.ReadyReplicas) + diff --git a/internal/controllers/topology/cluster/cluster_controller.go b/internal/controllers/topology/cluster/cluster_controller.go index 8921981802af..9e3f8b329d48 100644 --- a/internal/controllers/topology/cluster/cluster_controller.go +++ b/internal/controllers/topology/cluster/cluster_controller.go @@ -26,6 +26,7 @@ import ( "k8s.io/apimachinery/pkg/types" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/tools/record" + "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" @@ -147,6 +148,9 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re cluster.APIVersion = clusterv1.GroupVersion.String() cluster.Kind = "Cluster" + log = log.WithValues("Cluster", klog.KObj(cluster)) + ctx = ctrl.LoggerInto(ctx, log) + // Return early, if the Cluster does not use a managed topology. // NOTE: We're already filtering events, but this is a safeguard for cases like e.g. 
when // there are MachineDeployments which have the topology owned label, but the corresponding diff --git a/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go b/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go index 5f93045b8a38..fec5d081d2bd 100644 --- a/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go +++ b/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go @@ -95,7 +95,7 @@ func (r *DockerMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Re return ctrl.Result{}, nil } - log = log.WithValues("cluster", cluster.Name) + log = log.WithValues("Cluster", cluster.Name) // Initialize the patch helper patchHelper, err := patch.NewHelper(dockerMachinePool, r.Client) diff --git a/test/infrastructure/docker/internal/controllers/dockercluster_controller.go b/test/infrastructure/docker/internal/controllers/dockercluster_controller.go index 189aa401c7e0..f775d7d7aec2 100644 --- a/test/infrastructure/docker/internal/controllers/dockercluster_controller.go +++ b/test/infrastructure/docker/internal/controllers/dockercluster_controller.go @@ -22,6 +22,7 @@ import ( "github.com/pkg/errors" apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -73,7 +74,8 @@ func (r *DockerClusterReconciler) Reconcile(ctx context.Context, req ctrl.Reques return ctrl.Result{}, nil } - log = log.WithValues("cluster", cluster.Name) + log = log.WithValues("Cluster", klog.KObj(cluster)) + ctx = ctrl.LoggerInto(ctx, log) // Create a helper for managing a docker container hosting the loadbalancer. externalLoadBalancer, err := docker.NewLoadBalancer(ctx, cluster, dockerCluster) diff --git a/test/infrastructure/docker/internal/controllers/dockermachine_controller.go b/test/infrastructure/docker/internal/controllers/dockermachine_controller.go index a727a5150e32..d1932aac9512 100644 --- a/test/infrastructure/docker/internal/controllers/dockermachine_controller.go +++ b/test/infrastructure/docker/internal/controllers/dockermachine_controller.go @@ -25,6 +25,7 @@ import ( "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -82,7 +83,8 @@ func (r *DockerMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reques return ctrl.Result{}, nil } - log = log.WithValues("machine", machine.Name) + log = log.WithValues("Machine", klog.KObj(machine)) + ctx = ctrl.LoggerInto(ctx, log) // Fetch the Cluster. cluster, err := util.GetClusterFromMetadata(ctx, r.Client, machine.ObjectMeta) @@ -95,7 +97,8 @@ func (r *DockerMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reques return ctrl.Result{}, nil } - log = log.WithValues("cluster", cluster.Name) + log = log.WithValues("Cluster", klog.KObj(cluster)) + ctx = ctrl.LoggerInto(ctx, log) // Return early if the object or Cluster is paused. 
if annotations.IsPaused(cluster, dockerMachine) { @@ -114,8 +117,6 @@ func (r *DockerMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reques return ctrl.Result{}, nil } - log = log.WithValues("docker-cluster", dockerCluster.Name) - // Initialize the patch helper patchHelper, err := patch.NewHelper(dockerMachine, r) if err != nil { diff --git a/test/infrastructure/docker/internal/docker/loadbalancer.go b/test/infrastructure/docker/internal/docker/loadbalancer.go index badf1079d870..3eb8b8c1c769 100644 --- a/test/infrastructure/docker/internal/docker/loadbalancer.go +++ b/test/infrastructure/docker/internal/docker/loadbalancer.go @@ -107,7 +107,7 @@ func (s *LoadBalancer) containerName() string { // Create creates a docker container hosting a load balancer for the cluster. func (s *LoadBalancer) Create(ctx context.Context) error { log := ctrl.LoggerFrom(ctx) - log = log.WithValues("cluster", s.name, "ipFamily", s.ipFamily) + log = log.WithValues("IPFamily", s.ipFamily) listenAddr := "0.0.0.0" if s.ipFamily == clusterv1.IPv6IPFamily { diff --git a/util/predicates/cluster_predicates.go b/util/predicates/cluster_predicates.go index 0f2c29d5909b..112334f155de 100644 --- a/util/predicates/cluster_predicates.go +++ b/util/predicates/cluster_predicates.go @@ -41,7 +41,7 @@ func ClusterCreateInfraReady(logger logr.Logger) predicate.Funcs { log.V(4).Info("Expected Cluster", "type", fmt.Sprintf("%T", e.Object)) return false } - log = log.WithValues("namespace", c.Namespace, "cluster", c.Name) + log = log.WithValues("Namespace", c.Namespace, "Cluster", c.Name) // Only need to trigger a reconcile if the Cluster.Status.InfrastructureReady is true if c.Status.InfrastructureReady { @@ -70,7 +70,7 @@ func ClusterCreateNotPaused(logger logr.Logger) predicate.Funcs { log.V(4).Info("Expected Cluster", "type", fmt.Sprintf("%T", e.Object)) return false } - log = log.WithValues("namespace", c.Namespace, "cluster", c.Name) + log = log.WithValues("Namespace", c.Namespace, "Cluster", c.Name) // Only need to trigger a reconcile if the Cluster.Spec.Paused is false if !c.Spec.Paused { @@ -99,7 +99,7 @@ func ClusterUpdateInfraReady(logger logr.Logger) predicate.Funcs { log.V(4).Info("Expected Cluster", "type", fmt.Sprintf("%T", e.ObjectOld)) return false } - log = log.WithValues("namespace", oldCluster.Namespace, "cluster", oldCluster.Name) + log = log.WithValues("Namespace", oldCluster.Namespace, "Cluster", oldCluster.Name) newCluster := e.ObjectNew.(*clusterv1.Cluster) @@ -129,7 +129,7 @@ func ClusterUpdateUnpaused(logger logr.Logger) predicate.Funcs { log.V(4).Info("Expected Cluster", "type", fmt.Sprintf("%T", e.ObjectOld)) return false } - log = log.WithValues("namespace", oldCluster.Namespace, "cluster", oldCluster.Name) + log = log.WithValues("Namespace", oldCluster.Namespace, "Cluster", oldCluster.Name) newCluster := e.ObjectNew.(*clusterv1.Cluster) @@ -188,7 +188,7 @@ func ClusterControlPlaneInitialized(logger logr.Logger) predicate.Funcs { log.V(4).Info("Expected Cluster", "type", fmt.Sprintf("%T", e.ObjectOld)) return false } - log = log.WithValues("namespace", oldCluster.Namespace, "cluster", oldCluster.Name) + log = log.WithValues("Namespace", oldCluster.Namespace, "Cluster", oldCluster.Name) newCluster := e.ObjectNew.(*clusterv1.Cluster) @@ -259,7 +259,7 @@ func processIfTopologyManaged(logger logr.Logger, object client.Object) bool { return false } - log := logger.WithValues("namespace", cluster.Namespace, "cluster", cluster.Name) + log := logger.WithValues("Namespace", cluster.Namespace, 
"Cluster", cluster.Name) if cluster.Spec.Topology != nil { log.V(6).Info("Cluster has topology, allowing further processing")