diff --git a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go index 088970e1e8c2..7a4743826317 100644 --- a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go +++ b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go @@ -173,7 +173,10 @@ func (r *KubeadmConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reques if configOwner == nil { return ctrl.Result{}, nil } - log = log.WithValues(configOwner.LowerCamelCaseKind(), klog.KRef(configOwner.GetNamespace(), configOwner.GetName()), "resourceVersion", configOwner.GetResourceVersion()) + log = log.WithValues(configOwner.GetKind(), klog.KRef(configOwner.GetNamespace(), configOwner.GetName()), "resourceVersion", configOwner.GetResourceVersion()) + + log = log.WithValues("Cluster", klog.KRef(configOwner.GetNamespace(), configOwner.ClusterName())) + ctx = ctrl.LoggerInto(ctx, log) // Lookup the cluster the config owner is associated with cluster, err := util.GetClusterByName(ctx, r.Client, configOwner.GetNamespace(), configOwner.ClusterName()) @@ -191,8 +194,6 @@ func (r *KubeadmConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reques return ctrl.Result{}, err } - ctx = ctrl.LoggerInto(ctx, log.WithValues("cluster", klog.KObj(cluster))) - if annotations.IsPaused(cluster, config) { log.Info("Reconciliation is paused for this object") return ctrl.Result{}, nil @@ -1001,7 +1002,7 @@ func (r *KubeadmConfigReconciler) storeBootstrapData(ctx context.Context, scope if !apierrors.IsAlreadyExists(err) { return errors.Wrapf(err, "failed to create bootstrap data secret for KubeadmConfig %s/%s", scope.Config.Namespace, scope.Config.Name) } - log.Info("bootstrap data secret for KubeadmConfig already exists, updating", "secret", klog.KObj(secret)) + log.Info("bootstrap data secret for KubeadmConfig already exists, updating", "Secret", klog.KObj(secret)) if err := r.Client.Update(ctx, secret); err != nil { return errors.Wrapf(err, "failed to update bootstrap data secret for KubeadmConfig %s/%s", scope.Config.Namespace, scope.Config.Name) } diff --git a/bootstrap/kubeadm/internal/locking/control_plane_init_mutex.go b/bootstrap/kubeadm/internal/locking/control_plane_init_mutex.go index 41bac632cc5a..4ea28815cfc5 100644 --- a/bootstrap/kubeadm/internal/locking/control_plane_init_mutex.go +++ b/bootstrap/kubeadm/internal/locking/control_plane_init_mutex.go @@ -51,7 +51,7 @@ func NewControlPlaneInitMutex(client client.Client) *ControlPlaneInitMutex { func (c *ControlPlaneInitMutex) Lock(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine) bool { sema := newSemaphore() cmName := configMapName(cluster.Name) - log := ctrl.LoggerFrom(ctx, "configMap", klog.KRef(cluster.Namespace, cmName)) + log := ctrl.LoggerFrom(ctx, "ConfigMap", klog.KRef(cluster.Namespace, cmName)) err := c.client.Get(ctx, client.ObjectKey{ Namespace: cluster.Namespace, Name: cmName, @@ -113,7 +113,7 @@ func (c *ControlPlaneInitMutex) Lock(ctx context.Context, cluster *clusterv1.Clu func (c *ControlPlaneInitMutex) Unlock(ctx context.Context, cluster *clusterv1.Cluster) bool { sema := newSemaphore() cmName := configMapName(cluster.Name) - log := ctrl.LoggerFrom(ctx, "configMap", klog.KRef(cluster.Namespace, cmName)) + log := ctrl.LoggerFrom(ctx, "ConfigMap", klog.KRef(cluster.Namespace, cmName)) err := c.client.Get(ctx, client.ObjectKey{ Namespace: cluster.Namespace, Name: cmName, diff --git a/bootstrap/util/configowner.go 
b/bootstrap/util/configowner.go index b531d08cf9d3..1d42e4c7e7c6 100644 --- a/bootstrap/util/configowner.go +++ b/bootstrap/util/configowner.go @@ -31,7 +31,6 @@ import ( "sigs.k8s.io/cluster-api/controllers/external" expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" "sigs.k8s.io/cluster-api/feature" - "sigs.k8s.io/cluster-api/util" ) // ConfigOwner provides a data interface for different config owner types. @@ -124,12 +123,6 @@ func (co ConfigOwner) KubernetesVersion() string { return version } -// LowerCamelCaseKind mirrors how controller runtime formats the object's kind when used as a logging key -// for the object being reconciled. -func (co ConfigOwner) LowerCamelCaseKind() string { - return util.LowerCamelCaseKind(co.Unstructured) -} - // GetConfigOwner returns the Unstructured object owning the current resource. func GetConfigOwner(ctx context.Context, c client.Client, obj metav1.Object) (*ConfigOwner, error) { allowedGKs := []schema.GroupKind{ diff --git a/cmd/clusterctl/client/cluster/mover.go b/cmd/clusterctl/client/cluster/mover.go index 5e4086ee6f8a..dca8997f5565 100644 --- a/cmd/clusterctl/client/cluster/mover.go +++ b/cmd/clusterctl/client/cluster/mover.go @@ -30,6 +30,7 @@ import ( kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/version" + "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -540,7 +541,7 @@ func setClusterPause(proxy Proxy, clusters []*node, value bool, dryRun bool) err setClusterPauseBackoff := newWriteBackoff() for i := range clusters { cluster := clusters[i] - log.V(5).Info("Set Cluster.Spec.Paused", "paused", value, "cluster", cluster.identity.Name, "namespace", cluster.identity.Namespace) + log.V(5).Info("Set Cluster.Spec.Paused", "paused", value, "Cluster", klog.KRef(cluster.identity.Namespace, cluster.identity.Name)) // Nb. The operation is wrapped in a retry loop to make setClusterPause more resilient to unexpected conditions. if err := retryWithExponentialBackoff(setClusterPauseBackoff, func() error { diff --git a/controllers/remote/cluster_cache_tracker.go b/controllers/remote/cluster_cache_tracker.go index fe6dfe4f20a7..77e4649fc1f5 100644 --- a/controllers/remote/cluster_cache_tracker.go +++ b/controllers/remote/cluster_cache_tracker.go @@ -345,7 +345,7 @@ func (t *ClusterCacheTracker) deleteAccessor(_ context.Context, cluster client.O return } - log := t.log.WithValues("cluster", klog.KRef(cluster.Namespace, cluster.Name)) + log := t.log.WithValues("Cluster", klog.KRef(cluster.Namespace, cluster.Name)) log.V(2).Info("Deleting clusterAccessor") log.V(4).Info("Stopping cache") a.cache.Stop() @@ -396,7 +396,7 @@ func (t *ClusterCacheTracker) Watch(ctx context.Context, input WatchInput) error } if a.watches.Has(input.Name) { - t.log.V(6).Info("Watch already exists", "namespace", klog.KRef(input.Cluster.Namespace, ""), "cluster", klog.KRef(input.Cluster.Namespace, input.Cluster.Name), "name", input.Name) + t.log.V(6).Info("Watch already exists", "Cluster", klog.KRef(input.Cluster.Namespace, input.Cluster.Name), "name", input.Name) return nil } @@ -501,7 +501,7 @@ func (t *ClusterCacheTracker) healthCheckCluster(ctx context.Context, in *health // NB. 
we are ignoring ErrWaitTimeout because this error happens when the channel is close, that in this case // happens when the cache is explicitly stopped.F if err != nil && err != wait.ErrWaitTimeout { - t.log.Error(err, "Error health checking cluster", "cluster", klog.KRef(in.cluster.Namespace, in.cluster.Name)) + t.log.Error(err, "Error health checking cluster", "Cluster", klog.KRef(in.cluster.Namespace, in.cluster.Name)) t.deleteAccessor(ctx, in.cluster) } } diff --git a/controlplane/kubeadm/internal/controllers/controller.go b/controlplane/kubeadm/internal/controllers/controller.go index 38ab85d157bc..22ec85fdb551 100644 --- a/controlplane/kubeadm/internal/controllers/controller.go +++ b/controlplane/kubeadm/internal/controllers/controller.go @@ -143,7 +143,7 @@ func (r *KubeadmControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl. log.Info("Cluster Controller has not yet set OwnerRef") return ctrl.Result{}, nil } - log = log.WithValues("cluster", klog.KObj(cluster)) + log = log.WithValues("Cluster", klog.KObj(cluster)) ctx = ctrl.LoggerInto(ctx, log) if annotations.IsPaused(cluster, kcp) { @@ -463,7 +463,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileDelete(ctx context.Context, clu var errs []error for i := range machinesToDelete { m := machinesToDelete[i] - logger := log.WithValues("machine", klog.KObj(m)) + logger := log.WithValues("Machine", klog.KObj(m)) if err := r.Client.Delete(ctx, machinesToDelete[i]); err != nil && !apierrors.IsNotFound(err) { logger.Error(err, "Failed to cleanup owned machine") errs = append(errs, err) diff --git a/controlplane/kubeadm/internal/controllers/helpers.go b/controlplane/kubeadm/internal/controllers/helpers.go index 542be727da00..e1e800555215 100644 --- a/controlplane/kubeadm/internal/controllers/helpers.go +++ b/controlplane/kubeadm/internal/controllers/helpers.go @@ -104,7 +104,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileKubeconfig(ctx context.Context, func (r *KubeadmControlPlaneReconciler) adoptKubeconfigSecret(ctx context.Context, cluster *clusterv1.Cluster, configSecret *corev1.Secret, controllerOwnerRef metav1.OwnerReference) error { log := ctrl.LoggerFrom(ctx) - log.Info("Adopting KubeConfig secret created by v1alpha2 controllers", "secret", klog.KObj(configSecret)) + log.Info("Adopting KubeConfig secret created by v1alpha2 controllers", "Secret", klog.KObj(configSecret)) patch, err := patch.NewHelper(configSecret, r.Client) if err != nil { diff --git a/controlplane/kubeadm/internal/controllers/scale.go b/controlplane/kubeadm/internal/controllers/scale.go index bb362b01b238..e467dd64d5c6 100644 --- a/controlplane/kubeadm/internal/controllers/scale.go +++ b/controlplane/kubeadm/internal/controllers/scale.go @@ -141,7 +141,7 @@ func (r *KubeadmControlPlaneReconciler) scaleDownControlPlane( return ctrl.Result{}, err } - logger = logger.WithValues("machine", klog.KObj(machineToDelete)) + logger = logger.WithValues("Machine", klog.KObj(machineToDelete)) if err := r.Client.Delete(ctx, machineToDelete); err != nil && !apierrors.IsNotFound(err) { logger.Error(err, "Failed to delete control plane machine") r.recorder.Eventf(kcp, corev1.EventTypeWarning, "FailedScaleDown", diff --git a/controlplane/kubeadm/internal/controllers/status.go b/controlplane/kubeadm/internal/controllers/status.go index abc378a95344..c17d62c0cf7c 100644 --- a/controlplane/kubeadm/internal/controllers/status.go +++ b/controlplane/kubeadm/internal/controllers/status.go @@ -20,7 +20,6 @@ import ( "context" "github.com/pkg/errors" - "k8s.io/klog/v2" ctrl 
"sigs.k8s.io/controller-runtime" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -34,7 +33,7 @@ import ( // updateStatus is called after every reconcilitation loop in a defer statement to always make sure we have the // resource status subresourcs up-to-date. func (r *KubeadmControlPlaneReconciler) updateStatus(ctx context.Context, kcp *controlplanev1.KubeadmControlPlane, cluster *clusterv1.Cluster) error { - log := ctrl.LoggerFrom(ctx, "cluster", klog.KObj(cluster)) + log := ctrl.LoggerFrom(ctx) selector := collections.ControlPlaneSelectorForCluster(cluster.Name) // Copy label selector to its status counterpart in string format. diff --git a/docs/book/src/developer/providers/implementers-guide/controllers_and_reconciliation.md b/docs/book/src/developer/providers/implementers-guide/controllers_and_reconciliation.md index d172cc1ed1c4..0ba7a56eb0f3 100644 --- a/docs/book/src/developer/providers/implementers-guide/controllers_and_reconciliation.md +++ b/docs/book/src/developer/providers/implementers-guide/controllers_and_reconciliation.md @@ -27,7 +27,7 @@ type MailgunClusterReconciler struct { // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=mailgunclusters,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=mailgunclusters/status,verbs=get;update;patch -func (r *MailgunClusterReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { +func (r *MailgunClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { _ = context.Background() _ = r.Log.WithValues("mailguncluster", req.NamespacedName) @@ -86,7 +86,7 @@ Reconcile is only passed a name, not an object, so let's retrieve ours. Here's a naive example: ``` -func (r *MailgunClusterReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { +func (r *MailgunClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { ctx := context.Background() _ = r.Log.WithValues("mailguncluster", req.NamespacedName) diff --git a/exp/addons/internal/controllers/clusterresourceset_controller.go b/exp/addons/internal/controllers/clusterresourceset_controller.go index 92aed91066f4..0fc74041512a 100644 --- a/exp/addons/internal/controllers/clusterresourceset_controller.go +++ b/exp/addons/internal/controllers/clusterresourceset_controller.go @@ -134,7 +134,7 @@ func (r *ClusterResourceSetReconciler) Reconcile(ctx context.Context, req ctrl.R clusters, err := r.getClustersByClusterResourceSetSelector(ctx, clusterResourceSet) if err != nil { - log.Error(err, "Failed fetching clusters that matches ClusterResourceSet labels", "clusterResourceSet", klog.KObj(clusterResourceSet)) + log.Error(err, "Failed fetching clusters that matches ClusterResourceSet labels", "ClusterResourceSet", klog.KObj(clusterResourceSet)) conditions.MarkFalse(clusterResourceSet, addonsv1.ResourcesAppliedCondition, addonsv1.ClusterMatchFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) return ctrl.Result{}, err } @@ -161,9 +161,9 @@ func (r *ClusterResourceSetReconciler) Reconcile(ctx context.Context, req ctrl.R // reconcileDelete removes the deleted ClusterResourceSet from all the ClusterResourceSetBindings it is added to. 
func (r *ClusterResourceSetReconciler) reconcileDelete(ctx context.Context, clusters []*clusterv1.Cluster, crs *addonsv1.ClusterResourceSet) (ctrl.Result, error) { - log := ctrl.LoggerFrom(ctx) - for _, cluster := range clusters { + log := ctrl.LoggerFrom(ctx, "Cluster", klog.KObj(cluster)) + clusterResourceSetBinding := &addonsv1.ClusterResourceSetBinding{} clusterResourceSetBindingKey := client.ObjectKey{ Namespace: cluster.Namespace, @@ -237,7 +237,8 @@ func (r *ClusterResourceSetReconciler) getClustersByClusterResourceSetSelector(c // It applies resources best effort and continue on scenarios like: unsupported resource types, failure during creation, missing resources. // TODO: If a resource already exists in the cluster but not applied by ClusterResourceSet, the resource will be updated ? func (r *ClusterResourceSetReconciler) ApplyClusterResourceSet(ctx context.Context, cluster *clusterv1.Cluster, clusterResourceSet *addonsv1.ClusterResourceSet) error { - log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name) + log := ctrl.LoggerFrom(ctx, "Cluster", klog.KObj(cluster)) + ctx = ctrl.LoggerInto(ctx, log) remoteClient, err := r.Tracker.GetClient(ctx, util.ObjectKey(cluster)) if err != nil { diff --git a/exp/internal/controllers/machinepool_controller.go b/exp/internal/controllers/machinepool_controller.go index 1e6b26d40606..1b5e81e2a180 100644 --- a/exp/internal/controllers/machinepool_controller.go +++ b/exp/internal/controllers/machinepool_controller.go @@ -115,9 +115,12 @@ func (r *MachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Request) return ctrl.Result{}, err } + log = log.WithValues("Cluster", klog.KRef(mp.ObjectMeta.Namespace, mp.Spec.ClusterName)) + ctx = ctrl.LoggerInto(ctx, log) + cluster, err := util.GetClusterByName(ctx, r.Client, mp.ObjectMeta.Namespace, mp.Spec.ClusterName) if err != nil { - log.Error(err, "Failed to get Cluster for MachinePool.", "machinePool", klog.KObj(mp), "cluster", klog.KRef(mp.ObjectMeta.Namespace, mp.Spec.ClusterName)) + log.Error(err, "Failed to get Cluster for MachinePool.", "MachinePool", klog.KObj(mp), "Cluster", klog.KRef(mp.ObjectMeta.Namespace, mp.Spec.ClusterName)) return ctrl.Result{}, errors.Wrapf(err, "failed to get cluster %q for machinepool %q in namespace %q", mp.Spec.ClusterName, mp.Name, mp.Namespace) } diff --git a/exp/internal/controllers/machinepool_controller_noderef.go b/exp/internal/controllers/machinepool_controller_noderef.go index 3f14420c1bd2..275027b0b6d4 100644 --- a/exp/internal/controllers/machinepool_controller_noderef.go +++ b/exp/internal/controllers/machinepool_controller_noderef.go @@ -23,7 +23,6 @@ import ( "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" - "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -48,7 +47,7 @@ type getNodeReferencesResult struct { } func (r *MachinePoolReconciler) reconcileNodeRefs(ctx context.Context, cluster *clusterv1.Cluster, mp *expv1.MachinePool) (ctrl.Result, error) { - log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name) + log := ctrl.LoggerFrom(ctx) // Check that the MachinePool hasn't been deleted or in the process. if !mp.DeletionTimestamp.IsZero() { return ctrl.Result{}, nil @@ -60,14 +59,6 @@ func (r *MachinePoolReconciler) reconcileNodeRefs(ctx context.Context, cluster * return ctrl.Result{}, nil } - // Check that Cluster isn't nil. 
- if cluster == nil { - log.V(2).Info("MachinePool doesn't have a linked cluster, won't assign NodeRef") - return ctrl.Result{}, nil - } - - log = log.WithValues("cluster", klog.KObj(cluster)) - // Check that the MachinePool has valid ProviderIDList. if len(mp.Spec.ProviderIDList) == 0 && (mp.Spec.Replicas == nil || *mp.Spec.Replicas != 0) { log.V(2).Info("MachinePool doesn't have any ProviderIDs yet") diff --git a/exp/internal/controllers/machinepool_controller_phases.go b/exp/internal/controllers/machinepool_controller_phases.go index 7421c8a57b42..c24f4b42b242 100644 --- a/exp/internal/controllers/machinepool_controller_phases.go +++ b/exp/internal/controllers/machinepool_controller_phases.go @@ -26,7 +26,6 @@ import ( corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/klog/v2" "k8s.io/utils/pointer" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -167,7 +166,7 @@ func (r *MachinePoolReconciler) reconcileExternal(ctx context.Context, cluster * // reconcileBootstrap reconciles the Spec.Bootstrap.ConfigRef object on a MachinePool. func (r *MachinePoolReconciler) reconcileBootstrap(ctx context.Context, cluster *clusterv1.Cluster, m *expv1.MachinePool) (ctrl.Result, error) { - log := ctrl.LoggerFrom(ctx, "cluster", klog.KObj(cluster)) + log := ctrl.LoggerFrom(ctx) // Call generic external reconciler if we have an external reference. var bootstrapConfig *unstructured.Unstructured @@ -227,7 +226,7 @@ func (r *MachinePoolReconciler) reconcileBootstrap(ctx context.Context, cluster // reconcileInfrastructure reconciles the Spec.InfrastructureRef object on a MachinePool. func (r *MachinePoolReconciler) reconcileInfrastructure(ctx context.Context, cluster *clusterv1.Cluster, mp *expv1.MachinePool) (ctrl.Result, error) { - log := ctrl.LoggerFrom(ctx, "cluster", klog.KObj(cluster)) + log := ctrl.LoggerFrom(ctx) // Call generic external reconciler. infraReconcileResult, err := r.reconcileExternal(ctx, cluster, mp, &mp.Spec.Template.Spec.InfrastructureRef) diff --git a/hack/observability/promtail/values.yaml b/hack/observability/promtail/values.yaml index 38761da53323..c8dfa06c7271 100644 --- a/hack/observability/promtail/values.yaml +++ b/hack/observability/promtail/values.yaml @@ -11,8 +11,8 @@ config: - json: expressions: controller: - cluster: join('/',[cluster.namespace,cluster.name]) - machine: join('/',[machine.namespace,machine.name]) + cluster: join('/',[Cluster.namespace,Cluster.name]) + machine: join('/',[Machine.namespace,Machine.name]) - labels: controller: cluster: diff --git a/internal/controllers/cluster/cluster_controller.go b/internal/controllers/cluster/cluster_controller.go index 61222b1ebeec..fb70a9ee0d62 100644 --- a/internal/controllers/cluster/cluster_controller.go +++ b/internal/controllers/cluster/cluster_controller.go @@ -29,7 +29,6 @@ import ( "k8s.io/apimachinery/pkg/runtime" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/tools/record" - "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -114,8 +113,6 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re return ctrl.Result{}, err } - ctrl.LoggerInto(ctx, log.WithValues("cluster", klog.KObj(cluster))) - // Return early if the object or Cluster is paused. 
if annotations.IsPaused(cluster, cluster) { log.Info("Reconciliation is paused for this object") diff --git a/internal/controllers/machine/machine_controller.go b/internal/controllers/machine/machine_controller.go index 89c32cc54a7e..13bd5b99cdad 100644 --- a/internal/controllers/machine/machine_controller.go +++ b/internal/controllers/machine/machine_controller.go @@ -152,15 +152,15 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re return ctrl.Result{}, err } + log = log.WithValues("Cluster", klog.KRef(m.ObjectMeta.Namespace, m.Spec.ClusterName)) + ctx = ctrl.LoggerInto(ctx, log) + cluster, err := util.GetClusterByName(ctx, r.Client, m.ObjectMeta.Namespace, m.Spec.ClusterName) if err != nil { return ctrl.Result{}, errors.Wrapf(err, "failed to get cluster %q for machine %q in namespace %q", m.Spec.ClusterName, m.Name, m.Namespace) } - log = log.WithValues("cluster", klog.KObj(cluster)) - ctx = ctrl.LoggerInto(ctx, log) - // Return early if the object or Cluster is paused. if annotations.IsPaused(cluster, m) { log.Info("Reconciliation is paused for this object") @@ -301,7 +301,7 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Clu if m.Status.NodeRef != nil { nodeName = m.Status.NodeRef.Name } - log.Info("Deleting Kubernetes Node associated with Machine is not allowed", "node", klog.KRef("", nodeName), "cause", err.Error()) + log.Info("Deleting Kubernetes Node associated with Machine is not allowed", "Node", klog.KRef("", nodeName), "cause", err.Error()) default: return ctrl.Result{}, errors.Wrapf(err, "failed to check if Kubernetes Node deletion is allowed") } @@ -323,7 +323,7 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Clu return ctrl.Result{}, err } - log.Info("Draining node", "node", klog.KRef("", m.Status.NodeRef.Name)) + log.Info("Draining node", "Node", klog.KRef("", m.Status.NodeRef.Name)) // The DrainingSucceededCondition never exists before the node is drained for the first time, // so its transition time can be used to record the first time draining. // This `if` condition prevents the transition time to be changed more than once. @@ -355,7 +355,7 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Clu r.recorder.Eventf(m, corev1.EventTypeWarning, "FailedWaitForVolumeDetach", "error wait for volume detach, node %q: %v", m.Status.NodeRef.Name, err) return ctrl.Result{}, err } - log.Info("Waiting for node volumes to be detached", "node", klog.KRef("", m.Status.NodeRef.Name)) + log.Info("Waiting for node volumes to be detached", "Node", klog.KRef("", m.Status.NodeRef.Name)) return ctrl.Result{}, nil } conditions.MarkTrue(m, clusterv1.VolumeDetachSucceededCondition) @@ -395,7 +395,7 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Clu // We only delete the node after the underlying infrastructure is gone. 
// https://github.com/kubernetes-sigs/cluster-api/issues/2565 if isDeleteNodeAllowed { - log.Info("Deleting node", "node", klog.KRef("", m.Status.NodeRef.Name)) + log.Info("Deleting node", "Node", klog.KRef("", m.Status.NodeRef.Name)) var deleteNodeErr error waitErr := wait.PollImmediate(2*time.Second, r.nodeDeletionRetryTimeout, func() (bool, error) { @@ -405,7 +405,7 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Clu return true, nil }) if waitErr != nil { - log.Error(deleteNodeErr, "Timed out deleting node", "node", klog.KRef("", m.Status.NodeRef.Name)) + log.Error(deleteNodeErr, "Timed out deleting node", "Node", klog.KRef("", m.Status.NodeRef.Name)) conditions.MarkFalse(m, clusterv1.MachineNodeHealthyCondition, clusterv1.DeletionFailedReason, clusterv1.ConditionSeverityWarning, "") r.recorder.Eventf(m, corev1.EventTypeWarning, "FailedDeleteNode", "error deleting Machine's node: %v", deleteNodeErr) @@ -513,7 +513,7 @@ func (r *Reconciler) isDeleteNodeAllowed(ctx context.Context, cluster *clusterv1 } func (r *Reconciler) drainNode(ctx context.Context, cluster *clusterv1.Cluster, nodeName string) (ctrl.Result, error) { - log := ctrl.LoggerFrom(ctx, "node", klog.KRef("", nodeName)) + log := ctrl.LoggerFrom(ctx, "Node", klog.KRef("", nodeName)) restConfig, err := remote.RESTConfig(ctx, controllerName, r.Client, util.ObjectKey(cluster)) if err != nil { @@ -552,7 +552,7 @@ func (r *Reconciler) drainNode(ctx context.Context, cluster *clusterv1.Cluster, verbStr = "Evicted" } log.Info(fmt.Sprintf("%s pod from Node", verbStr), - "pod", klog.KObj(pod)) + "Pod", klog.KObj(pod)) }, Out: writer{log.Info}, ErrOut: writer{func(msg string, keysAndValues ...interface{}) { @@ -587,7 +587,7 @@ func (r *Reconciler) drainNode(ctx context.Context, cluster *clusterv1.Cluster, // because if the node is deleted before detach success, then the underline VMDK will be deleted together with the Machine // so after node draining we need to check if all volumes are detached before deleting the node. func (r *Reconciler) shouldWaitForNodeVolumes(ctx context.Context, cluster *clusterv1.Cluster, nodeName string) (bool, error) { - log := ctrl.LoggerFrom(ctx, "node", klog.KRef("", nodeName)) + log := ctrl.LoggerFrom(ctx, "Node", klog.KRef("", nodeName)) remoteClient, err := r.Tracker.GetClient(ctx, util.ObjectKey(cluster)) if err != nil { diff --git a/internal/controllers/machine/machine_controller_noderef.go b/internal/controllers/machine/machine_controller_noderef.go index f230d0966fcd..29b5a397679f 100644 --- a/internal/controllers/machine/machine_controller_noderef.go +++ b/internal/controllers/machine/machine_controller_noderef.go @@ -46,7 +46,7 @@ func (r *Reconciler) reconcileNode(ctx context.Context, cluster *clusterv1.Clust // Check that the Machine has a valid ProviderID. 
if machine.Spec.ProviderID == nil || *machine.Spec.ProviderID == "" { - log.Info("Waiting for infrastructure provider to report spec.providerID", util.LowerCamelCase(machine.Spec.InfrastructureRef.Kind), klog.KRef(machine.Spec.InfrastructureRef.Namespace, machine.Spec.InfrastructureRef.Name)) + log.Info("Waiting for infrastructure provider to report spec.providerID", machine.Spec.InfrastructureRef.Kind, klog.KRef(machine.Spec.InfrastructureRef.Namespace, machine.Spec.InfrastructureRef.Name)) conditions.MarkFalse(machine, clusterv1.MachineNodeHealthyCondition, clusterv1.WaitingForNodeRefReason, clusterv1.ConditionSeverityInfo, "") return ctrl.Result{}, nil } @@ -88,7 +88,7 @@ func (r *Reconciler) reconcileNode(ctx context.Context, cluster *clusterv1.Clust Name: node.Name, UID: node.UID, } - log.Info("Infrastructure provider reporting spec.providerID, Kubernetes node is now available", util.LowerCamelCase(machine.Spec.InfrastructureRef.Kind), klog.KRef(machine.Spec.InfrastructureRef.Namespace, machine.Spec.InfrastructureRef.Name), "providerID", providerID, "node", klog.KRef("", machine.Status.NodeRef.Name)) + log.Info("Infrastructure provider reporting spec.providerID, Kubernetes node is now available", machine.Spec.InfrastructureRef.Kind, klog.KRef(machine.Spec.InfrastructureRef.Namespace, machine.Spec.InfrastructureRef.Name), "providerID", providerID, "node", klog.KRef("", machine.Status.NodeRef.Name)) r.recorder.Event(machine, corev1.EventTypeNormal, "SuccessfulSetNodeRef", machine.Status.NodeRef.Name) } @@ -189,7 +189,7 @@ func (r *Reconciler) getNode(ctx context.Context, c client.Reader, providerID *n for key, node := range nl.Items { nodeProviderID, err := noderefutil.NewProviderID(node.Spec.ProviderID) if err != nil { - log.Error(err, "Failed to parse ProviderID", "node", klog.KRef("", nl.Items[key].GetName())) + log.Error(err, "Failed to parse ProviderID", "Node", klog.KRef("", nl.Items[key].GetName())) continue } diff --git a/internal/controllers/machine/machine_controller_phases.go b/internal/controllers/machine/machine_controller_phases.go index 5d86a9d57a53..aa8e668b4934 100644 --- a/internal/controllers/machine/machine_controller_phases.go +++ b/internal/controllers/machine/machine_controller_phases.go @@ -89,7 +89,7 @@ func (r *Reconciler) reconcilePhase(_ context.Context, m *clusterv1.Machine) { // reconcileExternal handles generic unstructured objects referenced by a Machine. func (r *Reconciler) reconcileExternal(ctx context.Context, cluster *clusterv1.Cluster, m *clusterv1.Machine, ref *corev1.ObjectReference) (external.ReconcileOutput, error) { - log := ctrl.LoggerFrom(ctx, "cluster", klog.KObj(cluster)) + log := ctrl.LoggerFrom(ctx) if err := utilconversion.UpdateReferenceAPIContract(ctx, r.Client, r.APIReader, ref); err != nil { return external.ReconcileOutput{}, err @@ -98,7 +98,7 @@ func (r *Reconciler) reconcileExternal(ctx context.Context, cluster *clusterv1.C obj, err := external.Get(ctx, r.Client, ref, m.Namespace) if err != nil { if apierrors.IsNotFound(errors.Cause(err)) { - log.Info("could not find external ref, requeueing", "refGVK", ref.GroupVersionKind(), "refName", ref.Name, "machine", klog.KObj(m)) + log.Info("could not find external ref, requeueing", "refGVK", ref.GroupVersionKind(), "refName", ref.Name, "Machine", klog.KObj(m)) return external.ReconcileOutput{RequeueAfter: externalReadyWait}, nil } return external.ReconcileOutput{}, err @@ -219,7 +219,7 @@ func (r *Reconciler) reconcileBootstrap(ctx context.Context, cluster *clusterv1. 
// If the bootstrap provider is not ready, requeue. if !ready { - log.Info("Waiting for bootstrap provider to generate data secret and report status.ready", util.LowerCamelCaseKind(bootstrapConfig), klog.KObj(bootstrapConfig)) + log.Info("Waiting for bootstrap provider to generate data secret and report status.ready", bootstrapConfig.GetKind(), klog.KObj(bootstrapConfig)) return ctrl.Result{RequeueAfter: externalReadyWait}, nil } @@ -232,7 +232,7 @@ func (r *Reconciler) reconcileBootstrap(ctx context.Context, cluster *clusterv1. } m.Spec.Bootstrap.DataSecretName = pointer.StringPtr(secretName) if !m.Status.BootstrapReady { - log.Info("Bootstrap provider generated data secret and reports status.ready", util.LowerCamelCaseKind(bootstrapConfig), klog.KObj(bootstrapConfig), "secret", klog.KRef(m.Namespace, secretName)) + log.Info("Bootstrap provider generated data secret and reports status.ready", bootstrapConfig.GetKind(), klog.KObj(bootstrapConfig), "secret", klog.KRef(m.Namespace, secretName)) } m.Status.BootstrapReady = true return ctrl.Result{}, nil @@ -274,7 +274,7 @@ func (r *Reconciler) reconcileInfrastructure(ctx context.Context, cluster *clust return ctrl.Result{}, err } if ready && !m.Status.InfrastructureReady { - log.Info("Infrastructure provider has completed machine infrastructure provisioning and reports status.ready", util.LowerCamelCaseKind(infraConfig), klog.KObj(infraConfig)) + log.Info("Infrastructure provider has completed machine infrastructure provisioning and reports status.ready", infraConfig.GetKind(), klog.KObj(infraConfig)) } m.Status.InfrastructureReady = ready @@ -286,7 +286,7 @@ func (r *Reconciler) reconcileInfrastructure(ctx context.Context, cluster *clust // If the infrastructure provider is not ready, return early. if !ready { - log.Info("Waiting for infrastructure provider to create machine infrastructure and report status.ready", util.LowerCamelCaseKind(infraConfig), klog.KObj(infraConfig)) + log.Info("Waiting for infrastructure provider to create machine infrastructure and report status.ready", infraConfig.GetKind(), klog.KObj(infraConfig)) return ctrl.Result{RequeueAfter: externalReadyWait}, nil } diff --git a/internal/controllers/machinedeployment/machinedeployment_controller.go b/internal/controllers/machinedeployment/machinedeployment_controller.go index 2705749a46f3..c3e19efe5dbc 100644 --- a/internal/controllers/machinedeployment/machinedeployment_controller.go +++ b/internal/controllers/machinedeployment/machinedeployment_controller.go @@ -119,7 +119,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re return ctrl.Result{}, err } - log = log.WithValues("machineDeployment", klog.KObj(deployment)) + log = log.WithValues("Cluster", klog.KRef(deployment.Namespace, deployment.Spec.ClusterName)) ctx = ctrl.LoggerInto(ctx, log) cluster, err := util.GetClusterByName(ctx, r.Client, deployment.Namespace, deployment.Spec.ClusterName) @@ -127,9 +127,6 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re return ctrl.Result{}, err } - log = log.WithValues("cluster", klog.KObj(cluster)) - ctx = ctrl.LoggerInto(ctx, log) - // Return early if the object or Cluster is paused. 
if annotations.IsPaused(cluster, deployment) { log.Info("Reconciliation is paused for this object") @@ -264,7 +261,7 @@ func (r *Reconciler) getMachineSetsForDeployment(ctx context.Context, d *cluster filtered := make([]*clusterv1.MachineSet, 0, len(machineSets.Items)) for idx := range machineSets.Items { ms := &machineSets.Items[idx] - log.WithValues("machineSet", klog.KObj(ms)) + log.WithValues("MachineSet", klog.KObj(ms)) selector, err := metav1.LabelSelectorAsSelector(&d.Spec.Selector) if err != nil { log.Error(err, "Skipping MachineSet, failed to get label selector from spec selector") diff --git a/internal/controllers/machinedeployment/machinedeployment_rollout_ondelete.go b/internal/controllers/machinedeployment/machinedeployment_rollout_ondelete.go index 31bb6c9ee98b..7f3db87750be 100644 --- a/internal/controllers/machinedeployment/machinedeployment_rollout_ondelete.go +++ b/internal/controllers/machinedeployment/machinedeployment_rollout_ondelete.go @@ -85,7 +85,7 @@ func (r *Reconciler) reconcileOldMachineSetsOnDelete(ctx context.Context, oldMSs totalReplicas := mdutil.GetReplicaCountForMachineSets(allMSs) scaleDownAmount := totalReplicas - *deployment.Spec.Replicas for _, oldMS := range oldMSs { - log = log.WithValues("machineSet", klog.KObj(oldMS)) + log = log.WithValues("MachineSet", klog.KObj(oldMS)) if oldMS.Spec.Replicas == nil || *oldMS.Spec.Replicas <= 0 { log.V(4).Info("fully scaled down") continue @@ -138,7 +138,7 @@ func (r *Reconciler) reconcileOldMachineSetsOnDelete(ctx context.Context, oldMSs } log.V(4).Info("Finished reconcile of Old MachineSets to account for deleted machines. Now analyzing if there's more potential to scale down") for _, oldMS := range oldMSs { - log = log.WithValues("machineSet", klog.KObj(oldMS)) + log = log.WithValues("MachineSet", klog.KObj(oldMS)) if scaleDownAmount <= 0 { break } @@ -166,7 +166,7 @@ func (r *Reconciler) reconcileOldMachineSetsOnDelete(ctx context.Context, oldMSs // reconcileNewMachineSetOnDelete handles reconciliation of the latest MachineSet associated with the MachineDeployment in the OnDelete MachineDeploymentStrategyType. 
func (r *Reconciler) reconcileNewMachineSetOnDelete(ctx context.Context, allMSs []*clusterv1.MachineSet, newMS *clusterv1.MachineSet, deployment *clusterv1.MachineDeployment) error { // logic same as reconcile logic for RollingUpdate - log := ctrl.LoggerFrom(ctx, "machineSet", klog.KObj(newMS)) + log := ctrl.LoggerFrom(ctx, "MachineSet", klog.KObj(newMS)) if newMS.Annotations != nil { if _, ok := newMS.Annotations[clusterv1.DisableMachineCreate]; ok { diff --git a/internal/controllers/machinedeployment/machinedeployment_sync.go b/internal/controllers/machinedeployment/machinedeployment_sync.go index 519a96ed17cb..141f1243f43e 100644 --- a/internal/controllers/machinedeployment/machinedeployment_sync.go +++ b/internal/controllers/machinedeployment/machinedeployment_sync.go @@ -328,7 +328,7 @@ func (r *Reconciler) scale(ctx context.Context, deployment *clusterv1.MachineDep for i := range allMSs { ms := allMSs[i] if ms.Spec.Replicas == nil { - log.Info("Spec.Replicas for machine set is nil, this is unexpected.", "machineset", ms.Name) + log.Info("Spec.Replicas for machine set is nil, this is unexpected.", "MachineSet", ms.Name) continue } diff --git a/internal/controllers/machinedeployment/mdutil/util.go b/internal/controllers/machinedeployment/mdutil/util.go index bfa88719febc..837d20d643a8 100644 --- a/internal/controllers/machinedeployment/mdutil/util.go +++ b/internal/controllers/machinedeployment/mdutil/util.go @@ -169,7 +169,7 @@ func getMaxReplicasAnnotation(ms *clusterv1.MachineSet, logger logr.Logger) (int } func getIntFromAnnotation(ms *clusterv1.MachineSet, annotationKey string, logger logr.Logger) (int32, bool) { - logger = logger.WithValues("machineSet", klog.KObj(ms)) + logger = logger.WithValues("MachineSet", klog.KObj(ms)) annotationValue, ok := ms.Annotations[annotationKey] if !ok { @@ -186,7 +186,7 @@ func getIntFromAnnotation(ms *clusterv1.MachineSet, annotationKey string, logger // SetNewMachineSetAnnotations sets new machine set's annotations appropriately by updating its revision and // copying required deployment annotations to it; it returns true if machine set's annotation is changed. 
func SetNewMachineSetAnnotations(deployment *clusterv1.MachineDeployment, newMS *clusterv1.MachineSet, newRevision string, exists bool, logger logr.Logger) bool { - logger = logger.WithValues("machineSet", klog.KObj(newMS)) + logger = logger.WithValues("MachineSet", klog.KObj(newMS)) // First, copy deployment's annotations (except for apply and revision annotations) annotationChanged := copyDeploymentAnnotationsToMachineSet(deployment, newMS) diff --git a/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go b/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go index fa7bc45df366..01b8f22095e7 100644 --- a/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go +++ b/internal/controllers/machinehealthcheck/machinehealthcheck_controller.go @@ -133,7 +133,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re return ctrl.Result{}, err } - log = log.WithValues("cluster", klog.KRef(m.Namespace, m.Spec.ClusterName)) + log = log.WithValues("Cluster", klog.KRef(m.Namespace, m.Spec.ClusterName)) ctx = ctrl.LoggerInto(ctx, log) cluster, err := util.GetClusterByName(ctx, r.Client, m.Namespace, m.Spec.ClusterName) diff --git a/internal/controllers/machinehealthcheck/machinehealthcheck_targets.go b/internal/controllers/machinehealthcheck/machinehealthcheck_targets.go index 8a3231fd3036..8f48dab05d53 100644 --- a/internal/controllers/machinehealthcheck/machinehealthcheck_targets.go +++ b/internal/controllers/machinehealthcheck/machinehealthcheck_targets.go @@ -200,7 +200,7 @@ func (r *Reconciler) getTargetsFromMHC(ctx context.Context, logger logr.Logger, targets := []healthCheckTarget{} for k := range machines { - logger.WithValues("machine", klog.KObj(&machines[k])) + logger.WithValues("Machine", klog.KObj(&machines[k])) skip, reason := shouldSkipRemediation(&machines[k]) if skip { logger.Info("skipping remediation", "reason", reason) diff --git a/internal/controllers/machineset/machineset_controller.go b/internal/controllers/machineset/machineset_controller.go index cbe99e28e6a2..a8a11283e3d6 100644 --- a/internal/controllers/machineset/machineset_controller.go +++ b/internal/controllers/machineset/machineset_controller.go @@ -131,14 +131,14 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re return ctrl.Result{}, err } + log = log.WithValues("Cluster", klog.KRef(machineSet.ObjectMeta.Namespace, machineSet.Spec.ClusterName)) + ctx = ctrl.LoggerInto(ctx, log) + cluster, err := util.GetClusterByName(ctx, r.Client, machineSet.ObjectMeta.Namespace, machineSet.Spec.ClusterName) if err != nil { return ctrl.Result{}, err } - log = log.WithValues("cluster", klog.KObj(cluster)) - ctx = ctrl.LoggerInto(ctx, log) - // Return early if the object or Cluster is paused. 
if annotations.IsPaused(cluster, machineSet) { log.Info("Reconciliation is paused for this object") @@ -257,7 +257,7 @@ func (r *Reconciler) reconcile(ctx context.Context, cluster *clusterv1.Cluster, filteredMachines := make([]*clusterv1.Machine, 0, len(allMachines.Items)) for idx := range allMachines.Items { machine := &allMachines.Items[idx] - log.WithValues("machine", klog.KObj(machine)) + log = log.WithValues("Machine", klog.KObj(machine)) if shouldExcludeMachine(machineSet, machine) { continue } @@ -278,7 +278,7 @@ func (r *Reconciler) reconcile(ctx context.Context, cluster *clusterv1.Cluster, var errs []error for _, machine := range filteredMachines { - log.WithValues("machine", klog.KObj(machine)) + log = log.WithValues("Machine", klog.KObj(machine)) // filteredMachines contains machines in deleting status to calculate correct status. // skip remediation for those in deleting status. if !machine.DeletionTimestamp.IsZero() { @@ -369,7 +369,7 @@ func (r *Reconciler) syncReplicas(ctx context.Context, ms *clusterv1.MachineSet, i+1, diff, *(ms.Spec.Replicas), len(machines))) machine := r.getNewMachine(ms) - log.WithValues("machine", klog.KObj(machine)) + log = log.WithValues("Machine", klog.KObj(machine)) // Clone and set the infrastructure and bootstrap references. var ( @@ -459,7 +459,7 @@ func (r *Reconciler) syncReplicas(ctx context.Context, ms *clusterv1.MachineSet, var errs []error machinesToDelete := getMachinesToDeletePrioritized(machines, diff, deletePriorityFunc) for _, machine := range machinesToDelete { - log.WithValues("machine", klog.KObj(machine)) + log = log.WithValues("Machine", klog.KObj(machine)) if err := r.Client.Delete(ctx, machine); err != nil { log.Error(err, "Unable to delete Machine") r.recorder.Eventf(ms, corev1.EventTypeWarning, "FailedDelete", "Failed to delete machine %q: %v", machine.Name, err) @@ -582,7 +582,7 @@ func (r *Reconciler) MachineToMachineSets(o client.Object) []ctrl.Request { // This won't log unless the global logger is set ctx := context.Background() - log := ctrl.LoggerFrom(ctx, "machine", klog.KObj(m)) + log := ctrl.LoggerFrom(ctx, "Machine", klog.KObj(m)) // Check if the controller reference is already set and // return an empty result when one is found. diff --git a/internal/controllers/topology/cluster/cluster_controller.go b/internal/controllers/topology/cluster/cluster_controller.go index f9a98a2345e0..8921981802af 100644 --- a/internal/controllers/topology/cluster/cluster_controller.go +++ b/internal/controllers/topology/cluster/cluster_controller.go @@ -26,7 +26,6 @@ import ( "k8s.io/apimachinery/pkg/types" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/tools/record" - "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" @@ -148,9 +147,6 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re cluster.APIVersion = clusterv1.GroupVersion.String() cluster.Kind = "Cluster" - log = log.WithValues("cluster", klog.KObj(cluster)) - ctx = ctrl.LoggerInto(ctx, log) - // Return early, if the Cluster does not use a managed topology. // NOTE: We're already filtering events, but this is a safeguard for cases like e.g. 
when // there are MachineDeployments which have the topology owned label, but the corresponding diff --git a/internal/controllers/topology/machinedeployment/machinedeployment_controller.go b/internal/controllers/topology/machinedeployment/machinedeployment_controller.go index 5910f4ab1951..0ce43847f6f4 100644 --- a/internal/controllers/topology/machinedeployment/machinedeployment_controller.go +++ b/internal/controllers/topology/machinedeployment/machinedeployment_controller.go @@ -21,6 +21,7 @@ import ( "github.com/pkg/errors" apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -94,6 +95,9 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu return ctrl.Result{}, errors.Wrapf(err, "failed to get MachineDeployment/%s", req.NamespacedName.Name) } + log = log.WithValues("Cluster", klog.KRef(md.Namespace, md.Spec.ClusterName)) + ctx = ctrl.LoggerInto(ctx, log) + cluster, err := util.GetClusterByName(ctx, r.Client, md.Namespace, md.Spec.ClusterName) if err != nil { return ctrl.Result{}, err diff --git a/internal/controllers/topology/machineset/machineset_controller.go b/internal/controllers/topology/machineset/machineset_controller.go index 09d129f2b113..396d34cf6d25 100644 --- a/internal/controllers/topology/machineset/machineset_controller.go +++ b/internal/controllers/topology/machineset/machineset_controller.go @@ -23,6 +23,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -95,6 +96,9 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu return ctrl.Result{}, errors.Wrapf(err, "failed to get MachineSet/%s", req.NamespacedName.Name) } + log = log.WithValues("Cluster", klog.KRef(ms.Namespace, ms.Spec.ClusterName)) + ctx = ctrl.LoggerInto(ctx, log) + cluster, err := util.GetClusterByName(ctx, r.Client, ms.Namespace, ms.Spec.ClusterName) if err != nil { return ctrl.Result{}, err diff --git a/internal/log/log.go b/internal/log/log.go index 53053cd23b78..60b12b6ca278 100644 --- a/internal/log/log.go +++ b/internal/log/log.go @@ -28,7 +28,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util" ) // LoggerFrom returns a logger with predefined values from a context.Context. 
@@ -93,7 +92,7 @@ func (l *topologyReconcileLogger) WithObject(obj client.Object) Logger { Group: obj.GetObjectKind().GroupVersionKind().GroupKind().Group, Resource: obj.GetObjectKind().GroupVersionKind().Kind, }, - util.LowerCamelCaseKind(obj), klog.KObj(obj), + obj.GetObjectKind().GroupVersionKind().Kind, klog.KObj(obj), ), } } @@ -108,7 +107,7 @@ func (l *topologyReconcileLogger) WithRef(ref *corev1.ObjectReference) Logger { Group: ref.GetObjectKind().GroupVersionKind().GroupKind().Group, Resource: ref.GetObjectKind().GroupVersionKind().Kind, }, - util.LowerCamelCaseKind(ref), klog.KRef(ref.Namespace, ref.Name), + ref.GetObjectKind().GroupVersionKind().Kind, klog.KRef(ref.Namespace, ref.Name), ), } } @@ -118,8 +117,8 @@ func (l *topologyReconcileLogger) WithMachineDeployment(md *clusterv1.MachineDep topologyName := md.Labels[clusterv1.ClusterTopologyMachineDeploymentLabelName] return &topologyReconcileLogger{ Logger: l.Logger.WithValues( - "machineDeployment", klog.KObj(md), - "machineDeploymentTopology", topologyName, + "MachineDeployment", klog.KObj(md), + "MachineDeploymentTopology", topologyName, ), } } diff --git a/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go b/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go index 5f93045b8a38..c524dcfe2e9c 100644 --- a/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go +++ b/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go @@ -25,6 +25,7 @@ import ( "github.com/pkg/errors" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/klog/v2" "k8s.io/utils/pointer" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -81,7 +82,7 @@ func (r *DockerMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Re return ctrl.Result{}, nil } - log = log.WithValues("machine-pool", machinePool.Name) + log = log.WithValues("MachinePool", machinePool.Name) // Fetch the Cluster. cluster, err := util.GetClusterFromMetadata(ctx, r.Client, machinePool.ObjectMeta) @@ -95,7 +96,8 @@ func (r *DockerMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Re return ctrl.Result{}, nil } - log = log.WithValues("cluster", cluster.Name) + log = log.WithValues("Cluster", klog.KObj(cluster)) + ctx = ctrl.LoggerInto(ctx, log) // Initialize the patch helper patchHelper, err := patch.NewHelper(dockerMachinePool, r.Client) diff --git a/test/infrastructure/docker/internal/controllers/dockercluster_controller.go b/test/infrastructure/docker/internal/controllers/dockercluster_controller.go index b977a963a349..f775d7d7aec2 100644 --- a/test/infrastructure/docker/internal/controllers/dockercluster_controller.go +++ b/test/infrastructure/docker/internal/controllers/dockercluster_controller.go @@ -74,7 +74,7 @@ func (r *DockerClusterReconciler) Reconcile(ctx context.Context, req ctrl.Reques return ctrl.Result{}, nil } - log = log.WithValues("cluster", klog.KObj(cluster)) + log = log.WithValues("Cluster", klog.KObj(cluster)) ctx = ctrl.LoggerInto(ctx, log) // Create a helper for managing a docker container hosting the loadbalancer. 
diff --git a/test/infrastructure/docker/internal/controllers/dockermachine_controller.go b/test/infrastructure/docker/internal/controllers/dockermachine_controller.go index eae7c3c8b998..4a0f73f7b55d 100644 --- a/test/infrastructure/docker/internal/controllers/dockermachine_controller.go +++ b/test/infrastructure/docker/internal/controllers/dockermachine_controller.go @@ -83,7 +83,7 @@ func (r *DockerMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reques return ctrl.Result{}, nil } - log = log.WithValues("machine", klog.KObj(machine)) + log = log.WithValues("Machine", klog.KObj(machine)) ctx = ctrl.LoggerInto(ctx, log) // Fetch the Cluster. @@ -97,7 +97,7 @@ func (r *DockerMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reques return ctrl.Result{}, nil } - log = log.WithValues("cluster", klog.KObj(cluster)) + log = log.WithValues("Cluster", klog.KObj(cluster)) ctx = ctrl.LoggerInto(ctx, log) // Return early if the object or Cluster is paused. diff --git a/util/predicates/cluster_predicates.go b/util/predicates/cluster_predicates.go index e636a44d2994..ebff711ac668 100644 --- a/util/predicates/cluster_predicates.go +++ b/util/predicates/cluster_predicates.go @@ -42,7 +42,7 @@ func ClusterCreateInfraReady(logger logr.Logger) predicate.Funcs { log.V(4).Info("Expected Cluster", "type", fmt.Sprintf("%T", e.Object)) return false } - log = log.WithValues("cluster", klog.KObj(c)) + log = log.WithValues("Cluster", klog.KObj(c)) // Only need to trigger a reconcile if the Cluster.Status.InfrastructureReady is true if c.Status.InfrastructureReady { @@ -71,7 +71,7 @@ func ClusterCreateNotPaused(logger logr.Logger) predicate.Funcs { log.V(4).Info("Expected Cluster", "type", fmt.Sprintf("%T", e.Object)) return false } - log = log.WithValues("cluster", klog.KObj(c)) + log = log.WithValues("Cluster", klog.KObj(c)) // Only need to trigger a reconcile if the Cluster.Spec.Paused is false if !c.Spec.Paused { @@ -100,7 +100,7 @@ func ClusterUpdateInfraReady(logger logr.Logger) predicate.Funcs { log.V(4).Info("Expected Cluster", "type", fmt.Sprintf("%T", e.ObjectOld)) return false } - log = log.WithValues("cluster", klog.KObj(oldCluster)) + log = log.WithValues("Cluster", klog.KObj(oldCluster)) newCluster := e.ObjectNew.(*clusterv1.Cluster) @@ -130,7 +130,7 @@ func ClusterUpdateUnpaused(logger logr.Logger) predicate.Funcs { log.V(4).Info("Expected Cluster", "type", fmt.Sprintf("%T", e.ObjectOld)) return false } - log = log.WithValues("cluster", klog.KObj(oldCluster)) + log = log.WithValues("Cluster", klog.KObj(oldCluster)) newCluster := e.ObjectNew.(*clusterv1.Cluster) @@ -189,7 +189,7 @@ func ClusterControlPlaneInitialized(logger logr.Logger) predicate.Funcs { log.V(4).Info("Expected Cluster", "type", fmt.Sprintf("%T", e.ObjectOld)) return false } - log = log.WithValues("cluster", klog.KObj(oldCluster)) + log = log.WithValues("Cluster", klog.KObj(oldCluster)) newCluster := e.ObjectNew.(*clusterv1.Cluster) @@ -260,7 +260,7 @@ func processIfTopologyManaged(logger logr.Logger, object client.Object) bool { return false } - log := logger.WithValues("cluster", klog.KObj(cluster)) + log := logger.WithValues("Cluster", klog.KObj(cluster)) if cluster.Spec.Topology != nil { log.V(6).Info("Cluster has topology, allowing further processing") diff --git a/util/util.go b/util/util.go index a5333c19c9ca..4836d7afcba4 100644 --- a/util/util.go +++ b/util/util.go @@ -604,18 +604,3 @@ func IsNil(i interface{}) bool { } return false } - -// LowerCamelCaseKind mirrors how controller runtime formats the 
object's kind when used as a logging key -// for the object being reconciled. -func LowerCamelCaseKind(obj runtime.Object) string { - return LowerCamelCase(obj.GetObjectKind().GroupVersionKind().Kind) -} - -// LowerCamelCase mirrors how controller runtime formats the object's kind when used as a logging key -// for the object being reconciled. -func LowerCamelCase(s string) string { - if s != "" { - return strings.ToLower(s[:1]) + s[1:] - } - return "" -}
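
For reviewers, a minimal sketch (not part of the patch) of the logging convention this diff converges on: keys use the object's Kind verbatim ("Cluster", "Machine", "Secret") instead of the removed LowerCamelCaseKind helpers, values are built with klog.KObj/klog.KRef, and the enriched logger is stored back into the context with ctrl.LoggerInto before dependent objects are fetched, so that even a "failed to get Cluster" error already carries the key/value pair. ExampleReconciler and its fields are hypothetical names used only for illustration.

// Hypothetical reconciler sketch illustrating the logging convention; not part of the patch.
package controllers

import (
	"context"

	"k8s.io/klog/v2"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)

// ExampleReconciler is a placeholder type, not a controller in this repository.
type ExampleReconciler struct {
	Client client.Client
}

func (r *ExampleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	log := ctrl.LoggerFrom(ctx)

	m := &clusterv1.Machine{}
	if err := r.Client.Get(ctx, req.NamespacedName, m); err != nil {
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}

	// Key is the Kind ("Cluster"); the value is a klog reference built from
	// namespace/name. It is added and pushed into the context *before* the
	// Cluster is fetched, so lookup failures are logged with the same k/v pair.
	log = log.WithValues("Cluster", klog.KRef(m.Namespace, m.Spec.ClusterName))
	ctx = ctrl.LoggerInto(ctx, log)

	// For objects already in hand, klog.KObj derives the same reference.
	log.Info("Reconciling", "Machine", klog.KObj(m))

	return ctrl.Result{}, nil
}

With a JSON-structured log sink, klog.KObj/klog.KRef values serialize roughly as {"namespace":"...","name":"..."}, which is what the updated promtail pipeline keys on via Cluster.namespace/Cluster.name and Machine.namespace/Machine.name.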