From 1c4a5ef89ab9e010a8efe62792ced6af469a1c98 Mon Sep 17 00:00:00 2001
From: Feruzjon Muyassarov
Date: Mon, 20 May 2024 22:19:33 +0300
Subject: [PATCH] Ensure consistent de-capitalization in logging messages

Signed-off-by: Feruzjon Muyassarov
---
 pkg/controllers/disruption/controller.go                | 2 +-
 pkg/controllers/node/termination/terminator/eviction.go | 2 +-
 pkg/controllers/nodeclaim/consistency/controller.go     | 2 +-
 pkg/controllers/nodeclaim/lifecycle/launch.go           | 2 +-
 pkg/controllers/provisioning/provisioner.go             | 4 ++--
 pkg/controllers/provisioning/scheduling/scheduler.go    | 2 +-
 pkg/controllers/state/cluster.go                        | 4 ++--
 pkg/operator/controller/controller.go                   | 2 +-
 pkg/operator/operator.go                                | 1 -
 pkg/scheduling/volumeusage.go                           | 2 +-
 10 files changed, 11 insertions(+), 12 deletions(-)

diff --git a/pkg/controllers/disruption/controller.go b/pkg/controllers/disruption/controller.go
index e8bc10ea00..364508debe 100644
--- a/pkg/controllers/disruption/controller.go
+++ b/pkg/controllers/disruption/controller.go
@@ -284,7 +284,7 @@ func (c *Controller) logAbnormalRuns(ctx context.Context) {
 func (c *Controller) logInvalidBudgets(ctx context.Context) {
 	nodePoolList := &v1beta1.NodePoolList{}
 	if err := c.kubeClient.List(ctx, nodePoolList); err != nil {
-		log.FromContext(ctx).Error(fmt.Errorf("listing nodepools, %w", err), "Disruption error")
+		log.FromContext(ctx).Error(fmt.Errorf("listing nodepools, %w", err), "disruption error")
 		return
 	}
 	var buf bytes.Buffer
diff --git a/pkg/controllers/node/termination/terminator/eviction.go b/pkg/controllers/node/termination/terminator/eviction.go
index 1dcdf3bc25..a3f85c4de5 100644
--- a/pkg/controllers/node/termination/terminator/eviction.go
+++ b/pkg/controllers/node/termination/terminator/eviction.go
@@ -178,7 +178,7 @@ func (q *Queue) Evict(ctx context.Context, key QueueKey) bool {
 			}}, fmt.Errorf("evicting pod %s/%s violates a PDB", key.Namespace, key.Name)))
 			return false
 		}
-		log.FromContext(ctx).Error(err, "Eviction error")
+		log.FromContext(ctx).Error(err, "eviction error")
 		return false
 	}
 	q.recorder.Publish(terminatorevents.EvictPod(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: key.Name, Namespace: key.Namespace}}))
diff --git a/pkg/controllers/nodeclaim/consistency/controller.go b/pkg/controllers/nodeclaim/consistency/controller.go
index 103dc19b0c..ebb169019f 100644
--- a/pkg/controllers/nodeclaim/consistency/controller.go
+++ b/pkg/controllers/nodeclaim/consistency/controller.go
@@ -102,7 +102,7 @@ func (c *Controller) Reconcile(ctx context.Context, nodeClaim *v1beta1.NodeClaim
 			return reconcile.Result{}, fmt.Errorf("checking node with %T, %w", check, err)
 		}
 		for _, issue := range issues {
-			log.FromContext(ctx).Error(err, "Consistency error")
+			log.FromContext(ctx).Error(err, "consistency error")
 			consistencyErrors.With(prometheus.Labels{checkLabel: reflect.TypeOf(check).Elem().Name()}).Inc()
 			c.recorder.Publish(FailedConsistencyCheckEvent(nodeClaim, string(issue)))
 		}
diff --git a/pkg/controllers/nodeclaim/lifecycle/launch.go b/pkg/controllers/nodeclaim/lifecycle/launch.go
index 7f515404c8..1bc1ffbe43 100644
--- a/pkg/controllers/nodeclaim/lifecycle/launch.go
+++ b/pkg/controllers/nodeclaim/lifecycle/launch.go
@@ -83,7 +83,7 @@ func (l *Launch) launchNodeClaim(ctx context.Context, nodeClaim *v1beta1.NodeCla
 	switch {
 	case cloudprovider.IsInsufficientCapacityError(err):
 		l.recorder.Publish(InsufficientCapacityErrorEvent(nodeClaim, err))
-		log.FromContext(ctx).Error(fmt.Errorf("launching nodeclaim, %w", err), "NodeClaim lifecycle error")
+		log.FromContext(ctx).Error(fmt.Errorf("launching nodeclaim, %w", err), "nodeClaim lifecycle error")
 		if err = l.kubeClient.Delete(ctx, nodeClaim); err != nil {
 			return nil, client.IgnoreNotFound(err)
 		}
diff --git a/pkg/controllers/provisioning/provisioner.go b/pkg/controllers/provisioning/provisioner.go
index 273c0d8b0b..30d984b465 100644
--- a/pkg/controllers/provisioning/provisioner.go
+++ b/pkg/controllers/provisioning/provisioner.go
@@ -199,7 +199,7 @@ func (p *Provisioner) NewScheduler(ctx context.Context, pods []*v1.Pod, stateNod
 	}
 	nodePoolList.Items = lo.Filter(nodePoolList.Items, func(n v1beta1.NodePool, _ int) bool {
 		if err := n.RuntimeValidate(); err != nil {
-			log.FromContext(ctx).WithValues("nodepool", n.Name).Error(fmt.Errorf("nodepool failed validation, %w", err), "Provisioner error")
+			log.FromContext(ctx).WithValues("nodepool", n.Name).Error(fmt.Errorf("nodepool failed validation, %w", err), "provisioner error")
 			return false
 		}
 		return n.DeletionTimestamp.IsZero()
@@ -317,7 +317,7 @@ func (p *Provisioner) Schedule(ctx context.Context) (scheduler.Results, error) {
 	s, err := p.NewScheduler(ctx, pods, nodes.Active())
 	if err != nil {
 		if errors.Is(err, ErrNodePoolsNotFound) {
-			log.FromContext(ctx).Info(ErrNodePoolsNotFound.Error())
+			log.FromContext(ctx).Error(ErrNodePoolsNotFound, "nodepool not found")
 			return scheduler.Results{}, nil
 		}
 		return scheduler.Results{}, fmt.Errorf("creating scheduler, %w", err)
diff --git a/pkg/controllers/provisioning/scheduling/scheduler.go b/pkg/controllers/provisioning/scheduling/scheduler.go
index cc76687811..6717315be2 100644
--- a/pkg/controllers/provisioning/scheduling/scheduler.go
+++ b/pkg/controllers/provisioning/scheduling/scheduler.go
@@ -104,7 +104,7 @@ type Results struct {
 func (r Results) Record(ctx context.Context, recorder events.Recorder, cluster *state.Cluster) {
 	// Report failures and nominations
 	for p, err := range r.PodErrors {
-		log.FromContext(ctx).WithValues("pod", client.ObjectKeyFromObject(p)).Error(fmt.Errorf("could not schedule pod, %w", err), "Scheduler error")
+		log.FromContext(ctx).WithValues("pod", client.ObjectKeyFromObject(p)).Error(fmt.Errorf("could not schedule pod, %w", err), "scheduler error")
 		recorder.Publish(PodFailedToScheduleEvent(p, err))
 	}
 	for _, existing := range r.ExistingNodes {
diff --git a/pkg/controllers/state/cluster.go b/pkg/controllers/state/cluster.go
index b06ad166f3..8c664ac98f 100644
--- a/pkg/controllers/state/cluster.go
+++ b/pkg/controllers/state/cluster.go
@@ -94,12 +94,12 @@ func (c *Cluster) Synced(ctx context.Context) (synced bool) {
 	}()
 	nodeClaimList := &v1beta1.NodeClaimList{}
 	if err := c.kubeClient.List(ctx, nodeClaimList); err != nil {
-		log.FromContext(ctx).Error(fmt.Errorf("checking cluster state sync, %w", err), "Cluster state error")
+		log.FromContext(ctx).Error(fmt.Errorf("checking cluster state sync, %w", err), "cluster state error")
 		return false
 	}
 	nodeList := &v1.NodeList{}
 	if err := c.kubeClient.List(ctx, nodeList); err != nil {
-		log.FromContext(ctx).Error(fmt.Errorf("checking cluster state sync, %w", err), "Cluster state error")
+		log.FromContext(ctx).Error(fmt.Errorf("checking cluster state sync, %w", err), "cluster state error")
 		return false
 	}
 	c.mu.RLock()
diff --git a/pkg/operator/controller/controller.go b/pkg/operator/controller/controller.go
index be91db5fd3..83d0b3c005 100644
--- a/pkg/operator/controller/controller.go
+++ b/pkg/operator/controller/controller.go
@@ -119,7 +119,7 @@ func (s *Singleton) reconcile(ctx context.Context) time.Duration {
 	case err != nil:
 		reconcileErrors.WithLabelValues(s.name).Inc()
 		reconcileTotal.WithLabelValues(s.name, labelError).Inc()
-		log.FromContext(ctx).Error(err, "Reconciler error")
+		log.FromContext(ctx).Error(err, "reconciler error")
 		return s.rateLimiter.When(singletonRequest)
 	case res.Requeue:
 		reconcileTotal.WithLabelValues(s.name, labelRequeue).Inc()
diff --git a/pkg/operator/operator.go b/pkg/operator/operator.go
index 898e4a2971..66ab806e59 100644
--- a/pkg/operator/operator.go
+++ b/pkg/operator/operator.go
@@ -73,7 +73,6 @@ const (
 
 var (
 	BuildInfo = prometheus.NewGaugeVec(
-
 		prometheus.GaugeOpts{
 			Namespace: metrics.Namespace,
 			Name:      "build_info",
diff --git a/pkg/scheduling/volumeusage.go b/pkg/scheduling/volumeusage.go
index 69ec8b2ed3..834fbd5892 100644
--- a/pkg/scheduling/volumeusage.go
+++ b/pkg/scheduling/volumeusage.go
@@ -88,7 +88,7 @@ func GetVolumes(ctx context.Context, kubeClient client.Client, pod *v1.Pod) (Vol
 		// computing limits, otherwise Karpenter may never be able to update its cluster state.
 		if err != nil {
 			if errors.IsNotFound(err) {
-				log.FromContext(ctx).WithValues("pod", pod.Name, "volume", volume.Name).Error(fmt.Errorf("failed tracking CSI volume limits for volume, %w", err), "Volume usage tracking error")
+				log.FromContext(ctx).WithValues("pod", pod.Name, "volume", volume.Name).Error(fmt.Errorf("failed tracking CSI volume limits for volume, %w", err), "volume usage tracking error")
 				continue
 			}
 			return nil, fmt.Errorf("failed updating volume limits, %w", err)