Ensure consistent de-capitalization in logging messages
Signed-off-by: Feruzjon Muyassarov <[email protected]>
fmuyassarov committed May 20, 2024
1 parent 40d7cf0 commit 1c4a5ef
Showing 10 changed files with 11 additions and 12 deletions.
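All of the touched call sites log through the logr-style logger returned by controller-runtime's log.FromContext, whose Error method takes an error, a short message, and optional key/value pairs. The sketch below uses a hypothetical helper and a stubbed error (neither exists in Karpenter) to illustrate the message style these changes standardize on: short, lowercase, non-sentence messages.

package sketch

import (
	"context"
	"errors"

	"sigs.k8s.io/controller-runtime/pkg/log"
)

// errList stands in for an error returned by a real client call; it is
// stubbed only to keep this sketch self-contained.
var errList = errors.New("listing nodepools, connection refused")

// logListFailure is a hypothetical helper illustrating the convention this
// commit applies: Error(err, msg, keysAndValues...) with a lowercase,
// non-sentence message.
func logListFailure(ctx context.Context) {
	logger := log.FromContext(ctx)

	// Before: logger.Error(errList, "Disruption error")
	// After:  the message starts with a lowercase letter.
	logger.Error(errList, "disruption error")

	// Structured fields are attached with WithValues, as in the provisioner
	// and scheduler call sites changed below.
	logger.WithValues("nodepool", "default").Error(errList, "provisioner error")
}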
2 changes: 1 addition & 1 deletion pkg/controllers/disruption/controller.go
@@ -284,7 +284,7 @@ func (c *Controller) logAbnormalRuns(ctx context.Context) {
func (c *Controller) logInvalidBudgets(ctx context.Context) {
nodePoolList := &v1beta1.NodePoolList{}
if err := c.kubeClient.List(ctx, nodePoolList); err != nil {
- log.FromContext(ctx).Error(fmt.Errorf("listing nodepools, %w", err), "Disruption error")
+ log.FromContext(ctx).Error(fmt.Errorf("listing nodepools, %w", err), "disruption error")
return
}
var buf bytes.Buffer
2 changes: 1 addition & 1 deletion pkg/controllers/node/termination/terminator/eviction.go
@@ -178,7 +178,7 @@ func (q *Queue) Evict(ctx context.Context, key QueueKey) bool {
}}, fmt.Errorf("evicting pod %s/%s violates a PDB", key.Namespace, key.Name)))
return false
}
- log.FromContext(ctx).Error(err, "Eviction error")
+ log.FromContext(ctx).Error(err, "eviction error")
return false
}
q.recorder.Publish(terminatorevents.EvictPod(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: key.Name, Namespace: key.Namespace}}))
2 changes: 1 addition & 1 deletion pkg/controllers/nodeclaim/consistency/controller.go
@@ -102,7 +102,7 @@ func (c *Controller) Reconcile(ctx context.Context, nodeClaim *v1beta1.NodeClaim
return reconcile.Result{}, fmt.Errorf("checking node with %T, %w", check, err)
}
for _, issue := range issues {
- log.FromContext(ctx).Error(err, "Consistency error")
+ log.FromContext(ctx).Error(err, "consistency error")
consistencyErrors.With(prometheus.Labels{checkLabel: reflect.TypeOf(check).Elem().Name()}).Inc()
c.recorder.Publish(FailedConsistencyCheckEvent(nodeClaim, string(issue)))
}
2 changes: 1 addition & 1 deletion pkg/controllers/nodeclaim/lifecycle/launch.go
@@ -83,7 +83,7 @@ func (l *Launch) launchNodeClaim(ctx context.Context, nodeClaim *v1beta1.NodeCla
switch {
case cloudprovider.IsInsufficientCapacityError(err):
l.recorder.Publish(InsufficientCapacityErrorEvent(nodeClaim, err))
- log.FromContext(ctx).Error(fmt.Errorf("launching nodeclaim, %w", err), "NodeClaim lifecycle error")
+ log.FromContext(ctx).Error(fmt.Errorf("launching nodeclaim, %w", err), "nodeClaim lifecycle error")

if err = l.kubeClient.Delete(ctx, nodeClaim); err != nil {
return nil, client.IgnoreNotFound(err)
4 changes: 2 additions & 2 deletions pkg/controllers/provisioning/provisioner.go
@@ -199,7 +199,7 @@ func (p *Provisioner) NewScheduler(ctx context.Context, pods []*v1.Pod, stateNod
}
nodePoolList.Items = lo.Filter(nodePoolList.Items, func(n v1beta1.NodePool, _ int) bool {
if err := n.RuntimeValidate(); err != nil {
- log.FromContext(ctx).WithValues("nodepool", n.Name).Error(fmt.Errorf("nodepool failed validation, %w", err), "Provisioner error")
+ log.FromContext(ctx).WithValues("nodepool", n.Name).Error(fmt.Errorf("nodepool failed validation, %w", err), "provisioner error")
return false
}
return n.DeletionTimestamp.IsZero()
@@ -317,7 +317,7 @@ func (p *Provisioner) Schedule(ctx context.Context) (scheduler.Results, error) {
s, err := p.NewScheduler(ctx, pods, nodes.Active())
if err != nil {
if errors.Is(err, ErrNodePoolsNotFound) {
- log.FromContext(ctx).Info(ErrNodePoolsNotFound.Error())
+ log.FromContext(ctx).Error(ErrNodePoolsNotFound, "nodepool not found")
return scheduler.Results{}, nil
}
return scheduler.Results{}, fmt.Errorf("creating scheduler, %w", err)
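Beyond de-capitalization, this hunk replaces an Info call with an Error call, which logs the sentinel as a structured error rather than flattening its text into the message string. A minimal sketch of the two logr call shapes involved, with ErrNodePoolsNotFound stubbed for illustration:

package sketch

import (
	"context"
	"errors"

	"sigs.k8s.io/controller-runtime/pkg/log"
)

// ErrNodePoolsNotFound is stubbed here to keep the example self-contained;
// the real sentinel is the one checked in the hunk above.
var ErrNodePoolsNotFound = errors.New("no nodepools found")

func logNoNodePools(ctx context.Context) {
	logger := log.FromContext(ctx)

	// Old shape: Info(msg, keysAndValues...) with the error text folded
	// into the message.
	logger.Info(ErrNodePoolsNotFound.Error())

	// New shape: Error(err, msg, keysAndValues...) keeps the error as a
	// structured field alongside a short lowercase message.
	logger.Error(ErrNodePoolsNotFound, "nodepool not found")
}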
2 changes: 1 addition & 1 deletion pkg/controllers/provisioning/scheduling/scheduler.go
@@ -104,7 +104,7 @@ type Results struct {
func (r Results) Record(ctx context.Context, recorder events.Recorder, cluster *state.Cluster) {
// Report failures and nominations
for p, err := range r.PodErrors {
- log.FromContext(ctx).WithValues("pod", client.ObjectKeyFromObject(p)).Error(fmt.Errorf("could not schedule pod, %w", err), "Scheduler error")
+ log.FromContext(ctx).WithValues("pod", client.ObjectKeyFromObject(p)).Error(fmt.Errorf("could not schedule pod, %w", err), "scheduler error")
recorder.Publish(PodFailedToScheduleEvent(p, err))
}
for _, existing := range r.ExistingNodes {
4 changes: 2 additions & 2 deletions pkg/controllers/state/cluster.go
@@ -94,12 +94,12 @@ func (c *Cluster) Synced(ctx context.Context) (synced bool) {
}()
nodeClaimList := &v1beta1.NodeClaimList{}
if err := c.kubeClient.List(ctx, nodeClaimList); err != nil {
- log.FromContext(ctx).Error(fmt.Errorf("checking cluster state sync, %w", err), "Cluster state error")
+ log.FromContext(ctx).Error(fmt.Errorf("checking cluster state sync, %w", err), "cluster state error")
return false
}
nodeList := &v1.NodeList{}
if err := c.kubeClient.List(ctx, nodeList); err != nil {
- log.FromContext(ctx).Error(fmt.Errorf("checking cluster state sync, %w", err), "Cluster state error")
+ log.FromContext(ctx).Error(fmt.Errorf("checking cluster state sync, %w", err), "cluster state error")
return false
}
c.mu.RLock()
2 changes: 1 addition & 1 deletion pkg/operator/controller/controller.go
@@ -119,7 +119,7 @@ func (s *Singleton) reconcile(ctx context.Context) time.Duration {
case err != nil:
reconcileErrors.WithLabelValues(s.name).Inc()
reconcileTotal.WithLabelValues(s.name, labelError).Inc()
- log.FromContext(ctx).Error(err, "Reconciler error")
+ log.FromContext(ctx).Error(err, "reconciler error")
return s.rateLimiter.When(singletonRequest)
case res.Requeue:
reconcileTotal.WithLabelValues(s.name, labelRequeue).Inc()
1 change: 0 additions & 1 deletion pkg/operator/operator.go
@@ -73,7 +73,6 @@ const (

var (
BuildInfo = prometheus.NewGaugeVec(
-
prometheus.GaugeOpts{
Namespace: metrics.Namespace,
Name: "build_info",
2 changes: 1 addition & 1 deletion pkg/scheduling/volumeusage.go
@@ -88,7 +88,7 @@ func GetVolumes(ctx context.Context, kubeClient client.Client, pod *v1.Pod) (Vol
// computing limits, otherwise Karpenter may never be able to update its cluster state.
if err != nil {
if errors.IsNotFound(err) {
- log.FromContext(ctx).WithValues("pod", pod.Name, "volume", volume.Name).Error(fmt.Errorf("failed tracking CSI volume limits for volume, %w", err), "Volume usage tracking error")
+ log.FromContext(ctx).WithValues("pod", pod.Name, "volume", volume.Name).Error(fmt.Errorf("failed tracking CSI volume limits for volume, %w", err), "volume usage tracking error")
continue
}
return nil, fmt.Errorf("failed updating volume limits, %w", err)
