Ensure consistent de-capitalization in logging messages
Signed-off-by: Feruzjon Muyassarov <[email protected]>
fmuyassarov committed May 20, 2024
1 parent 40d7cf0 commit 8c716ef
Showing 13 changed files with 19 additions and 20 deletions.
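
Most hunks below make the same edit: the wrapped error stays the first argument to logr's Error (obtained via controller-runtime's log.FromContext), and the short static message passed as the second argument now starts with a lowercase letter. A minimal, self-contained sketch of that convention follows; it is not code from this commit, and listNodePools plus the injected list function are hypothetical stand-ins for the real controller and kubeClient.List calls.

package main

import (
	"context"
	"errors"
	"fmt"

	"sigs.k8s.io/controller-runtime/pkg/log"
)

// listNodePools is a hypothetical stand-in for a controller method; it only
// illustrates the logging convention, not Karpenter's real client usage.
func listNodePools(ctx context.Context, list func(context.Context) error) error {
	if err := list(ctx); err != nil {
		// Wrap the underlying cause with %w and keep the logr message lowercase,
		// matching the style applied across this commit.
		log.FromContext(ctx).Error(fmt.Errorf("listing nodepools, %w", err), "disruption error")
		return err
	}
	return nil
}

func main() {
	// With no logger injected into the context, log.FromContext falls back to
	// controller-runtime's package-level delegating logger, so this still runs
	// (call log.SetLogger first if you want the message printed).
	_ = listNodePools(context.Background(), func(context.Context) error {
		return errors.New("connection refused")
	})
}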
4 changes: 2 additions & 2 deletions pkg/controllers/disruption/controller.go
@@ -284,7 +284,7 @@ func (c *Controller) logAbnormalRuns(ctx context.Context) {
func (c *Controller) logInvalidBudgets(ctx context.Context) {
nodePoolList := &v1beta1.NodePoolList{}
if err := c.kubeClient.List(ctx, nodePoolList); err != nil {
- log.FromContext(ctx).Error(fmt.Errorf("listing nodepools, %w", err), "Disruption error")
+ log.FromContext(ctx).Error(fmt.Errorf("listing nodepools, %w", err), "disruption error")
return
}
var buf bytes.Buffer
@@ -295,6 +295,6 @@ func (c *Controller) logInvalidBudgets(ctx context.Context) {
}
}
if buf.Len() > 0 {
- log.FromContext(ctx).Error(fmt.Errorf("detected disruption budget errors, %w", errors.New(buf.String())), "Disruption error")
+ log.FromContext(ctx).Error(fmt.Errorf("detected disruption budget errors, %w", errors.New(buf.String())), "disruption error")
}
}
4 changes: 2 additions & 2 deletions pkg/controllers/disruption/helpers.go
@@ -151,7 +151,7 @@ func GetPodEvictionCost(ctx context.Context, p *v1.Pod) float64 {
podDeletionCost, err := strconv.ParseFloat(podDeletionCostStr, 64)
if err != nil {
log.FromContext(ctx).Error(fmt.Errorf("parsing %s=%s from pod %s, %w",
- v1.PodDeletionCost, podDeletionCostStr, client.ObjectKeyFromObject(p), err), "Disruption error")
+ v1.PodDeletionCost, podDeletionCostStr, client.ObjectKeyFromObject(p), err), "disruption error")
} else {
// the pod deletion disruptionCost is in [-2147483647, 2147483647]
// the min pod disruptionCost makes one pod ~ -15 pods, and the max pod disruptionCost to ~ 17 pods.
@@ -288,7 +288,7 @@ func BuildNodePoolMap(ctx context.Context, kubeClient client.Client, cloudProvid
if err != nil {
// don't error out on building the node pool, we just won't be able to handle any nodes that
// were created by it
- log.FromContext(ctx).Error(fmt.Errorf("listing instance types for %s, %w", np.Name, err), "Disruption error")
+ log.FromContext(ctx).Error(fmt.Errorf("listing instance types for %s, %w", np.Name, err), "disruption error")
continue
}
if len(nodePoolInstanceTypes) == 0 {
2 changes: 1 addition & 1 deletion pkg/controllers/disruption/singlenodeconsolidation.go
@@ -68,7 +68,7 @@ func (s *SingleNodeConsolidation) ComputeCommand(ctx context.Context, disruption
// compute a possible consolidation option
cmd, results, err := s.computeConsolidation(ctx, candidate)
if err != nil {
- log.FromContext(ctx).Error(fmt.Errorf("computing consolidation, %w", err), "Disruption error")
+ log.FromContext(ctx).Error(fmt.Errorf("computing consolidation, %w", err), "disruption error")
continue
}
if cmd.Action() == NoOpAction {
2 changes: 1 addition & 1 deletion pkg/controllers/disruption/types.go
@@ -116,7 +116,7 @@ func NewCandidate(ctx context.Context, kubeClient client.Client, recorder events
}
pods, err := node.Pods(ctx, kubeClient)
if err != nil {
- log.FromContext(ctx).Error(fmt.Errorf("determining node pods, %w", err), "Disruption error")
+ log.FromContext(ctx).Error(fmt.Errorf("determining node pods, %w", err), "disruption error")
return nil, fmt.Errorf("getting pods from state node, %w", err)
}
for _, po := range pods {
2 changes: 1 addition & 1 deletion pkg/controllers/node/termination/terminator/eviction.go
@@ -178,7 +178,7 @@ func (q *Queue) Evict(ctx context.Context, key QueueKey) bool {
}}, fmt.Errorf("evicting pod %s/%s violates a PDB", key.Namespace, key.Name)))
return false
}
- log.FromContext(ctx).Error(err, "Eviction error")
+ log.FromContext(ctx).Error(err, "eviction error")
return false
}
q.recorder.Publish(terminatorevents.EvictPod(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: key.Name, Namespace: key.Namespace}}))
2 changes: 1 addition & 1 deletion pkg/controllers/nodeclaim/consistency/controller.go
@@ -102,7 +102,7 @@ func (c *Controller) Reconcile(ctx context.Context, nodeClaim *v1beta1.NodeClaim
return reconcile.Result{}, fmt.Errorf("checking node with %T, %w", check, err)
}
for _, issue := range issues {
- log.FromContext(ctx).Error(err, "Consistency error")
+ log.FromContext(ctx).Error(err, "consistency error")
consistencyErrors.With(prometheus.Labels{checkLabel: reflect.TypeOf(check).Elem().Name()}).Inc()
c.recorder.Publish(FailedConsistencyCheckEvent(nodeClaim, string(issue)))
}
2 changes: 1 addition & 1 deletion pkg/controllers/nodeclaim/lifecycle/launch.go
@@ -83,7 +83,7 @@ func (l *Launch) launchNodeClaim(ctx context.Context, nodeClaim *v1beta1.NodeCla
switch {
case cloudprovider.IsInsufficientCapacityError(err):
l.recorder.Publish(InsufficientCapacityErrorEvent(nodeClaim, err))
- log.FromContext(ctx).Error(fmt.Errorf("launching nodeclaim, %w", err), "NodeClaim lifecycle error")
+ log.FromContext(ctx).Error(fmt.Errorf("launching nodeclaim, %w", err), "nodeClaim lifecycle error")

if err = l.kubeClient.Delete(ctx, nodeClaim); err != nil {
return nil, client.IgnoreNotFound(err)
8 changes: 4 additions & 4 deletions pkg/controllers/provisioning/provisioner.go
@@ -199,7 +199,7 @@ func (p *Provisioner) NewScheduler(ctx context.Context, pods []*v1.Pod, stateNod
}
nodePoolList.Items = lo.Filter(nodePoolList.Items, func(n v1beta1.NodePool, _ int) bool {
if err := n.RuntimeValidate(); err != nil {
- log.FromContext(ctx).WithValues("nodepool", n.Name).Error(fmt.Errorf("nodepool failed validation, %w", err), "Provisioner error")
+ log.FromContext(ctx).WithValues("nodepool", n.Name).Error(fmt.Errorf("nodepool failed validation, %w", err), "provisioner error")
return false
}
return n.DeletionTimestamp.IsZero()
@@ -221,7 +221,7 @@ func (p *Provisioner) NewScheduler(ctx context.Context, pods []*v1.Pod, stateNod
if err != nil {
// we just log an error and skip the provisioner to prevent a single mis-configured provisioner from stopping
// all scheduling
- log.FromContext(ctx).WithValues("nodepool", nodePool.Name).Error(fmt.Errorf("skipping, unable to resolve instance types %w", err), "Provisioner error")
+ log.FromContext(ctx).WithValues("nodepool", nodePool.Name).Error(fmt.Errorf("skipping, unable to resolve instance types %w", err), "provisioner error")
continue
}
if len(instanceTypeOptions) == 0 {
@@ -317,7 +317,7 @@ func (p *Provisioner) Schedule(ctx context.Context) (scheduler.Results, error) {
s, err := p.NewScheduler(ctx, pods, nodes.Active())
if err != nil {
if errors.Is(err, ErrNodePoolsNotFound) {
- log.FromContext(ctx).Info(ErrNodePoolsNotFound.Error())
+ log.FromContext(ctx).Error(ErrNodePoolsNotFound, "nodepool not found")
return scheduler.Results{}, nil
}
return scheduler.Results{}, fmt.Errorf("creating scheduler, %w", err)
@@ -437,7 +437,7 @@ func (p *Provisioner) injectVolumeTopologyRequirements(ctx context.Context, pods
var schedulablePods []*v1.Pod
for _, pod := range pods {
if err := p.volumeTopology.Inject(ctx, pod); err != nil {
- log.FromContext(ctx).WithValues("pod", client.ObjectKeyFromObject(pod)).Error(fmt.Errorf("getting volume topology requirements, %w", err), "Provisioner error")
+ log.FromContext(ctx).WithValues("pod", client.ObjectKeyFromObject(pod)).Error(fmt.Errorf("getting volume topology requirements, %w", err), "provisioner error")
} else {
schedulablePods = append(schedulablePods, pod)
}
4 changes: 2 additions & 2 deletions pkg/controllers/provisioning/scheduling/scheduler.go
@@ -104,7 +104,7 @@ type Results struct {
func (r Results) Record(ctx context.Context, recorder events.Recorder, cluster *state.Cluster) {
// Report failures and nominations
for p, err := range r.PodErrors {
- log.FromContext(ctx).WithValues("pod", client.ObjectKeyFromObject(p)).Error(fmt.Errorf("could not schedule pod, %w", err), "Scheduler error")
+ log.FromContext(ctx).WithValues("pod", client.ObjectKeyFromObject(p)).Error(fmt.Errorf("could not schedule pod, %w", err), "scheduler error")
recorder.Publish(PodFailedToScheduleEvent(p, err))
}
for _, existing := range r.ExistingNodes {
@@ -223,7 +223,7 @@ func (s *Scheduler) Solve(ctx context.Context, pods []*v1.Pod) Results {
q.Push(pod, relaxed)
if relaxed {
if err := s.topology.Update(ctx, pod); err != nil {
- log.FromContext(ctx).Error(fmt.Errorf("updating topology, %w", err), "Scheduler error")
+ log.FromContext(ctx).Error(fmt.Errorf("updating topology, %w", err), "scheduler error")
}
}
}
4 changes: 2 additions & 2 deletions pkg/controllers/state/cluster.go
@@ -94,12 +94,12 @@ func (c *Cluster) Synced(ctx context.Context) (synced bool) {
}()
nodeClaimList := &v1beta1.NodeClaimList{}
if err := c.kubeClient.List(ctx, nodeClaimList); err != nil {
- log.FromContext(ctx).Error(fmt.Errorf("checking cluster state sync, %w", err), "Cluster state error")
+ log.FromContext(ctx).Error(fmt.Errorf("checking cluster state sync, %w", err), "cluster state error")
return false
}
nodeList := &v1.NodeList{}
if err := c.kubeClient.List(ctx, nodeList); err != nil {
- log.FromContext(ctx).Error(fmt.Errorf("checking cluster state sync, %w", err), "Cluster state error")
+ log.FromContext(ctx).Error(fmt.Errorf("checking cluster state sync, %w", err), "cluster state error")
return false
}
c.mu.RLock()
2 changes: 1 addition & 1 deletion pkg/operator/controller/controller.go
@@ -119,7 +119,7 @@ func (s *Singleton) reconcile(ctx context.Context) time.Duration {
case err != nil:
reconcileErrors.WithLabelValues(s.name).Inc()
reconcileTotal.WithLabelValues(s.name, labelError).Inc()
- log.FromContext(ctx).Error(err, "Reconciler error")
+ log.FromContext(ctx).Error(err, "reconciler error")
return s.rateLimiter.When(singletonRequest)
case res.Requeue:
reconcileTotal.WithLabelValues(s.name, labelRequeue).Inc()
1 change: 0 additions & 1 deletion pkg/operator/operator.go
@@ -73,7 +73,6 @@ const (

var (
BuildInfo = prometheus.NewGaugeVec(
-
prometheus.GaugeOpts{
Namespace: metrics.Namespace,
Name: "build_info",
2 changes: 1 addition & 1 deletion pkg/scheduling/volumeusage.go
@@ -88,7 +88,7 @@ func GetVolumes(ctx context.Context, kubeClient client.Client, pod *v1.Pod) (Vol
// computing limits, otherwise Karpenter may never be able to update its cluster state.
if err != nil {
if errors.IsNotFound(err) {
- log.FromContext(ctx).WithValues("pod", pod.Name, "volume", volume.Name).Error(fmt.Errorf("failed tracking CSI volume limits for volume, %w", err), "Volume usage tracking error")
+ log.FromContext(ctx).WithValues("pod", pod.Name, "volume", volume.Name).Error(fmt.Errorf("failed tracking CSI volume limits for volume, %w", err), "volume usage tracking error")
continue
}
return nil, fmt.Errorf("failed updating volume limits, %w", err)
