Migrate knative logger to controller-runtime injected logger
Signed-off-by: Feruzjon Muyassarov <[email protected]>
fmuyassarov committed Feb 28, 2024
1 parent 01b8127 commit 9a6bd6a
Showing 64 changed files with 273 additions and 190 deletions.
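The change is mechanical across the tree. A minimal sketch (not code from this commit; names are illustrative) of the before/after call-site pattern the migration applies: knative's sugared, printf-style logger pulled from the context is replaced by the logr.Logger that controller-runtime injects into the context, with Debugf mapping to a verbosity level on Info.

package migration

import (
	"context"
	"fmt"

	"knative.dev/pkg/logging"
	controllerruntime "sigs.k8s.io/controller-runtime"
)

func beforeMigration(ctx context.Context, name string) {
	// Old: knative sugared logger from the context, printf-style call.
	logging.FromContext(ctx).Debugf("waiting on %s", name)
}

func afterMigration(ctx context.Context, name string) {
	// New: logr.Logger from the context; debug output becomes a V-level and
	// the message is a plain string (pre-formatted or given key/value pairs).
	controllerruntime.LoggerFrom(ctx).V(3).Info(fmt.Sprintf("waiting on %s", name))
}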
3 changes: 3 additions & 0 deletions go.mod
@@ -32,8 +32,11 @@ require (
k8s.io/utils v0.0.0-20230726121419-3b25d923346b
knative.dev/pkg v0.0.0-20230712131115-7051d301e7f4
sigs.k8s.io/controller-runtime v0.17.2
sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20240215124517-56159419231e
)

require github.com/spf13/afero v1.6.0 // indirect

require (
contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d // indirect
contrib.go.opencensus.io/exporter/prometheus v0.4.0 // indirect
7 changes: 7 additions & 0 deletions go.sum
@@ -217,6 +217,7 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
@@ -254,6 +255,7 @@ github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
@@ -294,6 +296,8 @@ github.com/samber/lo v1.39.0/go.mod h1:+m/ZKRl6ClXCE2Lgf3MsQlWfh4bn1bz6CXEOxnEXn
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY=
github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
@@ -341,6 +345,7 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -683,6 +688,8 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/controller-runtime v0.17.2 h1:FwHwD1CTUemg0pW2otk7/U5/i5m2ymzvOXdbeGOUvw0=
sigs.k8s.io/controller-runtime v0.17.2/go.mod h1:+MngTvIQQQhfXtwfdGw/UOQ/aIaqsYywfCINOtwMO/s=
sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20240215124517-56159419231e h1:zlN3M47kIntFr5Z6qMOSMg8nO6lrywD94H29TPDZjZk=
sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20240215124517-56159419231e/go.mod h1:TF/lVLWS+JNNaVqJuDDictY2hZSXSsIHCx4FClMvqFg=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
3 changes: 2 additions & 1 deletion pkg/apis/v1beta1/suite_test.go
@@ -24,7 +24,8 @@ import (
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/samber/lo"
. "knative.dev/pkg/logging/testing"

. "sigs.k8s.io/karpenter/pkg/utils/testing"

"sigs.k8s.io/karpenter/pkg/apis"
"sigs.k8s.io/karpenter/pkg/apis/v1beta1"
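The test suites now dot-import a karpenter-local test helper instead of knative.dev/pkg/logging/testing. A hedged sketch of what such a helper typically looks like; the function name, signature, and use of logr's testr package here are illustrative assumptions, not necessarily the actual sigs.k8s.io/karpenter/pkg/utils/testing API.

package testinghelpers

import (
	"context"
	"testing"

	"github.com/go-logr/logr/testr"
	controllerruntime "sigs.k8s.io/controller-runtime"
)

// TestContextWithLogger (hypothetical name) returns a context whose injected
// logger writes through the test's logging facilities, so code under test can
// keep calling controllerruntime.LoggerFrom(ctx).
func TestContextWithLogger(t *testing.T) context.Context {
	return controllerruntime.LoggerInto(context.Background(), testr.New(t))
}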
16 changes: 8 additions & 8 deletions pkg/controllers/disruption/controller.go
@@ -23,16 +23,16 @@ import (
"sync"
"time"

"github.com/go-logr/logr"
"github.com/samber/lo"
"go.uber.org/multierr"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/utils/clock"
"knative.dev/pkg/logging"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"

operatorlogging "sigs.k8s.io/karpenter/pkg/operator/logging"
controllerruntime "sigs.k8s.io/controller-runtime"

"sigs.k8s.io/karpenter/pkg/apis/v1beta1"
"sigs.k8s.io/karpenter/pkg/cloudprovider"
@@ -114,7 +114,7 @@ func (c *Controller) Reconcile(ctx context.Context, _ reconcile.Request) (reconc
// with making any scheduling decision off of our state nodes. Otherwise, we have the potential to make
// a scheduling decision based on a smaller subset of nodes in our cluster state than actually exist.
if !c.cluster.Synced(ctx) {
logging.FromContext(ctx).Debugf("waiting on cluster sync")
controllerruntime.LoggerFrom(ctx).V(3).Info("waiting on cluster sync")
return reconcile.Result{RequeueAfter: time.Second}, nil
}

@@ -184,7 +184,7 @@ func (c *Controller) disrupt(ctx context.Context, disruption Method) (bool, erro
// 3. Add Command to orchestration.Queue to wait to delete the candidates.
func (c *Controller) executeCommand(ctx context.Context, m Method, cmd Command, schedulingResults scheduling.Results) error {
commandID := uuid.NewUUID()
logging.FromContext(ctx).With("command-id", commandID).Infof("disrupting via %s %s", m.Type(), cmd)
controllerruntime.LoggerFrom(ctx).WithValues("command-id", commandID).Info(fmt.Sprintf("disrupting via %s %s", m.Type(), cmd))

stateNodes := lo.Map(cmd.candidates, func(c *Candidate, _ int) *state.StateNode {
return c.StateNode
@@ -213,7 +213,7 @@ func (c *Controller) executeCommand(ctx context.Context, m Method, cmd Command,
// tainted with the Karpenter taint, the provisioning controller will continue
// to do scheduling simulations and nominate the pods on the candidate nodes until
// the node is cleaned up.
schedulingResults.Record(logging.WithLogger(ctx, operatorlogging.NopLogger), c.recorder, c.cluster)
schedulingResults.Record(controllerruntime.LoggerInto(ctx, logr.Discard()), c.recorder, c.cluster)

providerIDs := lo.Map(cmd.candidates, func(c *Candidate, _ int) string { return c.ProviderID() })
// We have the new NodeClaims created at the API server so mark the old NodeClaims for deletion
@@ -274,7 +274,7 @@ func (c *Controller) logAbnormalRuns(ctx context.Context) {
defer c.mu.Unlock()
for name, runTime := range c.lastRun {
if timeSince := c.clock.Since(runTime); timeSince > AbnormalTimeLimit {
logging.FromContext(ctx).Debugf("abnormal time between runs of %s = %s", name, timeSince)
controllerruntime.LoggerFrom(ctx).V(3).Info(fmt.Sprintf("abnormal time between runs of %s = %s", name, timeSince))
}
}
}
@@ -283,7 +283,7 @@ func (c *Controller) logAbnormalRuns(ctx context.Context) {
func (c *Controller) logInvalidBudgets(ctx context.Context) {
nodePoolList := &v1beta1.NodePoolList{}
if err := c.kubeClient.List(ctx, nodePoolList); err != nil {
logging.FromContext(ctx).Errorf("listing nodepools, %s", err)
controllerruntime.LoggerFrom(ctx).Error(err, "listing nodepools, %s")
return
}
var buf bytes.Buffer
@@ -294,6 +294,6 @@ func (c *Controller) logInvalidBudgets(ctx context.Context) {
}
}
if buf.Len() > 0 {
logging.FromContext(ctx).Errorf("detected disruption budget errors: %s", buf.String())
controllerruntime.LoggerFrom(ctx).Error(nil, fmt.Sprintf("detected disruption budget errors: %s", buf.String()))
}
}
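The knative calls replaced above took printf-style format strings; logr's Info and Error take a constant message plus optional key/value pairs, so any format verbs left in the message would be emitted literally. A minimal sketch (assumed names, not code from this commit) of the two idiomatic ways to carry the values that Infof/Errorf used to interpolate:

package migration

import (
	"context"
	"fmt"

	controllerruntime "sigs.k8s.io/controller-runtime"
)

func logDisruption(ctx context.Context, method string, nodeCount int) {
	log := controllerruntime.LoggerFrom(ctx)

	// Option 1: pre-format the message so the old wording is preserved.
	log.Info(fmt.Sprintf("disrupting via %s, affecting %d nodes", method, nodeCount))

	// Option 2: keep the message constant and pass the variables as
	// structured key/value pairs, the style logr is built around.
	log.Info("disrupting nodes", "method", method, "count", nodeCount)
}

func logListError(ctx context.Context, err error) {
	// Error takes the error as its first argument; no format verb remains
	// in the message.
	controllerruntime.LoggerFrom(ctx).Error(err, "listing nodepools")
}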
6 changes: 3 additions & 3 deletions pkg/controllers/disruption/emptynodeconsolidation.go
Expand Up @@ -21,7 +21,7 @@ import (
"errors"
"fmt"

"knative.dev/pkg/logging"
controllerruntime "sigs.k8s.io/controller-runtime"

"sigs.k8s.io/karpenter/pkg/controllers/provisioning/scheduling"
"sigs.k8s.io/karpenter/pkg/metrics"
@@ -90,7 +90,7 @@ func (c *EmptyNodeConsolidation) ComputeCommand(ctx context.Context, disruptionB
}
validationCandidates, err := GetCandidates(ctx, c.cluster, c.kubeClient, c.recorder, c.clock, c.cloudProvider, c.ShouldDisrupt, c.queue)
if err != nil {
logging.FromContext(ctx).Errorf("computing validation candidates %s", err)
controllerruntime.LoggerFrom(ctx).Error(err, "computing validation candidates %s")
return Command{}, scheduling.Results{}, err
}
// Get the current representation of the proposed candidates from before the validation timeout
Expand All @@ -108,7 +108,7 @@ func (c *EmptyNodeConsolidation) ComputeCommand(ctx context.Context, disruptionB
// 3. the number of candidates for a given nodepool can no longer be disrupted as it would violate the budget
for _, n := range candidatesToDelete {
if len(n.reschedulablePods) != 0 || c.cluster.IsNodeNominated(n.ProviderID()) || postValidationMapping[n.nodePool.Name] == 0 {
logging.FromContext(ctx).Debugf("abandoning empty node consolidation attempt due to pod churn, command is no longer valid, %s", cmd)
controllerruntime.LoggerFrom(ctx).V(4).Info(fmt.Sprintf("abandoning empty node consolidation attempt due to pod churn, command is no longer valid, %s", cmd))
return Command{}, scheduling.Results{}, nil
}
postValidationMapping[n.nodePool.Name]--
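The Debugf calls in this package land on V(3) or V(4) after the migration. A short sketch of how logr verbosity gating behaves; the level numbers shown are the commit's own convention, not a logr requirement.

package migration

import (
	"context"

	controllerruntime "sigs.k8s.io/controller-runtime"
)

func logLevels(ctx context.Context) {
	log := controllerruntime.LoggerFrom(ctx)
	log.Info("always emitted")            // equivalent to V(0)
	log.V(3).Info("debug-level detail")   // emitted only when verbosity >= 3
	log.V(4).Info("extra-verbose detail") // emitted only when verbosity >= 4
}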
4 changes: 2 additions & 2 deletions pkg/controllers/disruption/expiration.go
@@ -23,7 +23,7 @@ import (

"k8s.io/utils/clock"

"knative.dev/pkg/logging"
controllerruntime "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"

"sigs.k8s.io/karpenter/pkg/apis/v1beta1"
@@ -116,7 +116,7 @@ func (e *Expiration) ComputeCommand(ctx context.Context, disruptionBudgetMapping
e.recorder.Publish(disruptionevents.Blocked(candidate.Node, candidate.NodeClaim, "Scheduling simulation failed to schedule all pods")...)
continue
}
logging.FromContext(ctx).With("ttl", candidates[0].nodePool.Spec.Disruption.ExpireAfter.String()).Infof("triggering termination for expired node after TTL")
controllerruntime.LoggerFrom(ctx).WithValues("ttl", candidates[0].nodePool.Spec.Disruption.ExpireAfter.String()).Info("triggering termination for expired node after TTL")
return Command{
candidates: []*Candidate{candidate},
replacements: results.NewNodeClaims,
14 changes: 7 additions & 7 deletions pkg/controllers/disruption/helpers.go
@@ -22,6 +22,7 @@ import (
"math"
"strconv"

"github.com/go-logr/logr"
"github.com/samber/lo"

disruptionevents "sigs.k8s.io/karpenter/pkg/controllers/disruption/events"
@@ -30,7 +31,7 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/utils/clock"
"knative.dev/pkg/logging"
controllerruntime "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"

"sigs.k8s.io/karpenter/pkg/apis/v1beta1"
@@ -41,7 +42,6 @@ import (
"sigs.k8s.io/karpenter/pkg/controllers/state"
"sigs.k8s.io/karpenter/pkg/events"
"sigs.k8s.io/karpenter/pkg/metrics"
operatorlogging "sigs.k8s.io/karpenter/pkg/operator/logging"
"sigs.k8s.io/karpenter/pkg/scheduling"
)

@@ -79,7 +79,7 @@ func SimulateScheduling(ctx context.Context, kubeClient client.Client, cluster *
pods = append(pods, n.reschedulablePods...)
}
pods = append(pods, deletingNodePods...)
scheduler, err := provisioner.NewScheduler(controllerruntime.LoggerInto(ctx, logr.Discard()), pods, stateNodes)
scheduler, err := provisioner.NewScheduler(controllerruntime.LoggerInto(ctx, logr.New(logr.Discard().GetSink())), pods, stateNodes)
if err != nil {
return pscheduling.Results{}, fmt.Errorf("creating scheduler, %w", err)
}
@@ -88,7 +88,7 @@
return client.ObjectKeyFromObject(p), nil
})

results := scheduler.Solve(logging.WithLogger(ctx, operatorlogging.NopLogger), pods).TruncateInstanceTypes(pscheduling.MaxInstanceTypes)
results := scheduler.Solve(controllerruntime.LoggerInto(ctx, logr.Discard()), pods).TruncateInstanceTypes(pscheduling.MaxInstanceTypes)
for _, n := range results.ExistingNodes {
// We consider existing nodes for scheduling. When these nodes are unmanaged, their taint logic should
// tell us if we can schedule to them or not; however, if these nodes are managed, we will still schedule to them
@@ -127,8 +127,8 @@ func GetPodEvictionCost(ctx context.Context, p *v1.Pod) float64 {
if ok {
podDeletionCost, err := strconv.ParseFloat(podDeletionCostStr, 64)
if err != nil {
logging.FromContext(ctx).Errorf("parsing %s=%s from pod %s, %s",
v1.PodDeletionCost, podDeletionCostStr, client.ObjectKeyFromObject(p), err)
controllerruntime.LoggerFrom(ctx).Error(err, "parsing %s=%s from pod %s, %s",
v1.PodDeletionCost, podDeletionCostStr, client.ObjectKeyFromObject(p))
} else {
// the pod deletion disruptionCost is in [-2147483647, 2147483647]
// the min pod disruptionCost makes one pod ~ -15 pods, and the max pod disruptionCost to ~ 17 pods.
@@ -268,7 +268,7 @@ func BuildNodePoolMap(ctx context.Context, kubeClient client.Client, cloudProvid
if err != nil {
// don't error out on building the node pool, we just won't be able to handle any nodes that
// were created by it
logging.FromContext(ctx).Errorf("listing instance types for %s, %s", np.Name, err)
controllerruntime.LoggerFrom(ctx).Error(err, "listing instance types for %s, %s", np.Name)
continue
}
if len(nodePoolInstanceTypes) == 0 {
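The package-local NopLogger that the knative-based code injected for scheduling simulations is replaced above with a discarded logr logger injected through the context. A short sketch (illustrative, not from this commit) of the same silencing pattern:

package migration

import (
	"context"

	"github.com/go-logr/logr"
	controllerruntime "sigs.k8s.io/controller-runtime"
)

// silencedContext returns a context whose injected logger drops every record,
// so simulated scheduling runs produce no log output.
func silencedContext(ctx context.Context) context.Context {
	return controllerruntime.LoggerInto(ctx, logr.Discard())
}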
9 changes: 5 additions & 4 deletions pkg/controllers/disruption/multinodeconsolidation.go
@@ -24,7 +24,8 @@

"github.com/samber/lo"
"k8s.io/apimachinery/pkg/util/sets"
"knative.dev/pkg/logging"

controllerruntime "sigs.k8s.io/controller-runtime"

"sigs.k8s.io/karpenter/pkg/cloudprovider"
"sigs.k8s.io/karpenter/pkg/controllers/provisioning/scheduling"
@@ -98,7 +99,7 @@ func (m *MultiNodeConsolidation) ComputeCommand(ctx context.Context, disruptionB
}

if !isValid {
logging.FromContext(ctx).Debugf("abandoning multi-node consolidation attempt due to pod churn, command is no longer valid, %s", cmd)
controllerruntime.LoggerFrom(ctx).V(3).Info(fmt.Sprintf("abandoning multi-node consolidation attempt due to pod churn, command is no longer valid, %s", cmd))
return Command{}, scheduling.Results{}, nil
}
return cmd, results, nil
@@ -125,9 +126,9 @@ func (m *MultiNodeConsolidation) firstNConsolidationOption(ctx context.Context,
if m.clock.Now().After(timeout) {
ConsolidationTimeoutTotalCounter.WithLabelValues(m.ConsolidationType()).Inc()
if lastSavedCommand.candidates == nil {
logging.FromContext(ctx).Debugf("failed to find a multi-node consolidation after timeout, last considered batch had %d", (min+max)/2)
controllerruntime.LoggerFrom(ctx).V(3).Info(fmt.Sprintf("failed to find a multi-node consolidation after timeout, last considered batch had %d", (min+max)/2))
} else {
logging.FromContext(ctx).Debugf("stopping multi-node consolidation after timeout, returning last valid command %s", lastSavedCommand)
controllerruntime.LoggerFrom(ctx).V(3).Info(fmt.Sprintf("stopping multi-node consolidation after timeout, returning last valid command %s", lastSavedCommand))
}
return lastSavedCommand, lastSavedResults, nil
}
11 changes: 6 additions & 5 deletions pkg/controllers/disruption/orchestration/queue.go
@@ -31,7 +31,7 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/workqueue"
"k8s.io/utils/clock"
"knative.dev/pkg/logging"
controllerruntime "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllertest"
"sigs.k8s.io/controller-runtime/pkg/manager"
@@ -179,7 +179,8 @@ func (q *Queue) Reconcile(ctx context.Context, _ reconcile.Request) (reconcile.R
panic("unexpected failure, disruption queue has shut down")
}
cmd := item.(*Command)
ctx = logging.WithLogger(ctx, logging.FromContext(ctx).With("command-id", string(cmd.id)))
ctx = controllerruntime.LoggerInto(ctx, controllerruntime.LoggerFrom(ctx).WithValues("command-id", string(cmd.id)))

if err := q.waitOrTerminate(ctx, cmd); err != nil {
// If recoverable, re-queue and try again.
if !IsUnrecoverableError(err) {
@@ -203,13 +204,13 @@ func (q *Queue) Reconcile(ctx context.Context, _ reconcile.Request) (reconcile.R
}).Add(float64(len(failedLaunches)))
multiErr := multierr.Combine(err, cmd.lastError, state.RequireNoScheduleTaint(ctx, q.kubeClient, false, cmd.candidates...))
// Log the error
logging.FromContext(ctx).With("nodes", strings.Join(lo.Map(cmd.candidates, func(s *state.StateNode, _ int) string {
controllerruntime.LoggerFrom(ctx).WithValues("nodes", strings.Join(lo.Map(cmd.candidates, func(s *state.StateNode, _ int) string {
return s.Name()
}), ",")).Errorf("failed to disrupt nodes, %s", multiErr)
}), ",")).Error(multiErr, "failed to disrupt nodes, %s")
}
// If command is complete, remove command from queue.
q.Remove(cmd)
logging.FromContext(ctx).Infof("command succeeded")
controllerruntime.LoggerFrom(ctx).Info("command succeeded")
return reconcile.Result{RequeueAfter: controller.Immediately}, nil
}

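The queue enriches the context's logger with a command-scoped field once and stores it back into the context, so every later LoggerFrom call in the same reconcile pass inherits it. A minimal sketch (names assumed) of that enrichment pattern:

package migration

import (
	"context"

	controllerruntime "sigs.k8s.io/controller-runtime"
)

// withCommandID attaches a command-id key/value to the context's logger and
// returns the updated context for downstream calls.
func withCommandID(ctx context.Context, commandID string) context.Context {
	logger := controllerruntime.LoggerFrom(ctx).WithValues("command-id", commandID)
	return controllerruntime.LoggerInto(ctx, logger)
}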
3 changes: 2 additions & 1 deletion pkg/controllers/disruption/orchestration/suite_test.go
@@ -27,9 +27,10 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
. "knative.dev/pkg/logging/testing"
"sigs.k8s.io/controller-runtime/pkg/client"

. "sigs.k8s.io/karpenter/pkg/utils/testing"

"sigs.k8s.io/karpenter/pkg/apis"
"sigs.k8s.io/karpenter/pkg/apis/v1beta1"
"sigs.k8s.io/karpenter/pkg/cloudprovider/fake"
8 changes: 4 additions & 4 deletions pkg/controllers/disruption/singlenodeconsolidation.go
@@ -21,7 +21,7 @@ import (
"fmt"
"time"

"knative.dev/pkg/logging"
controllerruntime "sigs.k8s.io/controller-runtime"

"sigs.k8s.io/karpenter/pkg/controllers/provisioning/scheduling"
"sigs.k8s.io/karpenter/pkg/metrics"
@@ -66,13 +66,13 @@ func (s *SingleNodeConsolidation) ComputeCommand(ctx context.Context, disruption
}
if s.clock.Now().After(timeout) {
ConsolidationTimeoutTotalCounter.WithLabelValues(s.ConsolidationType()).Inc()
logging.FromContext(ctx).Debugf("abandoning single-node consolidation due to timeout after evaluating %d candidates", i)
controllerruntime.LoggerFrom(ctx).V(3).Info(fmt.Sprintf("abandoning single-node consolidation due to timeout after evaluating %d candidates", i))
return Command{}, scheduling.Results{}, nil
}
// compute a possible consolidation option
cmd, results, err := s.computeConsolidation(ctx, candidate)
if err != nil {
logging.FromContext(ctx).Errorf("computing consolidation %s", err)
controllerruntime.LoggerFrom(ctx).Error(err, "computing consolidation %s")
continue
}
if cmd.Action() == NoOpAction {
@@ -83,7 +83,7 @@
return Command{}, scheduling.Results{}, fmt.Errorf("validating consolidation, %w", err)
}
if !isValid {
logging.FromContext(ctx).Debugf("abandoning single-node consolidation attempt due to pod churn, command is no longer valid, %s", cmd)
controllerruntime.LoggerFrom(ctx).V(3).Info(fmt.Sprintf("abandoning single-node consolidation attempt due to pod churn, command is no longer valid, %s", cmd))
return Command{}, scheduling.Results{}, nil
}
return cmd, results, nil
3 changes: 2 additions & 1 deletion pkg/controllers/disruption/suite_test.go
@@ -34,10 +34,11 @@ import (
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/sets"
clock "k8s.io/utils/clock/testing"
. "knative.dev/pkg/logging/testing"
"knative.dev/pkg/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"

. "sigs.k8s.io/karpenter/pkg/utils/testing"

coreapis "sigs.k8s.io/karpenter/pkg/apis"
"sigs.k8s.io/karpenter/pkg/apis/v1beta1"
"sigs.k8s.io/karpenter/pkg/cloudprovider"