diff --git a/go.mod b/go.mod index 2ddaebc232..7afa5a7c68 100644 --- a/go.mod +++ b/go.mod @@ -32,6 +32,7 @@ require ( k8s.io/utils v0.0.0-20230726121419-3b25d923346b knative.dev/pkg v0.0.0-20230712131115-7051d301e7f4 sigs.k8s.io/controller-runtime v0.17.2 + sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20240215124517-56159419231e ) require ( @@ -78,6 +79,7 @@ require ( github.com/prometheus/procfs v0.12.0 // indirect github.com/prometheus/statsd_exporter v0.21.0 // indirect github.com/robfig/cron/v3 v3.0.1 + github.com/spf13/afero v1.6.0 // indirect github.com/spf13/cobra v1.7.0 // indirect github.com/spf13/pflag v1.0.5 // indirect go.opencensus.io v0.24.0 // indirect diff --git a/go.sum b/go.sum index 0854bedb0c..fb4c5057ac 100644 --- a/go.sum +++ b/go.sum @@ -217,6 +217,7 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -254,6 +255,7 @@ github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= @@ -294,6 +296,8 @@ github.com/samber/lo v1.39.0/go.mod h1:+m/ZKRl6ClXCE2Lgf3MsQlWfh4bn1bz6CXEOxnEXn github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -341,6 +345,7 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -683,6 +688,8 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/controller-runtime v0.17.2 h1:FwHwD1CTUemg0pW2otk7/U5/i5m2ymzvOXdbeGOUvw0= sigs.k8s.io/controller-runtime v0.17.2/go.mod h1:+MngTvIQQQhfXtwfdGw/UOQ/aIaqsYywfCINOtwMO/s= +sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20240215124517-56159419231e h1:zlN3M47kIntFr5Z6qMOSMg8nO6lrywD94H29TPDZjZk= +sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20240215124517-56159419231e/go.mod h1:TF/lVLWS+JNNaVqJuDDictY2hZSXSsIHCx4FClMvqFg= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= diff --git a/pkg/apis/v1beta1/suite_test.go b/pkg/apis/v1beta1/suite_test.go index e408e36856..89fee5e4b0 100644 --- a/pkg/apis/v1beta1/suite_test.go +++ b/pkg/apis/v1beta1/suite_test.go @@ -24,7 +24,8 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "github.com/samber/lo" - . "knative.dev/pkg/logging/testing" + + . "sigs.k8s.io/karpenter/pkg/utils/testing" "sigs.k8s.io/karpenter/pkg/apis" "sigs.k8s.io/karpenter/pkg/apis/v1beta1" diff --git a/pkg/controllers/disruption/controller.go b/pkg/controllers/disruption/controller.go index b3c33a6c7d..66bbf18c15 100644 --- a/pkg/controllers/disruption/controller.go +++ b/pkg/controllers/disruption/controller.go @@ -23,16 +23,16 @@ import ( "sync" "time" + "github.com/go-logr/logr" "github.com/samber/lo" "go.uber.org/multierr" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/utils/clock" - "knative.dev/pkg/logging" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" - operatorlogging "sigs.k8s.io/karpenter/pkg/operator/logging" + ctrl "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/karpenter/pkg/apis/v1beta1" "sigs.k8s.io/karpenter/pkg/cloudprovider" @@ -114,7 +114,7 @@ func (c *Controller) Reconcile(ctx context.Context, _ reconcile.Request) (reconc // with making any scheduling decision off of our state nodes. Otherwise, we have the potential to make // a scheduling decision based on a smaller subset of nodes in our cluster state than actually exist. if !c.cluster.Synced(ctx) { - logging.FromContext(ctx).Debugf("waiting on cluster sync") + ctrl.FromContext(ctx).V(1).Info("waiting on cluster sync") return reconcile.Result{RequeueAfter: time.Second}, nil } @@ -184,7 +184,7 @@ func (c *Controller) disrupt(ctx context.Context, disruption Method) (bool, erro // 3. Add Command to orchestration.Queue to wait to delete the candiates. 
func (c *Controller) executeCommand(ctx context.Context, m Method, cmd Command, schedulingResults scheduling.Results) error { commandID := uuid.NewUUID() - logging.FromContext(ctx).With("command-id", commandID).Infof("disrupting via %s %s", m.Type(), cmd) + ctrl.FromContext(ctx).WithValues("command-id", commandID).Info(fmt.Sprintf("disrupting via %s %s", m.Type(), cmd)) stateNodes := lo.Map(cmd.candidates, func(c *Candidate, _ int) *state.StateNode { return c.StateNode @@ -213,7 +213,7 @@ func (c *Controller) executeCommand(ctx context.Context, m Method, cmd Command, // tainted with the Karpenter taint, the provisioning controller will continue // to do scheduling simulations and nominate the pods on the candidate nodes until // the node is cleaned up. - schedulingResults.Record(logging.WithLogger(ctx, operatorlogging.NopLogger), c.recorder, c.cluster) + schedulingResults.Record(ctrl.IntoContext(ctx, logr.New(logr.Discard().GetSink())), c.recorder, c.cluster) providerIDs := lo.Map(cmd.candidates, func(c *Candidate, _ int) string { return c.ProviderID() }) // We have the new NodeClaims created at the API server so mark the old NodeClaims for deletion @@ -274,7 +274,7 @@ func (c *Controller) logAbnormalRuns(ctx context.Context) { defer c.mu.Unlock() for name, runTime := range c.lastRun { if timeSince := c.clock.Since(runTime); timeSince > AbnormalTimeLimit { - logging.FromContext(ctx).Debugf("abnormal time between runs of %s = %s", name, timeSince) + ctrl.FromContext(ctx).V(1).Info(fmt.Sprintf("abnormal time between runs of %s = %s", name, timeSince)) } } } @@ -283,7 +283,7 @@ func (c *Controller) logAbnormalRuns(ctx context.Context) { func (c *Controller) logInvalidBudgets(ctx context.Context) { nodePoolList := &v1beta1.NodePoolList{} if err := c.kubeClient.List(ctx, nodePoolList); err != nil { - logging.FromContext(ctx).Errorf("listing nodepools, %s", err) + ctrl.FromContext(ctx).Error(err, "listing nodepools") return } var buf bytes.Buffer @@ -294,6 +294,6 @@ func (c *Controller) logInvalidBudgets(ctx context.Context) { } } if buf.Len() > 0 { - logging.FromContext(ctx).Errorf("detected disruption budget errors: %s", buf.String()) + ctrl.FromContext(ctx).Error(nil, fmt.Sprintf("detected disruption budget errors: %s", buf.String())) } } diff --git a/pkg/controllers/disruption/emptynodeconsolidation.go b/pkg/controllers/disruption/emptynodeconsolidation.go index 2a9ccf31fc..a75b5f6648 100644 --- a/pkg/controllers/disruption/emptynodeconsolidation.go +++ b/pkg/controllers/disruption/emptynodeconsolidation.go @@ -21,7 +21,7 @@ import ( "errors" "fmt" - "knative.dev/pkg/logging" + ctrl "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/karpenter/pkg/controllers/provisioning/scheduling" "sigs.k8s.io/karpenter/pkg/metrics" @@ -90,7 +90,7 @@ func (c *EmptyNodeConsolidation) ComputeCommand(ctx context.Context, disruptionB } validationCandidates, err := GetCandidates(ctx, c.cluster, c.kubeClient, c.recorder, c.clock, c.cloudProvider, c.ShouldDisrupt, c.queue) if err != nil { - logging.FromContext(ctx).Errorf("computing validation candidates %s", err) + ctrl.FromContext(ctx).Error(err, "computing validation candidates") return Command{}, scheduling.Results{}, err } // Get the current representation of the proposed candidates from before the validation timeout @@ -108,7 +108,7 @@ func (c *EmptyNodeConsolidation) ComputeCommand(ctx context.Context, disruptionB // 3.
the number of candidates for a given nodepool can no longer be disrupted as it would violate the budget for _, n := range candidatesToDelete { if len(n.reschedulablePods) != 0 || c.cluster.IsNodeNominated(n.ProviderID()) || postValidationMapping[n.nodePool.Name] == 0 { - logging.FromContext(ctx).Debugf("abandoning empty node consolidation attempt due to pod churn, command is no longer valid, %s", cmd) + ctrl.FromContext(ctx).V(1).Info(fmt.Sprintf("abandoning empty node consolidation attempt due to pod churn, command is no longer valid, %s", cmd)) return Command{}, scheduling.Results{}, nil } postValidationMapping[n.nodePool.Name]-- diff --git a/pkg/controllers/disruption/expiration.go b/pkg/controllers/disruption/expiration.go index d4d30c88ca..ea0ec51a64 100644 --- a/pkg/controllers/disruption/expiration.go +++ b/pkg/controllers/disruption/expiration.go @@ -23,8 +23,8 @@ import ( "k8s.io/utils/clock" - "knative.dev/pkg/logging" "sigs.k8s.io/controller-runtime/pkg/client" + ctrl "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/karpenter/pkg/apis/v1beta1" disruptionevents "sigs.k8s.io/karpenter/pkg/controllers/disruption/events" @@ -116,7 +116,7 @@ func (e *Expiration) ComputeCommand(ctx context.Context, disruptionBudgetMapping e.recorder.Publish(disruptionevents.Blocked(candidate.Node, candidate.NodeClaim, "Scheduling simulation failed to schedule all pods")...) continue } - logging.FromContext(ctx).With("ttl", candidates[0].nodePool.Spec.Disruption.ExpireAfter.String()).Infof("triggering termination for expired node after TTL") + ctrl.FromContext(ctx).WithValues("ttl", candidates[0].nodePool.Spec.Disruption.ExpireAfter.String()).Info("triggering termination for expired node after TTL") return Command{ candidates: []*Candidate{candidate}, replacements: results.NewNodeClaims, diff --git a/pkg/controllers/disruption/helpers.go b/pkg/controllers/disruption/helpers.go index 20492c3c21..b3f039e10f 100644 --- a/pkg/controllers/disruption/helpers.go +++ b/pkg/controllers/disruption/helpers.go @@ -22,6 +22,7 @@ import ( "math" "strconv" + "github.com/go-logr/logr" "github.com/samber/lo" disruptionevents "sigs.k8s.io/karpenter/pkg/controllers/disruption/events" @@ -30,8 +31,8 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/utils/clock" - "knative.dev/pkg/logging" "sigs.k8s.io/controller-runtime/pkg/client" + ctrl "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/karpenter/pkg/apis/v1beta1" "sigs.k8s.io/karpenter/pkg/cloudprovider" @@ -41,7 +42,6 @@ import ( "sigs.k8s.io/karpenter/pkg/controllers/state" "sigs.k8s.io/karpenter/pkg/events" "sigs.k8s.io/karpenter/pkg/metrics" - operatorlogging "sigs.k8s.io/karpenter/pkg/operator/logging" "sigs.k8s.io/karpenter/pkg/scheduling" ) @@ -79,7 +79,7 @@ func SimulateScheduling(ctx context.Context, kubeClient client.Client, cluster * pods = append(pods, n.reschedulablePods...) } pods = append(pods, deletingNodePods...)
- scheduler, err := provisioner.NewScheduler(logging.WithLogger(ctx, operatorlogging.NopLogger), pods, stateNodes) + scheduler, err := provisioner.NewScheduler(ctrl.IntoContext(ctx, logr.New(logr.Discard().GetSink())), pods, stateNodes) if err != nil { return pscheduling.Results{}, fmt.Errorf("creating scheduler, %w", err) } @@ -88,7 +88,7 @@ func SimulateScheduling(ctx context.Context, kubeClient client.Client, cluster * return client.ObjectKeyFromObject(p), nil }) - results := scheduler.Solve(logging.WithLogger(ctx, operatorlogging.NopLogger), pods).TruncateInstanceTypes(pscheduling.MaxInstanceTypes) + results := scheduler.Solve(ctrl.IntoContext(ctx, logr.New(logr.Discard().GetSink())), pods).TruncateInstanceTypes(pscheduling.MaxInstanceTypes) for _, n := range results.ExistingNodes { // We consider existing nodes for scheduling. When these nodes are unmanaged, their taint logic should // tell us if we can schedule to them or not; however, if these nodes are managed, we will still schedule to them @@ -127,8 +127,8 @@ func GetPodEvictionCost(ctx context.Context, p *v1.Pod) float64 { if ok { podDeletionCost, err := strconv.ParseFloat(podDeletionCostStr, 64) if err != nil { - logging.FromContext(ctx).Errorf("parsing %s=%s from pod %s, %s", - v1.PodDeletionCost, podDeletionCostStr, client.ObjectKeyFromObject(p), err) + ctrl.FromContext(ctx).Error(err, fmt.Sprintf("parsing %s=%s from pod %s", + v1.PodDeletionCost, podDeletionCostStr, client.ObjectKeyFromObject(p))) } else { // the pod deletion disruptionCost is in [-2147483647, 2147483647] // the min pod disruptionCost makes one pod ~ -15 pods, and the max pod disruptionCost to ~ 17 pods. @@ -268,7 +268,7 @@ func BuildNodePoolMap(ctx context.Context, kubeClient client.Client, cloudProvid if err != nil { // don't error out on building the node pool, we just won't be able to handle any nodes that // were created by it - logging.FromContext(ctx).Errorf("listing instance types for %s, %s", np.Name, err) + ctrl.FromContext(ctx).Error(err, fmt.Sprintf("listing instance types for %s", np.Name)) continue } if len(nodePoolInstanceTypes) == 0 { diff --git a/pkg/controllers/disruption/multinodeconsolidation.go b/pkg/controllers/disruption/multinodeconsolidation.go index 660958f48f..7413ca969e 100644 --- a/pkg/controllers/disruption/multinodeconsolidation.go +++ b/pkg/controllers/disruption/multinodeconsolidation.go @@ -24,7 +24,8 @@ import ( "github.com/samber/lo" "k8s.io/apimachinery/pkg/util/sets" - "knative.dev/pkg/logging" + + ctrl "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/karpenter/pkg/cloudprovider" "sigs.k8s.io/karpenter/pkg/controllers/provisioning/scheduling" @@ -98,7 +99,7 @@ func (m *MultiNodeConsolidation) ComputeCommand(ctx context.Context, disruptionB } if !isValid { - logging.FromContext(ctx).Debugf("abandoning multi-node consolidation attempt due to pod churn, command is no longer valid, %s", cmd) + ctrl.FromContext(ctx).V(1).Info(fmt.Sprintf("abandoning multi-node consolidation attempt due to pod churn, command is no longer valid, %s", cmd)) return Command{}, scheduling.Results{}, nil } return cmd, results, nil @@ -125,9 +126,9 @@ func (m *MultiNodeConsolidation) firstNConsolidationOption(ctx context.Context, if m.clock.Now().After(timeout) { ConsolidationTimeoutTotalCounter.WithLabelValues(m.ConsolidationType()).Inc() if lastSavedCommand.candidates == nil { - logging.FromContext(ctx).Debugf("failed to find a multi-node consolidation after timeout, last considered batch had %d", (min+max)/2) + ctrl.FromContext(ctx).V(1).Info(fmt.Sprintf("failed to find a multi-node consolidation after timeout, last considered batch had %d", (min+max)/2)) } else { - logging.FromContext(ctx).Debugf("stopping multi-node consolidation after timeout, returning last valid command %s", lastSavedCommand) + ctrl.FromContext(ctx).V(1).Info(fmt.Sprintf("stopping multi-node consolidation after timeout, returning last valid command %s", lastSavedCommand)) } return lastSavedCommand, lastSavedResults, nil } diff --git a/pkg/controllers/disruption/orchestration/queue.go b/pkg/controllers/disruption/orchestration/queue.go index a99fe834b4..01bdc670ab 100644 --- a/pkg/controllers/disruption/orchestration/queue.go +++ b/pkg/controllers/disruption/orchestration/queue.go @@ -31,9 +31,9 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/workqueue" "k8s.io/utils/clock" - "knative.dev/pkg/logging" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllertest" + ctrl "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -179,7 +179,8 @@ func (q *Queue) Reconcile(ctx context.Context, _ reconcile.Request) (reconcile.R panic("unexpected failure, disruption queue has shut down") } cmd := item.(*Command) - ctx = logging.WithLogger(ctx, logging.FromContext(ctx).With("command-id", string(cmd.id))) + ctx = ctrl.IntoContext(ctx, ctrl.FromContext(ctx).WithValues("command-id", string(cmd.id))) + if err := q.waitOrTerminate(ctx, cmd); err != nil { // If recoverable, re-queue and try again. if !IsUnrecoverableError(err) { @@ -203,13 +204,13 @@ func (q *Queue) Reconcile(ctx context.Context, _ reconcile.Request) (reconcile.R }).Add(float64(len(failedLaunches))) multiErr := multierr.Combine(err, cmd.lastError, state.RequireNoScheduleTaint(ctx, q.kubeClient, false, cmd.candidates...)) // Log the error - logging.FromContext(ctx).With("nodes", strings.Join(lo.Map(cmd.candidates, func(s *state.StateNode, _ int) string { + ctrl.FromContext(ctx).WithValues("nodes", strings.Join(lo.Map(cmd.candidates, func(s *state.StateNode, _ int) string { return s.Name() - }), ",")).Errorf("failed to disrupt nodes, %s", multiErr) + }), ",")).Error(multiErr, "failed to disrupt nodes") } // If command is complete, remove command from queue. q.Remove(cmd) - logging.FromContext(ctx).Infof("command succeeded") + ctrl.FromContext(ctx).Info("command succeeded") return reconcile.Result{RequeueAfter: controller.Immediately}, nil } diff --git a/pkg/controllers/disruption/orchestration/suite_test.go b/pkg/controllers/disruption/orchestration/suite_test.go index 8f0697c1ac..51fa0390dd 100644 --- a/pkg/controllers/disruption/orchestration/suite_test.go +++ b/pkg/controllers/disruption/orchestration/suite_test.go @@ -27,9 +27,10 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - . "knative.dev/pkg/logging/testing" "sigs.k8s.io/controller-runtime/pkg/client" + .
"sigs.k8s.io/karpenter/pkg/utils/testing" + "sigs.k8s.io/karpenter/pkg/apis" "sigs.k8s.io/karpenter/pkg/apis/v1beta1" "sigs.k8s.io/karpenter/pkg/cloudprovider/fake" diff --git a/pkg/controllers/disruption/singlenodeconsolidation.go b/pkg/controllers/disruption/singlenodeconsolidation.go index 9bb4cd9b69..3090c77a5f 100644 --- a/pkg/controllers/disruption/singlenodeconsolidation.go +++ b/pkg/controllers/disruption/singlenodeconsolidation.go @@ -21,7 +21,7 @@ import ( "fmt" "time" - "knative.dev/pkg/logging" + ctrl "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/karpenter/pkg/controllers/provisioning/scheduling" "sigs.k8s.io/karpenter/pkg/metrics" @@ -66,13 +66,13 @@ func (s *SingleNodeConsolidation) ComputeCommand(ctx context.Context, disruption } if s.clock.Now().After(timeout) { ConsolidationTimeoutTotalCounter.WithLabelValues(s.ConsolidationType()).Inc() - logging.FromContext(ctx).Debugf("abandoning single-node consolidation due to timeout after evaluating %d candidates", i) + ctrl.FromContext(ctx).V(1).Info("abandoning single-node consolidation due to timeout after evaluating %d candidates", i) return Command{}, scheduling.Results{}, nil } // compute a possible consolidation option cmd, results, err := s.computeConsolidation(ctx, candidate) if err != nil { - logging.FromContext(ctx).Errorf("computing consolidation %s", err) + ctrl.FromContext(ctx).Error(err, "computing consolidation %s") continue } if cmd.Action() == NoOpAction { @@ -83,7 +83,7 @@ func (s *SingleNodeConsolidation) ComputeCommand(ctx context.Context, disruption return Command{}, scheduling.Results{}, fmt.Errorf("validating consolidation, %w", err) } if !isValid { - logging.FromContext(ctx).Debugf("abandoning single-node consolidation attempt due to pod churn, command is no longer valid, %s", cmd) + ctrl.FromContext(ctx).V(1).Info("abandoning single-node consolidation attempt due to pod churn, command is no longer valid, %s", cmd) return Command{}, scheduling.Results{}, nil } return cmd, results, nil diff --git a/pkg/controllers/disruption/suite_test.go b/pkg/controllers/disruption/suite_test.go index 1e95bb774e..5ae46fa216 100644 --- a/pkg/controllers/disruption/suite_test.go +++ b/pkg/controllers/disruption/suite_test.go @@ -34,10 +34,11 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/sets" clock "k8s.io/utils/clock/testing" - . "knative.dev/pkg/logging/testing" "knative.dev/pkg/ptr" "sigs.k8s.io/controller-runtime/pkg/client" + . 
"sigs.k8s.io/karpenter/pkg/utils/testing" + coreapis "sigs.k8s.io/karpenter/pkg/apis" "sigs.k8s.io/karpenter/pkg/apis/v1beta1" "sigs.k8s.io/karpenter/pkg/cloudprovider" diff --git a/pkg/controllers/disruption/types.go b/pkg/controllers/disruption/types.go index bf3d8be308..1363ead175 100644 --- a/pkg/controllers/disruption/types.go +++ b/pkg/controllers/disruption/types.go @@ -24,8 +24,8 @@ import ( "github.com/samber/lo" v1 "k8s.io/api/core/v1" "k8s.io/utils/clock" - "knative.dev/pkg/logging" "sigs.k8s.io/controller-runtime/pkg/client" + ctrl "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/karpenter/pkg/utils/pod" @@ -116,7 +116,7 @@ func NewCandidate(ctx context.Context, kubeClient client.Client, recorder events } pods, err := node.Pods(ctx, kubeClient) if err != nil { - logging.FromContext(ctx).Errorf("determining node pods, %s", err) + ctrl.FromContext(ctx).Error(err, "determining node pods, %s") return nil, fmt.Errorf("getting pods from state node, %w", err) } for _, po := range pods { diff --git a/pkg/controllers/leasegarbagecollection/controller.go b/pkg/controllers/leasegarbagecollection/controller.go index f66a6c2093..9e71eb8316 100644 --- a/pkg/controllers/leasegarbagecollection/controller.go +++ b/pkg/controllers/leasegarbagecollection/controller.go @@ -20,10 +20,10 @@ import ( "context" v1 "k8s.io/api/coordination/v1" - "knative.dev/pkg/logging" controllerruntime "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" + ctrl "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -56,7 +56,7 @@ func (c *Controller) Reconcile(ctx context.Context, l *v1.Lease) (reconcile.Resu } err := c.kubeClient.Delete(ctx, l) if err == nil { - logging.FromContext(ctx).Debug("found and delete leaked lease") + ctrl.FromContext(ctx).V(1).Info("found and delete leaked lease") NodeLeaseDeletedCounter.WithLabelValues().Inc() } diff --git a/pkg/controllers/leasegarbagecollection/suite_test.go b/pkg/controllers/leasegarbagecollection/suite_test.go index 444aa85cde..41d0cf9c06 100644 --- a/pkg/controllers/leasegarbagecollection/suite_test.go +++ b/pkg/controllers/leasegarbagecollection/suite_test.go @@ -35,7 +35,8 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . "knative.dev/pkg/logging/testing" + + . "sigs.k8s.io/karpenter/pkg/utils/testing" . "sigs.k8s.io/karpenter/pkg/test/expectations" ) diff --git a/pkg/controllers/metrics/node/suite_test.go b/pkg/controllers/metrics/node/suite_test.go index ad52ddabd2..d1ce47dd9b 100644 --- a/pkg/controllers/metrics/node/suite_test.go +++ b/pkg/controllers/metrics/node/suite_test.go @@ -41,7 +41,8 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . "knative.dev/pkg/logging/testing" + + . "sigs.k8s.io/karpenter/pkg/utils/testing" . 
"sigs.k8s.io/karpenter/pkg/test/expectations" ) diff --git a/pkg/controllers/metrics/nodepool/controller.go b/pkg/controllers/metrics/nodepool/controller.go index 130c2e9dca..f6604a67b4 100644 --- a/pkg/controllers/metrics/nodepool/controller.go +++ b/pkg/controllers/metrics/nodepool/controller.go @@ -23,12 +23,12 @@ import ( "github.com/samber/lo" "k8s.io/apimachinery/pkg/api/errors" - "knative.dev/pkg/logging" "github.com/prometheus/client_golang/prometheus" v1 "k8s.io/api/core/v1" controllerruntime "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + ctrl "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/manager" crmetrics "sigs.k8s.io/controller-runtime/pkg/metrics" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -94,7 +94,8 @@ func (c *Controller) Name() string { // Reconcile executes a termination control loop for the resource func (c *Controller) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { - ctx = logging.WithLogger(ctx, logging.FromContext(ctx).Named(c.Name()).With("nodepool", req.Name)) + ctx = ctrl.IntoContext(ctx, ctrl.FromContext(ctx).WithName(c.Name()).WithValues("nodepool", req.Name)) + nodePool := &v1beta1.NodePool{} if err := c.kubeClient.Get(ctx, req.NamespacedName, nodePool); err != nil { if errors.IsNotFound(err) { diff --git a/pkg/controllers/metrics/nodepool/suite_test.go b/pkg/controllers/metrics/nodepool/suite_test.go index 55e90606fc..0f4b2598cc 100644 --- a/pkg/controllers/metrics/nodepool/suite_test.go +++ b/pkg/controllers/metrics/nodepool/suite_test.go @@ -25,9 +25,10 @@ import ( . "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" - . "knative.dev/pkg/logging/testing" "sigs.k8s.io/controller-runtime/pkg/client" + . "sigs.k8s.io/karpenter/pkg/utils/testing" + "sigs.k8s.io/karpenter/pkg/apis" "sigs.k8s.io/karpenter/pkg/apis/v1beta1" "sigs.k8s.io/karpenter/pkg/controllers/metrics/nodepool" diff --git a/pkg/controllers/metrics/pod/controller.go b/pkg/controllers/metrics/pod/controller.go index c4e3e9419f..d684f0cc70 100644 --- a/pkg/controllers/metrics/pod/controller.go +++ b/pkg/controllers/metrics/pod/controller.go @@ -27,9 +27,9 @@ import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" - "knative.dev/pkg/logging" controllerruntime "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + ctrl "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/manager" crmetrics "sigs.k8s.io/controller-runtime/pkg/metrics" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -118,7 +118,7 @@ func (c *Controller) Name() string { // Reconcile executes a termination control loop for the resource func (c *Controller) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { - ctx = logging.WithLogger(ctx, logging.FromContext(ctx).Named(c.Name()).With("pod", req.Name)) + ctx = ctrl.IntoContext(ctx, ctrl.FromContext(ctx).WithName(c.Name()).WithValues("pod", req.Name)) pod := &v1.Pod{} if err := c.kubeClient.Get(ctx, req.NamespacedName, pod); err != nil { if errors.IsNotFound(err) { diff --git a/pkg/controllers/metrics/pod/suite_test.go b/pkg/controllers/metrics/pod/suite_test.go index 0ea4f40d53..d929d48adc 100644 --- a/pkg/controllers/metrics/pod/suite_test.go +++ b/pkg/controllers/metrics/pod/suite_test.go @@ -23,9 +23,10 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" - . 
"knative.dev/pkg/logging/testing" "sigs.k8s.io/controller-runtime/pkg/client" + . "sigs.k8s.io/karpenter/pkg/utils/testing" + "sigs.k8s.io/karpenter/pkg/controllers/metrics/pod" "sigs.k8s.io/karpenter/pkg/operator/controller" "sigs.k8s.io/karpenter/pkg/operator/scheme" diff --git a/pkg/controllers/node/termination/controller.go b/pkg/controllers/node/termination/controller.go index 7cefe5b2aa..6731804fd6 100644 --- a/pkg/controllers/node/termination/controller.go +++ b/pkg/controllers/node/termination/controller.go @@ -26,11 +26,11 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" "k8s.io/client-go/util/workqueue" - "knative.dev/pkg/logging" controllerruntime "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + ctrl "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -133,7 +133,7 @@ func (c *Controller) removeFinalizer(ctx context.Context, n *v1.Node) error { TerminationSummary.With(prometheus.Labels{ metrics.NodePoolLabel: n.Labels[v1beta1.NodePoolLabelKey], }).Observe(time.Since(stored.DeletionTimestamp.Time).Seconds()) - logging.FromContext(ctx).Infof("deleted node") + ctrl.FromContext(ctx).Info("deleted node") } return nil } diff --git a/pkg/controllers/node/termination/suite_test.go b/pkg/controllers/node/termination/suite_test.go index d7d28bcd6a..966b543b2d 100644 --- a/pkg/controllers/node/termination/suite_test.go +++ b/pkg/controllers/node/termination/suite_test.go @@ -44,7 +44,7 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "knative.dev/pkg/ptr" - . "knative.dev/pkg/logging/testing" + . "sigs.k8s.io/karpenter/pkg/utils/testing" . 
"sigs.k8s.io/karpenter/pkg/test/expectations" ) diff --git a/pkg/controllers/node/termination/terminator/eviction.go b/pkg/controllers/node/termination/terminator/eviction.go index 8fc3e429df..3df50d1ef0 100644 --- a/pkg/controllers/node/termination/terminator/eviction.go +++ b/pkg/controllers/node/termination/terminator/eviction.go @@ -31,8 +31,8 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/util/workqueue" - "knative.dev/pkg/logging" "sigs.k8s.io/controller-runtime/pkg/client" + ctrl "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -153,7 +153,7 @@ func (q *Queue) Reconcile(ctx context.Context, _ reconcile.Request) (reconcile.R // Evict returns true if successful eviction call, and false if not an eviction-related error func (q *Queue) Evict(ctx context.Context, key QueueKey) bool { - ctx = logging.WithLogger(ctx, logging.FromContext(ctx).With("pod", key.NamespacedName)) + ctx = ctrl.IntoContext(ctx, ctrl.FromContext(ctx).WithValues("pod", key.NamespacedName)) if err := q.kubeClient.SubResource("eviction").Create(ctx, &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: key.Namespace, Name: key.Name}}, &policyv1.Eviction{ @@ -179,7 +179,7 @@ func (q *Queue) Evict(ctx context.Context, key QueueKey) bool { }}, fmt.Errorf("evicting pod %s/%s violates a PDB", key.Namespace, key.Name))) return false } - logging.FromContext(ctx).Errorf("evicting pod, %s", err) + ctrl.FromContext(ctx).Error(err, "evicting pod, %s") return false } q.recorder.Publish(terminatorevents.EvictPod(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: key.Name, Namespace: key.Namespace}})) diff --git a/pkg/controllers/node/termination/terminator/suite_test.go b/pkg/controllers/node/termination/terminator/suite_test.go index bd756ac601..a80bf54a11 100644 --- a/pkg/controllers/node/termination/terminator/suite_test.go +++ b/pkg/controllers/node/termination/terminator/suite_test.go @@ -29,9 +29,10 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/uuid" - . "knative.dev/pkg/logging/testing" "sigs.k8s.io/controller-runtime/pkg/client" + . 
"sigs.k8s.io/karpenter/pkg/utils/testing" + "sigs.k8s.io/karpenter/pkg/controllers/node/termination/terminator" "sigs.k8s.io/karpenter/pkg/apis" diff --git a/pkg/controllers/node/termination/terminator/terminator.go b/pkg/controllers/node/termination/terminator/terminator.go index e05625349f..db4a79e206 100644 --- a/pkg/controllers/node/termination/terminator/terminator.go +++ b/pkg/controllers/node/termination/terminator/terminator.go @@ -24,8 +24,8 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" "k8s.io/utils/clock" - "knative.dev/pkg/logging" "sigs.k8s.io/controller-runtime/pkg/client" + ctrl "sigs.k8s.io/controller-runtime/pkg/log" nodeutil "sigs.k8s.io/karpenter/pkg/utils/node" @@ -72,7 +72,7 @@ func (t *Terminator) Taint(ctx context.Context, node *v1.Node) error { if err := t.kubeClient.Patch(ctx, node, client.MergeFrom(stored)); err != nil { return err } - logging.FromContext(ctx).Infof("tainted node") + ctrl.FromContext(ctx).Info("tainted node") } return nil } diff --git a/pkg/controllers/nodeclaim/consistency/controller.go b/pkg/controllers/nodeclaim/consistency/controller.go index 21828c76d5..dffb8b232d 100644 --- a/pkg/controllers/nodeclaim/consistency/controller.go +++ b/pkg/controllers/nodeclaim/consistency/controller.go @@ -26,10 +26,10 @@ import ( "github.com/prometheus/client_golang/prometheus" v1 "k8s.io/api/core/v1" "k8s.io/utils/clock" - "knative.dev/pkg/logging" controllerruntime "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" + ctrl "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -103,7 +103,7 @@ func (c *Controller) Reconcile(ctx context.Context, nodeClaim *v1beta1.NodeClaim return reconcile.Result{}, fmt.Errorf("checking node with %T, %w", check, err) } for _, issue := range issues { - logging.FromContext(ctx).Errorf("check failed, %s", issue) + ctrl.FromContext(ctx).Error(err, "check failed") consistencyErrors.With(prometheus.Labels{checkLabel: reflect.TypeOf(check).Elem().Name()}).Inc() c.recorder.Publish(FailedConsistencyCheckEvent(nodeClaim, string(issue))) } diff --git a/pkg/controllers/nodeclaim/consistency/suite_test.go b/pkg/controllers/nodeclaim/consistency/suite_test.go index eb019ee406..6571d20602 100644 --- a/pkg/controllers/nodeclaim/consistency/suite_test.go +++ b/pkg/controllers/nodeclaim/consistency/suite_test.go @@ -29,10 +29,11 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" clock "k8s.io/utils/clock/testing" - . "knative.dev/pkg/logging/testing" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" + . "sigs.k8s.io/karpenter/pkg/utils/testing" + "sigs.k8s.io/karpenter/pkg/apis/v1beta1" "sigs.k8s.io/karpenter/pkg/controllers/nodeclaim/consistency" . 
"sigs.k8s.io/karpenter/pkg/test/expectations" diff --git a/pkg/controllers/nodeclaim/disruption/controller.go b/pkg/controllers/nodeclaim/disruption/controller.go index d74459b65a..cfff16310e 100644 --- a/pkg/controllers/nodeclaim/disruption/controller.go +++ b/pkg/controllers/nodeclaim/disruption/controller.go @@ -25,7 +25,7 @@ import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" "k8s.io/utils/clock" - controllerruntime "sigs.k8s.io/controller-runtime" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/manager" @@ -111,7 +111,7 @@ func (c *Controller) Name() string { } func (c *Controller) Builder(_ context.Context, m manager.Manager) operatorcontroller.Builder { - return operatorcontroller.Adapt(controllerruntime. + return operatorcontroller.Adapt(ctrl. NewControllerManagedBy(m). For(&v1beta1.NodeClaim{}). WithOptions(controller.Options{MaxConcurrentReconciles: 10}). diff --git a/pkg/controllers/nodeclaim/disruption/drift.go b/pkg/controllers/nodeclaim/disruption/drift.go index c9578cad91..0ea43c3d90 100644 --- a/pkg/controllers/nodeclaim/disruption/drift.go +++ b/pkg/controllers/nodeclaim/disruption/drift.go @@ -23,7 +23,8 @@ import ( v1 "k8s.io/api/core/v1" "knative.dev/pkg/apis" - "knative.dev/pkg/logging" + + ctrl "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/prometheus/client_golang/prometheus" @@ -48,13 +49,14 @@ type Drift struct { func (d *Drift) Reconcile(ctx context.Context, nodePool *v1beta1.NodePool, nodeClaim *v1beta1.NodeClaim) (reconcile.Result, error) { hasDriftedCondition := nodeClaim.StatusConditions().GetCondition(v1beta1.Drifted) != nil + log := ctrl.FromContext(ctx).V(1) // From here there are three scenarios to handle: // 1. 
If drift is not enabled but the NodeClaim is drifted, remove the status condition if !options.FromContext(ctx).FeatureGates.Drift { _ = nodeClaim.StatusConditions().ClearCondition(v1beta1.Drifted) if hasDriftedCondition { - logging.FromContext(ctx).Debugf("removing drift status condition, drift has been disabled") + log.Info("removing drift status condition, drift has been disabled") } return reconcile.Result{}, nil } @@ -62,7 +64,7 @@ func (d *Drift) Reconcile(ctx context.Context, nodePool *v1beta1.NodePool, nodeC if launchCond := nodeClaim.StatusConditions().GetCondition(v1beta1.Launched); launchCond == nil || launchCond.IsFalse() { _ = nodeClaim.StatusConditions().ClearCondition(v1beta1.Drifted) if hasDriftedCondition { - logging.FromContext(ctx).Debugf("removing drift status condition, isn't launched") + log.Info("removing drift status condition, isn't launched") } return reconcile.Result{}, nil } @@ -74,7 +76,7 @@ func (d *Drift) Reconcile(ctx context.Context, nodePool *v1beta1.NodePool, nodeC if driftedReason == "" { _ = nodeClaim.StatusConditions().ClearCondition(v1beta1.Drifted) if hasDriftedCondition { - logging.FromContext(ctx).Debugf("removing drifted status condition, not drifted") + log.Info("removing drifted status condition, not drifted") } return reconcile.Result{RequeueAfter: 5 * time.Minute}, nil } @@ -86,7 +88,7 @@ func (d *Drift) Reconcile(ctx context.Context, nodePool *v1beta1.NodePool, nodeC Reason: string(driftedReason), }) if !hasDriftedCondition { - logging.FromContext(ctx).With("reason", string(driftedReason)).Debugf("marking drifted") + ctrl.FromContext(ctx).WithValues("reason", string(driftedReason)).Info("marking drifted") metrics.NodeClaimsDisruptedCounter.With(prometheus.Labels{ metrics.TypeLabel: metrics.DriftReason, metrics.NodePoolLabel: nodeClaim.Labels[v1beta1.NodePoolLabelKey], diff --git a/pkg/controllers/nodeclaim/disruption/emptiness.go b/pkg/controllers/nodeclaim/disruption/emptiness.go index 1b07c19cec..ce760f1d35 100644 --- a/pkg/controllers/nodeclaim/disruption/emptiness.go +++ b/pkg/controllers/nodeclaim/disruption/emptiness.go @@ -25,8 +25,8 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/utils/clock" "knative.dev/pkg/apis" - "knative.dev/pkg/logging" "sigs.k8s.io/controller-runtime/pkg/client" + ctrl "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/karpenter/pkg/apis/v1beta1" @@ -46,6 +46,7 @@ type Emptiness struct { //nolint:gocyclo func (e *Emptiness) Reconcile(ctx context.Context, nodePool *v1beta1.NodePool, nodeClaim *v1beta1.NodeClaim) (reconcile.Result, error) { hasEmptyCondition := nodeClaim.StatusConditions().GetCondition(v1beta1.Empty) != nil + log := ctrl.FromContext(ctx).V(1) // From here there are a few scenarios to handle: // 1. 
If ConsolidationPolicyWhenEmpty is not configured or ConsolidateAfter isn't configured, remove the emptiness status condition @@ -54,7 +55,7 @@ func (e *Emptiness) Reconcile(ctx context.Context, nodePool *v1beta1.NodePool, n nodePool.Spec.Disruption.ConsolidateAfter.Duration == nil { if hasEmptyCondition { _ = nodeClaim.StatusConditions().ClearCondition(v1beta1.Empty) - logging.FromContext(ctx).Debugf("removing emptiness status condition, emptiness is disabled") + log.Info("removing emptiness status condition, emptiness is disabled") } return reconcile.Result{}, nil } @@ -62,7 +63,7 @@ func (e *Emptiness) Reconcile(ctx context.Context, nodePool *v1beta1.NodePool, n if initCond := nodeClaim.StatusConditions().GetCondition(v1beta1.Initialized); initCond == nil || initCond.IsFalse() { if hasEmptyCondition { _ = nodeClaim.StatusConditions().ClearCondition(v1beta1.Empty) - logging.FromContext(ctx).Debugf("removing emptiness status condition, isn't initialized") + log.Info("removing emptiness status condition, isn't initialized") } return reconcile.Result{}, nil } @@ -73,7 +74,7 @@ func (e *Emptiness) Reconcile(ctx context.Context, nodePool *v1beta1.NodePool, n if nodeclaimutil.IsDuplicateNodeError(err) || nodeclaimutil.IsNodeNotFoundError(err) { _ = nodeClaim.StatusConditions().ClearCondition(v1beta1.Empty) if hasEmptyCondition { - logging.FromContext(ctx).Debugf("removing emptiness status condition, doesn't have a single node mapping") + log.Info("removing emptiness status condition, doesn't have a single node mapping") } return reconcile.Result{}, nil } @@ -86,7 +87,7 @@ func (e *Emptiness) Reconcile(ctx context.Context, nodePool *v1beta1.NodePool, n if e.cluster.IsNodeNominated(n.Spec.ProviderID) { _ = nodeClaim.StatusConditions().ClearCondition(v1beta1.Empty) if hasEmptyCondition { - logging.FromContext(ctx).Debugf("removing emptiness status condition, is nominated for pods") + log.Info("removing emptiness status condition, is nominated for pods") } return reconcile.Result{RequeueAfter: time.Second * 30}, nil } @@ -98,7 +99,7 @@ func (e *Emptiness) Reconcile(ctx context.Context, nodePool *v1beta1.NodePool, n if len(pods) > 0 { _ = nodeClaim.StatusConditions().ClearCondition(v1beta1.Empty) if hasEmptyCondition { - logging.FromContext(ctx).Debugf("removing emptiness status condition, not empty") + log.Info("removing emptiness status condition, not empty") } return reconcile.Result{}, nil } @@ -109,7 +110,8 @@ func (e *Emptiness) Reconcile(ctx context.Context, nodePool *v1beta1.NodePool, n Severity: apis.ConditionSeverityWarning, }) if !hasEmptyCondition { - logging.FromContext(ctx).Debugf("marking empty") + log.Info("marking empty") + metrics.NodeClaimsDisruptedCounter.With(prometheus.Labels{ metrics.TypeLabel: metrics.EmptinessReason, metrics.NodePoolLabel: nodeClaim.Labels[v1beta1.NodePoolLabelKey], diff --git a/pkg/controllers/nodeclaim/disruption/expiration.go b/pkg/controllers/nodeclaim/disruption/expiration.go index 5ef53ce655..b2ca677d47 100644 --- a/pkg/controllers/nodeclaim/disruption/expiration.go +++ b/pkg/controllers/nodeclaim/disruption/expiration.go @@ -23,8 +23,8 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/utils/clock" "knative.dev/pkg/apis" - "knative.dev/pkg/logging" "sigs.k8s.io/controller-runtime/pkg/client" + ctrl "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/karpenter/pkg/apis/v1beta1" @@ -39,13 +39,14 @@ type Expiration struct { func (e *Expiration) Reconcile(ctx context.Context, nodePool *v1beta1.NodePool, nodeClaim 
*v1beta1.NodeClaim) (reconcile.Result, error) { hasExpiredCondition := nodeClaim.StatusConditions().GetCondition(v1beta1.Expired) != nil + log := ctrl.FromContext(ctx).V(1) // From here there are three scenarios to handle: // 1. If ExpireAfter is not configured, remove the expired status condition if nodePool.Spec.Disruption.ExpireAfter.Duration == nil { _ = nodeClaim.StatusConditions().ClearCondition(v1beta1.Expired) if hasExpiredCondition { - logging.FromContext(ctx).Debugf("removing expiration status condition, expiration has been disabled") + log.Info("removing expiration status condition, expiration has been disabled") } return reconcile.Result{}, nil } @@ -54,7 +55,7 @@ func (e *Expiration) Reconcile(ctx context.Context, nodePool *v1beta1.NodePool, if e.clock.Now().Before(expirationTime) { _ = nodeClaim.StatusConditions().ClearCondition(v1beta1.Expired) if hasExpiredCondition { - logging.FromContext(ctx).Debugf("removing expired status condition, not expired") + log.Info("removing expired status condition, not expired") } // If the NodeClaim isn't expired and doesn't have the status condition, return. // Use t.Sub(clock.Now()) instead of time.Until() to ensure we're using the injected clock. @@ -67,7 +68,7 @@ func (e *Expiration) Reconcile(ctx context.Context, nodePool *v1beta1.NodePool, Severity: apis.ConditionSeverityWarning, }) if !hasExpiredCondition { - logging.FromContext(ctx).Debugf("marking expired") + log.Info("marking expired") metrics.NodeClaimsDisruptedCounter.With(prometheus.Labels{ metrics.TypeLabel: metrics.ExpirationReason, metrics.NodePoolLabel: nodeClaim.Labels[v1beta1.NodePoolLabelKey], diff --git a/pkg/controllers/nodeclaim/disruption/suite_test.go b/pkg/controllers/nodeclaim/disruption/suite_test.go index ba094cc5ec..21ecba845d 100644 --- a/pkg/controllers/nodeclaim/disruption/suite_test.go +++ b/pkg/controllers/nodeclaim/disruption/suite_test.go @@ -27,10 +27,11 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clock "k8s.io/utils/clock/testing" - . "knative.dev/pkg/logging/testing" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" + . "sigs.k8s.io/karpenter/pkg/utils/testing" + "sigs.k8s.io/karpenter/pkg/apis" "sigs.k8s.io/karpenter/pkg/apis/v1beta1" "sigs.k8s.io/karpenter/pkg/cloudprovider/fake" diff --git a/pkg/controllers/nodeclaim/garbagecollection/controller.go b/pkg/controllers/nodeclaim/garbagecollection/controller.go index f2903536da..4879e9e37d 100644 --- a/pkg/controllers/nodeclaim/garbagecollection/controller.go +++ b/pkg/controllers/nodeclaim/garbagecollection/controller.go @@ -26,8 +26,8 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/util/workqueue" "k8s.io/utils/clock" - "knative.dev/pkg/logging" "sigs.k8s.io/controller-runtime/pkg/client" + ctrl "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -83,13 +83,11 @@ func (c *Controller) Reconcile(ctx context.Context, _ reconcile.Request) (reconc errs[i] = client.IgnoreNotFound(err) return } - logging.FromContext(ctx). - With( - "nodeclaim", nodeClaims[i].Name, - "provider-id", nodeClaims[i].Status.ProviderID, - "nodepool", nodeClaims[i].Labels[v1beta1.NodePoolLabelKey], - ). 
- Debugf("garbage collecting nodeclaim with no cloudprovider representation") + ctrl.FromContext(ctx).WithValues( + "nodeclaim", nodeClaims[i].Name, + "provider-id", nodeClaims[i].Status.ProviderID, + "nodepool", nodeClaims[i].Labels[v1beta1.NodePoolLabelKey], + ).V(1).Info("garbage collecting nodeclaim with no cloudprovider representation") metrics.NodeClaimsTerminatedCounter.With(prometheus.Labels{ metrics.ReasonLabel: "garbage_collected", metrics.NodePoolLabel: nodeClaims[i].Labels[v1beta1.NodePoolLabelKey], diff --git a/pkg/controllers/nodeclaim/garbagecollection/suite_test.go b/pkg/controllers/nodeclaim/garbagecollection/suite_test.go index f3f792a756..e58f9527e5 100644 --- a/pkg/controllers/nodeclaim/garbagecollection/suite_test.go +++ b/pkg/controllers/nodeclaim/garbagecollection/suite_test.go @@ -32,7 +32,8 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "github.com/samber/lo" - . "knative.dev/pkg/logging/testing" + + . "sigs.k8s.io/karpenter/pkg/utils/testing" "sigs.k8s.io/karpenter/pkg/apis" "sigs.k8s.io/karpenter/pkg/apis/v1beta1" diff --git a/pkg/controllers/nodeclaim/lifecycle/controller.go b/pkg/controllers/nodeclaim/lifecycle/controller.go index 675913ef9a..ac6d0bd3f3 100644 --- a/pkg/controllers/nodeclaim/lifecycle/controller.go +++ b/pkg/controllers/nodeclaim/lifecycle/controller.go @@ -32,7 +32,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/predicate" - controllerruntime "sigs.k8s.io/controller-runtime" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/manager" @@ -129,7 +129,7 @@ func (*Controller) Name() string { } func (c *Controller) Builder(_ context.Context, m manager.Manager) operatorcontroller.Builder { - return operatorcontroller.Adapt(controllerruntime. + return operatorcontroller.Adapt(ctrl. NewControllerManagedBy(m). 
For(&v1beta1.NodeClaim{}, builder.WithPredicates( predicate.Funcs{ diff --git a/pkg/controllers/nodeclaim/lifecycle/initialization.go b/pkg/controllers/nodeclaim/lifecycle/initialization.go index 9321d2ac5b..f388348d26 100644 --- a/pkg/controllers/nodeclaim/lifecycle/initialization.go +++ b/pkg/controllers/nodeclaim/lifecycle/initialization.go @@ -24,8 +24,8 @@ import ( "github.com/samber/lo" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" - "knative.dev/pkg/logging" "sigs.k8s.io/controller-runtime/pkg/client" + ctrl "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/karpenter/pkg/apis/v1beta1" @@ -53,13 +53,13 @@ func (i *Initialization) Reconcile(ctx context.Context, nodeClaim *v1beta1.NodeC nodeClaim.StatusConditions().MarkFalse(v1beta1.Initialized, "NotLaunched", "Node not launched") return reconcile.Result{}, nil } - ctx = logging.WithLogger(ctx, logging.FromContext(ctx).With("provider-id", nodeClaim.Status.ProviderID)) + ctx = ctrl.IntoContext(ctx, ctrl.FromContext(ctx).WithValues("provider-id", nodeClaim.Status.ProviderID)) node, err := nodeclaimutil.NodeForNodeClaim(ctx, i.kubeClient, nodeClaim) if err != nil { nodeClaim.StatusConditions().MarkFalse(v1beta1.Initialized, "NodeNotFound", "Node not registered with cluster") return reconcile.Result{}, nil //nolint:nilerr } - ctx = logging.WithLogger(ctx, logging.FromContext(ctx).With("node", node.Name)) + ctx = ctrl.IntoContext(ctx, ctrl.FromContext(ctx).WithValues("node", node.Name)) if nodeutil.GetCondition(node, v1.NodeReady).Status != v1.ConditionTrue { nodeClaim.StatusConditions().MarkFalse(v1beta1.Initialized, "NodeNotReady", "Node status is NotReady") return reconcile.Result{}, nil @@ -83,7 +83,7 @@ func (i *Initialization) Reconcile(ctx context.Context, nodeClaim *v1beta1.NodeC return reconcile.Result{}, err } } - logging.FromContext(ctx).With("allocatable", node.Status.Allocatable).Infof("initialized nodeclaim") + ctrl.FromContext(ctx).WithValues("allocatable", node.Status.Allocatable).Info("initialized nodeclaim") nodeClaim.StatusConditions().MarkTrue(v1beta1.Initialized) metrics.NodeClaimsInitializedCounter.With(prometheus.Labels{ metrics.NodePoolLabel: nodeClaim.Labels[v1beta1.NodePoolLabelKey], diff --git a/pkg/controllers/nodeclaim/lifecycle/launch.go b/pkg/controllers/nodeclaim/lifecycle/launch.go index 72410508a6..e8efadf287 100644 --- a/pkg/controllers/nodeclaim/lifecycle/launch.go +++ b/pkg/controllers/nodeclaim/lifecycle/launch.go @@ -24,8 +24,8 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/samber/lo" v1 "k8s.io/api/core/v1" - "knative.dev/pkg/logging" "sigs.k8s.io/controller-runtime/pkg/client" + ctrl "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/karpenter/pkg/apis/v1beta1" @@ -83,7 +83,8 @@ func (l *Launch) launchNodeClaim(ctx context.Context, nodeClaim *v1beta1.NodeCla switch { case cloudprovider.IsInsufficientCapacityError(err): l.recorder.Publish(InsufficientCapacityErrorEvent(nodeClaim, err)) - logging.FromContext(ctx).Error(err) + ctrl.FromContext(ctx).Error(err, "failed to launch nodeclaim") + if err = l.kubeClient.Delete(ctx, nodeClaim); err != nil { return nil, client.IgnoreNotFound(err) } @@ -101,12 +102,12 @@ func (l *Launch) launchNodeClaim(ctx context.Context, nodeClaim *v1beta1.NodeCla return nil, fmt.Errorf("launching nodeclaim, %w", err) } } - logging.FromContext(ctx).With( + ctrl.FromContext(ctx).WithValues( "provider-id", created.Status.ProviderID,
"instance-type", created.Labels[v1.LabelInstanceTypeStable], "zone", created.Labels[v1.LabelTopologyZone], "capacity-type", created.Labels[v1beta1.CapacityTypeLabelKey], - "allocatable", created.Status.Allocatable).Infof("launched nodeclaim") + "allocatable", created.Status.Allocatable).Info("launched nodeclaim") return created, nil } diff --git a/pkg/controllers/nodeclaim/lifecycle/liveness.go b/pkg/controllers/nodeclaim/lifecycle/liveness.go index d52b30e278..ec7e3c0694 100644 --- a/pkg/controllers/nodeclaim/lifecycle/liveness.go +++ b/pkg/controllers/nodeclaim/lifecycle/liveness.go @@ -22,8 +22,8 @@ import ( "github.com/prometheus/client_golang/prometheus" "k8s.io/utils/clock" - "knative.dev/pkg/logging" "sigs.k8s.io/controller-runtime/pkg/client" + ctrl "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/karpenter/pkg/apis/v1beta1" @@ -55,7 +55,7 @@ func (l *Liveness) Reconcile(ctx context.Context, nodeClaim *v1beta1.NodeClaim) if err := l.kubeClient.Delete(ctx, nodeClaim); err != nil { return reconcile.Result{}, client.IgnoreNotFound(err) } - logging.FromContext(ctx).With("ttl", registrationTTL).Debugf("terminating due to registration ttl") + ctrl.FromContext(ctx).WithValues("ttl", registrationTTL).V(1).Info("terminating due to registration ttl") metrics.NodeClaimsTerminatedCounter.With(prometheus.Labels{ metrics.ReasonLabel: "liveness", metrics.NodePoolLabel: nodeClaim.Labels[v1beta1.NodePoolLabelKey], diff --git a/pkg/controllers/nodeclaim/lifecycle/registration.go b/pkg/controllers/nodeclaim/lifecycle/registration.go index 52d0b035f8..63cf296026 100644 --- a/pkg/controllers/nodeclaim/lifecycle/registration.go +++ b/pkg/controllers/nodeclaim/lifecycle/registration.go @@ -24,9 +24,9 @@ import ( "github.com/samber/lo" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" - "knative.dev/pkg/logging" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + ctrl "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/karpenter/pkg/apis/v1beta1" @@ -47,8 +47,7 @@ func (r *Registration) Reconcile(ctx context.Context, nodeClaim *v1beta1.NodeCla nodeClaim.StatusConditions().MarkFalse(v1beta1.Registered, "NotLaunched", "Node not launched") return reconcile.Result{}, nil } - - ctx = logging.WithLogger(ctx, logging.FromContext(ctx).With("provider-id", nodeClaim.Status.ProviderID)) + ctx = ctrl.IntoContext(ctx, ctrl.FromContext(ctx).WithValues("provider-id", nodeClaim.Status.ProviderID)) node, err := nodeclaimutil.NodeForNodeClaim(ctx, r.kubeClient, nodeClaim) if err != nil { if nodeclaimutil.IsNodeNotFoundError(err) { @@ -61,11 +60,11 @@ func (r *Registration) Reconcile(ctx context.Context, nodeClaim *v1beta1.NodeCla } return reconcile.Result{}, fmt.Errorf("getting node for nodeclaim, %w", err) } - ctx = logging.WithLogger(ctx, logging.FromContext(ctx).With("node", node.Name)) + ctx = ctrl.IntoContext(ctx, ctrl.FromContext(ctx).WithValues("node", node.Name)) if err = r.syncNode(ctx, nodeClaim, node); err != nil { return reconcile.Result{}, fmt.Errorf("syncing node, %w", err) } - logging.FromContext(ctx).Infof("registered nodeclaim") + ctrl.FromContext(ctx).Info("registered nodeclaim") nodeClaim.StatusConditions().MarkTrue(v1beta1.Registered) nodeClaim.Status.NodeName = node.Name diff --git a/pkg/controllers/nodeclaim/lifecycle/suite_test.go b/pkg/controllers/nodeclaim/lifecycle/suite_test.go index 6c740c2a3f..15f01ba446 100644 --- 
a/pkg/controllers/nodeclaim/lifecycle/suite_test.go +++ b/pkg/controllers/nodeclaim/lifecycle/suite_test.go @@ -28,10 +28,11 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/record" clock "k8s.io/utils/clock/testing" - . "knative.dev/pkg/logging/testing" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" + . "sigs.k8s.io/karpenter/pkg/utils/testing" + "sigs.k8s.io/karpenter/pkg/apis" "sigs.k8s.io/karpenter/pkg/apis/v1beta1" "sigs.k8s.io/karpenter/pkg/cloudprovider/fake" diff --git a/pkg/controllers/nodeclaim/termination/controller.go b/pkg/controllers/nodeclaim/termination/controller.go index 3d91916d2a..1b931d6eaf 100644 --- a/pkg/controllers/nodeclaim/termination/controller.go +++ b/pkg/controllers/nodeclaim/termination/controller.go @@ -25,13 +25,13 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" "k8s.io/client-go/util/workqueue" - "knative.dev/pkg/logging" controllerruntime "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/event" + ctrl "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -64,7 +64,7 @@ func (c *Controller) Reconcile(_ context.Context, _ *v1beta1.NodeClaim) (reconci } func (c *Controller) Finalize(ctx context.Context, nodeClaim *v1beta1.NodeClaim) (reconcile.Result, error) { - ctx = logging.WithLogger(ctx, logging.FromContext(ctx).With("node", nodeClaim.Status.NodeName, "provider-id", nodeClaim.Status.ProviderID)) + ctx = ctrl.IntoContext(ctx, ctrl.FromContext(ctx).WithValues("node", nodeClaim.Status.NodeName, "provider-id", nodeClaim.Status.ProviderID)) stored := nodeClaim.DeepCopy() if !controllerutil.ContainsFinalizer(nodeClaim, v1beta1.TerminationFinalizer) { return reconcile.Result{}, nil @@ -93,7 +93,7 @@ func (c *Controller) Finalize(ctx context.Context, nodeClaim *v1beta1.NodeClaim) if err = c.kubeClient.Patch(ctx, nodeClaim, client.MergeFrom(stored)); err != nil { return reconcile.Result{}, client.IgnoreNotFound(fmt.Errorf("removing termination finalizer, %w", err)) } - logging.FromContext(ctx).Infof("deleted nodeclaim") + ctrl.FromContext(ctx).Info("deleted nodeclaim") } return reconcile.Result{}, nil } diff --git a/pkg/controllers/nodeclaim/termination/suite_test.go b/pkg/controllers/nodeclaim/termination/suite_test.go index 37dc79f5c2..51d37e27cd 100644 --- a/pkg/controllers/nodeclaim/termination/suite_test.go +++ b/pkg/controllers/nodeclaim/termination/suite_test.go @@ -29,10 +29,11 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/record" clock "k8s.io/utils/clock/testing" - . "knative.dev/pkg/logging/testing" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" + . 
"sigs.k8s.io/karpenter/pkg/utils/testing" + nodeclaimtermination "sigs.k8s.io/karpenter/pkg/controllers/nodeclaim/termination" "sigs.k8s.io/karpenter/pkg/apis" diff --git a/pkg/controllers/nodepool/counter/controller.go b/pkg/controllers/nodepool/counter/controller.go index d5f3a3428e..eb2646c143 100644 --- a/pkg/controllers/nodepool/counter/controller.go +++ b/pkg/controllers/nodepool/counter/controller.go @@ -31,7 +31,7 @@ import ( "sigs.k8s.io/karpenter/pkg/utils/functional" v1 "k8s.io/api/core/v1" - controllerruntime "sigs.k8s.io/controller-runtime" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/manager" @@ -99,7 +99,7 @@ func (c *Controller) Name() string { } func (c *Controller) Builder(_ context.Context, m manager.Manager) operatorcontroller.Builder { - return operatorcontroller.Adapt(controllerruntime. + return operatorcontroller.Adapt(ctrl. NewControllerManagedBy(m). For(&v1beta1.NodePool{}). Watches( diff --git a/pkg/controllers/nodepool/counter/suite_test.go b/pkg/controllers/nodepool/counter/suite_test.go index 97584761a6..1c59da31e2 100644 --- a/pkg/controllers/nodepool/counter/suite_test.go +++ b/pkg/controllers/nodepool/counter/suite_test.go @@ -27,9 +27,10 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clock "k8s.io/utils/clock/testing" - . "knative.dev/pkg/logging/testing" "sigs.k8s.io/controller-runtime/pkg/client" + . "sigs.k8s.io/karpenter/pkg/utils/testing" + . "sigs.k8s.io/karpenter/pkg/test/expectations" "sigs.k8s.io/karpenter/pkg/apis" diff --git a/pkg/controllers/nodepool/hash/controller.go b/pkg/controllers/nodepool/hash/controller.go index 5e4321053f..dedae1ab7b 100644 --- a/pkg/controllers/nodepool/hash/controller.go +++ b/pkg/controllers/nodepool/hash/controller.go @@ -21,7 +21,7 @@ import ( "github.com/samber/lo" "k8s.io/apimachinery/pkg/api/equality" - controllerruntime "sigs.k8s.io/controller-runtime" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/manager" @@ -63,7 +63,7 @@ func (c *Controller) Name() string { } func (c *Controller) Builder(_ context.Context, m manager.Manager) operatorcontroller.Builder { - return operatorcontroller.Adapt(controllerruntime. + return operatorcontroller.Adapt(ctrl. NewControllerManagedBy(m). For(&v1beta1.NodePool{}). WithOptions(controller.Options{MaxConcurrentReconciles: 10}), diff --git a/pkg/controllers/nodepool/hash/suite_test.go b/pkg/controllers/nodepool/hash/suite_test.go index 358a2e948d..5ce86bf078 100644 --- a/pkg/controllers/nodepool/hash/suite_test.go +++ b/pkg/controllers/nodepool/hash/suite_test.go @@ -26,10 +26,11 @@ import ( "github.com/samber/lo" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" - . "knative.dev/pkg/logging/testing" "knative.dev/pkg/ptr" "sigs.k8s.io/controller-runtime/pkg/client" + . "sigs.k8s.io/karpenter/pkg/utils/testing" + . 
"sigs.k8s.io/karpenter/pkg/test/expectations" "sigs.k8s.io/karpenter/pkg/apis" diff --git a/pkg/controllers/provisioning/controller.go b/pkg/controllers/provisioning/controller.go index 458348c13c..0e3cdf303d 100644 --- a/pkg/controllers/provisioning/controller.go +++ b/pkg/controllers/provisioning/controller.go @@ -22,7 +22,7 @@ import ( "github.com/samber/lo" v1 "k8s.io/api/core/v1" - controllerruntime "sigs.k8s.io/controller-runtime" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/manager" @@ -70,7 +70,7 @@ func (c *PodController) Reconcile(_ context.Context, p *v1.Pod) (reconcile.Resul } func (*PodController) Builder(_ context.Context, m manager.Manager) operatorcontroller.Builder { - return operatorcontroller.Adapt(controllerruntime. + return operatorcontroller.Adapt(ctrl. NewControllerManagedBy(m). For(&v1.Pod{}). WithOptions(controller.Options{MaxConcurrentReconciles: 10}), @@ -116,7 +116,7 @@ func (c *NodeController) Reconcile(_ context.Context, n *v1.Node) (reconcile.Res } func (*NodeController) Builder(_ context.Context, m manager.Manager) operatorcontroller.Builder { - return operatorcontroller.Adapt(controllerruntime. + return operatorcontroller.Adapt(ctrl. NewControllerManagedBy(m). For(&v1.Node{}). WithOptions(controller.Options{MaxConcurrentReconciles: 10}), diff --git a/pkg/controllers/provisioning/provisioner.go b/pkg/controllers/provisioning/provisioner.go index 195b02110d..da62baba30 100644 --- a/pkg/controllers/provisioning/provisioner.go +++ b/pkg/controllers/provisioning/provisioner.go @@ -31,8 +31,8 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/util/workqueue" - "knative.dev/pkg/logging" "sigs.k8s.io/controller-runtime/pkg/client" + ctrl "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -118,7 +118,7 @@ func (p *Provisioner) Reconcile(ctx context.Context, _ reconcile.Request) (resul // with making any scheduling decision off of our state nodes. Otherwise, we have the potential to make // a scheduling decision based on a smaller subset of nodes in our cluster state than actually exist. 
if !p.cluster.Synced(ctx) { - logging.FromContext(ctx).Debugf("waiting on cluster sync") + ctrl.FromContext(ctx).V(1).Info("waiting on cluster sync") return reconcile.Result{RequeueAfter: time.Second}, nil } @@ -160,7 +160,7 @@ func (p *Provisioner) GetPendingPods(ctx context.Context) ([]*v1.Pod, error) { } return lo.Reject(pods, func(po *v1.Pod, _ int) bool { if err := p.Validate(ctx, po); err != nil { - logging.FromContext(ctx).With("pod", client.ObjectKeyFromObject(po)).Debugf("ignoring pod, %s", err) + ctrl.FromContext(ctx).WithValues("pod", client.ObjectKeyFromObject(po)).V(3).Info(fmt.Sprintf("ignoring pod, %s", err)) return true } p.consolidationWarnings(ctx, po) @@ -176,14 +176,14 @@ func (p *Provisioner) consolidationWarnings(ctx context.Context, po *v1.Pod) { if po.Spec.Affinity != nil && po.Spec.Affinity.PodAntiAffinity != nil { if len(po.Spec.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution) != 0 { if p.cm.HasChanged(string(po.UID), "pod-antiaffinity") { - logging.FromContext(ctx).Infof("pod %q has a preferred Anti-Affinity which can prevent consolidation", client.ObjectKeyFromObject(po)) + ctrl.FromContext(ctx).Info(fmt.Sprintf("pod %q has a preferred Anti-Affinity which can prevent consolidation", client.ObjectKeyFromObject(po))) } } } for _, tsc := range po.Spec.TopologySpreadConstraints { if tsc.WhenUnsatisfiable == v1.ScheduleAnyway { if p.cm.HasChanged(string(po.UID), "pod-topology-spread") { - logging.FromContext(ctx).Infof("pod %q has a preferred TopologySpreadConstraint which can prevent consolidation", client.ObjectKeyFromObject(po)) + ctrl.FromContext(ctx).Info(fmt.Sprintf("pod %q has a preferred TopologySpreadConstraint which can prevent consolidation", client.ObjectKeyFromObject(po))) } } } @@ -205,7 +205,7 @@ func (p *Provisioner) NewScheduler(ctx context.Context, pods []*v1.Pod, stateNod } nodePoolList.Items = lo.Filter(nodePoolList.Items, func(n v1beta1.NodePool, _ int) bool { if err := n.RuntimeValidate(); err != nil { - logging.FromContext(ctx).With("nodepool", n.Name).Errorf("nodepool failed validation, %s", err) + ctrl.FromContext(ctx).WithValues("nodepool", n.Name).Error(err, "nodepool failed validation") return false } return n.DeletionTimestamp.IsZero() @@ -228,11 +228,12 @@ func (p *Provisioner) NewScheduler(ctx context.Context, pods []*v1.Pod, stateNod if err != nil { // we just log an error and skip the provisioner to prevent a single mis-configured provisioner from stopping // all scheduling - logging.FromContext(ctx).With("nodepool", nodePool.Name).Errorf("skipping, unable to resolve instance types, %s", err) + ctrl.FromContext(ctx).WithValues("nodepool", nodePool.Name).Error(err, "skipping, unable to resolve instance types") continue } if len(instanceTypeOptions) == 0 { - logging.FromContext(ctx).With("nodepool", nodePool.Name).Info("skipping, no resolved instance types found") + ctrl.FromContext(ctx).WithValues("nodepool", nodePool.Name).Info("skipping, no resolved instance types found") + continue } instanceTypes[nodePool.Name] = append(instanceTypes[nodePool.Name], instanceTypeOptions...)
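The converted calls above follow the logr convention used throughout controller-runtime: a plain message string followed by key/value pairs, with no printf formatting and with errors passed to Error() rather than into the message. A minimal, self-contained sketch of that convention (the zap-backed logger, the "nodepool" value, and the sample error below are illustrative, not taken from this patch):

package main

import (
	"context"
	"errors"
	"fmt"

	ctrl "sigs.k8s.io/controller-runtime/pkg/log"
	crzap "sigs.k8s.io/controller-runtime/pkg/log/zap"
)

func main() {
	// Stash a zap-backed logr.Logger in a context, mirroring how the
	// controllers above retrieve it with ctrl.FromContext(ctx).
	ctx := ctrl.IntoContext(context.Background(), crzap.New(crzap.UseDevMode(true)))
	log := ctrl.FromContext(ctx).WithValues("nodepool", "example")

	err := errors.New("unable to resolve instance types")

	// logr messages are plain strings; trailing arguments are key/value
	// pairs, not printf operands, so format verbs must be resolved first.
	log.Error(err, "skipping, unable to resolve instance types") // error travels as a structured field
	log.V(1).Info(fmt.Sprintf("ignoring pod, %s", err))          // message pre-formatted with Sprintf
	log.V(1).Info("ignoring pod", "reason", err.Error())         // or expressed as a key/value pair
}

In this migration the old Debugf calls consistently become V(1).Info, so higher V levels (such as the V(3) used for the per-pod validation message) only surface when verbosity is raised.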
@@ -323,21 +324,22 @@ func (p *Provisioner) Schedule(ctx context.Context) (scheduler.Results, error) { s, err := p.NewScheduler(ctx, pods, nodes.Active()) if err != nil { if errors.Is(err, ErrNodePoolsNotFound) { - logging.FromContext(ctx).Info(ErrNodePoolsNotFound) + ctrl.FromContext(ctx).Info(ErrNodePoolsNotFound.Error()) return scheduler.Results{}, nil } return scheduler.Results{}, fmt.Errorf("creating scheduler, %w", err) } results := s.Solve(ctx, pods).TruncateInstanceTypes(scheduler.MaxInstanceTypes) - logging.FromContext(ctx).With("pods", pretty.Slice(lo.Map(pods, func(p *v1.Pod, _ int) string { return client.ObjectKeyFromObject(p).String() }), 5)). - With("duration", time.Since(start)). - Infof("found provisionable pod(s)") + + ctrl.FromContext(ctx).WithValues("pods", pretty.Slice(lo.Map(pods, func(p *v1.Pod, _ int) string { return client.ObjectKeyFromObject(p).String() }), 5)). + WithValues("duration", time.Since(start)). + Info("found provisionable pod(s)") results.Record(ctx, p.recorder, p.cluster) return results, nil } func (p *Provisioner) Create(ctx context.Context, n *scheduler.NodeClaim, opts ...functional.Option[LaunchOptions]) (string, error) { - ctx = logging.WithLogger(ctx, logging.FromContext(ctx).With("nodepool", n.NodePoolName)) + ctx = ctrl.IntoContext(ctx, ctrl.FromContext(ctx).WithValues("nodepool", n.NodePoolName)) options := functional.ResolveOptions(opts...) latest := &v1beta1.NodePool{} if err := p.kubeClient.Get(ctx, types.NamespacedName{Name: n.NodePoolName}, latest); err != nil { @@ -354,7 +356,9 @@ func (p *Provisioner) Create(ctx context.Context, n *scheduler.NodeClaim, opts . instanceTypeRequirement, _ := lo.Find(nodeClaim.Spec.Requirements, func(req v1beta1.NodeSelectorRequirementWithMinValues) bool { return req.Key == v1.LabelInstanceTypeStable }) - logging.FromContext(ctx).With("nodeclaim", nodeClaim.Name, "requests", nodeClaim.Spec.Resources.Requests, "instance-types", instanceTypeList(instanceTypeRequirement.Values)).Infof("created nodeclaim") + + ctrl.FromContext(ctx).WithValues("nodeclaim", nodeClaim.Name, "requests", nodeClaim.Spec.Resources.Requests, "instance-types", instanceTypeList(instanceTypeRequirement.Values)). + Info("created nodeclaim") metrics.NodeClaimsCreatedCounter.With(prometheus.Labels{ metrics.ReasonLabel: options.Reason, metrics.NodePoolLabel: nodeClaim.Labels[v1beta1.NodePoolLabelKey], @@ -440,7 +444,7 @@ func (p *Provisioner) injectTopology(ctx context.Context, pods []*v1.Pod) []*v1.
var schedulablePods []*v1.Pod for _, pod := range pods { if err := p.volumeTopology.Inject(ctx, pod); err != nil { - logging.FromContext(ctx).With("pod", client.ObjectKeyFromObject(pod)).Errorf("getting volume topology requirements, %s", err) + ctrl.FromContext(ctx).WithValues("pod", client.ObjectKeyFromObject(pod)).Error(err, "getting volume topology requirements") } else { schedulablePods = append(schedulablePods, pod) } diff --git a/pkg/controllers/provisioning/scheduling/preferences.go b/pkg/controllers/provisioning/scheduling/preferences.go index 3392eacde8..d587a9f2e5 100644 --- a/pkg/controllers/provisioning/scheduling/preferences.go +++ b/pkg/controllers/provisioning/scheduling/preferences.go @@ -22,9 +22,9 @@ import ( "sort" v1 "k8s.io/api/core/v1" - "knative.dev/pkg/logging" "knative.dev/pkg/ptr" "sigs.k8s.io/controller-runtime/pkg/client" + ctrl "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/karpenter/pkg/utils/pretty" ) @@ -36,7 +36,7 @@ type Preferences struct { } func (p *Preferences) Relax(ctx context.Context, pod *v1.Pod) bool { - ctx = logging.WithLogger(ctx, logging.FromContext(ctx).With("pod", client.ObjectKeyFromObject(pod))) + ctx = ctrl.IntoContext(ctx, ctrl.FromContext(ctx).WithValues("pod", client.ObjectKeyFromObject(pod))) relaxations := []func(*v1.Pod) *string{ p.removeRequiredNodeAffinityTerm, p.removePreferredPodAffinityTerm, @@ -50,7 +50,7 @@ func (p *Preferences) Relax(ctx context.Context, pod *v1.Pod) bool { for _, relaxFunc := range relaxations { if reason := relaxFunc(pod); reason != nil { - logging.FromContext(ctx).Debugf("relaxing soft constraints for pod since it previously failed to schedule, %s", ptr.StringValue(reason)) + ctrl.FromContext(ctx).V(1).Info("relaxing soft constraints for pod since it previously failed to schedule", "reason", ptr.StringValue(reason)) return true } } diff --git a/pkg/controllers/provisioning/scheduling/scheduler.go b/pkg/controllers/provisioning/scheduling/scheduler.go index 5405fe3389..9ba8c9218c 100644 --- a/pkg/controllers/provisioning/scheduling/scheduler.go +++ b/pkg/controllers/provisioning/scheduling/scheduler.go @@ -25,8 +25,8 @@ import ( "github.com/samber/lo" "go.uber.org/multierr" v1 "k8s.io/api/core/v1" - "knative.dev/pkg/logging" "sigs.k8s.io/controller-runtime/pkg/client" + ctrl "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/karpenter/pkg/apis/v1beta1" "sigs.k8s.io/karpenter/pkg/cloudprovider" @@ -101,7 +101,7 @@ type Results struct { func (r Results) Record(ctx context.Context, recorder events.Recorder, cluster *state.Cluster) { // Report failures and nominations for p, err := range r.PodErrors { - logging.FromContext(ctx).With("pod", client.ObjectKeyFromObject(p)).Errorf("Could not schedule pod, %s", err) + ctrl.FromContext(ctx).WithValues("pod", client.ObjectKeyFromObject(p)).Error(err, "Could not schedule pod") recorder.Publish(PodFailedToScheduleEvent(p, err)) } for _, existing := range r.ExistingNodes { @@ -120,7 +120,7 @@ func (r Results) Record(ctx context.Context, recorder events.Recorder, cluster * if newCount == 0 { return } - logging.FromContext(ctx).With("nodeclaims", len(r.NewNodeClaims), "pods", newCount).Infof("computed new nodeclaim(s) to fit pod(s)") + ctrl.FromContext(ctx).WithValues("nodeclaims", len(r.NewNodeClaims), "pods", newCount).Info("computed new nodeclaim(s) to fit pod(s)") // Report in flight newNodes, or exit to avoid log spam inflightCount := 0 existingCount := 0 @@ -131,7 +131,7 @@ func (r Results) Record(ctx context.Context, recorder events.Recorder, cluster *
if existingCount == 0 { return } - logging.FromContext(ctx).Infof("computed %d unready node(s) will fit %d pod(s)", inflightCount, existingCount) + ctrl.FromContext(ctx).Info(fmt.Sprintf("computed %d unready node(s) will fit %d pod(s)", inflightCount, existingCount)) } // AllNonPendingPodsScheduled returns true if all pods scheduled. @@ -225,7 +225,7 @@ func (s *Scheduler) Solve(ctx context.Context, pods []*v1.Pod) Results { q.Push(pod, relaxed) if relaxed { if err := s.topology.Update(ctx, pod); err != nil { - logging.FromContext(ctx).Errorf("updating topology, %s", err) + ctrl.FromContext(ctx).Error(err, "updating topology") } } } @@ -275,7 +275,8 @@ func (s *Scheduler) add(ctx context.Context, pod *v1.Pod) error { errs = multierr.Append(errs, fmt.Errorf("all available instance types exceed limits for nodepool: %q", nodeClaimTemplate.NodePoolName)) continue } else if len(s.instanceTypes[nodeClaimTemplate.NodePoolName]) != len(instanceTypes) { - logging.FromContext(ctx).With("nodepool", nodeClaimTemplate.NodePoolName).Debugf("%d out of %d instance types were excluded because they would breach limits", + + ctrl.FromContext(ctx).WithValues("nodepool", nodeClaimTemplate.NodePoolName).V(1).Info(fmt.Sprintf("%d out of %d instance types were excluded because they would breach limits", len(s.instanceTypes[nodeClaimTemplate.NodePoolName])-len(instanceTypes), len(s.instanceTypes[nodeClaimTemplate.NodePoolName]))) } } diff --git a/pkg/controllers/provisioning/scheduling/scheduling_benchmark_test.go b/pkg/controllers/provisioning/scheduling/scheduling_benchmark_test.go index 044def37f7..431c962784 100644 --- a/pkg/controllers/provisioning/scheduling/scheduling_benchmark_test.go +++ b/pkg/controllers/provisioning/scheduling/scheduling_benchmark_test.go @@ -29,16 +29,16 @@ import ( "text/tabwriter" "time" + "github.com/go-logr/logr" "github.com/samber/lo" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/tools/record" "k8s.io/utils/clock" fakecr "sigs.k8s.io/controller-runtime/pkg/client/fake" + ctrl "sigs.k8s.io/controller-runtime/pkg/log" - "go.uber.org/zap" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "knative.dev/pkg/logging" "sigs.k8s.io/karpenter/pkg/cloudprovider" "sigs.k8s.io/karpenter/pkg/cloudprovider/fake" @@ -130,7 +130,7 @@ func TestSchedulingProfile(t *testing.T) { func benchmarkScheduler(b *testing.B, instanceCount, podCount int) { // disable logging - ctx = logging.WithLogger(context.Background(), zap.NewNop().Sugar()) + ctx = ctrl.IntoContext(context.Background(), logr.Discard()) nodePool := test.NodePool() instanceTypes := fake.InstanceTypes(instanceCount) diff --git a/pkg/controllers/provisioning/scheduling/suite_test.go b/pkg/controllers/provisioning/scheduling/suite_test.go index 017ca2e2c5..c90992f533 100644 --- a/pkg/controllers/provisioning/scheduling/suite_test.go +++ b/pkg/controllers/provisioning/scheduling/suite_test.go @@ -55,9 +55,10 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . "knative.dev/pkg/logging/testing" "knative.dev/pkg/ptr" + . "sigs.k8s.io/karpenter/pkg/utils/testing" + .
"sigs.k8s.io/karpenter/pkg/test/expectations" ) diff --git a/pkg/controllers/provisioning/scheduling/volumetopology.go b/pkg/controllers/provisioning/scheduling/volumetopology.go index 1b9bb27e42..9ad54ef99c 100644 --- a/pkg/controllers/provisioning/scheduling/volumetopology.go +++ b/pkg/controllers/provisioning/scheduling/volumetopology.go @@ -24,8 +24,8 @@ import ( v1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/types" - "knative.dev/pkg/logging" "sigs.k8s.io/controller-runtime/pkg/client" + ctrl "sigs.k8s.io/controller-runtime/pkg/log" volumeutil "sigs.k8s.io/karpenter/pkg/utils/volume" ) @@ -70,9 +70,9 @@ func (v *VolumeTopology) Inject(ctx context.Context, pod *v1.Pod) error { pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[i].MatchExpressions, requirements...) } - logging.FromContext(ctx). - With("pod", client.ObjectKeyFromObject(pod)). - Debugf("adding requirements derived from pod volumes, %s", requirements) + ctrl.FromContext(ctx). + WithValues("pod", client.ObjectKeyFromObject(pod)). + V(1).Info("adding requirements derived from pod volumes, %s", requirements) return nil } diff --git a/pkg/controllers/provisioning/suite_test.go b/pkg/controllers/provisioning/suite_test.go index 182f4a0374..73bf610c70 100644 --- a/pkg/controllers/provisioning/suite_test.go +++ b/pkg/controllers/provisioning/suite_test.go @@ -32,10 +32,11 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/tools/record" clock "k8s.io/utils/clock/testing" - . "knative.dev/pkg/logging/testing" "knative.dev/pkg/ptr" "sigs.k8s.io/controller-runtime/pkg/client" + . "sigs.k8s.io/karpenter/pkg/utils/testing" + "sigs.k8s.io/karpenter/pkg/apis" "sigs.k8s.io/karpenter/pkg/apis/v1beta1" "sigs.k8s.io/karpenter/pkg/cloudprovider" diff --git a/pkg/controllers/state/cluster.go b/pkg/controllers/state/cluster.go index 0c44e6e27e..2eba5b6a95 100644 --- a/pkg/controllers/state/cluster.go +++ b/pkg/controllers/state/cluster.go @@ -34,9 +34,9 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/utils/clock" - "knative.dev/pkg/logging" "knative.dev/pkg/ptr" "sigs.k8s.io/controller-runtime/pkg/client" + ctrl "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/karpenter/pkg/apis/v1beta1" "sigs.k8s.io/karpenter/pkg/cloudprovider" @@ -93,12 +93,12 @@ func (c *Cluster) Synced(ctx context.Context) (synced bool) { }() nodeClaimList := &v1beta1.NodeClaimList{} if err := c.kubeClient.List(ctx, nodeClaimList); err != nil { - logging.FromContext(ctx).Errorf("checking cluster state sync, %v", err) + ctrl.FromContext(ctx).Error(err, "checking cluster state sync, %v") return false } nodeList := &v1.NodeList{} if err := c.kubeClient.List(ctx, nodeList); err != nil { - logging.FromContext(ctx).Errorf("checking cluster state sync, %v", err) + ctrl.FromContext(ctx).Error(err, "checking cluster state sync, %v") return false } c.mu.RLock() diff --git a/pkg/controllers/state/informer/daemonset.go b/pkg/controllers/state/informer/daemonset.go index 0a07a53def..542e815031 100644 --- a/pkg/controllers/state/informer/daemonset.go +++ b/pkg/controllers/state/informer/daemonset.go @@ -22,7 +22,7 @@ import ( appsv1 "k8s.io/api/apps/v1" "k8s.io/apimachinery/pkg/api/errors" - controllerruntime "sigs.k8s.io/controller-runtime" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/manager" @@ -67,7 +67,7 @@ func (c 
*Controller) Reconcile(ctx context.Context, req reconcile.Request) (reco } func (c *Controller) Builder(_ context.Context, m manager.Manager) operatorcontroller.Builder { - return operatorcontroller.Adapt(controllerruntime. + return operatorcontroller.Adapt(ctrl. NewControllerManagedBy(m). For(&appsv1.DaemonSet{}). WithOptions(controller.Options{MaxConcurrentReconciles: 10}), diff --git a/pkg/controllers/state/informer/node.go b/pkg/controllers/state/informer/node.go index a219e64add..0db05f4b0b 100644 --- a/pkg/controllers/state/informer/node.go +++ b/pkg/controllers/state/informer/node.go @@ -21,10 +21,11 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" - "knative.dev/pkg/logging" + controllerruntime "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" + ctrl "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -51,7 +52,7 @@ func (c *NodeController) Name() string { } func (c *NodeController) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { - ctx = logging.WithLogger(ctx, logging.FromContext(ctx).Named(c.Name()).With("node", req.NamespacedName.Name)) + ctx = ctrl.IntoContext(ctx, ctrl.FromContext(ctx).WithName(c.Name()).WithValues("node", req.NamespacedName.Name)) node := &v1.Node{} if err := c.kubeClient.Get(ctx, req.NamespacedName, node); err != nil { if errors.IsNotFound(err) { diff --git a/pkg/controllers/state/informer/nodeclaim.go b/pkg/controllers/state/informer/nodeclaim.go index f5e348545f..3dd358572d 100644 --- a/pkg/controllers/state/informer/nodeclaim.go +++ b/pkg/controllers/state/informer/nodeclaim.go @@ -20,10 +20,10 @@ import ( "context" "k8s.io/apimachinery/pkg/api/errors" - "knative.dev/pkg/logging" controllerruntime "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" + ctrl "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -51,7 +51,7 @@ func (c *NodeClaimController) Name() string { } func (c *NodeClaimController) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { - ctx = logging.WithLogger(ctx, logging.FromContext(ctx).Named(c.Name()).With("nodeclaim", req.NamespacedName.Name)) + ctx = ctrl.IntoContext(ctx, ctrl.FromContext(ctx).WithName(c.Name()).WithValues("nodeclaim", req.NamespacedName.Name)) nodeClaim := &v1beta1.NodeClaim{} if err := c.kubeClient.Get(ctx, req.NamespacedName, nodeClaim); err != nil { if errors.IsNotFound(err) { diff --git a/pkg/controllers/state/informer/nodepool.go b/pkg/controllers/state/informer/nodepool.go index 8e5db559f3..71762f7816 100644 --- a/pkg/controllers/state/informer/nodepool.go +++ b/pkg/controllers/state/informer/nodepool.go @@ -19,7 +19,7 @@ package informer import ( "context" - controllerruntime "sigs.k8s.io/controller-runtime" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/event" @@ -58,7 +58,7 @@ func (c *NodePoolController) Reconcile(_ context.Context, _ *v1beta1.NodePool) ( } func (c *NodePoolController) Builder(_ context.Context, m manager.Manager) operatorcontroller.Builder { - return operatorcontroller.Adapt(controllerruntime. + return operatorcontroller.Adapt(ctrl. NewControllerManagedBy(m). 
For(&v1beta1.NodePool{}). WithOptions(controller.Options{MaxConcurrentReconciles: 10}). diff --git a/pkg/controllers/state/informer/pod.go b/pkg/controllers/state/informer/pod.go index 6efcd6020e..f1fd619814 100644 --- a/pkg/controllers/state/informer/pod.go +++ b/pkg/controllers/state/informer/pod.go @@ -22,10 +22,10 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" - "knative.dev/pkg/logging" controllerruntime "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" + ctrl "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -53,7 +53,8 @@ func (c *PodController) Name() string { } func (c *PodController) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { - ctx = logging.WithLogger(ctx, logging.FromContext(ctx).Named(c.Name()).With("pod", req.NamespacedName)) + + ctx = ctrl.IntoContext(ctx, ctrl.FromContext(ctx).WithName(c.Name()).WithValues("pod", req.NamespacedName)) pod := &v1.Pod{} if err := c.kubeClient.Get(ctx, req.NamespacedName, pod); err != nil { if errors.IsNotFound(err) { diff --git a/pkg/controllers/state/suite_test.go b/pkg/controllers/state/suite_test.go index a62a7ec57e..12336cbf48 100644 --- a/pkg/controllers/state/suite_test.go +++ b/pkg/controllers/state/suite_test.go @@ -49,7 +49,8 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . "knative.dev/pkg/logging/testing" + + . "sigs.k8s.io/karpenter/pkg/utils/testing" . "sigs.k8s.io/karpenter/pkg/test/expectations" ) diff --git a/pkg/operator/controller/controller.go b/pkg/operator/controller/controller.go index 2284302bd5..2d6bd729aa 100644 --- a/pkg/operator/controller/controller.go +++ b/pkg/operator/controller/controller.go @@ -19,7 +19,7 @@ package controller import ( "context" - controllerruntime "sigs.k8s.io/controller-runtime" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" ) @@ -40,18 +40,18 @@ type Controller interface { } // Builder is a struct, that when complete, registers the passed reconciler with the manager stored -// insider of the builder. Typed reference implementations, see controllerruntime.Builder +// insider of the builder. 
Typed reference implementations, see ctrl.Builder type Builder interface { // Complete builds a builder by registering the Reconciler with the manager Complete(Reconciler) error } -// Adapter adapts a controllerruntime.Builder into the Builder interface +// Adapter adapts a ctrl.Builder into the Builder interface type Adapter struct { - builder *controllerruntime.Builder + builder *ctrl.Builder } -func Adapt(builder *controllerruntime.Builder) Builder { +func Adapt(builder *ctrl.Builder) Builder { return &Adapter{ builder: builder, } diff --git a/pkg/operator/controller/singleton.go b/pkg/operator/controller/singleton.go index 5471058547..2d87edafe1 100644 --- a/pkg/operator/controller/singleton.go +++ b/pkg/operator/controller/singleton.go @@ -23,7 +23,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "k8s.io/client-go/util/workqueue" - "knative.dev/pkg/logging" + ctrl "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/manager" crmetrics "sigs.k8s.io/controller-runtime/pkg/metrics" "sigs.k8s.io/controller-runtime/pkg/ratelimiter" @@ -79,9 +79,9 @@ func (s *Singleton) initMetrics() { var singletonRequest = reconcile.Request{} func (s *Singleton) Start(ctx context.Context) error { - ctx = logging.WithLogger(ctx, logging.FromContext(ctx).Named(s.Name())) - logging.FromContext(ctx).Infof("starting controller") - defer logging.FromContext(ctx).Infof("stopping controller") + ctx = ctrl.IntoContext(ctx, ctrl.FromContext(ctx).WithName(s.Name())) + ctrl.FromContext(ctx).Info("starting controller") + defer ctrl.FromContext(ctx).Info("stopping controller") for { select { @@ -104,7 +104,7 @@ func (s *Singleton) reconcile(ctx context.Context) time.Duration { case err != nil: reconcileErrors.WithLabelValues(s.Name()).Inc() reconcileTotal.WithLabelValues(s.Name(), labelError).Inc() - logging.FromContext(ctx).Error(err) + ctrl.FromContext(ctx).Error(err, "") return s.rateLimiter.When(singletonRequest) case res.Requeue: reconcileTotal.WithLabelValues(s.Name(), labelRequeue).Inc() diff --git a/pkg/operator/controller/suite_test.go b/pkg/operator/controller/suite_test.go index c6e2c7f9dc..373df845f2 100644 --- a/pkg/operator/controller/suite_test.go +++ b/pkg/operator/controller/suite_test.go @@ -36,7 +36,8 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . "knative.dev/pkg/logging/testing" + + . "sigs.k8s.io/karpenter/pkg/utils/testing" . "sigs.k8s.io/karpenter/pkg/test/expectations" ) diff --git a/pkg/operator/controller/typed.go b/pkg/operator/controller/typed.go index fc7dc89d0a..67a7e12b34 100644 --- a/pkg/operator/controller/typed.go +++ b/pkg/operator/controller/typed.go @@ -22,9 +22,9 @@ import ( "strings" "github.com/samber/lo" - "knative.dev/pkg/logging" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + ctrl "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -62,9 +62,9 @@ func (t *typedDecorator[T]) Name() string { func (t *typedDecorator[T]) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { obj := reflect.New(reflect.TypeOf(*new(T)).Elem()).Interface().(T) // Create a new pointer to a client.Object - ctx = logging.WithLogger(ctx, logging.FromContext(ctx). - Named(t.typedController.Name()). - With( + ctx = ctrl.IntoContext(ctx, ctrl.FromContext(ctx). + WithName(t.typedController.Name()). 
+ WithValues( strings.ToLower(lo.Must(apiutil.GVKForObject(obj, scheme.Scheme)).Kind), lo.Ternary(req.NamespacedName.Namespace != "", req.NamespacedName.String(), req.Name), ), diff --git a/pkg/operator/logging/logging.go b/pkg/operator/logging/logging.go index ef058c6689..7e02682213 100644 --- a/pkg/operator/logging/logging.go +++ b/pkg/operator/logging/logging.go @@ -33,6 +33,7 @@ import ( "k8s.io/klog/v2" "knative.dev/pkg/changeset" "knative.dev/pkg/logging" + "knative.dev/pkg/logging/logkey" ctrl "sigs.k8s.io/controller-runtime/pkg/log" diff --git a/pkg/operator/operator.go b/pkg/operator/operator.go index 64a1320c68..4fc671d1c2 100644 --- a/pkg/operator/operator.go +++ b/pkg/operator/operator.go @@ -29,10 +29,12 @@ import ( "github.com/prometheus/client_golang/prometheus" coordinationv1 "k8s.io/api/coordination/v1" "knative.dev/pkg/changeset" + controllerruntime "sigs.k8s.io/controller-runtime" crmetrics "sigs.k8s.io/controller-runtime/pkg/metrics" "sigs.k8s.io/karpenter/pkg/metrics" + "github.com/go-logr/logr" "github.com/go-logr/zapr" "github.com/samber/lo" v1 "k8s.io/api/core/v1" @@ -42,16 +44,16 @@ import ( "k8s.io/client-go/util/flowcontrol" "k8s.io/utils/clock" knativeinjection "knative.dev/pkg/injection" - knativelogging "knative.dev/pkg/logging" "knative.dev/pkg/signals" "knative.dev/pkg/system" "knative.dev/pkg/webhook" - controllerruntime "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/healthz" + ctrl "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/metrics/server" + envp "sigs.k8s.io/controller-runtime/tools/setup-envtest/env" "sigs.k8s.io/karpenter/pkg/apis/v1beta1" "sigs.k8s.io/karpenter/pkg/events" @@ -68,13 +70,17 @@ const ( component = "controller" ) -var BuildInfo = prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: metrics.Namespace, - Name: "build_info", - Help: "A metric with a constant '1' value labeled by version from which karpenter was built.", - }, - []string{"version", "goversion", "goarch", "commit"}, +var ( + env *envp.Env + BuildInfo = prometheus.NewGaugeVec( + + prometheus.GaugeOpts{ + Namespace: metrics.Namespace, + Name: "build_info", + Help: "A metric with a constant '1' value labeled by version from which karpenter was built.", + }, + []string{"version", "goversion", "goarch", "commit"}, + ) ) // Version is the karpenter app version injected during compilation @@ -130,10 +136,10 @@ func NewOperator() (context.Context, *Operator) { // Logging logger := logging.NewLogger(ctx, component) - ctx = knativelogging.WithLogger(ctx, logger) + ctx = logr.NewContext(context.TODO(), env.Log.WithName(component)) logging.ConfigureGlobalLoggers(ctx) - knativelogging.FromContext(ctx).With("version", Version).Debugf("discovered karpenter version") + ctrl.FromContext(ctx).WithValues("version", Version).V(1).Info("discovered karpenter version") // Manager mgrOpts := controllerruntime.Options{ @@ -149,8 +155,7 @@ func NewOperator() (context.Context, *Operator) { }, HealthProbeBindAddress: fmt.Sprintf(":%d", options.FromContext(ctx).HealthProbePort), BaseContext: func() context.Context { - ctx := context.Background() - ctx = knativelogging.WithLogger(ctx, logger) + ctx = logr.NewContext(context.Background(), env.Log) ctx = injection.WithOptionsOrDie(ctx, options.Injectables...) 
return ctx }, @@ -229,7 +234,7 @@ func (o *Operator) Start(ctx context.Context) { lo.Must0(o.Manager.Start(ctx)) }() if options.FromContext(ctx).DisableWebhook { - knativelogging.FromContext(ctx).Infof("webhook disabled") + ctrl.FromContext(ctx).Info("webhook disabled") } else { wg.Add(1) go func() { diff --git a/pkg/operator/options/suite_test.go b/pkg/operator/options/suite_test.go index 906e1677eb..ef9307fb8e 100644 --- a/pkg/operator/options/suite_test.go +++ b/pkg/operator/options/suite_test.go @@ -26,7 +26,8 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "github.com/samber/lo" - . "knative.dev/pkg/logging/testing" + + . "sigs.k8s.io/karpenter/pkg/utils/testing" "sigs.k8s.io/karpenter/pkg/operator/options" "sigs.k8s.io/karpenter/pkg/test" diff --git a/pkg/scheduling/volumeusage.go b/pkg/scheduling/volumeusage.go index 83cbba6d40..bb6bc35d08 100644 --- a/pkg/scheduling/volumeusage.go +++ b/pkg/scheduling/volumeusage.go @@ -28,8 +28,8 @@ import ( "k8s.io/apimachinery/pkg/util/sets" csitranslation "k8s.io/csi-translation-lib" "k8s.io/csi-translation-lib/plugins" - "knative.dev/pkg/logging" "sigs.k8s.io/controller-runtime/pkg/client" + ctrl "sigs.k8s.io/controller-runtime/pkg/log" volumeutil "sigs.k8s.io/karpenter/pkg/utils/volume" ) @@ -87,7 +87,7 @@ func GetVolumes(ctx context.Context, kubeClient client.Client, pod *v1.Pod) (Vol // computing limits, otherwise Karpenter may never be able to update its cluster state. if err != nil { if errors.IsNotFound(err) { - logging.FromContext(ctx).With("pod", pod.Name, "volume", volume.Name).Errorf("failed updating volume limits for volume, %w", err) + ctrl.FromContext(ctx).WithValues("pod", pod.Name, "volume", volume.Name).Error(err, "failed updating volume limits for volume") continue } return nil, fmt.Errorf("failed updating volume limits, %w", err) @@ -131,7 +131,7 @@ func resolveDriver(ctx context.Context, kubeClient client.Client, pod *v1.Pod, v // In either of these cases, a PV must have been previously bound to the PVC and has since been removed. We can // ignore this PVC while computing limits and continue. if storageClassName == "" { - logging.FromContext(ctx).With("pod", pod.Name, "volume", volumeName, "pvc", pvc.Name).Errorf("failed updating volume limits for volume with unbound PVC, no storage class specified") + ctrl.FromContext(ctx).WithValues("pod", pod.Name, "volume", volumeName, "pvc", pvc.Name).Error(nil, "failed updating volume limits for volume with unbound PVC, no storage class specified") return "", nil } @@ -142,7 +142,7 @@ func resolveDriver(ctx context.Context, kubeClient client.Client, pod *v1.Pod, v // 2. The StorageClass never existed and was used to bind the PVC to an existing PV, but that PV was removed // In either of these cases, we should ignore the PVC while computing limits and continue.
if errors.IsNotFound(err) { - logging.FromContext(ctx).With("pod", pod.Name, "volume", volumeName, "pvc", pvc.Name, "storageclass", storageClassName).Errorf("failed updating volume limits for volume with unbound PVC, %w", err) + ctrl.FromContext(ctx).WithValues("pod", pod.Name, "volume", volumeName, "pvc", pvc.Name, "storageclass", storageClassName).Error(err, "failed updating volume limits for volume with unbound PVC") return "", nil } return "", err diff --git a/pkg/utils/nodeclaim/suite_test.go b/pkg/utils/nodeclaim/suite_test.go index b8087a676b..93927290a8 100644 --- a/pkg/utils/nodeclaim/suite_test.go +++ b/pkg/utils/nodeclaim/suite_test.go @@ -26,12 +26,12 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - . "knative.dev/pkg/logging/testing" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" "sigs.k8s.io/karpenter/pkg/apis" "sigs.k8s.io/karpenter/pkg/operator/scheme" . "sigs.k8s.io/karpenter/pkg/test/expectations" + . "sigs.k8s.io/karpenter/pkg/utils/testing" "sigs.k8s.io/karpenter/pkg/apis/v1beta1" "sigs.k8s.io/karpenter/pkg/scheduling" diff --git a/pkg/utils/testing/util.go b/pkg/utils/testing/util.go new file mode 100644 index 0000000000..ba954ded5c --- /dev/null +++ b/pkg/utils/testing/util.go @@ -0,0 +1,41 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package testing + +import ( + "context" + + "go.uber.org/zap" + "go.uber.org/zap/zaptest" + + "knative.dev/pkg/logging" +) + +// TestLogger gets a logger to use in unit and end to end tests +func TestLogger(t zaptest.TestingT) *zap.SugaredLogger { + opts := zaptest.WrapOptions( + zap.AddCaller(), + zap.Development(), + ) + + return zaptest.NewLogger(t, opts).Sugar() +} + +// TestContextWithLogger returns a context with a logger to be used in tests +func TestContextWithLogger(t zaptest.TestingT) context.Context { + return logging.WithLogger(context.Background(), TestLogger(t)) +} diff --git a/pkg/webhooks/webhooks.go b/pkg/webhooks/webhooks.go index 341ffbbe9b..9c7dd17fdb 100644 --- a/pkg/webhooks/webhooks.go +++ b/pkg/webhooks/webhooks.go @@ -24,8 +24,8 @@ import ( "net/http" "strings" + "github.com/go-logr/logr" "github.com/samber/lo" - "go.uber.org/zap" "golang.org/x/sync/errgroup" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/rest" @@ -41,6 +41,9 @@ import ( "knative.dev/pkg/webhook/resourcesemantics" "knative.dev/pkg/webhook/resourcesemantics/validation" "sigs.k8s.io/controller-runtime/pkg/healthz" + ctrl "sigs.k8s.io/controller-runtime/pkg/log" + + envp "sigs.k8s.io/controller-runtime/tools/setup-envtest/env" "sigs.k8s.io/karpenter/pkg/apis/v1beta1" "sigs.k8s.io/karpenter/pkg/operator/logging" @@ -49,10 +52,13 @@ import ( const component = "webhook" -var Resources = map[schema.GroupVersionKind]resourcesemantics.GenericCRD{ - v1beta1.SchemeGroupVersion.WithKind("NodePool"): &v1beta1.NodePool{}, - v1beta1.SchemeGroupVersion.WithKind("NodeClaim"): &v1beta1.NodeClaim{}, -} +var ( + env *envp.Env + Resources = map[schema.GroupVersionKind]resourcesemantics.GenericCRD{ + v1beta1.SchemeGroupVersion.WithKind("NodePool"): &v1beta1.NodePool{}, + v1beta1.SchemeGroupVersion.WithKind("NodeClaim"): &v1beta1.NodeClaim{}, + } +) func NewWebhooks() []knativeinjection.ControllerConstructor { return []knativeinjection.ControllerConstructor{ @@ -87,7 +93,7 @@ func NewConfigValidationWebhook(ctx context.Context, _ configmap.Watcher) *contr func Start(ctx context.Context, cfg *rest.Config, ctors ...knativeinjection.ControllerConstructor) { ctx, startInformers := knativeinjection.EnableInjectionOrDie(ctx, cfg) logger := logging.NewLogger(ctx, component) - ctx = knativelogging.WithLogger(ctx, logger) + ctx = logr.NewContext(ctx, env.Log.WithName(component)) cmw := sharedmain.SetupConfigMapWatchOrDie(ctx, knativelogging.FromContext(ctx)) controllers, webhooks := sharedmain.ControllersAndWebhooksFromCtors(ctx, cmw, ctors...) @@ -96,8 +102,9 @@ func Start(ctx context.Context, cfg *rest.Config, ctors ...knativeinjection.Cont // So make sure that we have synchronized our configuration state before launching the // webhooks, so that things are properly initialized. 
logger.Info("Starting configuration manager...") + log := ctrl.FromContext(ctx) if err := cmw.Start(ctx.Done()); err != nil { - knativelogging.FromContext(ctx).Fatalw("Failed to start configuration manager", zap.Error(err)) + log.Error(err, "Failed to start configuration manager") } // If we have one or more admission controllers, then start the webhook @@ -118,7 +125,7 @@ func Start(ctx context.Context, cfg *rest.Config, ctors ...knativeinjection.Cont wh, err = webhook.New(ctx, webhooks) if err != nil { - knativelogging.FromContext(ctx).Fatalw("Failed to create webhook", zap.Error(err)) + log.Error(err, "Failed to create webhook") } eg.Go(func() error { return wh.Run(ctx.Done()) @@ -132,7 +139,7 @@ func Start(ctx context.Context, cfg *rest.Config, ctors ...knativeinjection.Cont if wh != nil { wh.InformersHaveSynced() } - knativelogging.FromContext(ctx).Info("Starting controllers...") + log.Info("Starting controllers...") eg.Go(func() error { return controller.StartAll(ctx, controllers...) }) @@ -142,7 +149,7 @@ func Start(ctx context.Context, cfg *rest.Config, ctors ...knativeinjection.Cont // Don't forward ErrServerClosed as that indicates we're already shutting down. if err := eg.Wait(); err != nil && !errors.Is(err, http.ErrServerClosed) { - knativelogging.FromContext(ctx).Errorw("Error while running server", zap.Error(err)) + log.Error(err, "Error while running server") } }
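In pkg/operator/operator.go and pkg/webhooks/webhooks.go above, the context logger is read from a package-level env *envp.Env (from setup-envtest) that no hunk in this patch assigns, NewOperator rebuilds the logging context from context.TODO() rather than the ctx it already holds, and the former Fatalw calls become non-fatal Error calls. A hedged sketch of one way to derive the same logr.Logger from the zap logger these files already construct, reusing the zapr adapter already imported in operator.go; it assumes logging.NewLogger returns a *zap.SugaredLogger, which this patch does not show:

package main

import (
	"context"

	"github.com/go-logr/logr"
	"github.com/go-logr/zapr"
	"go.uber.org/zap"
)

func main() {
	// Hypothetical stand-in for logging.NewLogger(ctx, component); the real
	// constructor lives outside this patch.
	logger := zap.NewExample().Sugar()

	// Keep the incoming context and adapt the existing zap core with zapr
	// instead of reading env.Log, which is never initialized in these hunks.
	ctx := context.Background()
	ctx = logr.NewContext(ctx, zapr.NewLogger(logger.Desugar()).WithName("controller"))

	logr.FromContextOrDiscard(ctx).WithValues("version", "unknown").V(1).Info("discovered karpenter version")
}

The same wiring would serve webhooks.go, where the "webhook" component name replaces "controller"; whether the lost fatal-exit behavior of Fatalw should be restored (for example by returning the error and exiting in the caller) is a separate decision not settled by this patch.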