From 50a491d4bae701fbba31293810864fb79189250f Mon Sep 17 00:00:00 2001
From: Erik Grinaker
Date: Sat, 25 Sep 2021 17:46:58 +0000
Subject: [PATCH] rangecache: improve tracing of range descriptor lookups

Plumb a Tracer into the RangeCache so that range descriptor lookups
can open a proper child span via tracing.EnsureChildSpan() rather
than tracing.ForkSpan(), and log lookups as verbose events so that
they attach to the active trace.

Release note: None
---
 pkg/kv/bulk/sst_batcher_test.go             |  3 ++-
 pkg/kv/kvclient/kvcoord/dist_sender.go      |  3 ++-
 pkg/kv/kvclient/kvcoord/dist_sender_test.go |  4 ++--
 pkg/kv/kvclient/rangecache/range_cache.go   | 19 ++++++++-------
 .../kvclient/rangecache/range_cache_test.go | 18 +++++++-------
 pkg/sql/distsql_physical_planner_test.go    |  2 +-
 pkg/sql/rowexec/tablereader_test.go         | 24 ++++++++++++-------
 7 files changed, 42 insertions(+), 31 deletions(-)

diff --git a/pkg/kv/bulk/sst_batcher_test.go b/pkg/kv/bulk/sst_batcher_test.go
index f1253af02a13..2752ae1f694f 100644
--- a/pkg/kv/bulk/sst_batcher_test.go
+++ b/pkg/kv/bulk/sst_batcher_test.go
@@ -163,7 +163,8 @@ func runTestImport(t *testing.T, batchSizeValue int64) {
 		// splits to exercise that codepath, but we also want to make sure we
 		// still handle an unexpected split, so we make our own range cache and
 		// only populate it with one of our two splits.
-		mockCache := rangecache.NewRangeCache(s.ClusterSettings(), nil, func() int64 { return 2 << 10 }, s.Stopper())
+		mockCache := rangecache.NewRangeCache(s.ClusterSettings(), nil,
+			func() int64 { return 2 << 10 }, s.Stopper(), s.Tracer().(*tracing.Tracer))
 		addr, err := keys.Addr(key(0))
 		if err != nil {
 			t.Fatal(err)
diff --git a/pkg/kv/kvclient/kvcoord/dist_sender.go b/pkg/kv/kvclient/kvcoord/dist_sender.go
index 6a86af4e871d..548c10416af4 100644
--- a/pkg/kv/kvclient/kvcoord/dist_sender.go
+++ b/pkg/kv/kvclient/kvcoord/dist_sender.go
@@ -395,7 +395,8 @@ func NewDistSender(cfg DistSenderConfig) *DistSender {
 	getRangeDescCacheSize := func() int64 {
 		return rangeDescriptorCacheSize.Get(&ds.st.SV)
 	}
-	ds.rangeCache = rangecache.NewRangeCache(ds.st, rdb, getRangeDescCacheSize, cfg.RPCContext.Stopper)
+	ds.rangeCache = rangecache.NewRangeCache(ds.st, rdb, getRangeDescCacheSize,
+		cfg.RPCContext.Stopper, cfg.AmbientCtx.Tracer)
 	if tf := cfg.TestingKnobs.TransportFactory; tf != nil {
 		ds.transportFactory = tf
 	} else {
diff --git a/pkg/kv/kvclient/kvcoord/dist_sender_test.go b/pkg/kv/kvclient/kvcoord/dist_sender_test.go
index 5b845e66221e..c127b697ca02 100644
--- a/pkg/kv/kvclient/kvcoord/dist_sender_test.go
+++ b/pkg/kv/kvclient/kvcoord/dist_sender_test.go
@@ -4391,7 +4391,7 @@ func TestSendToReplicasSkipsStaleReplicas(t *testing.T) {
 			getRangeDescCacheSize := func() int64 {
 				return 1 << 20
 			}
-			rc := rangecache.NewRangeCache(st, nil /* db */, getRangeDescCacheSize, stopper)
+			rc := rangecache.NewRangeCache(st, nil /* db */, getRangeDescCacheSize, stopper, st.Tracer)
 			rc.Insert(ctx, roachpb.RangeInfo{
 				Desc: desc,
 				Lease: roachpb.Lease{
@@ -4432,7 +4432,7 @@ func TestSendToReplicasSkipsStaleReplicas(t *testing.T) {
 			}

 			cfg := DistSenderConfig{
-				AmbientCtx: log.AmbientContext{Tracer: tracing.NewTracer()},
+				AmbientCtx: log.AmbientContext{Tracer: st.Tracer},
 				Clock:      clock,
 				NodeDescs:  ns,
 				RPCContext: rpcContext,
diff --git a/pkg/kv/kvclient/rangecache/range_cache.go b/pkg/kv/kvclient/rangecache/range_cache.go
index 1cfaea7c296e..01433913c990 100644
--- a/pkg/kv/kvclient/rangecache/range_cache.go
+++ b/pkg/kv/kvclient/rangecache/range_cache.go
@@ -77,6 +77,7 @@ type RangeDescriptorDB interface {
 type RangeCache struct {
 	st      *cluster.Settings
 	stopper *stop.Stopper
+	tracer  *tracing.Tracer
 	// RangeDescriptorDB is used to retrieve range descriptors from the
 	// database, which will be cached by this structure.
 	db RangeDescriptorDB
@@ -175,9 +176,13 @@ func makeLookupRequestKey(
 // NewRangeCache returns a new RangeCache which uses the given RangeDescriptorDB
 // as the underlying source of range descriptors.
 func NewRangeCache(
-	st *cluster.Settings, db RangeDescriptorDB, size func() int64, stopper *stop.Stopper,
+	st *cluster.Settings,
+	db RangeDescriptorDB,
+	size func() int64,
+	stopper *stop.Stopper,
+	tracer *tracing.Tracer,
 ) *RangeCache {
-	rdc := &RangeCache{st: st, db: db, stopper: stopper}
+	rdc := &RangeCache{st: st, db: db, stopper: stopper, tracer: tracer}
 	rdc.rangeCache.cache = cache.NewOrderedCache(cache.Config{
 		Policy: cache.CacheLRU,
 		ShouldEvict: func(n int, _, _ interface{}) bool {
@@ -634,9 +639,7 @@ func (rc *RangeCache) tryLookup(
 		return returnToken, nil
 	}

-	if log.V(2) {
-		log.Infof(ctx, "lookup range descriptor: key=%s (reverse: %t)", key, useReverseScan)
-	}
+	log.VEventf(ctx, 2, "looking up range descriptor: key=%s", key)

 	var prevDesc *roachpb.RangeDescriptor
 	if evictToken.Valid() {
@@ -646,7 +649,7 @@ func (rc *RangeCache) tryLookup(
 	resC, leader := rc.lookupRequests.DoChan(requestKey, func() (interface{}, error) {
 		var lookupRes EvictionToken
 		if err := rc.stopper.RunTaskWithErr(ctx, "rangecache: range lookup", func(ctx context.Context) error {
-			ctx, reqSpan := tracing.ForkSpan(ctx, "range lookup")
+			ctx, reqSpan := tracing.EnsureChildSpan(ctx, rc.tracer, "range lookup")
 			defer reqSpan.Finish()
 			// Clear the context's cancelation. This request services potentially many
 			// callers waiting for its result, and using the flight's leader's
@@ -761,9 +764,9 @@ func (rc *RangeCache) tryLookup(
 			s = res.Val.(EvictionToken).String()
 		}
 		if res.Shared {
-			log.Eventf(ctx, "looked up range descriptor with shared request: %s", s)
+			log.VEventf(ctx, 2, "looked up range descriptor with shared request: %s", s)
 		} else {
-			log.Eventf(ctx, "looked up range descriptor: %s", s)
+			log.VEventf(ctx, 2, "looked up range descriptor: %s", s)
 		}
 		if res.Err != nil {
 			return EvictionToken{}, res.Err
diff --git a/pkg/kv/kvclient/rangecache/range_cache_test.go b/pkg/kv/kvclient/rangecache/range_cache_test.go
index a3c5b1d010ca..acc054381a6e 100644
--- a/pkg/kv/kvclient/rangecache/range_cache_test.go
+++ b/pkg/kv/kvclient/rangecache/range_cache_test.go
@@ -258,7 +258,7 @@ func initTestDescriptorDB(t *testing.T) *testDescriptorDB {
 	}
 	// TODO(andrei): don't leak this Stopper. Someone needs to Stop() it.
 	db.stopper = stop.NewStopper()
-	db.cache = NewRangeCache(st, db, staticSize(2<<10), db.stopper)
+	db.cache = NewRangeCache(st, db, staticSize(2<<10), db.stopper, st.Tracer)
 	return db
 }

@@ -483,7 +483,7 @@ func TestLookupByKeyMin(t *testing.T) {
 	st := cluster.MakeTestingClusterSettings()
 	stopper := stop.NewStopper()
 	defer stopper.Stop(context.Background())
-	cache := NewRangeCache(st, nil, staticSize(2<<10), stopper)
+	cache := NewRangeCache(st, nil, staticSize(2<<10), stopper, st.Tracer)
 	startToMeta2Desc := roachpb.RangeDescriptor{
 		StartKey: roachpb.RKeyMin,
 		EndKey:   keys.RangeMetaKey(roachpb.RKey("a")),
@@ -1011,7 +1011,7 @@ func TestRangeCacheClearOverlapping(t *testing.T) {
 	st := cluster.MakeTestingClusterSettings()
 	stopper := stop.NewStopper()
 	defer stopper.Stop(ctx)
-	cache := NewRangeCache(st, nil, staticSize(2<<10), stopper)
+	cache := NewRangeCache(st, nil, staticSize(2<<10), stopper, st.Tracer)
 	cache.rangeCache.cache.Add(rangeCacheKey(keys.RangeMetaKey(roachpb.RKeyMax)), &CacheEntry{desc: *defDesc})

 	// Now, add a new, overlapping set of descriptors.
@@ -1187,7 +1187,7 @@ func TestRangeCacheClearOlderOverlapping(t *testing.T) {
 			st := cluster.MakeTestingClusterSettings()
 			stopper := stop.NewStopper()
 			defer stopper.Stop(ctx)
-			cache := NewRangeCache(st, nil /* db */, staticSize(2<<10), stopper)
+			cache := NewRangeCache(st, nil /* db */, staticSize(2<<10), stopper, st.Tracer)
 			for _, d := range tc.cachedDescs {
 				cache.Insert(ctx, roachpb.RangeInfo{Desc: d})
 			}
@@ -1239,7 +1239,7 @@ func TestRangeCacheClearOverlappingMeta(t *testing.T) {
 	st := cluster.MakeTestingClusterSettings()
 	stopper := stop.NewStopper()
 	defer stopper.Stop(ctx)
-	cache := NewRangeCache(st, nil, staticSize(2<<10), stopper)
+	cache := NewRangeCache(st, nil, staticSize(2<<10), stopper, st.Tracer)

 	cache.Insert(ctx, roachpb.RangeInfo{Desc: firstDesc},
 		roachpb.RangeInfo{Desc: restDesc})
@@ -1277,7 +1277,7 @@ func TestGetCachedRangeDescriptorInverted(t *testing.T) {
 	st := cluster.MakeTestingClusterSettings()
 	stopper := stop.NewStopper()
 	defer stopper.Stop(ctx)
-	cache := NewRangeCache(st, nil, staticSize(2<<10), stopper)
+	cache := NewRangeCache(st, nil, staticSize(2<<10), stopper, st.Tracer)
 	for _, rd := range testData {
 		cache.Insert(ctx, roachpb.RangeInfo{
 			Desc: rd,
@@ -1415,7 +1415,7 @@ func TestRangeCacheGeneration(t *testing.T) {
 			st := cluster.MakeTestingClusterSettings()
 			stopper := stop.NewStopper()
 			defer stopper.Stop(ctx)
-			cache := NewRangeCache(st, nil, staticSize(2<<10), stopper)
+			cache := NewRangeCache(st, nil, staticSize(2<<10), stopper, st.Tracer)
 			cache.Insert(ctx, roachpb.RangeInfo{Desc: *descAM2},
 				roachpb.RangeInfo{Desc: *descMZ4})
 			cache.Insert(ctx, roachpb.RangeInfo{Desc: *tc.insertDesc})
@@ -1481,7 +1481,7 @@ func TestRangeCacheEvictAndReplace(t *testing.T) {
 	st := cluster.MakeTestingClusterSettings()
 	stopper := stop.NewStopper()
 	defer stopper.Stop(ctx)
-	cache := NewRangeCache(st, nil, staticSize(2<<10), stopper)
+	cache := NewRangeCache(st, nil, staticSize(2<<10), stopper, st.Tracer)

 	ri := roachpb.RangeInfo{Desc: desc1}
 	cache.Insert(ctx, ri)
@@ -1592,7 +1592,7 @@ func TestRangeCacheUpdateLease(t *testing.T) {
 	st := cluster.MakeTestingClusterSettings()
 	stopper := stop.NewStopper()
 	defer stopper.Stop(ctx)
-	cache := NewRangeCache(st, nil, staticSize(2<<10), stopper)
+	cache := NewRangeCache(st, nil, staticSize(2<<10), stopper, st.Tracer)

 	cache.Insert(ctx, roachpb.RangeInfo{
 		Desc: desc1,
diff --git a/pkg/sql/distsql_physical_planner_test.go b/pkg/sql/distsql_physical_planner_test.go
index e0ed8c050a23..b9cd34606035 100644
--- a/pkg/sql/distsql_physical_planner_test.go
+++ b/pkg/sql/distsql_physical_planner_test.go
@@ -266,7 +266,7 @@ func TestDistSQLReceiverUpdatesCaches(t *testing.T) {
 	st := cluster.MakeTestingClusterSettings()
 	stopper := stop.NewStopper()
 	defer stopper.Stop(ctx)
-	rangeCache := rangecache.NewRangeCache(st, nil /* db */, size, stopper)
+	rangeCache := rangecache.NewRangeCache(st, nil /* db */, size, stopper, st.Tracer)
 	r := MakeDistSQLReceiver(
 		ctx,
 		&errOnlyResultWriter{}, /* resultWriter */
diff --git a/pkg/sql/rowexec/tablereader_test.go b/pkg/sql/rowexec/tablereader_test.go
index 88b75c9f37f3..6c62fd3e84e9 100644
--- a/pkg/sql/rowexec/tablereader_test.go
+++ b/pkg/sql/rowexec/tablereader_test.go
@@ -127,13 +127,15 @@ func TestTableReader(t *testing.T) {
 			ts := c.spec
 			ts.Table = *td.TableDesc()

-			evalCtx := tree.MakeTestingEvalContext(s.ClusterSettings())
+			st := s.ClusterSettings()
+			evalCtx := tree.MakeTestingEvalContext(st)
 			defer evalCtx.Stop(ctx)
 			flowCtx := execinfra.FlowCtx{
 				EvalCtx: &evalCtx,
 				Cfg: &execinfra.ServerConfig{
-					Settings:   s.ClusterSettings(),
-					RangeCache: rangecache.NewRangeCache(s.ClusterSettings(), nil, func() int64 { return 2 << 10 }, s.Stopper()),
+					Settings:   st,
+					RangeCache: rangecache.NewRangeCache(st, nil,
+						func() int64 { return 2 << 10 }, s.Stopper(), s.Tracer().(*tracing.Tracer)),
 				},
 				Txn:    kv.NewTxn(ctx, s.DB(), s.NodeID()),
 				NodeID: evalCtx.NodeID,
@@ -372,13 +374,15 @@ func TestLimitScans(t *testing.T) {

 	tableDesc := catalogkv.TestingGetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "t")

-	evalCtx := tree.MakeTestingEvalContext(s.ClusterSettings())
+	st := s.ClusterSettings()
+	evalCtx := tree.MakeTestingEvalContext(st)
 	defer evalCtx.Stop(ctx)
 	flowCtx := execinfra.FlowCtx{
 		EvalCtx: &evalCtx,
 		Cfg: &execinfra.ServerConfig{
-			Settings:   s.ClusterSettings(),
-			RangeCache: rangecache.NewRangeCache(s.ClusterSettings(), nil, func() int64 { return 2 << 10 }, s.Stopper()),
+			Settings:   st,
+			RangeCache: rangecache.NewRangeCache(st, nil,
+				func() int64 { return 2 << 10 }, s.Stopper(), s.Tracer().(*tracing.Tracer)),
 		},
 		Txn:    kv.NewTxn(ctx, kvDB, s.NodeID()),
 		NodeID: evalCtx.NodeID,
@@ -476,7 +480,8 @@ func BenchmarkTableReader(b *testing.B) {
 	s, sqlDB, kvDB := serverutils.StartServer(b, base.TestServerArgs{})
 	defer s.Stopper().Stop(ctx)

-	evalCtx := tree.MakeTestingEvalContext(s.ClusterSettings())
+	st := s.ClusterSettings()
+	evalCtx := tree.MakeTestingEvalContext(st)
 	defer evalCtx.Stop(ctx)

 	const numCols = 2
@@ -492,8 +497,9 @@ func BenchmarkTableReader(b *testing.B) {
 	flowCtx := execinfra.FlowCtx{
 		EvalCtx: &evalCtx,
 		Cfg: &execinfra.ServerConfig{
-			Settings:   s.ClusterSettings(),
-			RangeCache: rangecache.NewRangeCache(s.ClusterSettings(), nil, func() int64 { return 2 << 10 }, s.Stopper()),
+			Settings:   st,
+			RangeCache: rangecache.NewRangeCache(st, nil,
+				func() int64 { return 2 << 10 }, s.Stopper(), s.Tracer().(*tracing.Tracer)),
 		},
 		Txn:    kv.NewTxn(ctx, s.DB(), s.NodeID()),
 		NodeID: evalCtx.NodeID,