diff --git a/pkg/kv/bulk/sst_batcher.go b/pkg/kv/bulk/sst_batcher.go
index 242a63e232c9..7bce386210e0 100644
--- a/pkg/kv/bulk/sst_batcher.go
+++ b/pkg/kv/bulk/sst_batcher.go
@@ -361,15 +361,14 @@ func (b *SSTBatcher) flushIfNeeded(ctx context.Context, nextKey roachpb.Key) err
 	if !b.flushKeyChecked && b.rc != nil {
 		b.flushKeyChecked = true
 		if k, err := keys.Addr(nextKey); err != nil {
-			log.Warningf(ctx, "failed to get RKey for flush key lookup")
+			log.Warningf(ctx, "failed to get RKey for flush key lookup: %v", err)
 		} else {
-			r := b.rc.GetCached(ctx, k, false /* inverted */)
-			if r != nil {
+			if r, err := b.rc.Lookup(ctx, k); err != nil {
+				log.Warningf(ctx, "failed to lookup range cache entry for key %v: %v", k, err)
+			} else {
 				k := r.Desc().EndKey.AsRawKey()
 				b.flushKey = k
 				log.VEventf(ctx, 3, "%s building sstable that will flush before %v", b.name, k)
-			} else {
-				log.VEventf(ctx, 2, "%s no cached range desc available to determine sst flush key", b.name)
 			}
 		}
 	}
diff --git a/pkg/kv/bulk/sst_batcher_test.go b/pkg/kv/bulk/sst_batcher_test.go
index 588c8f5f9779..e29f5a40cd8a 100644
--- a/pkg/kv/bulk/sst_batcher_test.go
+++ b/pkg/kv/bulk/sst_batcher_test.go
@@ -277,27 +277,24 @@ func runTestImport(t *testing.T, batchSizeValue int64) {
 		return encoding.EncodeStringAscending(append([]byte{}, prefix...), fmt.Sprintf("k%d", i))
 	}
 
-	t.Logf("splitting at %s and %s", key(split1), key(split2))
+	t.Logf("splitting at %s", key(split1))
 	require.NoError(t, kvDB.AdminSplit(ctx, key(split1), hlc.MaxTimestamp /* expirationTime */))
-	require.NoError(t, kvDB.AdminSplit(ctx, key(split2), hlc.MaxTimestamp /* expirationTime */))
 
 	// We want to make sure our range-aware batching knows about one of our
-	// splits to exercise that codepath, but we also want to make sure we
+	// splits to exercise that code path, but we also want to make sure we
 	// still handle an unexpected split, so we make our own range cache and
-	// only populate it with one of our two splits.
-	mockCache := rangecache.NewRangeCache(s.ClusterSettings(), nil,
+	// populate it after the first split but before the second split.
+	ds := s.DistSenderI().(*kvcoord.DistSender)
+	mockCache := rangecache.NewRangeCache(s.ClusterSettings(), ds,
 		func() int64 { return 2 << 10 }, s.Stopper(), s.TracerI().(*tracing.Tracer))
-	addr, err := keys.Addr(key(0))
-	require.NoError(t, err)
-
-	tok, err := s.DistSenderI().(*kvcoord.DistSender).RangeDescriptorCache().LookupWithEvictionToken(
-		ctx, addr, rangecache.EvictionToken{}, false)
-	require.NoError(t, err)
-
-	r := roachpb.RangeInfo{
-		Desc: *tok.Desc(),
+	for _, k := range []int{0, split1} {
+		ent, err := ds.RangeDescriptorCache().Lookup(ctx, keys.MustAddr(key(k)))
+		require.NoError(t, err)
+		mockCache.Insert(ctx, roachpb.RangeInfo{Desc: *ent.Desc()})
 	}
-	mockCache.Insert(ctx, r)
+
+	t.Logf("splitting at %s", key(split2))
+	require.NoError(t, kvDB.AdminSplit(ctx, key(split2), hlc.MaxTimestamp /* expirationTime */))
 
 	ts := hlc.Timestamp{WallTime: 100}
 	mem := mon.NewUnlimitedMonitor(ctx, "lots", mon.MemoryResource, nil, nil, 0, nil)
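
Context for the change above: the old GetCached call answers only from entries already in the range cache and returns nothing on a miss, while the new Lookup call resolves a miss through the cache's backing source (which is why the test now builds its mock cache on top of the DistSender rather than nil) before answering, so the batcher can pick a flush key even when it starts with a cold cache. The sketch below is a minimal, self-contained illustration of that difference; the cache, entry, getCached, and lookup names are hypothetical stand-ins, not the rangecache API.

```go
// Minimal sketch (not CockroachDB code) of the cached-vs-lookup behavior
// the patch relies on: getCached reports a miss, lookup resolves the miss
// via a backing source and caches the result.
package main

import (
	"errors"
	"fmt"
)

// entry stands in for a cached range descriptor: the end key of the range
// that contains the looked-up key.
type entry struct {
	endKey string
}

// cache stands in for a range descriptor cache with a backing source.
type cache struct {
	cached map[string]entry                // keys already resolved
	source func(key string) (entry, error) // authoritative lookup on a miss
}

// getCached mirrors the old GetCached path: a miss yields no entry.
func (c *cache) getCached(key string) (entry, bool) {
	e, ok := c.cached[key]
	return e, ok
}

// lookup mirrors the new Lookup path: a miss falls through to the backing
// source and the result is cached for subsequent calls.
func (c *cache) lookup(key string) (entry, error) {
	if e, ok := c.cached[key]; ok {
		return e, nil
	}
	if c.source == nil {
		return entry{}, errors.New("no backing source configured")
	}
	e, err := c.source(key)
	if err != nil {
		return entry{}, err
	}
	c.cached[key] = e
	return e, nil
}

func main() {
	c := &cache{
		cached: map[string]entry{},
		source: func(key string) (entry, error) {
			// Pretend every key lives in a range ending at "k100".
			return entry{endKey: "k100"}, nil
		},
	}

	// Cold cache: the old path finds nothing, so no flush key is chosen.
	if _, ok := c.getCached("k001"); !ok {
		fmt.Println("GetCached: miss, no flush key")
	}

	// The new path resolves the range and yields a usable flush key.
	if e, err := c.lookup("k001"); err == nil {
		fmt.Printf("Lookup: flush before %s\n", e.endKey)
	}
}
```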