Skip to content

Commit

Permalink
Merge pull request cockroachdb#88315 from cockroachdb/blathers/backport-release-22.2-86492
Browse files Browse the repository at this point in the history

release-22.2: bulk: perform meta lookup on range cache miss during index backfill
  • Loading branch information
nvanbenschoten authored Nov 23, 2022
2 parents 1260776 + 56010d1 commit fe3ec49
Show file tree
Hide file tree
Showing 2 changed files with 16 additions and 20 deletions.
9 changes: 4 additions & 5 deletions pkg/kv/bulk/sst_batcher.go
Original file line number Diff line number Diff line change
Expand Up @@ -360,15 +360,14 @@ func (b *SSTBatcher) flushIfNeeded(ctx context.Context, nextKey roachpb.Key) err
if !b.flushKeyChecked && b.rc != nil {
b.flushKeyChecked = true
if k, err := keys.Addr(nextKey); err != nil {
log.Warningf(ctx, "failed to get RKey for flush key lookup")
log.Warningf(ctx, "failed to get RKey for flush key lookup: %v", err)
} else {
r := b.rc.GetCached(ctx, k, false /* inverted */)
if r != nil {
if r, err := b.rc.Lookup(ctx, k); err != nil {
log.Warningf(ctx, "failed to lookup range cache entry for key %v: %v", k, err)
} else {
k := r.Desc().EndKey.AsRawKey()
b.flushKey = k
log.VEventf(ctx, 3, "%s building sstable that will flush before %v", b.name, k)
} else {
log.VEventf(ctx, 2, "%s no cached range desc available to determine sst flush key", b.name)
}
}
}
Expand Down
27 changes: 12 additions & 15 deletions pkg/kv/bulk/sst_batcher_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -277,27 +277,24 @@ func runTestImport(t *testing.T, batchSizeValue int64) {
return encoding.EncodeStringAscending(append([]byte{}, prefix...), fmt.Sprintf("k%d", i))
}

t.Logf("splitting at %s and %s", key(split1), key(split2))
t.Logf("splitting at %s", key(split1))
require.NoError(t, kvDB.AdminSplit(ctx, key(split1), hlc.MaxTimestamp /* expirationTime */))
require.NoError(t, kvDB.AdminSplit(ctx, key(split2), hlc.MaxTimestamp /* expirationTime */))

// We want to make sure our range-aware batching knows about one of our
// splits to exercise that codepath, but we also want to make sure we
// splits to exercise that code path, but we also want to make sure we
// still handle an unexpected split, so we make our own range cache and
// only populate it with one of our two splits.
mockCache := rangecache.NewRangeCache(s.ClusterSettings(), nil,
// populate it after the first split but before the second split.
ds := s.DistSenderI().(*kvcoord.DistSender)
mockCache := rangecache.NewRangeCache(s.ClusterSettings(), ds,
func() int64 { return 2 << 10 }, s.Stopper(), s.TracerI().(*tracing.Tracer))
addr, err := keys.Addr(key(0))
require.NoError(t, err)

tok, err := s.DistSenderI().(*kvcoord.DistSender).RangeDescriptorCache().LookupWithEvictionToken(
ctx, addr, rangecache.EvictionToken{}, false)
require.NoError(t, err)

r := roachpb.RangeInfo{
Desc: *tok.Desc(),
for _, k := range []int{0, split1} {
ent, err := ds.RangeDescriptorCache().Lookup(ctx, keys.MustAddr(key(k)))
require.NoError(t, err)
mockCache.Insert(ctx, roachpb.RangeInfo{Desc: *ent.Desc()})
}
mockCache.Insert(ctx, r)

t.Logf("splitting at %s", key(split2))
require.NoError(t, kvDB.AdminSplit(ctx, key(split2), hlc.MaxTimestamp /* expirationTime */))

ts := hlc.Timestamp{WallTime: 100}
mem := mon.NewUnlimitedMonitor(ctx, "lots", mon.MemoryResource, nil, nil, 0, nil)
Expand Down

0 comments on commit fe3ec49

Please sign in to comment.