diff --git a/compaction.go b/compaction.go index 4c27585064..13c37d6eda 100644 --- a/compaction.go +++ b/compaction.go @@ -537,7 +537,7 @@ func newCompaction(pc *pickedCompaction, opts *Options, bytesCompacted *uint64) // are the grandparent sstables). if c.outputLevel.level+1 < numLevels { c.grandparents = c.version.Overlaps(c.outputLevel.level+1, c.cmp, - c.smallest.UserKey, c.largest.UserKey) + c.smallest.UserKey, c.largest.UserKey, c.largest.IsExclusiveSentinel()) } c.setupInuseKeyRanges() @@ -718,8 +718,8 @@ func newFlush( if opts.FlushSplitBytes > 0 { c.maxOutputFileSize = uint64(opts.Level(0).TargetFileSize) c.maxOverlapBytes = maxGrandparentOverlapBytes(opts, 0) - c.grandparents = c.version.Overlaps(baseLevel, c.cmp, - c.smallest.UserKey, c.largest.UserKey) + c.grandparents = c.version.Overlaps(baseLevel, c.cmp, c.smallest.UserKey, + c.largest.UserKey, c.largest.IsExclusiveSentinel()) adjustGrandparentOverlapBytesForFlush(c, flushingBytes) } @@ -752,7 +752,10 @@ func calculateInuseKeyRanges( } for ; level < numLevels; level++ { - overlaps := v.Overlaps(level, cmp, smallest, largest) + // NB: We always treat `largest` as inclusive for simplicity, because + // there's little consequence to calculating slightly broader in-use key + // ranges. + overlaps := v.Overlaps(level, cmp, smallest, largest, false /* exclusiveEnd */) iter := overlaps.Iter() // We may already have in-use key ranges from higher levels. 
Iterate @@ -898,7 +901,7 @@ func (c *compaction) errorOnUserKeyOverlap(ve *versionEdit) error { if n := len(ve.NewFiles); n > 1 { meta := ve.NewFiles[n-1].Meta prevMeta := ve.NewFiles[n-2].Meta - if prevMeta.Largest.Trailer != InternalKeyRangeDeleteSentinel && + if !prevMeta.Largest.IsExclusiveSentinel() && c.cmp(prevMeta.Largest.UserKey, meta.Smallest.UserKey) >= 0 { return errors.Errorf("pebble: compaction split user key across two sstables: %s in %s and %s", prevMeta.Largest.Pretty(c.formatKey), @@ -1694,6 +1697,8 @@ func (d *DB) maybeScheduleCompactionPicker( // into the same snapshot stripe, a delete-only compaction may delete any // sstables within the range. type deleteCompactionHint struct { + // start and end are user keys specifying a key range [start, end) of + // deleted keys. start []byte end []byte // The level of the file containing the range tombstone(s) when the hint @@ -1742,7 +1747,7 @@ func (h *deleteCompactionHint) canDelete(cmp Compare, m *fileMetadata, snapshots return false } - // The file's keys must be completely contianed within the hint range. + // The file's keys must be completely contained within the hint range. return cmp(h.start, m.Smallest.UserKey) <= 0 && cmp(m.Largest.UserKey, h.end) < 0 } @@ -1854,7 +1859,7 @@ func checkDeleteCompactionHints( // The hint h will be resolved and dropped, regardless of whether // there are any tables that can be deleted. 
for l := h.tombstoneLevel + 1; l < numLevels; l++ { - overlaps := v.Overlaps(l, cmp, h.start, h.end) + overlaps := v.Overlaps(l, cmp, h.start, h.end, true /* exclusiveEnd */) iter := overlaps.Iter() for m := iter.First(); m != nil; m = iter.Next() { if m.Compacting || !h.canDelete(cmp, m, snapshots) || files[m] { diff --git a/compaction_iter.go b/compaction_iter.go index 0c1677c7d5..c93b42bb09 100644 --- a/compaction_iter.go +++ b/compaction_iter.go @@ -5,6 +5,7 @@ package pebble import ( + "fmt" "io" "sort" "strconv" @@ -456,9 +457,9 @@ func (i *compactionIter) skipInStripe() { func (i *compactionIter) iterNext() bool { i.iterKey, i.iterValue = i.iter.Next() - // We should never see a range delete sentinel in the compaction input. - if i.iterKey != nil && i.iterKey.Trailer == InternalKeyRangeDeleteSentinel { - panic("pebble: unexpected range delete sentinel in compaction input") + // We should never see an exclusive sentinel in the compaction input. + if i.iterKey != nil && i.iterKey.IsExclusiveSentinel() { + panic(fmt.Sprintf("pebble: unexpected exclusive sentinel in compaction input, trailer = %x", i.iterKey.Trailer)) } return i.iterKey != nil } diff --git a/compaction_picker.go b/compaction_picker.go index b7a7647084..6a3cd47726 100644 --- a/compaction_picker.go +++ b/compaction_picker.go @@ -217,7 +217,8 @@ func (pc *pickedCompaction) setupInputs(opts *Options, diskAvailBytes uint64) bo // sstables, and then expand those tables to a clean cut. No need to do // this for intra-L0 compactions; outputLevel.files is left empty for those. 
if pc.startLevel.level != pc.outputLevel.level { - pc.outputLevel.files = pc.version.Overlaps(pc.outputLevel.level, pc.cmp, pc.smallest.UserKey, pc.largest.UserKey) + pc.outputLevel.files = pc.version.Overlaps(pc.outputLevel.level, pc.cmp, pc.smallest.UserKey, + pc.largest.UserKey, pc.largest.IsExclusiveSentinel()) pc.outputLevel.files, isCompacting = expandToAtomicUnit(pc.cmp, pc.outputLevel.files, false /* disableIsCompacting */) if isCompacting { return false @@ -303,7 +304,8 @@ func (pc *pickedCompaction) grow(sm, la InternalKey, maxExpandedBytes uint64) bo if pc.outputLevel.files.Empty() { return false } - grow0 := pc.version.Overlaps(pc.startLevel.level, pc.cmp, sm.UserKey, la.UserKey) + grow0 := pc.version.Overlaps(pc.startLevel.level, pc.cmp, sm.UserKey, + la.UserKey, la.IsExclusiveSentinel()) grow0, isCompacting := expandToAtomicUnit(pc.cmp, grow0, false /* disableIsCompacting */) if isCompacting { return false @@ -315,7 +317,8 @@ func (pc *pickedCompaction) grow(sm, la InternalKey, maxExpandedBytes uint64) bo return false } sm1, la1 := manifest.KeyRange(pc.cmp, grow0.Iter()) - grow1 := pc.version.Overlaps(pc.outputLevel.level, pc.cmp, sm1.UserKey, la1.UserKey) + grow1 := pc.version.Overlaps(pc.outputLevel.level, pc.cmp, sm1.UserKey, + la1.UserKey, la1.IsExclusiveSentinel()) grow1, isCompacting = expandToAtomicUnit(pc.cmp, grow1, false /* disableIsCompacting */) if isCompacting { return false @@ -392,12 +395,9 @@ func expandToAtomicUnit( if cmp(prev.Largest.UserKey, cur.Smallest.UserKey) < 0 { break } - if prev.Largest.Trailer == InternalKeyRangeDeleteSentinel { - // The range deletion sentinel key is set for the largest key in a - // table when a range deletion tombstone straddles a table. It - // isn't necessary to include the prev table in the atomic - // compaction unit as prev.largest.UserKey does not actually exist - // in the prev table. 
+ if prev.Largest.IsExclusiveSentinel() { + // The table prev has a largest key indicating that the user key + // prev.largest.UserKey doesn't actually exist in the table. break } // prev.Largest.UserKey == cur.Smallest.UserKey, so we need to @@ -413,12 +413,9 @@ func expandToAtomicUnit( if cmp(cur.Largest.UserKey, next.Smallest.UserKey) < 0 { break } - if cur.Largest.Trailer == InternalKeyRangeDeleteSentinel { - // The range deletion sentinel key is set for the largest key - // in a table when a range deletion tombstone straddles a - // table. It isn't necessary to include the next table in the - // compaction as PeekPrev().Largest.UserKey does not actually - // exist in the table. + if cur.Largest.IsExclusiveSentinel() { + // The table cur has a largest key indicating that the user key + // cur.largest.UserKey doesn't actually exist in the table. break } // cur.Largest.UserKey == next.Smallest.UserKey, so we need to @@ -1169,7 +1166,8 @@ func pickAutoHelper( if pc.startLevel.level == 0 { cmp := opts.Comparer.Compare smallest, largest := manifest.KeyRange(cmp, pc.startLevel.files.Iter()) - pc.startLevel.files = vers.Overlaps(0, cmp, smallest.UserKey, largest.UserKey) + pc.startLevel.files = vers.Overlaps(0, cmp, smallest.UserKey, + largest.UserKey, largest.IsExclusiveSentinel()) if pc.startLevel.files.Empty() { panic("pebble: empty compaction") } @@ -1383,7 +1381,8 @@ func pickManualHelper( pc = newPickedCompaction(opts, vers, manual.level, baseLevel) manual.outputLevel = pc.outputLevel.level cmp := opts.Comparer.Compare - pc.startLevel.files = vers.Overlaps(manual.level, cmp, manual.start.UserKey, manual.end.UserKey) + pc.startLevel.files = vers.Overlaps(manual.level, cmp, manual.start.UserKey, + manual.end.UserKey, manual.end.IsExclusiveSentinel()) if pc.startLevel.files.Empty() { // Nothing to do return nil @@ -1415,7 +1414,7 @@ func (p *compactionPickerByScore) pickReadTriggeredCompaction( func pickReadTriggeredCompactionHelper( p *compactionPickerByScore, rc 
*readCompaction, env compactionEnv) (pc *pickedCompaction) { cmp := p.opts.Comparer.Compare - overlapSlice := p.vers.Overlaps(rc.level, cmp, rc.start, rc.end) + overlapSlice := p.vers.Overlaps(rc.level, cmp, rc.start, rc.end, false /* exclusiveEnd */) if overlapSlice.Empty() { // If there is no overlap, then the file with the key range // must have been compacted away. So, we don't proceed to @@ -1448,8 +1447,8 @@ func pickReadTriggeredCompactionHelper( // Prevent read compactions which are too wide. outputOverlaps := pc.version.Overlaps( - pc.outputLevel.level, pc.cmp, pc.smallest.UserKey, pc.largest.UserKey, - ) + pc.outputLevel.level, pc.cmp, pc.smallest.UserKey, + pc.largest.UserKey, pc.largest.IsExclusiveSentinel()) if outputOverlaps.SizeSum() > pc.maxReadCompactionBytes { return nil } diff --git a/compaction_picker_test.go b/compaction_picker_test.go index e1b5babe76..5500ab29dd 100644 --- a/compaction_picker_test.go +++ b/compaction_picker_test.go @@ -1098,7 +1098,7 @@ func TestPickedCompactionSetupInputs(t *testing.T) { } pc.version = newVersion(opts, files) pc.startLevel.files = pc.version.Overlaps(pc.startLevel.level, pc.cmp, - []byte(args[0].String()), []byte(args[1].String())) + []byte(args[0].String()), []byte(args[1].String()), false /* exclusiveEnd */) var isCompacting bool if !pc.setupInputs(opts, availBytes) { diff --git a/compaction_test.go b/compaction_test.go index 4858bcd8fd..7f5d722584 100644 --- a/compaction_test.go +++ b/compaction_test.go @@ -1721,7 +1721,8 @@ func TestCompactionDeleteOnlyHints(t *testing.T) { if !force { // Find the file in the current version. 
v := d.mu.versions.currentVersion() - overlaps := v.Overlaps(tombstoneLevel, d.opts.Comparer.Compare, start, end) + overlaps := v.Overlaps(tombstoneLevel, d.opts.Comparer.Compare, start, + end, true /* exclusiveEnd */) iter := overlaps.Iter() for m := iter.First(); m != nil; m = iter.Next() { if m.FileNum.String() == parts[1] { diff --git a/db.go b/db.go index cb0d46ef05..32bca168f7 100644 --- a/db.go +++ b/db.go @@ -1126,7 +1126,7 @@ func (d *DB) Compact( maxLevelWithFiles := 1 cur := d.mu.versions.currentVersion() for level := 0; level < numLevels; level++ { - overlaps := cur.Overlaps(level, d.cmp, start, end) + overlaps := cur.Overlaps(level, d.cmp, start, end, iEnd.IsExclusiveSentinel()) if !overlaps.Empty() { maxLevelWithFiles = level + 1 } @@ -1400,7 +1400,7 @@ func (d *DB) EstimateDiskUsage(start, end []byte) (uint64, error) { // We can only use `Overlaps` to restrict `files` at L1+ since at L0 it // expands the range iteratively until it has found a set of files that // do not overlap any other L0 files outside that set. - overlaps := readState.current.Overlaps(level, d.opts.Comparer.Compare, start, end) + overlaps := readState.current.Overlaps(level, d.opts.Comparer.Compare, start, end, false /* exclusiveEnd */) iter = overlaps.Iter() } for file := iter.First(); file != nil; file = iter.Next() { diff --git a/ingest.go b/ingest.go index 65d47311b7..8f72f55341 100644 --- a/ingest.go +++ b/ingest.go @@ -428,7 +428,8 @@ func ingestTargetLevel( } // Check boundary overlap. 
- boundaryOverlaps := v.Overlaps(level, cmp, meta.Smallest.UserKey, meta.Largest.UserKey) + boundaryOverlaps := v.Overlaps(level, cmp, meta.Smallest.UserKey, + meta.Largest.UserKey, meta.Largest.IsExclusiveSentinel()) if !boundaryOverlaps.Empty() { continue } diff --git a/internal/base/internal.go b/internal/base/internal.go index 70cea88a46..dda48069d7 100644 --- a/internal/base/internal.go +++ b/internal/base/internal.go @@ -143,7 +143,7 @@ func MakeInternalKey(userKey []byte, seqNum uint64, kind InternalKeyKind) Intern } // MakeSearchKey constructs an internal key that is appropriate for searching -// for a the specified user key. The search key contain the maximual sequence +// for a the specified user key. The search key contain the maximal sequence // number and kind ensuring that it sorts before any other internal keys for // the same user key. func MakeSearchKey(userKey []byte) InternalKey { @@ -342,6 +342,15 @@ func (k InternalKey) Pretty(f FormatKey) fmt.Formatter { return prettyInternalKey{k, f} } +// IsExclusiveSentinel returns whether this internal key excludes point keys +// with the same user key if used as an end boundary. See the comment on +// InternalKeyRangeDeleteSentinel. +func (k InternalKey) IsExclusiveSentinel() bool { + // TODO(jackson): This may need to change to include separate sentinels for + // range key unsets and deletes. 
+ return k.Trailer == InternalKeyRangeDeleteSentinel || k.Trailer == InternalKeyBoundaryRangeKey +} + type prettyInternalKey struct { InternalKey formatKey FormatKey diff --git a/internal/manifest/l0_sublevels.go b/internal/manifest/l0_sublevels.go index c7e94c937f..b41fa4800d 100644 --- a/internal/manifest/l0_sublevels.go +++ b/internal/manifest/l0_sublevels.go @@ -238,7 +238,7 @@ func NewL0Sublevels( keys = append(keys, intervalKey{key: f.Smallest.UserKey}) keys = append(keys, intervalKey{ key: f.Largest.UserKey, - isLargest: f.Largest.Trailer != base.InternalKeyRangeDeleteSentinel, + isLargest: !f.Largest.IsExclusiveSentinel(), }) } keys = sortAndDedup(keys, cmp) @@ -269,7 +269,7 @@ func NewL0Sublevels( cmp, intervalKey{ key: f.Largest.UserKey, - isLargest: f.Largest.Trailer != base.InternalKeyRangeDeleteSentinel}, + isLargest: !f.Largest.IsExclusiveSentinel()}, keys[f.minIntervalIndex+index]) <= 0 }) if f.maxIntervalIndex == len(keys) { @@ -381,7 +381,7 @@ func (s *L0Sublevels) InitCompactingFileInfo(inProgress []L0Compaction) { // compacting. 
for _, c := range inProgress { startIK := intervalKey{key: c.Smallest.UserKey, isLargest: false} - endIK := intervalKey{key: c.Largest.UserKey, isLargest: c.Largest.Trailer != base.InternalKeyRangeDeleteSentinel} + endIK := intervalKey{key: c.Largest.UserKey, isLargest: !c.Largest.IsExclusiveSentinel()} start := sort.Search(len(s.orderedIntervals), func(i int) bool { return intervalKeyCompare(s.cmp, s.orderedIntervals[i].startKey, startIK) >= 0 }) diff --git a/internal/manifest/l0_sublevels_test.go b/internal/manifest/l0_sublevels_test.go index 290199063e..cf5be36057 100644 --- a/internal/manifest/l0_sublevels_test.go +++ b/internal/manifest/l0_sublevels_test.go @@ -161,7 +161,7 @@ func visualizeSublevels( buf.WriteByte(middleChar) lastChar++ } - if f.Largest.Trailer == base.InternalKeyRangeDeleteSentinel && + if f.Largest.IsExclusiveSentinel() && j < len(files)-1 && files[j+1].Smallest.UserKey[0] == f.Largest.UserKey[0] { // This case happens where two successive files have // matching end/start user keys but where the left-side file diff --git a/internal/manifest/level_metadata.go b/internal/manifest/level_metadata.go index 817bbfbb74..153b8a014e 100644 --- a/internal/manifest/level_metadata.go +++ b/internal/manifest/level_metadata.go @@ -72,7 +72,8 @@ func (lm *LevelMetadata) Slice() LevelSlice { // key-sorted (eg, non-L0). func (lm *LevelMetadata) Find(cmp base.Compare, m *FileMetadata) *LevelFile { // TODO(jackson): Add an assertion that lm is key-sorted. 
- o := overlaps(lm.Iter(), cmp, m.Smallest.UserKey, m.Largest.UserKey) + o := overlaps(lm.Iter(), cmp, m.Smallest.UserKey, + m.Largest.UserKey, m.Largest.IsExclusiveSentinel()) iter := o.Iter() for f := iter.First(); f != nil; f = iter.Next() { if f == m { diff --git a/internal/manifest/version.go b/internal/manifest/version.go index de7d657af8..b0cc6114d5 100644 --- a/internal/manifest/version.go +++ b/internal/manifest/version.go @@ -242,22 +242,44 @@ func SortBySmallest(files []*FileMetadata, cmp Compare) { sort.Sort(bySmallest{files, cmp}) } -func overlaps(iter LevelIterator, cmp Compare, start, end []byte) LevelSlice { +func overlaps(iter LevelIterator, cmp Compare, start, end []byte, exclusiveEnd bool) LevelSlice { startIter := iter.Clone() startIter.SeekGE(cmp, start) + // SeekGE compares user keys. The user key `start` may be equal to the + // f.Largest because f.Largest is a range deletion sentinel, indicating that + // the user key `start` is NOT contained within the file f. If that's the + // case, we can narrow the overlapping bounds to exclude the file with the + // sentinel. + if f := startIter.Current(); f != nil && f.Largest.IsExclusiveSentinel() && + cmp(f.Largest.UserKey, start) == 0 { + startIter.Next() + } + endIter := iter.Clone() endIter.SeekGE(cmp, end) - // endIter is now pointing at the *first* file with a largest key >= end. - // If there are multiple files including the user key `end`, we want all - // of them, so move forward. - for endIter.Current() != nil && cmp(endIter.Current().Largest.UserKey, end) == 0 { - endIter.Next() + if !exclusiveEnd { + // endIter is now pointing at the *first* file with a largest key >= end. + // If there are multiple files including the user key `end`, we want all + // of them, so move forward. 
+ for f := endIter.Current(); f != nil && cmp(f.Largest.UserKey, end) == 0; { + f = endIter.Next() + } } + // LevelSlice uses inclusive bounds, so if we seeked to the end sentinel // or nexted too far because Largest.UserKey equaled `end`, go back. - if !endIter.iter.valid() || cmp(endIter.Current().Smallest.UserKey, end) > 0 { + // + // Consider !exclusiveEnd and end = 'f', with the following file bounds: + // + // [b,d] [e, f] [f, f] [g, h] + // + // the above for loop will Next until it arrives at [g, h]. We need to + // observe that g > f, and Prev to the file with bounds [f, f]. + if !endIter.iter.valid() { + endIter.Prev() + } else if c := cmp(endIter.Current().Smallest.UserKey, end); c > 0 || c == 0 && exclusiveEnd { endIter.Prev() } @@ -484,7 +506,8 @@ func (v *Version) InitL0Sublevels( func (v *Version) Contains(level int, cmp Compare, m *FileMetadata) bool { iter := v.Levels[level].Iter() if level > 0 { - overlaps := v.Overlaps(level, cmp, m.Smallest.UserKey, m.Largest.UserKey) + overlaps := v.Overlaps(level, cmp, m.Smallest.UserKey, m.Largest.UserKey, + m.Largest.IsExclusiveSentinel()) iter = overlaps.Iter() } for f := iter.First(); f != nil; f = iter.Next() { @@ -503,7 +526,7 @@ func (v *Version) Contains(level int, cmp Compare, m *FileMetadata) bool { // and the computation is repeated until [start, end] stabilizes. // The returned files are a subsequence of the input files, i.e., the ordering // is not changed. -func (v *Version) Overlaps(level int, cmp Compare, start, end []byte) LevelSlice { +func (v *Version) Overlaps(level int, cmp Compare, start, end []byte, exclusiveEnd bool) LevelSlice { if level == 0 { // Indices that have been selected as overlapping. 
l0 := v.Levels[level] @@ -520,11 +543,11 @@ func (v *Version) Overlaps(level int, cmp Compare, start, end []byte) LevelSlice } smallest := meta.Smallest.UserKey largest := meta.Largest.UserKey - if cmp(largest, start) < 0 { + if c := cmp(largest, start); c < 0 || c == 0 && meta.Largest.IsExclusiveSentinel() { // meta is completely before the specified range; skip it. continue } - if cmp(smallest, end) > 0 { + if c := cmp(smallest, end); c > 0 || c == 0 && exclusiveEnd { // meta is completely after the specified range; skip it. continue } @@ -571,7 +594,7 @@ func (v *Version) Overlaps(level int, cmp Compare, start, end []byte) LevelSlice return slice } - return overlaps(v.Levels[level].Iter(), cmp, start, end) + return overlaps(v.Levels[level].Iter(), cmp, start, end, exclusiveEnd) } // CheckOrdering checks that the files are consistent with respect to diff --git a/internal/manifest/version_edit.go b/internal/manifest/version_edit.go index 0367b5d03c..c0781b45cc 100644 --- a/internal/manifest/version_edit.go +++ b/internal/manifest/version_edit.go @@ -608,7 +608,8 @@ func (b *BulkVersionEdit) Apply( // Check consistency of the level in the vicinity of our edits. if sm != nil && la != nil { - overlap := overlaps(v.Levels[level].Iter(), cmp, sm.Smallest.UserKey, la.Largest.UserKey) + overlap := overlaps(v.Levels[level].Iter(), cmp, sm.Smallest.UserKey, + la.Largest.UserKey, la.Largest.IsExclusiveSentinel()) // overlap contains all of the added files. We want to ensure that // the added files are consistent with neighboring existing files // too, so reslice the overlap to pull in a neighbor on each side. 
diff --git a/internal/manifest/version_test.go b/internal/manifest/version_test.go index f918a1d4d4..381de2070f 100644 --- a/internal/manifest/version_test.go +++ b/internal/manifest/version_test.go @@ -141,30 +141,39 @@ func TestOverlaps(t *testing.T) { m10 := &FileMetadata{ FileNum: 710, Size: 1, - Smallest: base.ParseInternalKey("d.SET.7108"), - Largest: base.ParseInternalKey("g.SET.7109"), + Smallest: base.ParseInternalKey("a.SET.7140"), + Largest: base.InternalKey{ + UserKey: []byte("d"), + Trailer: base.InternalKeyRangeDeleteSentinel, + }, } m11 := &FileMetadata{ FileNum: 711, Size: 1, - Smallest: base.ParseInternalKey("g.SET.7118"), - Largest: base.ParseInternalKey("j.SET.7119"), + Smallest: base.ParseInternalKey("d.SET.7108"), + Largest: base.ParseInternalKey("g.SET.7109"), } m12 := &FileMetadata{ FileNum: 712, Size: 1, - Smallest: base.ParseInternalKey("n.SET.7128"), - Largest: base.ParseInternalKey("p.SET.7129"), + Smallest: base.ParseInternalKey("g.SET.7118"), + Largest: base.ParseInternalKey("j.SET.7119"), } m13 := &FileMetadata{ FileNum: 713, Size: 1, - Smallest: base.ParseInternalKey("p.SET.7148"), - Largest: base.ParseInternalKey("p.SET.7149"), + Smallest: base.ParseInternalKey("n.SET.7128"), + Largest: base.ParseInternalKey("p.SET.7129"), } m14 := &FileMetadata{ FileNum: 714, Size: 1, + Smallest: base.ParseInternalKey("p.SET.7148"), + Largest: base.ParseInternalKey("p.SET.7149"), + } + m15 := &FileMetadata{ + FileNum: 715, + Size: 1, Smallest: base.ParseInternalKey("p.SET.7138"), Largest: base.ParseInternalKey("u.SET.7139"), } @@ -172,13 +181,14 @@ func TestOverlaps(t *testing.T) { v := Version{ Levels: [NumLevels]LevelMetadata{ 0: levelMetadata(0, m00, m01, m02, m03, m04, m05, m06, m07), - 1: levelMetadata(1, m10, m11, m12, m13, m14), + 1: levelMetadata(1, m10, m11, m12, m13, m14, m15), }, } testCases := []struct { level int ukey0, ukey1 string + exclusiveEnd bool want string }{ // Level 0: m00=b-e, m01=c-f, m02=f-g, m03=x-y, m04=n-p, m05=p-p, 
m06=p-u, m07=r-s. @@ -188,68 +198,74 @@ func TestOverlaps(t *testing.T) { // - m06 contains m07, // - m00, m01 and m02 transitively overlap/touch each other, and // - m04, m05, m06 and m07 transitively overlap/touch each other. - {0, "a", "a", ""}, - {0, "a", "b", "m00 m01 m02"}, - {0, "a", "d", "m00 m01 m02"}, - {0, "a", "e", "m00 m01 m02"}, - {0, "a", "g", "m00 m01 m02"}, - {0, "a", "z", "m00 m01 m02 m03 m04 m05 m06 m07"}, - {0, "c", "e", "m00 m01 m02"}, - {0, "d", "d", "m00 m01 m02"}, - {0, "g", "n", "m00 m01 m02 m04 m05 m06 m07"}, - {0, "h", "i", ""}, - {0, "h", "o", "m04 m05 m06 m07"}, - {0, "h", "u", "m04 m05 m06 m07"}, - {0, "k", "l", ""}, - {0, "k", "o", "m04 m05 m06 m07"}, - {0, "k", "p", "m04 m05 m06 m07"}, - {0, "n", "o", "m04 m05 m06 m07"}, - {0, "n", "z", "m03 m04 m05 m06 m07"}, - {0, "o", "z", "m03 m04 m05 m06 m07"}, - {0, "p", "z", "m03 m04 m05 m06 m07"}, - {0, "q", "z", "m03 m04 m05 m06 m07"}, - {0, "r", "s", "m04 m05 m06 m07"}, - {0, "r", "z", "m03 m04 m05 m06 m07"}, - {0, "s", "z", "m03 m04 m05 m06 m07"}, - {0, "u", "z", "m03 m04 m05 m06 m07"}, - {0, "y", "z", "m03"}, - {0, "z", "z", ""}, - - // Level 1: m10=d-g, m11=g-j, m12=n-p, m13=p-p, m14=p-u. 
- {1, "a", "a", ""}, - {1, "a", "b", ""}, - {1, "a", "d", "m10"}, - {1, "a", "e", "m10"}, - {1, "a", "g", "m10 m11"}, - {1, "a", "z", "m10 m11 m12 m13 m14"}, - {1, "c", "e", "m10"}, - {1, "d", "d", "m10"}, - {1, "g", "n", "m10 m11 m12"}, - {1, "h", "i", "m11"}, - {1, "h", "o", "m11 m12"}, - {1, "h", "u", "m11 m12 m13 m14"}, - {1, "k", "l", ""}, - {1, "k", "o", "m12"}, - {1, "k", "p", "m12 m13 m14"}, - {1, "n", "o", "m12"}, - {1, "n", "z", "m12 m13 m14"}, - {1, "o", "z", "m12 m13 m14"}, - {1, "p", "z", "m12 m13 m14"}, - {1, "q", "z", "m14"}, - {1, "r", "s", "m14"}, - {1, "r", "z", "m14"}, - {1, "s", "z", "m14"}, - {1, "u", "z", "m14"}, - {1, "y", "z", ""}, - {1, "z", "z", ""}, + {0, "a", "a", false, ""}, + {0, "a", "b", false, "m00 m01 m02"}, + {0, "a", "d", false, "m00 m01 m02"}, + {0, "a", "e", false, "m00 m01 m02"}, + {0, "a", "g", false, "m00 m01 m02"}, + {0, "a", "z", false, "m00 m01 m02 m03 m04 m05 m06 m07"}, + {0, "c", "e", false, "m00 m01 m02"}, + {0, "d", "d", false, "m00 m01 m02"}, + {0, "b", "f", true, "m00 m01"}, + {0, "g", "n", false, "m00 m01 m02 m04 m05 m06 m07"}, + {0, "h", "i", false, ""}, + {0, "h", "o", false, "m04 m05 m06 m07"}, + {0, "h", "u", false, "m04 m05 m06 m07"}, + {0, "k", "l", false, ""}, + {0, "k", "o", false, "m04 m05 m06 m07"}, + {0, "k", "p", false, "m04 m05 m06 m07"}, + {0, "n", "o", false, "m04 m05 m06 m07"}, + {0, "n", "z", false, "m03 m04 m05 m06 m07"}, + {0, "o", "z", false, "m03 m04 m05 m06 m07"}, + {0, "p", "z", false, "m03 m04 m05 m06 m07"}, + {0, "q", "z", false, "m03 m04 m05 m06 m07"}, + {0, "r", "s", false, "m04 m05 m06 m07"}, + {0, "r", "z", false, "m03 m04 m05 m06 m07"}, + {0, "s", "z", false, "m03 m04 m05 m06 m07"}, + {0, "u", "z", false, "m03 m04 m05 m06 m07"}, + {0, "y", "z", false, "m03"}, + {0, "z", "z", false, ""}, + + // Level 1: m10=a-d* m11=d-g, m12=g-j, m13=n-p, m14=p-p, m15=p-u. 
+ // d* - exclusive, rangedel sentinel + {1, "a", "a", false, "m10"}, + {1, "a", "b", false, "m10"}, + {1, "a", "d", false, "m10 m11"}, + {1, "a", "e", false, "m10 m11"}, + {1, "a", "g", false, "m10 m11 m12"}, + {1, "a", "g", true, "m10 m11"}, + {1, "a", "z", false, "m10 m11 m12 m13 m14 m15"}, + {1, "c", "e", false, "m10 m11"}, + {1, "d", "d", false, "m11"}, + {1, "g", "n", false, "m11 m12 m13"}, + {1, "h", "i", false, "m12"}, + {1, "h", "n", true, "m12"}, + {1, "h", "n", false, "m12 m13"}, + {1, "h", "o", false, "m12 m13"}, + {1, "h", "u", false, "m12 m13 m14 m15"}, + {1, "k", "l", false, ""}, + {1, "k", "o", false, "m13"}, + {1, "k", "p", false, "m13 m14 m15"}, + {1, "k", "p", true, "m13"}, + {1, "n", "o", false, "m13"}, + {1, "n", "z", false, "m13 m14 m15"}, + {1, "o", "z", false, "m13 m14 m15"}, + {1, "p", "z", false, "m13 m14 m15"}, + {1, "q", "z", false, "m15"}, + {1, "r", "s", false, "m15"}, + {1, "r", "z", false, "m15"}, + {1, "s", "z", false, "m15"}, + {1, "u", "z", false, "m15"}, + {1, "y", "z", false, ""}, + {1, "z", "z", false, ""}, // Level 2: empty. 
- {2, "a", "z", ""}, + {2, "a", "z", false, ""}, } cmp := base.DefaultComparer.Compare for _, tc := range testCases { - overlaps := v.Overlaps(tc.level, cmp, []byte(tc.ukey0), []byte(tc.ukey1)) + overlaps := v.Overlaps(tc.level, cmp, []byte(tc.ukey0), []byte(tc.ukey1), tc.exclusiveEnd) iter := overlaps.Iter() var s []string for meta := iter.First(); meta != nil; meta = iter.Next() { @@ -257,7 +273,8 @@ func TestOverlaps(t *testing.T) { } got := strings.Join(s, " ") if got != tc.want { - t.Errorf("level=%d, range=%s-%s\ngot %v\nwant %v", tc.level, tc.ukey0, tc.ukey1, got, tc.want) + t.Errorf("level=%d, range=%s-%s (exclusiveEnd = %t)\ngot %v\nwant %v", + tc.level, tc.ukey0, tc.ukey1, tc.exclusiveEnd, got, tc.want) } } } diff --git a/level_checker.go b/level_checker.go index 021cab85a2..f717760bb1 100644 --- a/level_checker.go +++ b/level_checker.go @@ -128,7 +128,7 @@ func (m *simpleMergingIter) step() bool { item := &m.heap.items[0] l := &m.levels[item.index] // Sentinels are not relevant for this point checking. - if item.key.Trailer != InternalKeyRangeDeleteSentinel && item.key.Visible(m.snapshot) { + if !item.key.IsExclusiveSentinel() && item.key.Visible(m.snapshot) { m.numPoints++ keyChanged := m.heap.cmp(item.key.UserKey, m.lastKey.UserKey) != 0 if !keyChanged { diff --git a/level_iter.go b/level_iter.go index a951bd32dd..b93d4be015 100644 --- a/level_iter.go +++ b/level_iter.go @@ -226,7 +226,7 @@ func (l *levelIter) findFileGE(key []byte) *fileMetadata { // (see the comment in that function). 
m := l.files.SeekGE(l.cmp, key) - for m != nil && m.Largest.Trailer == InternalKeyRangeDeleteSentinel && + for m != nil && m.Largest.IsExclusiveSentinel() && l.cmp(m.Largest.UserKey, key) == 0 { m = l.files.Next() } @@ -354,7 +354,7 @@ func (l *levelIter) loadFile(file *fileMetadata, dir int) loadFileReturnIndicato *l.largestUserKey = file.Largest.UserKey } if l.isLargestUserKeyRangeDelSentinel != nil { - *l.isLargestUserKeyRangeDelSentinel = file.Largest.Trailer == InternalKeyRangeDeleteSentinel + *l.isLargestUserKeyRangeDelSentinel = file.Largest.IsExclusiveSentinel() } return newFileLoaded } diff --git a/merging_iter.go b/merging_iter.go index 40a5c4947b..3b5dda9db8 100644 --- a/merging_iter.go +++ b/merging_iter.go @@ -400,7 +400,7 @@ func (m *mergingIter) switchToMinHeap() { // bound. Instead, we seek it to >= f and Next from there. if l.iterKey == nil || (m.lower != nil && l.isSyntheticIterBoundsKey && - l.iterKey.Trailer == InternalKeyRangeDeleteSentinel && + l.iterKey.IsExclusiveSentinel() && m.heap.cmp(l.iterKey.UserKey, m.lower) <= 0) { if m.lower != nil { l.iterKey, l.iterValue = l.iter.SeekGE(m.lower, false /* trySeekUsingNext */) @@ -475,8 +475,7 @@ func (m *mergingIter) switchToMaxHeap() { // bound. Instead, we seek it to < g, and Prev from there. if l.iterKey == nil || (m.upper != nil && l.isSyntheticIterBoundsKey && - l.iterKey.Trailer == InternalKeyRangeDeleteSentinel && - m.heap.cmp(l.iterKey.UserKey, m.upper) >= 0) { + l.iterKey.IsExclusiveSentinel() && m.heap.cmp(l.iterKey.UserKey, m.upper) >= 0) { if m.upper != nil { l.iterKey, l.iterValue = l.iter.SeekLT(m.upper) } else { diff --git a/range_del_test.go b/range_del_test.go index db1866c3b3..e2d7389c75 100644 --- a/range_del_test.go +++ b/range_del_test.go @@ -46,7 +46,7 @@ func TestRangeDel(t *testing.T) { d.mu.Lock() // Disable the "dynamic base level" code for this test. 
d.mu.versions.picker.forceBaseLevel1() - s := fmt.Sprintf("mem: %d\n%s", len(d.mu.mem.queue), d.mu.versions.currentVersion()) + s := fmt.Sprintf("mem: %d\n%s", len(d.mu.mem.queue), d.mu.versions.currentVersion().DebugString(opts.Comparer.FormatKey)) d.mu.Unlock() return s @@ -60,7 +60,7 @@ func TestRangeDel(t *testing.T) { d.mu.Lock() // Disable the "dynamic base level" code for this test. d.mu.versions.picker.forceBaseLevel1() - s := d.mu.versions.currentVersion().String() + s := d.mu.versions.currentVersion().DebugString(opts.Comparer.FormatKey) d.mu.Unlock() return s @@ -265,20 +265,22 @@ func TestRangeDelCompactionTruncation(t *testing.T) { if formatVersion < FormatSetWithDelete { expectLSM(` 1: - 000012:[a#3,RANGEDEL-b#72057594037927935,RANGEDEL] + 000008:[a#3,RANGEDEL-b#72057594037927935,RANGEDEL] +2: + 000012:[b#4,SET-b#4,SET] + 000013:[b#3,RANGEDEL-c#72057594037927935,RANGEDEL] 3: - 000017:[b#4,SET-b#4,SET] - 000018:[b#3,RANGEDEL-c#72057594037927935,RANGEDEL] - 000019:[c#5,SET-d#72057594037927935,RANGEDEL] + 000014:[c#5,SET-d#72057594037927935,RANGEDEL] `) } else { expectLSM(` 1: - 000012:[a#3,RANGEDEL-b#72057594037927935,RANGEDEL] + 000008:[a#3,RANGEDEL-b#72057594037927935,RANGEDEL] +2: + 000012:[b#4,SETWITHDEL-b#4,SETWITHDEL] + 000013:[b#3,RANGEDEL-c#72057594037927935,RANGEDEL] 3: - 000017:[b#4,SETWITHDEL-b#4,SETWITHDEL] - 000018:[b#3,RANGEDEL-c#72057594037927935,RANGEDEL] - 000019:[c#5,SETWITHDEL-d#72057594037927935,RANGEDEL] + 000014:[c#5,SET-d#72057594037927935,RANGEDEL] `) } @@ -441,19 +443,20 @@ func TestRangeDelCompactionTruncation3(t *testing.T) { require.NoError(t, d.Compact([]byte("c"), []byte("c\x00"))) expectLSM(` +3: + 000017:[a#3,RANGEDEL-b#2,SET] + 000018:[b#1,RANGEDEL-c#72057594037927935,RANGEDEL] 4: - 000020:[a#3,RANGEDEL-b#2,SET] - 000021:[b#1,RANGEDEL-c#72057594037927935,RANGEDEL] - 000022:[c#4,SET-d#72057594037927935,RANGEDEL] + 000019:[c#4,SET-d#72057594037927935,RANGEDEL] `) require.NoError(t, d.Compact([]byte("c"), []byte("c\x00"))) 
expectLSM(` +3: + 000017:[a#3,RANGEDEL-b#2,SET] + 000018:[b#1,RANGEDEL-c#72057594037927935,RANGEDEL] 5: - 000023:[a#3,RANGEDEL-b#2,SET] - 000024:[b#1,RANGEDEL-c#72057594037927935,RANGEDEL] - 000025:[c#4,SET-d#72057594037927935,RANGEDEL] -`) + 000019:[c#4,SET-d#72057594037927935,RANGEDEL]`) if _, _, err := d.Get([]byte("b")); err != ErrNotFound { t.Fatalf("expected not found, but found %v", err) @@ -461,11 +464,11 @@ func TestRangeDelCompactionTruncation3(t *testing.T) { require.NoError(t, d.Compact([]byte("a"), []byte("a\x00"))) expectLSM(` +4: + 000020:[a#3,RANGEDEL-b#2,SET] + 000021:[b#1,RANGEDEL-c#72057594037927935,RANGEDEL] 5: - 000025:[c#4,SET-d#72057594037927935,RANGEDEL] -6: - 000026:[a#3,RANGEDEL-b#2,SET] - 000027:[b#1,RANGEDEL-c#72057594037927935,RANGEDEL] + 000019:[c#4,SET-d#72057594037927935,RANGEDEL] `) if v, _, err := d.Get([]byte("b")); err != ErrNotFound { diff --git a/table_stats.go b/table_stats.go index b06ce5f6da..1a4dbb3b75 100644 --- a/table_stats.go +++ b/table_stats.go @@ -341,7 +341,8 @@ func (d *DB) averageEntrySizeBeneath( // summing their value sizes and entry counts. var fileSum, keySum, valSum, entryCount uint64 for l := level + 1; l < numLevels; l++ { - overlaps := v.Overlaps(l, d.cmp, meta.Smallest.UserKey, meta.Largest.UserKey) + overlaps := v.Overlaps(l, d.cmp, meta.Smallest.UserKey, + meta.Largest.UserKey, meta.Largest.IsExclusiveSentinel()) iter := overlaps.Iter() for file := iter.First(); file != nil; file = iter.Next() { err := d.tableCache.withReader(file, func(r *sstable.Reader) (err error) { @@ -382,7 +383,8 @@ func (d *DB) averageEntrySizeBeneath( func (d *DB) estimateSizeBeneath( v *version, level int, meta *fileMetadata, start, end []byte, ) (estimate uint64, hintSeqNum uint64, err error) { - // Find all files in lower levels that overlap with the deleted range. + // Find all files in lower levels that overlap with the deleted range + // [start, end). 
// // An overlapping file might be completely contained by the range // tombstone, in which case we can count the entire file size in @@ -392,11 +394,12 @@ func (d *DB) estimateSizeBeneath( // additional I/O to read the file's index blocks. hintSeqNum = math.MaxUint64 for l := level + 1; l < numLevels; l++ { - overlaps := v.Overlaps(l, d.cmp, start, end) + overlaps := v.Overlaps(l, d.cmp, start, end, true /* exclusiveEnd */) iter := overlaps.Iter() for file := iter.First(); file != nil; file = iter.Next() { - if d.cmp(start, file.Smallest.UserKey) <= 0 && - d.cmp(file.Largest.UserKey, end) <= 0 { + startCmp := d.cmp(start, file.Smallest.UserKey) + endCmp := d.cmp(file.Largest.UserKey, end) + if startCmp <= 0 && (endCmp < 0 || endCmp == 0 && file.Largest.IsExclusiveSentinel()) { // The range fully contains the file, so skip looking it up in // table cache/looking at its indexes and add the full file size. estimate += file.Size diff --git a/testdata/manual_compaction b/testdata/manual_compaction index ebe49e79c9..d6111e3c17 100644 --- a/testdata/manual_compaction +++ b/testdata/manual_compaction @@ -105,7 +105,7 @@ wait-pending-table-stats num-entries: 2 num-deletions: 1 point-deletions-bytes-estimate: 0 -range-deletions-bytes-estimate: 776 +range-deletions-bytes-estimate: 31 # Same as above, except range tombstone covers multiple grandparent file boundaries. diff --git a/testdata/manual_compaction_set_with_del b/testdata/manual_compaction_set_with_del index 53bd16258d..e73e310266 100644 --- a/testdata/manual_compaction_set_with_del +++ b/testdata/manual_compaction_set_with_del @@ -105,7 +105,7 @@ wait-pending-table-stats num-entries: 2 num-deletions: 1 point-deletions-bytes-estimate: 0 -range-deletions-bytes-estimate: 776 +range-deletions-bytes-estimate: 31 # Same as above, except range tombstone covers multiple grandparent file boundaries. 
diff --git a/testdata/range_del b/testdata/range_del index 0c90961ab4..bbfa454921 100644 --- a/testdata/range_del +++ b/testdata/range_del @@ -641,9 +641,9 @@ L1 ---- mem: 1 1: - 000004:[a-a] - 000005:[a-a] - 000006:[a-a] + 000004:[a#3,SET-a#3,SET] + 000005:[a#2,SET-a#2,SET] + 000006:[a#1,SET-a#1,SET] get seq=1 a @@ -720,9 +720,9 @@ L1 ---- mem: 1 1: - 000004:[a-a] - 000005:[a-a] - 000006:[a-a] + 000004:[a#3,MERGE-a#3,MERGE] + 000005:[a#2,MERGE-a#2,MERGE] + 000006:[a#1,MERGE-a#1,MERGE] get seq=1 a @@ -803,11 +803,11 @@ L3 ---- mem: 1 1: - 000004:[a-a] + 000004:[a#3,MERGE-a#3,MERGE] 2: - 000005:[a-a] + 000005:[a#2,MERGE-a#2,MERGE] 3: - 000006:[a-a] + 000006:[a#1,MERGE-a#1,MERGE] get seq=1 a @@ -916,13 +916,13 @@ L3 ---- mem: 1 0.0: - 000004:[a-d] + 000004:[a#4,SET-d#4,SET] 1: - 000005:[a-d] + 000005:[a#3,SET-d#3,SET] 2: - 000006:[a-d] + 000006:[a#2,RANGEDEL-d#2,SET] 3: - 000007:[a-d] + 000007:[a#1,SET-d#1,SET] get seq=2 a @@ -1042,11 +1042,11 @@ L2 ---- mem: 1 1: - 000004:[a-b] - 000005:[b-c] - 000006:[c-d] + 000004:[a#2,RANGEDEL-b#72057594037927935,RANGEDEL] + 000005:[b#2,RANGEDEL-c#72057594037927935,RANGEDEL] + 000006:[c#2,RANGEDEL-d#72057594037927935,RANGEDEL] 2: - 000007:[a-d] + 000007:[a#1,SET-d#1,SET] get seq=2 a @@ -1138,9 +1138,9 @@ L2 ---- mem: 1 1: - 000004:[a-b] + 000004:[a#1,RANGEDEL-b#72057594037927935,RANGEDEL] 2: - 000005:[a-a] + 000005:[a#2,SET-a#2,SET] get seq=3 a @@ -1163,23 +1163,23 @@ L0 ---- mem: 1 0.1: - 000005:[a-a] - 000006:[c-c] + 000005:[a#2,SET-a#2,SET] + 000006:[c#3,SET-c#3,SET] 0.0: - 000004:[a-e] + 000004:[a#1,RANGEDEL-e#72057594037927935,RANGEDEL] compact a-e ---- 1: - 000007:[a-c] - 000008:[c-e] + 000007:[a#2,SET-c#72057594037927935,RANGEDEL] + 000008:[c#3,SET-e#72057594037927935,RANGEDEL] compact d-e ---- 1: - 000007:[a-c] + 000007:[a#2,SET-c#72057594037927935,RANGEDEL] 2: - 000008:[c-e] + 000008:[c#3,SET-e#72057594037927935,RANGEDEL] iter seq=4 seek-ge b @@ -1201,23 +1201,23 @@ L0 ---- mem: 1 0.1: - 000005:[a-a] - 000006:[c-c] + 
000005:[a#2,SET-a#2,SET] + 000006:[c#3,SET-c#3,SET] 0.0: - 000004:[a-e] + 000004:[a#1,RANGEDEL-e#72057594037927935,RANGEDEL] compact a-e ---- 1: - 000007:[a-c] - 000008:[c-e] + 000007:[a#2,SET-c#72057594037927935,RANGEDEL] + 000008:[c#3,SET-e#72057594037927935,RANGEDEL] compact a-b ---- 1: - 000008:[c-e] + 000008:[c#3,SET-e#72057594037927935,RANGEDEL] 2: - 000007:[a-c] + 000007:[a#2,SET-c#72057594037927935,RANGEDEL] iter seq=4 seek-lt d @@ -1247,29 +1247,29 @@ L2 ---- mem: 1 0.1: - 000005:[a-a] - 000006:[c-c] + 000005:[a#2,SET-a#2,SET] + 000006:[c#3,SET-c#3,SET] 0.0: - 000004:[a-e] + 000004:[a#1,RANGEDEL-e#72057594037927935,RANGEDEL] 2: - 000007:[d-d] + 000007:[d#0,SET-d#0,SET] compact a-b ---- 1: - 000008:[a-c] - 000009:[c-d] - 000010:[d-e] + 000008:[a#2,SET-c#72057594037927935,RANGEDEL] + 000009:[c#3,SET-d#72057594037927935,RANGEDEL] + 000010:[d#1,RANGEDEL-e#72057594037927935,RANGEDEL] 2: - 000007:[d-d] + 000007:[d#0,SET-d#0,SET] compact d-e ---- 1: - 000008:[a-c] + 000008:[a#2,SET-c#72057594037927935,RANGEDEL] + 000009:[c#3,SET-d#72057594037927935,RANGEDEL] 3: - 000013:[c-d] - 000014:[d-e] + 000011:[d#1,RANGEDEL-e#72057594037927935,RANGEDEL] get seq=4 c @@ -1278,11 +1278,12 @@ c:v compact a-b L1 ---- +1: + 000009:[c#3,SET-d#72057594037927935,RANGEDEL] 2: - 000015:[a-c] + 000008:[a#2,SET-c#72057594037927935,RANGEDEL] 3: - 000013:[c-d] - 000014:[d-e] + 000011:[d#1,RANGEDEL-e#72057594037927935,RANGEDEL] get seq=4 c @@ -1306,34 +1307,34 @@ L2 ---- mem: 1 0.1: - 000005:[a-a] - 000006:[c-c] + 000005:[a#2,SET-a#2,SET] + 000006:[c#3,SET-c#3,SET] 0.0: - 000004:[a-e] - 000007:[f-f] + 000004:[a#1,RANGEDEL-e#72057594037927935,RANGEDEL] + 000007:[f#4,SET-f#4,SET] 2: - 000008:[d-d] + 000008:[d#0,SET-d#0,SET] compact a-b ---- 0.0: - 000007:[f-f] + 000007:[f#4,SET-f#4,SET] 1: - 000009:[a-c] - 000010:[c-d] - 000011:[d-e] + 000009:[a#2,SET-c#72057594037927935,RANGEDEL] + 000010:[c#3,SET-d#72057594037927935,RANGEDEL] + 000011:[d#1,RANGEDEL-e#72057594037927935,RANGEDEL] 2: - 
000008:[d-d] + 000008:[d#0,SET-d#0,SET] compact d-e ---- 0.0: - 000007:[f-f] + 000007:[f#4,SET-f#4,SET] 1: - 000009:[a-c] + 000009:[a#2,SET-c#72057594037927935,RANGEDEL] + 000010:[c#3,SET-d#72057594037927935,RANGEDEL] 3: - 000014:[c-d] - 000015:[d-e] + 000012:[d#1,RANGEDEL-e#72057594037927935,RANGEDEL] get seq=4 c @@ -1343,20 +1344,20 @@ c:v compact f-f L0 ---- 1: - 000009:[a-c] - 000007:[f-f] + 000009:[a#2,SET-c#72057594037927935,RANGEDEL] + 000010:[c#3,SET-d#72057594037927935,RANGEDEL] + 000007:[f#4,SET-f#4,SET] 3: - 000014:[c-d] - 000015:[d-e] + 000012:[d#1,RANGEDEL-e#72057594037927935,RANGEDEL] compact a-f L1 ---- 2: - 000016:[a-c] - 000017:[f-f] + 000013:[a#2,SET-c#72057594037927935,RANGEDEL] + 000014:[c#3,SET-d#72057594037927935,RANGEDEL] + 000015:[f#4,SET-f#4,SET] 3: - 000014:[c-d] - 000015:[d-e] + 000012:[d#1,RANGEDEL-e#72057594037927935,RANGEDEL] get seq=4 c @@ -1376,13 +1377,13 @@ L2 ---- mem: 1 0.1: - 000005:[a-f] + 000005:[a#4,RANGEDEL-f#72057594037927935,RANGEDEL] 0.0: - 000004:[a-f] + 000004:[a#3,RANGEDEL-f#72057594037927935,RANGEDEL] 1: - 000006:[b-e] + 000006:[b#2,RANGEDEL-e#72057594037927935,RANGEDEL] 2: - 000007:[c-d] + 000007:[c#1,RANGEDEL-d#72057594037927935,RANGEDEL] wait-pending-table-stats 000007 @@ -1439,13 +1440,13 @@ L3 ---- mem: 1 0.0: - 000004:[a-d] + 000004:[a#4,SET-d#4,SET] 1: - 000005:[a-d] + 000005:[a#3,SET-d#3,SET] 2: - 000006:[a-d] + 000006:[a#2,RANGEDEL-d#2,SET] 3: - 000007:[a-d] + 000007:[a#1,SET-d#1,SET] wait-pending-table-stats 000007 @@ -1461,7 +1462,7 @@ wait-pending-table-stats num-entries: 2 num-deletions: 1 point-deletions-bytes-estimate: 0 -range-deletions-bytes-estimate: 787 +range-deletions-bytes-estimate: 42 wait-pending-table-stats 000005 @@ -1497,14 +1498,14 @@ L2 ---- mem: 1 0.1: - 000004:[a-z] + 000004:[a#6,RANGEDEL-z#72057594037927935,RANGEDEL] 0.0: - 000005:[a-d] - 000006:[e-z] + 000005:[a#5,RANGEDEL-d#72057594037927935,RANGEDEL] + 000006:[e#4,RANGEDEL-z#72057594037927935,RANGEDEL] 1: - 000007:[a-c] + 
000007:[a#2,SET-c#2,SET] 2: - 000008:[x-x] + 000008:[x#1,SET-x#1,SET] wait-pending-table-stats 000005 diff --git a/testdata/table_stats b/testdata/table_stats index 2cb56f7202..0dc9e6a2aa 100644 --- a/testdata/table_stats +++ b/testdata/table_stats @@ -39,7 +39,7 @@ wait-pending-table-stats num-entries: 1 num-deletions: 1 point-deletions-bytes-estimate: 0 -range-deletions-bytes-estimate: 796 +range-deletions-bytes-estimate: 51 reopen ---- @@ -57,7 +57,7 @@ wait-pending-table-stats num-entries: 1 num-deletions: 1 point-deletions-bytes-estimate: 0 -range-deletions-bytes-estimate: 796 +range-deletions-bytes-estimate: 51 compact a-c ---- @@ -176,7 +176,7 @@ wait-pending-table-stats num-entries: 1 num-deletions: 1 point-deletions-bytes-estimate: 0 -range-deletions-bytes-estimate: 1542 +range-deletions-bytes-estimate: 771 wait-pending-table-stats 000012 @@ -184,7 +184,7 @@ wait-pending-table-stats num-entries: 1 num-deletions: 1 point-deletions-bytes-estimate: 0 -range-deletions-bytes-estimate: 1542 +range-deletions-bytes-estimate: 771 define snapshots=(10) L6