From 2248e19f5438b600109f6fc6f198e0adb2f0a23a Mon Sep 17 00:00:00 2001
From: Rahul Aggarwal
Date: Thu, 29 Jun 2023 13:42:20 -0400
Subject: [PATCH] db: add virtual sstable metadata bounds checking

Added a `DB.checkVirtualBounds` helper, run only when invariants are
enabled, that iterates over a virtual sstable and verifies that every key
it returns falls within the bounds of the Smallest and Largest metadata
keys, and that those bounds are tight. The check is invoked alongside
`FileMetadata.ValidateVirtual()` during shared ingest and excise, and in
the virtual sstable tests.

Informs: #2593
---
 db.go               | 93 +++++++++++++++++++++++++++++++++++++++++++++++
 ingest.go           |  7 ++++
 table_cache_test.go |  2 ++
 version_set_test.go |  4 +++
 4 files changed, 106 insertions(+)
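
Reviewer note, not part of the applied diff: a rough sketch of how the new
check is meant to sit next to the existing virtual metadata validation. The
names `meta` and `parentMeta` and the open `d *DB` are illustrative
stand-ins; the real call sites are in ingest.go and the tests below.

    // At a call site that has just derived a virtual sstable's metadata
    // (meta) from its parent physical file (parentMeta):
    meta.ValidateVirtual(parentMeta)
    // No-op unless invariants are enabled; panics if any key surfaced by
    // the virtual table falls outside [meta.Smallest, meta.Largest] or if
    // those bounds are not tight.
    d.checkVirtualBounds(meta, nil /* iterOptions */)
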
diff --git a/db.go b/db.go
index bdccfb8cfe9..50d8dfe292e 100644
--- a/db.go
+++ b/db.go
@@ -2503,3 +2503,96 @@ func (d *DB) SetCreatorID(creatorID uint64) error {
 func (d *DB) ObjProvider() objstorage.Provider {
 	return d.objProvider
 }
+
+func (d *DB) checkVirtualBounds(m *fileMetadata, iterOpts *IterOptions) {
+	if !invariants.Enabled {
+		return
+	}
+	// Callers may pass nil iterOpts (excise and the tests do); fall back to
+	// empty options so the range key case below can read iterOpts.level.
+	if iterOpts == nil {
+		iterOpts = &IterOptions{}
+	}
+
+	if m.HasPointKeys {
+		pointIter, rangeDelIter, err := d.newIters(context.TODO(), m, iterOpts, internalIterOpts{})
+		if err != nil {
+			panic(errors.Wrap(err, "pebble: error creating point iterator"))
+		}
+
+		defer pointIter.Close()
+		if rangeDelIter != nil {
+			defer rangeDelIter.Close()
+		}
+
+		pointKey, _ := pointIter.First()
+		var rangeDel *keyspan.Span
+		if rangeDelIter != nil {
+			rangeDel = rangeDelIter.First()
+		}
+
+		// Check that the lower bound is tight.
+		if (rangeDel == nil || d.cmp(rangeDel.SmallestKey().UserKey, m.SmallestPointKey.UserKey) != 0) &&
+			(pointKey == nil || d.cmp(pointKey.UserKey, m.SmallestPointKey.UserKey) != 0) {
+			panic(errors.Errorf("pebble: virtual sstable %s lower point key bound is not tight", m.FileNum))
+		}
+
+		pointKey, _ = pointIter.Last()
+		rangeDel = nil
+		if rangeDelIter != nil {
+			rangeDel = rangeDelIter.Last()
+		}
+
+		// Check that the upper bound is tight.
+		if (rangeDel == nil || d.cmp(rangeDel.LargestKey().UserKey, m.LargestPointKey.UserKey) != 0) &&
+			(pointKey == nil || d.cmp(pointKey.UserKey, m.LargestPointKey.UserKey) != 0) {
+			panic(errors.Errorf("pebble: virtual sstable %s upper point key bound is not tight", m.FileNum))
+		}
+
+		// Check that iterator keys are within bounds.
+		for key, _ := pointIter.First(); key != nil; key, _ = pointIter.Next() {
+			if d.cmp(key.UserKey, m.SmallestPointKey.UserKey) < 0 || d.cmp(key.UserKey, m.LargestPointKey.UserKey) > 0 {
+				panic(errors.Errorf("pebble: virtual sstable %s point key %s is not within bounds", m.FileNum, key.UserKey))
+			}
+		}
+
+		if rangeDelIter != nil {
+			for span := rangeDelIter.First(); span != nil; span = rangeDelIter.Next() {
+				if d.cmp(span.SmallestKey().UserKey, m.SmallestPointKey.UserKey) < 0 {
+					panic(errors.Errorf("pebble: virtual sstable %s range del %s is not within bounds", m.FileNum, span.SmallestKey().UserKey))
+				}
+
+				if d.cmp(span.LargestKey().UserKey, m.LargestPointKey.UserKey) > 0 {
+					panic(errors.Errorf("pebble: virtual sstable %s range del %s is not within bounds", m.FileNum, span.LargestKey().UserKey))
+				}
+			}
+		}
+	}
+
+	if m.HasRangeKeys {
+		rangeKeyIter, err := d.tableNewRangeKeyIter(m, keyspan.SpanIterOptions{Level: iterOpts.level})
+		if err != nil {
+			panic(errors.Wrap(err, "pebble: error creating range key iterator"))
+		}
+		defer rangeKeyIter.Close()
+
+		// Check that the lower bound is tight.
+		if d.cmp(rangeKeyIter.First().SmallestKey().UserKey, m.SmallestRangeKey.UserKey) != 0 {
+			panic(errors.Errorf("pebble: virtual sstable %s lower range key bound is not tight", m.FileNum))
+		}
+
+		// Check that the upper bound is tight.
+		if d.cmp(rangeKeyIter.Last().LargestKey().UserKey, m.LargestRangeKey.UserKey) != 0 {
+			panic(errors.Errorf("pebble: virtual sstable %s upper range key bound is not tight", m.FileNum))
+		}
+
+		for span := rangeKeyIter.First(); span != nil; span = rangeKeyIter.Next() {
+			if d.cmp(span.SmallestKey().UserKey, m.SmallestRangeKey.UserKey) < 0 {
+				panic(errors.Errorf("pebble: virtual sstable %s range key %s is not within bounds", m.FileNum, span.SmallestKey().UserKey))
+			}
+			if d.cmp(span.LargestKey().UserKey, m.LargestRangeKey.UserKey) > 0 {
+				panic(errors.Errorf("pebble: virtual sstable %s range key %s is not within bounds", m.FileNum, span.LargestKey().UserKey))
+			}
+		}
+	}
+}
diff --git a/ingest.go b/ingest.go
index 07e55eb3d70..61e2c8eab2d 100644
--- a/ingest.go
+++ b/ingest.go
@@ -1097,6 +1097,9 @@ func (d *DB) ingest(
 	if err := ingestLink(jobID, d.opts, d.objProvider, loadResult, shared); err != nil {
 		return IngestOperationStats{}, err
 	}
+	for i, sharedMeta := range loadResult.sharedMeta {
+		d.checkVirtualBounds(sharedMeta, &IterOptions{level: manifest.Level(loadResult.sharedLevels[i])})
+	}
 	// Make the new tables durable. We need to do this at some point before we
 	// update the MANIFEST (via logAndApply), otherwise a crash can have the
 	// tables referenced in the MANIFEST, but not present in the provider.
@@ -1490,6 +1493,8 @@ func (d *DB) excise(
 		if err := leftFile.Validate(d.cmp, d.opts.Comparer.FormatKey); err != nil {
 			return nil, err
 		}
+		leftFile.ValidateVirtual(m)
+		d.checkVirtualBounds(leftFile, nil /* iterOptions */)
 		ve.NewFiles = append(ve.NewFiles, newFileEntry{Level: level, Meta: leftFile})
 		ve.CreatedBackingTables = append(ve.CreatedBackingTables, leftFile.FileBacking)
 		backingTableCreated = true
@@ -1597,6 +1602,8 @@ func (d *DB) excise(
 			// for it here.
 			rightFile.Size = 1
 		}
+		rightFile.ValidateVirtual(m)
+		d.checkVirtualBounds(rightFile, nil /* iterOptions */)
 		ve.NewFiles = append(ve.NewFiles, newFileEntry{Level: level, Meta: rightFile})
 		if !backingTableCreated {
 			ve.CreatedBackingTables = append(ve.CreatedBackingTables, rightFile.FileBacking)
diff --git a/table_cache_test.go b/table_cache_test.go
index 5efca158e60..c3226005ea8 100644
--- a/table_cache_test.go
+++ b/table_cache_test.go
@@ -324,7 +324,9 @@ func TestVirtualReadsWiring(t *testing.T) {
 	v2.SmallestPointKey = v2.Smallest
 
 	v1.ValidateVirtual(parentFile)
+	d.checkVirtualBounds(v1, nil /* iterOptions */)
 	v2.ValidateVirtual(parentFile)
+	d.checkVirtualBounds(v2, nil /* iterOptions */)
 
 	// Write the version edit.
 	fileMetrics := func(ve *versionEdit) map[int]*LevelMetrics {
diff --git a/version_set_test.go b/version_set_test.go
index 3bf2b929dfe..45b8eb834c0 100644
--- a/version_set_test.go
+++ b/version_set_test.go
@@ -100,7 +100,9 @@ func TestLatestRefCounting(t *testing.T) {
 	m2.SmallestPointKey = m2.Smallest
 
 	m1.ValidateVirtual(f)
+	d.checkVirtualBounds(m1, nil /* iterOptions */)
 	m2.ValidateVirtual(f)
+	d.checkVirtualBounds(m2, nil /* iterOptions */)
 
 	fileMetrics := func(ve *versionEdit) map[int]*LevelMetrics {
 		metrics := newFileMetrics(ve.NewFiles)
@@ -282,7 +284,9 @@ func TestVirtualSSTableManifestReplay(t *testing.T) {
 	m2.Stats.NumEntries = 1
 
 	m1.ValidateVirtual(f)
+	d.checkVirtualBounds(m1, nil /* iterOptions */)
 	m2.ValidateVirtual(f)
+	d.checkVirtualBounds(m2, nil /* iterOptions */)
 
 	fileMetrics := func(ve *versionEdit) map[int]*LevelMetrics {
 		metrics := newFileMetrics(ve.NewFiles)