From 12aec72005fd69bd8f18813c19764d6107e5b813 Mon Sep 17 00:00:00 2001
From: Radu Berinde
Date: Mon, 13 Feb 2023 07:28:21 -0800
Subject: [PATCH] storage: remove pre-22.2 code

This change removes any code that handles pre-22.2 versions. Features
that are in 22.2 (like range keys) are now always enabled. Any new
store always uses at least the 22.2 pebble format version.

Release note: None
Epic: none
Fixes: #96764
---
 pkg/ccl/cliccl/ear_test.go                    |  27 ++-
 .../storageccl/engineccl/encrypted_fs_test.go |   5 +-
 .../kvstorage/cluster_version_test.go         |  42 +++--
 pkg/kv/kvserver/spanset/batch.go              |   5 -
 pkg/sql/gcjob_test/BUILD.bazel                |   7 -
 pkg/sql/gcjob_test/gc_job_test.go             | 154 ----------------
 pkg/storage/engine.go                         |   5 -
 pkg/storage/engine_test.go                    | 164 ------------------
 pkg/storage/intent_interleaving_iter.go       |   3 +-
 pkg/storage/min_version_test.go               |  31 ++--
 pkg/storage/mvcc.go                           |   7 +-
 pkg/storage/pebble.go                         |  86 ++-------
 pkg/storage/pebble_batch.go                   |  24 +--
 pkg/storage/pebble_iterator.go                |  49 +-----
 pkg/storage/pebble_iterator_test.go           |   4 +-
 pkg/storage/pebble_test.go                    |  16 +-
 pkg/storage/sst_writer.go                     |  11 +-
 pkg/storage/sst_writer_test.go                |  62 ++-----
 pkg/storage/temp_engine.go                    |   1 +
 .../upgrademanager/manager_external_test.go   |  21 +--
 .../upgrades/schema_changes_external_test.go  |   6 +-
 21 files changed, 127 insertions(+), 603 deletions(-)

diff --git a/pkg/ccl/cliccl/ear_test.go b/pkg/ccl/cliccl/ear_test.go
index 43f9e534d396..b29a06cc1ac5 100644
--- a/pkg/ccl/cliccl/ear_test.go
+++ b/pkg/ccl/cliccl/ear_test.go
@@ -156,13 +156,13 @@ func TestList(t *testing.T) {
 000004.log:
   env type: Data, AES128_CTR
   keyID: bbb65a9d114c2a18740f27b6933b74f61018bd5adf545c153b48ffe6473336ef
-  nonce: 80 18 c0 79 61 c7 cf ef b4 25 4e 78
-  counter: 1483615076
+  nonce: 31 d3 cd 5a 69 e2 13 64 21 53 57 64
+  counter: 3952287331
 000005.sst:
   env type: Data, AES128_CTR
   keyID: bbb65a9d114c2a18740f27b6933b74f61018bd5adf545c153b48ffe6473336ef
-  nonce: 71 12 f7 22 9a fb 90 24 4e 58 27 01
-  counter: 3082989236
+  nonce: 23 d9 b2 e1 39 b0 87 ed f9 6d 49 20
+  counter: 3481614039
 COCKROACHDB_DATA_KEYS_000001_monolith:
   env type: Store, AES128_CTR
   keyID: f594229216d81add7811c4360212eb7629b578ef4eab6e5d05679b3c5de48867
@@ -171,8 +171,8 @@ COCKROACHDB_DATA_KEYS_000001_monolith:
 CURRENT:
   env type: Data, AES128_CTR
   keyID: bbb65a9d114c2a18740f27b6933b74f61018bd5adf545c153b48ffe6473336ef
-  nonce: 18 c2 a6 23 cc 6e 2e 7c 8e bf 84 77
-  counter: 3159373900
+  nonce: 71 12 f7 22 9a fb 90 24 4e 58 27 01
+  counter: 3082989236
 MANIFEST-000001:
   env type: Data, AES128_CTR
   keyID: bbb65a9d114c2a18740f27b6933b74f61018bd5adf545c153b48ffe6473336ef
@@ -181,14 +181,25 @@ MANIFEST-000001:
 OPTIONS-000003:
   env type: Data, AES128_CTR
   keyID: bbb65a9d114c2a18740f27b6933b74f61018bd5adf545c153b48ffe6473336ef
-  nonce: d3 97 11 b3 1a ed 22 2b 74 fb 02 0c
-  counter: 1229228536
+  nonce: c3 6d b2 7b 3f 3e 67 b9 28 b9 81 b1
+  counter: 3050109376
 marker.datakeys.000001.COCKROACHDB_DATA_KEYS_000001_monolith:
   env type: Store, AES128_CTR
   keyID: f594229216d81add7811c4360212eb7629b578ef4eab6e5d05679b3c5de48867
   nonce: 55 d7 d4 27 6c 97 9b dd f1 5d 40 c8
   counter: 467030050
+marker.format-version.000009.010:
+  env type: Data, AES128_CTR
+  keyID: bbb65a9d114c2a18740f27b6933b74f61018bd5adf545c153b48ffe6473336ef
+  nonce: 6e 34 f4 3c 11 43 1a f5 69 ce 33 f1
+  counter: 2398097086
+marker.manifest.000001.MANIFEST-000001:
+  env type: Data, AES128_CTR
+  keyID: bbb65a9d114c2a18740f27b6933b74f61018bd5adf545c153b48ffe6473336ef
+  nonce: d3 97 11 b3 1a ed 22 2b 74 fb 02 0c
+  counter: 1229228536
 `
+ require.Equal(t, want, b.String()) } diff --git a/pkg/ccl/storageccl/engineccl/encrypted_fs_test.go b/pkg/ccl/storageccl/engineccl/encrypted_fs_test.go index 74a00395eca4..a207b1e1d921 100644 --- a/pkg/ccl/storageccl/engineccl/encrypted_fs_test.go +++ b/pkg/ccl/storageccl/engineccl/encrypted_fs_test.go @@ -268,9 +268,10 @@ func TestPebbleEncryption(t *testing.T) { stats, err := db.GetEnvStats() require.NoError(t, err) // Opening the DB should've created OPTIONS, CURRENT, MANIFEST and the - // WAL, all under the active key. + // WAL. require.Equal(t, uint64(4), stats.TotalFiles) - require.Equal(t, uint64(4), stats.ActiveKeyFiles) + // We also created markers for the format version and the manifest. + require.Equal(t, uint64(6), stats.ActiveKeyFiles) var s enginepbccl.EncryptionStatus require.NoError(t, protoutil.Unmarshal(stats.EncryptionStatus, &s)) require.Equal(t, "16.key", s.ActiveStoreKey.Source) diff --git a/pkg/kv/kvserver/kvstorage/cluster_version_test.go b/pkg/kv/kvserver/kvstorage/cluster_version_test.go index c800907915d5..8340c98e5d34 100644 --- a/pkg/kv/kvserver/kvstorage/cluster_version_test.go +++ b/pkg/kv/kvserver/kvstorage/cluster_version_test.go @@ -31,8 +31,14 @@ func TestStoresClusterVersionIncompatible(t *testing.T) { ctx := context.Background() - vOneDashOne := roachpb.Version{Major: 1, Internal: 1} - vOne := roachpb.Version{Major: 1} + current := clusterversion.ByKey(clusterversion.BinaryVersionKey) + minSupported := clusterversion.ByKey(clusterversion.BinaryMinSupportedVersionKey) + + future := current + future.Major++ + + tooOld := minSupported + tooOld.Major-- type testCase struct { binV, minV roachpb.Version // binary version and min supported version @@ -42,28 +48,21 @@ func TestStoresClusterVersionIncompatible(t *testing.T) { for name, tc := range map[string]testCase{ "StoreTooNew": { // This is what the node is running. - binV: vOneDashOne, + binV: current, // This is what the running node requires from its stores. - minV: vOne, + minV: minSupported, // Version is way too high for this node. - engV: roachpb.Version{Major: 9}, - expErr: `cockroach version v1\.0-1 is incompatible with data in store =; use version v9\.0 or later`, + engV: future, + expErr: `cockroach version .* is incompatible with data in store =; use version .* or later`, }, "StoreTooOldVersion": { // This is what the node is running. - binV: roachpb.Version{Major: 9}, + binV: current, // This is what the running node requires from its stores. - minV: roachpb.Version{Major: 5}, + minV: minSupported, // Version is way too low. - engV: roachpb.Version{Major: 4}, - expErr: `store =, last used with cockroach version v4\.0, is too old for running version v9\.0 \(which requires data from v5\.0 or later\)`, - }, - "StoreTooOldMinVersion": { - // Like the previous test case, but this time cv.MinimumVersion is the culprit. - binV: roachpb.Version{Major: 9}, - minV: roachpb.Version{Major: 5}, - engV: roachpb.Version{Major: 4}, - expErr: `store =, last used with cockroach version v4\.0, is too old for running version v9\.0 \(which requires data from v5\.0 or later\)`, + engV: tooOld, + expErr: `store =, last used with cockroach version .*, is too old for running version .* \(which requires data from .* or later\)`, }, } { t.Run(name, func(t *testing.T) { @@ -71,12 +70,11 @@ func TestStoresClusterVersionIncompatible(t *testing.T) { defer engs[0].Close() // Configure versions and write. 
cv := clusterversion.ClusterVersion{Version: tc.engV} - if err := WriteClusterVersionToEngines(ctx, engs, cv); err != nil { - t.Fatal(err) + err := WriteClusterVersionToEngines(ctx, engs, cv) + if err == nil { + cv, err = SynthesizeClusterVersionFromEngines(ctx, engs, tc.binV, tc.minV) } - if cv, err := SynthesizeClusterVersionFromEngines( - ctx, engs, tc.binV, tc.minV, - ); !testutils.IsError(err, tc.expErr) { + if !testutils.IsError(err, tc.expErr) { t.Fatalf("unexpected error: %+v, got version %v", err, cv) } }) diff --git a/pkg/kv/kvserver/spanset/batch.go b/pkg/kv/kvserver/spanset/batch.go index 2198e2cf8371..81baf7ecd9f2 100644 --- a/pkg/kv/kvserver/spanset/batch.go +++ b/pkg/kv/kvserver/spanset/batch.go @@ -495,11 +495,6 @@ func (s spanSetReader) ConsistentIterators() bool { return s.r.ConsistentIterators() } -// SupportsRangeKeys implements the storage.Reader interface. -func (s spanSetReader) SupportsRangeKeys() bool { - return s.r.SupportsRangeKeys() -} - // PinEngineStateForIterators implements the storage.Reader interface. func (s spanSetReader) PinEngineStateForIterators() error { return s.r.PinEngineStateForIterators() diff --git a/pkg/sql/gcjob_test/BUILD.bazel b/pkg/sql/gcjob_test/BUILD.bazel index 40e297c9d2d3..af9e229d561a 100644 --- a/pkg/sql/gcjob_test/BUILD.bazel +++ b/pkg/sql/gcjob_test/BUILD.bazel @@ -11,9 +11,6 @@ go_test( args = ["-test.timeout=295s"], deps = [ "//pkg/base", - "//pkg/clusterversion", - "//pkg/config", - "//pkg/config/zonepb", "//pkg/jobs", "//pkg/jobs/jobspb", "//pkg/keys", @@ -33,7 +30,6 @@ go_test( "//pkg/sql/catalog/descs", "//pkg/sql/catalog/tabledesc", "//pkg/sql/gcjob", - "//pkg/sql/gcjob/gcjobnotifier", "//pkg/sql/isql", "//pkg/sql/sem/catid", "//pkg/storage", @@ -46,10 +42,7 @@ go_test( "//pkg/util/leaktest", "//pkg/util/log", "//pkg/util/randutil", - "//pkg/util/stop", - "//pkg/util/syncutil", "//pkg/util/timeutil", - "@com_github_cockroachdb_errors//:errors", "@com_github_stretchr_testify//require", ], ) diff --git a/pkg/sql/gcjob_test/gc_job_test.go b/pkg/sql/gcjob_test/gc_job_test.go index 8ce019a52492..71abf4780b36 100644 --- a/pkg/sql/gcjob_test/gc_job_test.go +++ b/pkg/sql/gcjob_test/gc_job_test.go @@ -19,9 +19,6 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/clusterversion" - "github.com/cockroachdb/cockroach/pkg/config" - "github.com/cockroachdb/cockroach/pkg/config/zonepb" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/keys" @@ -30,7 +27,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/multitenant/mtinfopb" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security/username" - "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/catalog/bootstrap" @@ -39,7 +35,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc" "github.com/cockroachdb/cockroach/pkg/sql/gcjob" - "github.com/cockroachdb/cockroach/pkg/sql/gcjob/gcjobnotifier" "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/catid" "github.com/cockroachdb/cockroach/pkg/storage" @@ -51,10 +46,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" - 
"github.com/cockroachdb/cockroach/pkg/util/stop" - "github.com/cockroachdb/cockroach/pkg/util/syncutil" "github.com/cockroachdb/cockroach/pkg/util/timeutil" - "github.com/cockroachdb/errors" "github.com/stretchr/testify/require" ) @@ -663,149 +655,3 @@ SELECT descriptor_id, index_id require.NoError(t, jr.WaitForJobs(ctx, []jobspb.JobID{jobID})) }) } - -// TestGCJobNoSystemConfig tests that the GC job is robust to running with -// no system config provided by the SystemConfigProvider. It is a regression -// test for a panic which could occur due to a slow systemconfigwatcher -// initialization. -// -// TODO(ajwerner): Remove this test in 23.1. -func TestGCJobNoSystemConfig(t *testing.T) { - defer leaktest.AfterTest(t)() - - provider := fakeSystemConfigProvider{} - var ( - v0 = clusterversion.ByKey(clusterversion.TODODelete_V22_2UseDelRangeInGCJob - 1) - v1 = clusterversion.ByKey(clusterversion.TODODelete_V22_2UseDelRangeInGCJob) - ) - settings := cluster.MakeTestingClusterSettingsWithVersions(v1, v0, false /* initializeVersion */) - ctx := context.Background() - require.NoError(t, clusterversion.Initialize(ctx, v0, &settings.SV)) - stopper := stop.NewStopper() - gcKnobs := &sql.GCJobTestingKnobs{} - s, sqlDB, kvDB := serverutils.StartServer(t, base.TestServerArgs{ - Settings: settings, - Stopper: stopper, - Knobs: base.TestingKnobs{ - GCJob: gcKnobs, - Server: &server.TestingKnobs{ - DisableAutomaticVersionUpgrade: make(chan struct{}), - BinaryVersionOverride: v0, - }, - }, - }) - defer stopper.Stop(ctx) - codec := s.ExecutorConfig().(sql.ExecutorConfig).Codec - n := gcjobnotifier.New(settings, &provider, codec, stopper) - n.Start(ctx) - gcKnobs.Notifier = n - - tdb := sqlutils.MakeSQLRunner(sqlDB) - tdb.Exec(t, "CREATE TABLE foo (i INT PRIMARY KEY)") - tdb.Exec(t, "SET CLUSTER SETTING kv.protectedts.poll_interval = '10ms'") - var id uint32 - tdb.QueryRow(t, "SELECT 'foo'::regclass::int").Scan(&id) - tdb.Exec(t, "DROP TABLE foo") - // We want to make sure there's a notifyee and that the job attempted - // to read the status twice. We expect it once for the notifier and - // once for the job itself. - testutils.SucceedsSoon(t, func() error { - if n := provider.numNotifyees(); n == 0 { - return errors.Errorf("expected 1 notifyee, got %d", n) - } - if n := provider.numCalls(); n < 2 { - return errors.Errorf("expected at least 2 calls, got %d", n) - } - return nil - }) - cfgProto := &zonepb.ZoneConfig{ - GC: &zonepb.GCPolicy{TTLSeconds: 0}, - } - cfg := config.NewSystemConfig(cfgProto) - descKV, err := kvDB.Get(ctx, codec.DescMetadataKey(id)) - require.NoError(t, err) - var zoneKV roachpb.KeyValue - zoneKV.Key = config.MakeZoneKey(codec, descpb.ID(id)) - require.NoError(t, zoneKV.Value.SetProto(cfgProto)) - defaultKV := zoneKV - defaultKV.Key = config.MakeZoneKey(codec, 0) - // We need to put in an entry for the descriptor both so that the notifier - // fires and so that we don't think the descriptor is missing. We also - // need a zone config KV to make the delta filter happy. 
- cfg.Values = []roachpb.KeyValue{ - {Key: descKV.Key, Value: *descKV.Value}, - defaultKV, - zoneKV, - } - - provider.setConfig(cfg) - tdb.CheckQueryResultsRetry(t, ` -SELECT status - FROM crdb_internal.jobs - WHERE description = 'GC for DROP TABLE defaultdb.public.foo'`, - [][]string{{"succeeded"}}) -} - -type fakeSystemConfigProvider struct { - mu struct { - syncutil.Mutex - - calls int - n int - config *config.SystemConfig - notifyees map[int]chan struct{} - } -} - -func (f *fakeSystemConfigProvider) GetSystemConfig() *config.SystemConfig { - f.mu.Lock() - defer f.mu.Unlock() - f.mu.calls++ - return f.mu.config -} - -func (f *fakeSystemConfigProvider) RegisterSystemConfigChannel() ( - _ <-chan struct{}, - unregister func(), -) { - f.mu.Lock() - defer f.mu.Unlock() - ch := make(chan struct{}, 1) - n := f.mu.n - f.mu.n++ - if f.mu.notifyees == nil { - f.mu.notifyees = map[int]chan struct{}{} - } - f.mu.notifyees[n] = ch - return ch, func() { - f.mu.Lock() - defer f.mu.Unlock() - delete(f.mu.notifyees, n) - } -} - -func (f *fakeSystemConfigProvider) setConfig(c *config.SystemConfig) { - f.mu.Lock() - defer f.mu.Unlock() - f.mu.config = c - for _, ch := range f.mu.notifyees { - select { - case ch <- struct{}{}: - default: - } - } -} - -func (f *fakeSystemConfigProvider) numNotifyees() int { - f.mu.Lock() - defer f.mu.Unlock() - return len(f.mu.notifyees) -} - -func (f *fakeSystemConfigProvider) numCalls() int { - f.mu.Lock() - defer f.mu.Unlock() - return f.mu.calls -} - -var _ config.SystemConfigProvider = (*fakeSystemConfigProvider)(nil) diff --git a/pkg/storage/engine.go b/pkg/storage/engine.go index 315a832932d5..23f678e19f0b 100644 --- a/pkg/storage/engine.go +++ b/pkg/storage/engine.go @@ -591,11 +591,6 @@ type Reader interface { // underlying Engine state. This is not true about Batch writes: new iterators // will see new writes made to the batch, existing iterators won't. ConsistentIterators() bool - // SupportsRangeKeys returns true if the Reader implementation supports - // range keys. - // - // TODO(erikgrinaker): Remove this after 22.2. - SupportsRangeKeys() bool // PinEngineStateForIterators ensures that the state seen by iterators // without timestamp hints (see IterOptions) is pinned and will not see diff --git a/pkg/storage/engine_test.go b/pkg/storage/engine_test.go index f397ef5f7d31..1b418a58067e 100644 --- a/pkg/storage/engine_test.go +++ b/pkg/storage/engine_test.go @@ -26,7 +26,6 @@ import ( "testing" "time" - "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" @@ -1911,8 +1910,6 @@ func TestEngineRangeKeyMutations(t *testing.T) { defer rw.Close() } - require.True(t, rw.SupportsRangeKeys()) - // Check errors for invalid, empty, and zero-length range keys. Not // exhaustive, since we assume validation dispatches to // MVCCRangeKey.Validate() which is tested separately. @@ -2034,167 +2031,6 @@ func TestEngineRangeKeyMutations(t *testing.T) { }) } -// TestEngineRangeKeysUnsupported tests that engines without range key -// support behave as expected, i.e. writes fail but reads degrade gracefully. -func TestEngineRangeKeysUnsupported(t *testing.T) { - defer leaktest.AfterTest(t)() - defer log.Scope(t).Close(t) - - // Set up an engine with a version that doesn't support range keys. 
- version := clusterversion.ByKey(clusterversion.TODODelete_V22_2EnsurePebbleFormatVersionRangeKeys - 1) - st := cluster.MakeTestingClusterSettingsWithVersions(version, version, true) - - eng, err := Open(context.Background(), InMemory(), st, MaxSize(1<<20)) - if err != nil { - panic(err) - } - defer eng.Close() - - require.NoError(t, eng.PutMVCC(pointKey("a", 1), stringValue("a1"))) - - batch := eng.NewBatch() - defer batch.Close() - snapshot := eng.NewSnapshot() - defer snapshot.Close() - readOnly := eng.NewReadOnly(StandardDurability) - defer readOnly.Close() - - writers := map[string]Writer{ - "engine": eng, - "batch": batch, - } - readers := map[string]Reader{ - "engine": eng, - "batch": batch, - "snapshot": snapshot, - "readonly": readOnly, - } - - // Range key puts should error, but clears are noops (since old databases - // cannot contain range keys by definition). - for name, w := range writers { - t.Run(fmt.Sprintf("write/%s", name), func(t *testing.T) { - rangeKey := rangeKey("a", "b", 2) - - err := w.PutMVCCRangeKey(rangeKey, MVCCValue{}) - require.Error(t, err) - require.Contains(t, err.Error(), "range keys not supported") - - err = w.PutRawMVCCRangeKey(rangeKey, []byte{}) - require.Error(t, err) - require.Contains(t, err.Error(), "range keys not supported") - - err = w.PutEngineRangeKey(rangeKey.StartKey, rangeKey.EndKey, nil, nil) - require.Error(t, err) - require.Contains(t, err.Error(), "range keys not supported") - - require.NoError(t, w.ClearMVCCRangeKey(rangeKey)) - require.NoError(t, w.ClearEngineRangeKey( - rangeKey.StartKey, rangeKey.EndKey, EncodeMVCCTimestampSuffix(rangeKey.Timestamp))) - require.NoError(t, w.ClearRawRange( - rangeKey.StartKey, rangeKey.EndKey, false /* pointKeys */, true /* rangeKeys */)) - }) - } - - // All range key iterators should degrade gracefully to point key iterators, - // and be empty for IterKeyTypeRangesOnly. - keyTypes := map[string]IterKeyType{ - "PointsOnly": IterKeyTypePointsOnly, - "PointsAndRanges": IterKeyTypePointsAndRanges, - "RangesOnly": IterKeyTypeRangesOnly, - } - for name, r := range readers { - for keyTypeName, keyType := range keyTypes { - t.Run(fmt.Sprintf("read/%s/%s", name, keyTypeName), func(t *testing.T) { - require.False(t, r.SupportsRangeKeys()) - - t.Run("MVCCIterator", func(t *testing.T) { - iter := r.NewMVCCIterator(MVCCKeyAndIntentsIterKind, IterOptions{ - KeyTypes: keyType, - UpperBound: keys.MaxKey, - RangeKeyMaskingBelow: hlc.Timestamp{WallTime: 1}, // should get disabled when unsupported - }) - defer iter.Close() - - iter.SeekGE(pointKey("a", 0)) - - ok, err := iter.Valid() - require.NoError(t, err) - - if keyType == IterKeyTypeRangesOnly { - // With RangesOnly, the iterator must be empty. - require.False(t, ok) - hasPoint, hasRange := iter.HasPointAndRange() - require.False(t, hasPoint) - require.False(t, hasRange) - return - } - - require.True(t, ok) - require.Equal(t, pointKey("a", 1), iter.UnsafeKey()) - v, err := iter.UnsafeValue() - require.NoError(t, err) - require.Equal(t, stringValueRaw("a1"), v) - - hasPoint, hasRange := iter.HasPointAndRange() - require.True(t, hasPoint) - require.False(t, hasRange) - require.Empty(t, iter.RangeBounds()) - require.Empty(t, iter.RangeKeys()) - - // Exhaust the iterator. 
- iter.Next() - ok, err = iter.Valid() - require.NoError(t, err) - require.False(t, ok) - }) - - t.Run("EngineIterator", func(t *testing.T) { - iter := r.NewEngineIterator(IterOptions{ - KeyTypes: keyType, - UpperBound: keys.MaxKey, - RangeKeyMaskingBelow: hlc.Timestamp{WallTime: 1}, // should get disabled when unsupported - }) - defer iter.Close() - - ok, err := iter.SeekEngineKeyGE(engineKey("a", 0)) - require.NoError(t, err) - - if keyType == IterKeyTypeRangesOnly { - // With RangesOnly, the iterator must be empty. - require.False(t, ok) - hasPoint, hasRange := iter.HasPointAndRange() - require.False(t, hasPoint) - require.False(t, hasRange) - return - } - - require.True(t, ok) - key, err := iter.UnsafeEngineKey() - require.NoError(t, err) - require.Equal(t, engineKey("a", 1), key) - v, err := iter.UnsafeValue() - require.NoError(t, err) - require.Equal(t, stringValueRaw("a1"), v) - - hasPoint, hasRange := iter.HasPointAndRange() - require.True(t, hasPoint) - require.False(t, hasRange) - rangeBounds, err := iter.EngineRangeBounds() - require.NoError(t, err) - require.Empty(t, rangeBounds) - require.Empty(t, iter.EngineRangeKeys()) - - // Exhaust the iterator. - ok, err = iter.NextEngineKey() - require.NoError(t, err) - require.False(t, ok) - }) - }) - } - } -} - // TODO(erikgrinaker): The below test helpers should be moved to // testutils/storageutils instead, but that requires storage tests to be in the // storage_test package to avoid import cycles. diff --git a/pkg/storage/intent_interleaving_iter.go b/pkg/storage/intent_interleaving_iter.go index 2cf67c686e1c..80be79c9c1ab 100644 --- a/pkg/storage/intent_interleaving_iter.go +++ b/pkg/storage/intent_interleaving_iter.go @@ -267,8 +267,7 @@ func newIntentInterleavingIterator(reader Reader, opts IterOptions) MVCCIterator if reader.ConsistentIterators() { iter = maybeUnwrapUnsafeIter(reader.NewMVCCIterator(MVCCKeyIterKind, opts)).(*pebbleIterator) } else { - iter = newPebbleIteratorByCloning( - intentIter.GetRawIter(), opts, StandardDurability, reader.SupportsRangeKeys()) + iter = newPebbleIteratorByCloning(intentIter.GetRawIter(), opts, StandardDurability) } *iiIter = intentInterleavingIter{ diff --git a/pkg/storage/min_version_test.go b/pkg/storage/min_version_test.go index 8ae595cb317e..2bd12c8bb53a 100644 --- a/pkg/storage/min_version_test.go +++ b/pkg/storage/min_version_test.go @@ -96,16 +96,18 @@ func TestSetMinVersion(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) + st := cluster.MakeClusterSettings() p, err := Open(context.Background(), InMemory(), cluster.MakeClusterSettings(), CacheSize(0)) require.NoError(t, err) defer p.Close() - require.Equal(t, pebble.FormatMostCompatible, p.db.FormatMajorVersion()) + require.Equal(t, pebble.FormatPrePebblev1Marked, p.db.FormatMajorVersion()) - // Advancing the store cluster version to V22_2 should also advance the - // store's format major version. - err = p.SetMinVersion(clusterversion.ByKey(clusterversion.V22_2)) + ValueBlocksEnabled.Override(context.Background(), &st.SV, true) + // Advancing the store cluster version to one that supports value blocks + // should also advance the store's format major version. 
+ err = p.SetMinVersion(clusterversion.ByKey(clusterversion.V23_1EnablePebbleFormatSSTableValueBlocks)) require.NoError(t, err) - require.Equal(t, pebble.FormatPrePebblev1Marked, p.db.FormatMajorVersion()) + require.Equal(t, pebble.FormatSSTableValueBlocks, p.db.FormatMajorVersion()) } func TestMinVersion_IsNotEncrypted(t *testing.T) { @@ -119,34 +121,23 @@ func TestMinVersion_IsNotEncrypted(t *testing.T) { defer func() { NewEncryptedEnvFunc = oldNewEncryptedEnvFunc }() NewEncryptedEnvFunc = fauxNewEncryptedEnvFunc + st := cluster.MakeClusterSettings() fs := vfs.NewMem() p, err := Open( context.Background(), Location{dir: "", fs: fs}, - cluster.MakeClusterSettings(), + st, EncryptionAtRest(nil)) require.NoError(t, err) defer p.Close() - - v1 := roachpb.Version{Major: 21, Minor: 1, Patch: 0, Internal: 122} - v2 := roachpb.Version{Major: 21, Minor: 1, Patch: 0, Internal: 126} - - ok, err := MinVersionIsAtLeastTargetVersion(p.unencryptedFS, p.path, v1) - require.NoError(t, err) - require.False(t, ok) - - require.NoError(t, p.SetMinVersion(v2)) - - ok, err = MinVersionIsAtLeastTargetVersion(p.unencryptedFS, p.path, v1) - require.NoError(t, err) - require.True(t, ok) + require.NoError(t, p.SetMinVersion(st.Version.BinaryVersion())) // Reading the file directly through the unencrypted MemFS should // succeed and yield the correct version. v, ok, err := getMinVersion(fs, "") require.NoError(t, err) require.True(t, ok) - require.Equal(t, v2, v) + require.Equal(t, st.Version.BinaryVersion(), v) } func fauxNewEncryptedEnvFunc( diff --git a/pkg/storage/mvcc.go b/pkg/storage/mvcc.go index 91e5c56c783d..df1df9ddbd38 100644 --- a/pkg/storage/mvcc.go +++ b/pkg/storage/mvcc.go @@ -22,7 +22,6 @@ import ( "sync" "time" - "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv/kvnemesis/kvnemesisutil" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/concurrency/lock" @@ -94,8 +93,7 @@ var MVCCRangeTombstonesEnabled = settings.RegisterBoolSetting( // It requires the MVCCRangeTombstones version gate to be active, and the // setting storage.mvcc.range_tombstones.enabled to be enabled. func CanUseMVCCRangeTombstones(ctx context.Context, st *cluster.Settings) bool { - return st.Version.IsActive(ctx, clusterversion.TODODelete_V22_2MVCCRangeTombstones) && - MVCCRangeTombstonesEnabled.Get(&st.SV) + return MVCCRangeTombstonesEnabled.Get(&st.SV) } // MaxIntentsPerWriteIntentError sets maximum number of intents returned in @@ -4954,8 +4952,7 @@ func MVCCResolveWriteIntentRange( mvccIter = rw.NewMVCCIterator(MVCCKeyIterKind, iterOpts) } else { // For correctness, we need mvccIter to be consistent with engineIter. - mvccIter = newPebbleIteratorByCloning( - engineIter.GetRawIter(), iterOpts, StandardDurability, rw.SupportsRangeKeys()) + mvccIter = newPebbleIteratorByCloning(engineIter.GetRawIter(), iterOpts, StandardDurability) } iterAndBuf := GetBufUsingIter(mvccIter) defer func() { diff --git a/pkg/storage/pebble.go b/pkg/storage/pebble.go index 199c1aec7691..ca6ede5f0866 100644 --- a/pkg/storage/pebble.go +++ b/pkg/storage/pebble.go @@ -553,6 +553,8 @@ func DefaultPebbleOptions() *pebble.Options { MemTableStopWritesThreshold: 4, Merger: MVCCMerger, BlockPropertyCollectors: PebbleBlockPropertyCollectors, + // Minimum supported format. + FormatMajorVersion: pebble.FormatPrePebblev1Marked, } // Automatically flush 10s after the first range tombstone is added to a // memtable. 
This ensures that we can reclaim space even when there's no @@ -741,12 +743,6 @@ type Pebble struct { } asyncDone sync.WaitGroup - // supportsRangeKeys is 1 if the database supports range keys. It must - // be accessed atomically. - // - // TODO(erikgrinaker): Remove this after 22.2 when all databases support it. - supportsRangeKeys int32 - // closer is populated when the database is opened. The closer is associated // with the filesyetem closer io.Closer @@ -1093,9 +1089,6 @@ func NewPebble(ctx context.Context, cfg PebbleConfig) (p *Pebble, err error) { return nil, err } } - if p.db.FormatMajorVersion() >= pebble.FormatRangeKeys { - atomic.StoreInt32(&p.supportsRangeKeys, 1) - } return p, nil } @@ -1262,13 +1255,13 @@ func (p *Pebble) NewMVCCIterator(iterKind MVCCIterKind, opts IterOptions) MVCCIt return maybeWrapInUnsafeIter(iter) } - iter := newPebbleIterator(p.db, opts, StandardDurability, p.SupportsRangeKeys()) + iter := newPebbleIterator(p.db, opts, StandardDurability) return maybeWrapInUnsafeIter(iter) } // NewEngineIterator implements the Engine interface. func (p *Pebble) NewEngineIterator(opts IterOptions) EngineIterator { - return newPebbleIterator(p.db, opts, StandardDurability, p.SupportsRangeKeys()) + return newPebbleIterator(p.db, opts, StandardDurability) } // ConsistentIterators implements the Engine interface. @@ -1276,11 +1269,6 @@ func (p *Pebble) ConsistentIterators() bool { return false } -// SupportsRangeKeys implements the Engine interface. -func (p *Pebble) SupportsRangeKeys() bool { - return atomic.LoadInt32(&p.supportsRangeKeys) == 1 -} - // PinEngineStateForIterators implements the Engine interface. func (p *Pebble) PinEngineStateForIterators() error { return errors.AssertionFailedf( @@ -1355,7 +1343,7 @@ func (p *Pebble) ClearRawRange(start, end roachpb.Key, pointKeys, rangeKeys bool return err } } - if rangeKeys && p.SupportsRangeKeys() { + if rangeKeys { if err := p.db.RangeKeyDelete(startRaw, endRaw, pebble.Sync); err != nil { return err } @@ -1475,20 +1463,12 @@ func (p *Pebble) put(key MVCCKey, value []byte) error { // PutEngineRangeKey implements the Engine interface. func (p *Pebble) PutEngineRangeKey(start, end roachpb.Key, suffix, value []byte) error { - if !p.SupportsRangeKeys() { - return errors.Errorf("range keys not supported by Pebble database version %s", - p.db.FormatMajorVersion()) - } return p.db.RangeKeySet( EngineKey{Key: start}.Encode(), EngineKey{Key: end}.Encode(), suffix, value, pebble.Sync) } // ClearEngineRangeKey implements the Engine interface. func (p *Pebble) ClearEngineRangeKey(start, end roachpb.Key, suffix []byte) error { - if !p.SupportsRangeKeys() { - // These databases cannot contain range keys, so clearing is a noop. - return nil - } return p.db.RangeKeyUnset( EngineKey{Key: start}.Encode(), EngineKey{Key: end}.Encode(), suffix, pebble.Sync) } @@ -1522,17 +1502,7 @@ var LocalTimestampsEnabled = settings.RegisterBoolSetting( ) func shouldWriteLocalTimestamps(ctx context.Context, settings *cluster.Settings) bool { - if !LocalTimestampsEnabled.Get(&settings.SV) { - // Not enabled. - return false - } - ver := settings.Version.ActiveVersionOrEmpty(ctx) - if ver == (clusterversion.ClusterVersion{}) { - // Some tests fail to configure settings. In these cases, assume that it - // is safe to write local timestamps. - return true - } - return ver.IsActive(clusterversion.TODODelete_V22_2LocalTimestamps) + return LocalTimestampsEnabled.Get(&settings.SV) } // ShouldWriteLocalTimestamps implements the Writer interface. 
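Since every supported on-disk format now includes range-key support, the guards around PutEngineRangeKey and ClearEngineRangeKey above simply go away. As a minimal illustrative sketch only (not part of the patch; it uses the pebble API directly with made-up keys rather than the storage package wrappers):

package main

import (
	"log"

	"github.com/cockroachdb/pebble"
	"github.com/cockroachdb/pebble/vfs"
)

func main() {
	// Open an in-memory store. FormatPrePebblev1Marked is the minimum format
	// this change assumes, and it already understands range keys.
	db, err := pebble.Open("", &pebble.Options{
		FS:                 vfs.NewMem(),
		FormatMajorVersion: pebble.FormatPrePebblev1Marked,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Set a range key over [a, z) and then unset it again; neither call needs
	// a format-version guard anymore.
	start, end := []byte("a"), []byte("z")
	if err := db.RangeKeySet(start, end, nil /* suffix */, nil /* value */, pebble.Sync); err != nil {
		log.Fatal(err)
	}
	if err := db.RangeKeyUnset(start, end, nil /* suffix */, pebble.Sync); err != nil {
		log.Fatal(err)
	}
}
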
@@ -1984,27 +1954,22 @@ func (p *Pebble) SetMinVersion(version roachpb.Version) error {
 	case !version.Less(clusterversion.ByKey(clusterversion.V23_1EnsurePebbleFormatSSTableValueBlocks)):
 		formatVers = pebble.FormatSSTableValueBlocks
 
-	case !version.Less(clusterversion.ByKey(clusterversion.TODODelete_V22_2PebbleFormatPrePebblev1Marked)):
+	case !version.Less(clusterversion.ByKey(clusterversion.V22_2)):
+		// This is the earliest supported format. The code assumes that the features
+		// provided by this format are always available.
 		formatVers = pebble.FormatPrePebblev1Marked
 
-	case !version.Less(clusterversion.ByKey(clusterversion.TODODelete_V22_2EnsurePebbleFormatVersionRangeKeys)):
-		formatVers = pebble.FormatRangeKeys
-
-	case !version.Less(clusterversion.ByKey(clusterversion.TODODelete_V22_2PebbleFormatSplitUserKeysMarkedCompacted)):
-		formatVers = pebble.FormatSplitUserKeysMarkedCompacted
-
 	default:
-		// Corresponds to TODODelete_V22_1.
-		formatVers = pebble.FormatSplitUserKeysMarked
+		// This should never happen in production. But we tolerate tests creating
+		// imaginary older versions; we must still use the earliest supported
+		// format.
+		formatVers = pebble.FormatPrePebblev1Marked
 	}
 	if p.db.FormatMajorVersion() < formatVers {
 		if err := p.db.RatchetFormatMajorVersion(formatVers); err != nil {
 			return errors.Wrap(err, "ratcheting format major version")
 		}
-		if formatVers >= pebble.FormatRangeKeys {
-			atomic.StoreInt32(&p.supportsRangeKeys, 1)
-		}
 	}
 	return nil
 }
 
@@ -2141,14 +2106,13 @@ func (p *pebbleReadOnly) NewMVCCIterator(iterKind MVCCIterKind, opts IterOptions
 		iter = &p.prefixIter
 	}
 	if iter.inuse {
-		return newPebbleIteratorByCloning(p.iter, opts, p.durability, p.SupportsRangeKeys())
+		return newPebbleIteratorByCloning(p.iter, opts, p.durability)
 	}
 
 	if iter.iter != nil {
 		iter.setOptions(opts, p.durability)
 	} else {
-		iter.initReuseOrCreate(
-			p.parent.db, p.iter, p.iterUsed, opts, p.durability, p.SupportsRangeKeys())
+		iter.initReuseOrCreate(p.parent.db, p.iter, p.iterUsed, opts, p.durability)
 		if p.iter == nil {
 			// For future cloning.
 			p.iter = iter.iter
@@ -2172,14 +2136,13 @@ func (p *pebbleReadOnly) NewEngineIterator(opts IterOptions) EngineIterator {
 		iter = &p.prefixEngineIter
 	}
 	if iter.inuse {
-		return newPebbleIteratorByCloning(p.iter, opts, p.durability, p.SupportsRangeKeys())
+		return newPebbleIteratorByCloning(p.iter, opts, p.durability)
 	}
 
 	if iter.iter != nil {
 		iter.setOptions(opts, p.durability)
 	} else {
-		iter.initReuseOrCreate(
-			p.parent.db, p.iter, p.iterUsed, opts, p.durability, p.SupportsRangeKeys())
+		iter.initReuseOrCreate(p.parent.db, p.iter, p.iterUsed, opts, p.durability)
 		if p.iter == nil {
 			// For future cloning.
 			p.iter = iter.iter
@@ -2197,11 +2160,6 @@ func (p *pebbleReadOnly) ConsistentIterators() bool {
 	return true
 }
 
-// SupportsRangeKeys implements the Engine interface.
-func (p *pebbleReadOnly) SupportsRangeKeys() bool {
-	return p.parent.SupportsRangeKeys()
-}
-
 // PinEngineStateForIterators implements the Engine interface.
 func (p *pebbleReadOnly) PinEngineStateForIterators() error {
 	if p.iter == nil {
@@ -2373,14 +2331,13 @@ func (p *pebbleSnapshot) NewMVCCIterator(iterKind MVCCIterKind, opts IterOptions
 		return maybeWrapInUnsafeIter(iter)
 	}
 
-	iter := MVCCIterator(newPebbleIterator(
-		p.snapshot, opts, StandardDurability, p.SupportsRangeKeys()))
+	iter := MVCCIterator(newPebbleIterator(p.snapshot, opts, StandardDurability))
 	return maybeWrapInUnsafeIter(iter)
 }
 
 // NewEngineIterator implements the Reader interface.
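The ratcheting pattern in SetMinVersion above generalizes: compare the store's current format major version against a target and only ever move it forward. A hedged, standalone sketch of that shape (the helper name and package are invented here; only the pebble calls are real):

package formatdemo

import "github.com/cockroachdb/pebble"

// ensureFormatAtLeast opens an existing pebble store and, if its format major
// version is behind target, ratchets it forward. Ratcheting is one-way and
// persistent; reopening with an older target never downgrades the store.
func ensureFormatAtLeast(dir string, target pebble.FormatMajorVersion) error {
	db, err := pebble.Open(dir, &pebble.Options{})
	if err != nil {
		return err
	}
	defer db.Close()

	if db.FormatMajorVersion() < target {
		return db.RatchetFormatMajorVersion(target)
	}
	return nil
}
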
func (p pebbleSnapshot) NewEngineIterator(opts IterOptions) EngineIterator { - return newPebbleIterator(p.snapshot, opts, StandardDurability, p.SupportsRangeKeys()) + return newPebbleIterator(p.snapshot, opts, StandardDurability) } // ConsistentIterators implements the Reader interface. @@ -2388,11 +2345,6 @@ func (p pebbleSnapshot) ConsistentIterators() bool { return true } -// SupportsRangeKeys implements the Reader interface. -func (p *pebbleSnapshot) SupportsRangeKeys() bool { - return p.parent.SupportsRangeKeys() -} - // PinEngineStateForIterators implements the Reader interface. func (p *pebbleSnapshot) PinEngineStateForIterators() error { // Snapshot already pins state, so nothing to do. diff --git a/pkg/storage/pebble_batch.go b/pkg/storage/pebble_batch.go index 8711e7a535ec..0bc9dc625263 100644 --- a/pkg/storage/pebble_batch.go +++ b/pkg/storage/pebble_batch.go @@ -177,14 +177,13 @@ func (p *pebbleBatch) NewMVCCIterator(iterKind MVCCIterKind, opts IterOptions) M handle = p.db } if iter.inuse { - return newPebbleIteratorByCloning(p.iter, opts, StandardDurability, p.SupportsRangeKeys()) + return newPebbleIteratorByCloning(p.iter, opts, StandardDurability) } if iter.iter != nil { iter.setOptions(opts, StandardDurability) } else { - iter.initReuseOrCreate( - handle, p.iter, p.iterUsed, opts, StandardDurability, p.SupportsRangeKeys()) + iter.initReuseOrCreate(handle, p.iter, p.iterUsed, opts, StandardDurability) if p.iter == nil { // For future cloning. p.iter = iter.iter @@ -211,14 +210,13 @@ func (p *pebbleBatch) NewEngineIterator(opts IterOptions) EngineIterator { handle = p.db } if iter.inuse { - return newPebbleIteratorByCloning(p.iter, opts, StandardDurability, p.SupportsRangeKeys()) + return newPebbleIteratorByCloning(p.iter, opts, StandardDurability) } if iter.iter != nil { iter.setOptions(opts, StandardDurability) } else { - iter.initReuseOrCreate( - handle, p.iter, p.iterUsed, opts, StandardDurability, p.SupportsRangeKeys()) + iter.initReuseOrCreate(handle, p.iter, p.iterUsed, opts, StandardDurability) if p.iter == nil { // For future cloning. p.iter = iter.iter @@ -235,11 +233,6 @@ func (p *pebbleBatch) ConsistentIterators() bool { return true } -// SupportsRangeKeys implements the Batch interface. -func (p *pebbleBatch) SupportsRangeKeys() bool { - return p.db.FormatMajorVersion() >= pebble.FormatRangeKeys -} - // PinEngineStateForIterators implements the Batch interface. func (p *pebbleBatch) PinEngineStateForIterators() error { if p.iter == nil { @@ -323,7 +316,7 @@ func (p *pebbleBatch) ClearRawRange(start, end roachpb.Key, pointKeys, rangeKeys return err } } - if rangeKeys && p.SupportsRangeKeys() { + if rangeKeys { if err := p.batch.RangeKeyDelete(p.buf, endRaw, pebble.Sync); err != nil { return err } @@ -450,19 +443,12 @@ func (p *pebbleBatch) PutRawMVCCRangeKey(rangeKey MVCCRangeKey, value []byte) er // PutEngineRangeKey implements the Engine interface. func (p *pebbleBatch) PutEngineRangeKey(start, end roachpb.Key, suffix, value []byte) error { - if !p.SupportsRangeKeys() { - return errors.Errorf("range keys not supported by Pebble database version %s", - p.db.FormatMajorVersion()) - } return p.batch.RangeKeySet( EngineKey{Key: start}.Encode(), EngineKey{Key: end}.Encode(), suffix, value, nil) } // ClearEngineRangeKey implements the Engine interface. 
func (p *pebbleBatch) ClearEngineRangeKey(start, end roachpb.Key, suffix []byte) error { - if !p.SupportsRangeKeys() { - return nil // noop - } return p.batch.RangeKeyUnset( EngineKey{Key: start}.Encode(), EngineKey{Key: end}.Encode(), suffix, nil) } diff --git a/pkg/storage/pebble_iterator.go b/pkg/storage/pebble_iterator.go index b045e8ca7d3e..39327291b104 100644 --- a/pkg/storage/pebble_iterator.go +++ b/pkg/storage/pebble_iterator.go @@ -19,7 +19,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/storage/pebbleiter" "github.com/cockroachdb/cockroach/pkg/util" - "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/protoutil" "github.com/cockroachdb/cockroach/pkg/util/uuid" "github.com/cockroachdb/errors" @@ -48,10 +47,6 @@ type pebbleIterator struct { // initialized the first time an iterator's RangeKeys() method is called. mvccRangeKeyVersions []MVCCRangeKeyVersion - // True if the iterator's underlying reader supports range keys. - // - // TODO(erikgrinaker): Remove after 22.2. - supportsRangeKeys bool // Set to true to govern whether to call SeekPrefixGE or SeekGE. Skips // SSTables based on MVCC/Engine key when true. prefix bool @@ -87,11 +82,11 @@ var pebbleIterPool = sync.Pool{ // newPebbleIterator creates a new Pebble iterator for the given Pebble reader. func newPebbleIterator( - handle pebble.Reader, opts IterOptions, durability DurabilityRequirement, supportsRangeKeys bool, + handle pebble.Reader, opts IterOptions, durability DurabilityRequirement, ) *pebbleIterator { p := pebbleIterPool.Get().(*pebbleIterator) p.reusable = false // defensive - p.init(nil, opts, durability, supportsRangeKeys) + p.init(nil, opts, durability) p.iter = pebbleiter.MaybeWrap(handle.NewIter(&p.options)) return p } @@ -99,15 +94,12 @@ func newPebbleIterator( // newPebbleIteratorByCloning creates a new Pebble iterator by cloning the given // iterator and reconfiguring it. func newPebbleIteratorByCloning( - iter pebbleiter.Iterator, - opts IterOptions, - durability DurabilityRequirement, - supportsRangeKeys bool, + iter pebbleiter.Iterator, opts IterOptions, durability DurabilityRequirement, ) *pebbleIterator { var err error p := pebbleIterPool.Get().(*pebbleIterator) p.reusable = false // defensive - p.init(nil, opts, durability, supportsRangeKeys) + p.init(nil, opts, durability) p.iter, err = iter.Clone(pebble.CloneOptions{ IterOptions: &p.options, RefreshBatchView: true, @@ -125,7 +117,7 @@ func newPebbleSSTIterator( ) (*pebbleIterator, error) { p := pebbleIterPool.Get().(*pebbleIterator) p.reusable = false // defensive - p.init(nil, opts, StandardDurability, true /* supportsRangeKeys */) + p.init(nil, opts, StandardDurability) var externalIterOpts []pebble.ExternalIterOption if forwardOnly { @@ -146,10 +138,7 @@ func newPebbleSSTIterator( // reconfiguring the given iter. It is valid to pass a nil iter and then create // p.iter using p.options, to avoid redundant reconfiguration via SetOptions(). 
func (p *pebbleIterator) init( - iter pebbleiter.Iterator, - opts IterOptions, - durability DurabilityRequirement, - supportsRangeKeys bool, + iter pebbleiter.Iterator, opts IterOptions, durability DurabilityRequirement, ) { *p = pebbleIterator{ iter: iter, @@ -158,7 +147,6 @@ func (p *pebbleIterator) init( upperBoundBuf: p.upperBoundBuf, rangeKeyMaskingBuf: p.rangeKeyMaskingBuf, reusable: p.reusable, - supportsRangeKeys: supportsRangeKeys, } p.setOptions(opts, durability) p.inuse = true // after setOptions(), so panic won't cause reader to panic too @@ -176,14 +164,13 @@ func (p *pebbleIterator) initReuseOrCreate( clone bool, opts IterOptions, durability DurabilityRequirement, - supportsRangeKeys bool, // TODO(erikgrinaker): remove after 22.2 ) { if iter != nil && !clone { - p.init(iter, opts, durability, supportsRangeKeys) + p.init(iter, opts, durability) return } - p.init(nil, opts, durability, supportsRangeKeys) + p.init(nil, opts, durability) if iter == nil { p.iter = pebbleiter.MaybeWrap(handle.NewIter(&p.options)) } else if clone { @@ -212,19 +199,6 @@ func (p *pebbleIterator) setOptions(opts IterOptions, durability DurabilityRequi panic("can't use range key masking with prefix iterators") // very high overhead } - // If this Pebble database does not support range keys yet, fall back to - // only iterating over point keys to avoid panics. This is effectively the - // same, since a database without range key support contains no range keys, - // except in the case of RangesOnly where the iterator must always be empty. - if !p.supportsRangeKeys { - if opts.KeyTypes == IterKeyTypeRangesOnly { - opts.LowerBound = nil - opts.UpperBound = []byte{0} - } - opts.KeyTypes = IterKeyTypePointsOnly - opts.RangeKeyMaskingBelow = hlc.Timestamp{} - } - // Generate new Pebble iterator options. p.options = pebble.IterOptions{ OnlyReadGuaranteedDurable: durability == GuaranteedDurability, @@ -1008,12 +982,5 @@ func (p *pebbleIterator) assertMVCCInvariants() error { return errors.AssertionFailedf("IsPrefix() does not match prefix=%v", p.prefix) } - // Ensure !supportsRangeKeys never exposes range keys. - if !p.supportsRangeKeys { - if _, hasRange := p.HasPointAndRange(); hasRange { - return errors.AssertionFailedf("hasRange=true but supportsRangeKeys=false") - } - } - return nil } diff --git a/pkg/storage/pebble_iterator_test.go b/pkg/storage/pebble_iterator_test.go index 21ecad9cf98c..d1c9eb3b1c37 100644 --- a/pkg/storage/pebble_iterator_test.go +++ b/pkg/storage/pebble_iterator_test.go @@ -72,7 +72,7 @@ func TestPebbleIterator_Corruption(t *testing.T) { LowerBound: []byte("a"), UpperBound: []byte("z"), } - iter := newPebbleIterator(p.db, iterOpts, StandardDurability, false /* range keys */) + iter := newPebbleIterator(p.db, iterOpts, StandardDurability) // Seeking into the table catches the corruption. 
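With the point-only fallback removed from setOptions above, every iterator can surface both point and range keys. For illustration only, a sketch against the pebble iterator API of that era (helper name and bounds are made up; not CockroachDB code):

package formatdemo

import (
	"fmt"

	"github.com/cockroachdb/pebble"
)

// scanPointsAndRanges walks a span and reports, at each position, whether a
// point key, a range key, or both are present.
func scanPointsAndRanges(db *pebble.DB) {
	it := db.NewIter(&pebble.IterOptions{
		LowerBound: []byte("a"),
		UpperBound: []byte("z"),
		KeyTypes:   pebble.IterKeyTypePointsAndRanges,
	})
	defer it.Close()

	for valid := it.First(); valid; valid = it.Next() {
		hasPoint, hasRange := it.HasPointAndRange()
		if hasRange {
			start, end := it.RangeBounds()
			fmt.Printf("range keys over [%s, %s): %d\n", start, end, len(it.RangeKeys()))
		}
		if hasPoint {
			fmt.Printf("point key %s\n", it.Key())
		}
	}
}
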
ok, err := iter.SeekEngineKeyGE(ek) @@ -94,7 +94,7 @@ func randStr(fill []byte, rng *rand.Rand) { func TestPebbleIterator_ExternalCorruption(t *testing.T) { defer leaktest.AfterTest(t)() - version := clusterversion.ByKey(clusterversion.TODODelete_V22_2EnsurePebbleFormatVersionRangeKeys) + version := clusterversion.ByKey(clusterversion.V22_2) st := cluster.MakeTestingClusterSettingsWithVersions(version, version, true) ctx := context.Background() rng := rand.New(rand.NewSource(timeutil.Now().UnixNano())) diff --git a/pkg/storage/pebble_test.go b/pkg/storage/pebble_test.go index 5cba54859c40..00885abe281e 100644 --- a/pkg/storage/pebble_test.go +++ b/pkg/storage/pebble_test.go @@ -24,10 +24,12 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/storage/enginepb" + "github.com/cockroachdb/cockroach/pkg/storage/fs" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/cockroachdb/cockroach/pkg/util/protoutil" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/cockroach/pkg/util/uuid" "github.com/cockroachdb/errors" @@ -1287,13 +1289,17 @@ func TestIncompatibleVersion(t *testing.T) { dir: "", fs: vfs.NewMem(), } - oldVer := roachpb.Version{Major: 21, Minor: 1} - stOld := cluster.MakeTestingClusterSettingsWithVersions(oldVer, oldVer, true /* initializeVersion */) - p, err := Open(ctx, loc, stOld) + + p, err := Open(ctx, loc, cluster.MakeTestingClusterSettings()) require.NoError(t, err) p.Close() - stNew := cluster.MakeTestingClusterSettings() - _, err = Open(ctx, loc, stNew) + // Overwrite the min version file with an unsupported version. + version := roachpb.Version{Major: 21, Minor: 1} + b, err := protoutil.Marshal(&version) + require.NoError(t, err) + require.NoError(t, fs.SafeWriteToFile(loc.fs, loc.dir, MinVersionFilename, b)) + + _, err = Open(ctx, loc, cluster.MakeTestingClusterSettings()) require.ErrorContains(t, err, "is too old for running version") } diff --git a/pkg/storage/sst_writer.go b/pkg/storage/sst_writer.go index ccccbdd669d7..f9bbdb80143f 100644 --- a/pkg/storage/sst_writer.go +++ b/pkg/storage/sst_writer.go @@ -62,10 +62,7 @@ func MakeIngestionWriterOptions(ctx context.Context, cs *cluster.Settings) sstab // By default, take a conservative approach and assume we don't have newer // table features available. Upgrade to an appropriate version only if the // cluster supports it. - format := sstable.TableFormatPebblev1 // Block properties. - if cs.Version.IsActive(ctx, clusterversion.TODODelete_V22_2EnablePebbleFormatVersionRangeKeys) { - format = sstable.TableFormatPebblev2 // Range keys. - } + format := sstable.TableFormatPebblev2 if cs.Version.IsActive(ctx, clusterversion.V23_1EnablePebbleFormatSSTableValueBlocks) && ValueBlocksEnabled.Get(&cs.SV) { format = sstable.TableFormatPebblev3 @@ -81,10 +78,8 @@ func MakeBackupSSTWriter(ctx context.Context, cs *cluster.Settings, f io.Writer) // By default, take a conservative approach and assume we don't have newer // table features available. Upgrade to an appropriate version only if the // cluster supports it. - format := sstable.TableFormatPebblev1 // Block properties. - if cs.Version.IsActive(ctx, clusterversion.TODODelete_V22_2EnablePebbleFormatVersionRangeKeys) { - format = sstable.TableFormatPebblev2 // Range keys. 
- } + format := sstable.TableFormatPebblev2 + // TODO(sumeer): add code to use TableFormatPebblev3 after confirming that // we won't run afoul of any stale tooling that reads backup ssts. opts := DefaultPebbleOptions().MakeWriterOptions(0, format) diff --git a/pkg/storage/sst_writer_test.go b/pkg/storage/sst_writer_test.go index af2a94db22c4..b46b3641c898 100644 --- a/pkg/storage/sst_writer_test.go +++ b/pkg/storage/sst_writer_test.go @@ -19,7 +19,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" - "github.com/cockroachdb/cockroach/pkg/testutils/skip" "github.com/cockroachdb/cockroach/pkg/util/encoding" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -79,30 +78,28 @@ func makePebbleSST(t testing.TB, kvs []MVCCKeyValue, ingestion bool) []byte { func TestMakeIngestionWriterOptions(t *testing.T) { defer leaktest.AfterTest(t)() - skip.WithIssue(t, 95530, "bump minBinary to 22.2. Skip 22.2 mixed-version tests for future cleanup") - testCases := []struct { name string st *cluster.Settings want sstable.TableFormat }{ { - name: "before feature gate", + name: "22.2", st: cluster.MakeTestingClusterSettingsWithVersions( - clusterversion.ByKey(clusterversion.TODODelete_V22_2EnablePebbleFormatVersionRangeKeys-1), + clusterversion.ByKey(clusterversion.V22_2), clusterversion.TestingBinaryMinSupportedVersion, true, ), - want: sstable.TableFormatPebblev1, + want: sstable.TableFormatPebblev2, }, { - name: "at feature gate", - st: cluster.MakeTestingClusterSettingsWithVersions( - clusterversion.ByKey(clusterversion.TODODelete_V22_2EnablePebbleFormatVersionRangeKeys), - clusterversion.TestingBinaryMinSupportedVersion, - true, - ), - want: sstable.TableFormatPebblev2, + name: "with value blocks", + st: func() *cluster.Settings { + st := cluster.MakeTestingClusterSettings() + ValueBlocksEnabled.Override(context.Background(), &st.SV, true) + return st + }(), + want: sstable.TableFormatPebblev3, }, } @@ -115,45 +112,6 @@ func TestMakeIngestionWriterOptions(t *testing.T) { } } -func TestSSTWriterRangeKeysUnsupported(t *testing.T) { - defer leaktest.AfterTest(t)() - defer log.Scope(t).Close(t) - - ctx := context.Background() - - // Set up a version that doesn't support range keys. - version := clusterversion.ByKey(clusterversion.TODODelete_V22_2EnsurePebbleFormatVersionRangeKeys - 1) - st := cluster.MakeTestingClusterSettingsWithVersions(version, version, true) - - writers := map[string]SSTWriter{ - "ingestion": MakeIngestionSSTWriter(ctx, st, &MemFile{}), - "backup": MakeBackupSSTWriter(ctx, st, &MemFile{}), - } - - for name, w := range writers { - t.Run(name, func(t *testing.T) { - defer w.Close() - - rangeKey := rangeKey("a", "b", 2) - - // Put should error, but clears are noops. 
-			err := w.PutMVCCRangeKey(rangeKey, MVCCValue{})
-			require.Error(t, err)
-			require.Contains(t, err.Error(), "range keys not supported")
-
-			err = w.PutEngineRangeKey(rangeKey.StartKey, rangeKey.EndKey,
-				EncodeMVCCTimestampSuffix(rangeKey.Timestamp), nil)
-			require.Error(t, err)
-			require.Contains(t, err.Error(), "range keys not supported")
-
-			require.NoError(t, w.ClearMVCCRangeKey(rangeKey))
-			require.NoError(t, w.ClearEngineRangeKey(rangeKey.StartKey, rangeKey.EndKey,
-				EncodeMVCCTimestampSuffix(rangeKey.Timestamp)))
-			require.NoError(t, w.ClearRawRange(rangeKey.StartKey, rangeKey.EndKey, false, true))
-		})
-	}
-}
-
 func TestSSTWriterRangeKeys(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	defer log.Scope(t).Close(t)
diff --git a/pkg/storage/temp_engine.go b/pkg/storage/temp_engine.go
index 103037cce18c..0a5aef019f45 100644
--- a/pkg/storage/temp_engine.go
+++ b/pkg/storage/temp_engine.go
@@ -88,6 +88,7 @@ func newPebbleTempEngine(
 			cfg.Opts.Comparer = pebble.DefaultComparer
 			cfg.Opts.DisableWAL = true
 			cfg.Opts.Experimental.KeyValidationFunc = nil
+			cfg.Opts.BlockPropertyCollectors = nil
 			return nil
 		},
 	)
diff --git a/pkg/upgrade/upgrademanager/manager_external_test.go b/pkg/upgrade/upgrademanager/manager_external_test.go
index 64b6bd6a7cd3..fb32d3b895e7 100644
--- a/pkg/upgrade/upgrademanager/manager_external_test.go
+++ b/pkg/upgrade/upgrademanager/manager_external_test.go
@@ -301,23 +301,20 @@ func TestConcurrentMigrationAttempts(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	defer log.Scope(t).Close(t)
 
-	// We're going to be migrating from startKey to endKey. We end up needing
-	// to use real versions because the ListBetween uses the keys compiled into
-	// the clusterversion package.
-	const (
-		startMajor = 42
-		endMajor   = 48
-	)
-	migrationRunCounts := make(map[clusterversion.ClusterVersion]int)
+	// We're going to be migrating from the current version to imaginary future versions.
+	current := clusterversion.ByKey(clusterversion.BinaryVersionKey)
+	versions := []roachpb.Version{current}
+	for i := int32(1); i <= 6; i++ {
+		v := current
+		v.Major += i
+		versions = append(versions, v)
+	}
 
 	// RegisterKVMigration the upgrades to update the map with run counts.
 	// There should definitely not be any concurrency of execution, so the race
 	// detector should not fire.
-	var versions []roachpb.Version
+	migrationRunCounts := make(map[clusterversion.ClusterVersion]int)
 
-	for major := int32(startMajor); major <= endMajor; major++ {
-		versions = append(versions, roachpb.Version{Major: major})
-	}
 	ctx := context.Background()
 	var active int32 // used to detect races
 	tc := testcluster.StartTestCluster(t, 3, base.TestClusterArgs{
diff --git a/pkg/upgrade/upgrades/schema_changes_external_test.go b/pkg/upgrade/upgrades/schema_changes_external_test.go
index 898a600da07a..b98303aa6600 100644
--- a/pkg/upgrade/upgrades/schema_changes_external_test.go
+++ b/pkg/upgrade/upgrades/schema_changes_external_test.go
@@ -242,9 +242,9 @@ func testMigrationWithFailures(
 	skip.UnderRace(t, "very slow")
 
-	// We're going to be migrating from startCV to endCV.
-	startCV := roachpb.Version{Major: 2041}
-	endCV := roachpb.Version{Major: 2042}
+	// We're going to be migrating from the minimum supported version to the current version.
+	startCV := clusterversion.ByKey(clusterversion.BinaryMinSupportedVersionKey)
+	endCV := clusterversion.ByKey(clusterversion.BinaryVersionKey)
 
 	// The tests follows the following procedure.
 	//
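The last two hunks replace hard-coded version literals with versions derived from the binary's own bounds. A small hedged sketch of that pattern as a reusable helper (the helper name and package are invented; the key lookups are the ones used in the diff):

package versiontestutil

import (
	"github.com/cockroachdb/cockroach/pkg/clusterversion"
	"github.com/cockroachdb/cockroach/pkg/roachpb"
)

// futureVersions returns the current binary version followed by n imaginary
// later major releases, mirroring the loop in TestConcurrentMigrationAttempts.
func futureVersions(n int32) []roachpb.Version {
	current := clusterversion.ByKey(clusterversion.BinaryVersionKey)
	out := []roachpb.Version{current}
	for i := int32(1); i <= n; i++ {
		v := current
		v.Major += i
		out = append(out, v)
	}
	return out
}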