diff --git a/pkg/cmd/roachtest/cdc.go b/pkg/cmd/roachtest/cdc.go
index 00c66d8d49e1..fd2787cd4e17 100644
--- a/pkg/cmd/roachtest/cdc.go
+++ b/pkg/cmd/roachtest/cdc.go
@@ -462,7 +462,7 @@ func runCDCSchemaRegistry(ctx context.Context, t *test, c *cluster) {
 
 func registerCDC(r *testRegistry) {
 	useRangeFeed := true
-	if r.buildVersion.Compare(version.MustParse(`v2.2.0-0`)) < 0 {
+	if r.buildVersion.Compare(version.MustParse(`v19.1.0-0`)) < 0 {
 		// RangeFeed is not production ready in 2.1, so run the tests with the
 		// poller.
 		useRangeFeed = false
@@ -504,7 +504,7 @@ func registerCDC(r *testRegistry) {
 		// When testing a 2.1 binary, we use the poller for all the other tests
 		// and this is close enough to cdc/tpcc-1000 test to be redundant, so
 		// skip it.
-		MinVersion: "v2.2.0",
+		MinVersion: "v19.1.0",
 		Cluster:    makeClusterSpec(4, cpu(16)),
 		Run: func(ctx context.Context, t *test, c *cluster) {
 			cdcBasicTest(ctx, t, c, cdcTestArgs{
@@ -576,7 +576,7 @@ func registerCDC(r *testRegistry) {
 	})
 	r.Add(testSpec{
 		Name:       "cdc/cloud-sink-gcs/rangefeed=true",
-		MinVersion: "v2.2.0",
+		MinVersion: "v19.1.0",
 		Cluster:    makeClusterSpec(4, cpu(16)),
 		Run: func(ctx context.Context, t *test, c *cluster) {
 			cdcBasicTest(ctx, t, c, cdcTestArgs{
@@ -609,7 +609,7 @@ func registerCDC(r *testRegistry) {
 	})
 	r.Add(testSpec{
 		Name:       "cdc/schemareg",
-		MinVersion: "v2.2.0",
+		MinVersion: "v19.1.0",
 		Cluster:    makeClusterSpec(1),
 		Run: func(ctx context.Context, t *test, c *cluster) {
 			runCDCSchemaRegistry(ctx, t, c)
diff --git a/pkg/cmd/roachtest/clearrange.go b/pkg/cmd/roachtest/clearrange.go
index ea8522d1c4eb..05d6bf5d75e2 100644
--- a/pkg/cmd/roachtest/clearrange.go
+++ b/pkg/cmd/roachtest/clearrange.go
@@ -26,7 +26,7 @@ func registerClearRange(r *testRegistry) {
 		// 5h for import, 90 for the test. The import should take closer
 		// to <3:30h but it varies.
 		Timeout:    5*time.Hour + 90*time.Minute,
-		MinVersion: `v2.2.0`,
+		MinVersion: "v19.1.0",
 		// This test reformats a drive to ZFS, so we don't want it reused.
 		// TODO(andrei): Can the test itself reuse the cluster (under --count=2)?
 		// In other words, would a OnlyTagged("clearrange") policy be good?
diff --git a/pkg/cmd/roachtest/disk_stall.go b/pkg/cmd/roachtest/disk_stall.go
index dcf3e3ea1fc2..30c92498bafb 100644
--- a/pkg/cmd/roachtest/disk_stall.go
+++ b/pkg/cmd/roachtest/disk_stall.go
@@ -34,7 +34,7 @@ func registerDiskStalledDetection(r *testRegistry) {
 				"disk-stalled/log=%t,data=%t",
 				affectsLogDir, affectsDataDir,
 			),
-			MinVersion: `v2.2.0`,
+			MinVersion: "v19.1.0",
 			Cluster:    makeClusterSpec(1),
 			Run: func(ctx context.Context, t *test, c *cluster) {
 				runDiskStalledDetection(ctx, t, c, affectsLogDir, affectsDataDir)
diff --git a/pkg/cmd/roachtest/follower_reads.go b/pkg/cmd/roachtest/follower_reads.go
index 19cbf6604932..f5f7295c54fe 100644
--- a/pkg/cmd/roachtest/follower_reads.go
+++ b/pkg/cmd/roachtest/follower_reads.go
@@ -31,7 +31,7 @@ func registerFollowerReads(r *testRegistry) {
 	r.Add(testSpec{
 		Name:       "follower-reads/nodes=3",
 		Cluster:    makeClusterSpec(3 /* nodeCount */, cpu(2), geo()),
-		MinVersion: "v2.2.0",
+		MinVersion: "v19.1.0",
 		Run:        runFollowerReadsTest,
 	})
 }
diff --git a/pkg/cmd/roachtest/kv.go b/pkg/cmd/roachtest/kv.go
index 77e39ea2998d..da0c62036851 100644
--- a/pkg/cmd/roachtest/kv.go
+++ b/pkg/cmd/roachtest/kv.go
@@ -154,8 +154,9 @@ func registerKV(r *testRegistry) {
 func registerKVContention(r *testRegistry) {
 	const nodes = 4
 	r.Add(testSpec{
-		Name:    fmt.Sprintf("kv/contention/nodes=%d", nodes),
-		Cluster: makeClusterSpec(nodes + 1),
+		Name:       fmt.Sprintf("kv/contention/nodes=%d", nodes),
+		MinVersion: "v19.2.0",
+		Cluster:    makeClusterSpec(nodes + 1),
 		Run: func(ctx context.Context, t *test, c *cluster) {
 			c.Put(ctx, cockroach, "./cockroach", c.Range(1, nodes))
 			c.Put(ctx, workload, "./workload", c.Node(nodes+1))
diff --git a/pkg/cmd/roachtest/psycopg.go b/pkg/cmd/roachtest/psycopg.go
index 8635be827698..eae145b1985a 100644
--- a/pkg/cmd/roachtest/psycopg.go
+++ b/pkg/cmd/roachtest/psycopg.go
@@ -255,7 +255,7 @@ func registerPsycopg(r *testRegistry) {
 	r.Add(testSpec{
 		Name:       "psycopg",
 		Cluster:    makeClusterSpec(1),
-		MinVersion: "v2.2.0",
+		MinVersion: "v19.1.0",
 		Run: func(ctx context.Context, t *test, c *cluster) {
 			runPsycopg(ctx, t, c)
 		},
diff --git a/pkg/cmd/roachtest/schemachange.go b/pkg/cmd/roachtest/schemachange.go
index 17b711a7bcfb..ce095edc726e 100644
--- a/pkg/cmd/roachtest/schemachange.go
+++ b/pkg/cmd/roachtest/schemachange.go
@@ -310,7 +310,7 @@ func makeIndexAddTpccTest(spec clusterSpec, warehouses int, length time.Duration
 				Duration:   length,
 			})
 		},
-		MinVersion: "v2.2.0",
+		MinVersion: "v19.1.0",
 	}
 }
 
diff --git a/pkg/cmd/roachtest/scrub.go b/pkg/cmd/roachtest/scrub.go
index 8001099aff84..077ff5f4710b 100644
--- a/pkg/cmd/roachtest/scrub.go
+++ b/pkg/cmd/roachtest/scrub.go
@@ -83,6 +83,6 @@ func makeScrubTPCCTest(
 				Duration:   length,
 			})
 		},
-		MinVersion: "v2.2.0",
+		MinVersion: "v19.1.0",
 	}
 }
diff --git a/pkg/cmd/roachtest/split.go b/pkg/cmd/roachtest/split.go
index 118174705fd1..097c37505f74 100644
--- a/pkg/cmd/roachtest/split.go
+++ b/pkg/cmd/roachtest/split.go
@@ -41,7 +41,7 @@ func registerLoadSplits(r *testRegistry) {
 
 	r.Add(testSpec{
 		Name:       fmt.Sprintf("splits/load/uniform/nodes=%d", numNodes),
-		MinVersion: "v2.2.0",
+		MinVersion: "v19.1.0",
 		Cluster:    makeClusterSpec(numNodes),
 		Run: func(ctx context.Context, t *test, c *cluster) {
 			// This number was determined experimentally. Often, but not always,
@@ -84,7 +84,7 @@ func registerLoadSplits(r *testRegistry) {
 	})
 	r.Add(testSpec{
 		Name:       fmt.Sprintf("splits/load/sequential/nodes=%d", numNodes),
-		MinVersion: "v2.2.0",
+		MinVersion: "v19.1.0",
 		Cluster:    makeClusterSpec(numNodes),
 		Run: func(ctx context.Context, t *test, c *cluster) {
 			runLoadSplits(ctx, t, c, splitParams{
@@ -104,7 +104,7 @@ func registerLoadSplits(r *testRegistry) {
 	})
 	r.Add(testSpec{
 		Name:       fmt.Sprintf("splits/load/spanning/nodes=%d", numNodes),
-		MinVersion: "v2.2.0",
+		MinVersion: "v19.1.0",
 		Cluster:    makeClusterSpec(numNodes),
 		Run: func(ctx context.Context, t *test, c *cluster) {
 			runLoadSplits(ctx, t, c, splitParams{
diff --git a/pkg/cmd/roachtest/synctest.go b/pkg/cmd/roachtest/synctest.go
index fd731dc591af..5124a1f33f81 100644
--- a/pkg/cmd/roachtest/synctest.go
+++ b/pkg/cmd/roachtest/synctest.go
@@ -29,7 +29,7 @@ fi
 
 	r.Add(testSpec{
 		Name:       "synctest",
-		MinVersion: `v2.2.0`,
+		MinVersion: "v19.1.0",
 		// This test sets up a custom file system; we don't want the cluster reused.
 		Cluster: makeClusterSpec(1, reuseNone()),
 		Run: func(ctx context.Context, t *test, c *cluster) {
diff --git a/pkg/cmd/roachtest/tpcc.go b/pkg/cmd/roachtest/tpcc.go
index 30eb0ea96078..358e635590bd 100644
--- a/pkg/cmd/roachtest/tpcc.go
+++ b/pkg/cmd/roachtest/tpcc.go
@@ -404,7 +404,7 @@ func gceOrAws(cloud string, gce, aws int) int {
 }
 
 func maybeMinVersionForFixturesImport(cloud string) string {
-	const minVersionForFixturesImport = "v2.2.0"
+	const minVersionForFixturesImport = "v19.1.0"
 	if cloud == "aws" {
 		return minVersionForFixturesImport
 	}
diff --git a/pkg/storage/raft_log_queue_test.go b/pkg/storage/raft_log_queue_test.go
index 56edc99b2a62..e9e05b85fc53 100644
--- a/pkg/storage/raft_log_queue_test.go
+++ b/pkg/storage/raft_log_queue_test.go
@@ -722,10 +722,11 @@ func TestSnapshotLogTruncationConstraints(t *testing.T) {
 func TestTruncateLog(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	tc := testContext{}
+	cfg := TestStoreConfig(nil)
+	cfg.TestingKnobs.DisableRaftLogQueue = true
 	stopper := stop.NewStopper()
 	defer stopper.Stop(context.TODO())
-	tc.Start(t, stopper)
-	tc.repl.store.SetRaftLogQueueActive(false)
+	tc.StartWithStoreConfig(t, stopper, cfg)
 
 	// Populate the log with 10 entries. Save the LastIndex after each write.
 	var indexes []uint64
@@ -887,10 +888,11 @@ func TestTruncateLogRecompute(t *testing.T) {
 	tc := testContext{
 		engine: eng,
 	}
+	cfg := TestStoreConfig(nil)
+	cfg.TestingKnobs.DisableRaftLogQueue = true
 	stopper := stop.NewStopper()
 	defer stopper.Stop(context.TODO())
-	tc.Start(t, stopper)
-	tc.repl.store.SetRaftLogQueueActive(false)
+	tc.StartWithStoreConfig(t, stopper, cfg)
 
 	key := roachpb.Key("a")
 	repl := tc.store.LookupReplica(keys.MustAddr(key))
diff --git a/pkg/storage/replica_test.go b/pkg/storage/replica_test.go
index a05d8b8593cf..20a0543065e2 100644
--- a/pkg/storage/replica_test.go
+++ b/pkg/storage/replica_test.go
@@ -1285,10 +1285,10 @@ func TestReplicaTSCacheLowWaterOnLease(t *testing.T) {
 	tc := testContext{manualClock: hlc.NewManualClock(123)}
 	cfg := TestStoreConfig(hlc.NewClock(tc.manualClock.UnixNano, time.Nanosecond))
 	cfg.TestingKnobs.DisableAutomaticLeaseRenewal = true
+	// Disable raft log truncation which confuses this test.
+	cfg.TestingKnobs.DisableRaftLogQueue = true
 	tc.StartWithStoreConfig(t, stopper, cfg)
-	// Disable raft log truncation which confuses this test.
-	tc.store.SetRaftLogQueueActive(false)
 	secondReplica, err := tc.addBogusReplicaToRangeDesc(context.TODO())
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -5159,15 +5159,12 @@ func TestPushTxnHeartbeatTimeout(t *testing.T) {
 				t.Fatalf("%d: %s", i, pErr)
 			}
 		case roachpb.STAGING:
-			// TODO(nvanbenschoten): Avoid writing directly to the engine once
-			// there's a way to create a STAGING transaction record.
-			txnKey := keys.TransactionKey(pushee.Key, pushee.ID)
-			txnRecord := pushee.AsRecord()
-			txnRecord.Status = roachpb.STAGING
-			if err := engine.MVCCPutProto(
-				context.Background(), tc.repl.store.Engine(), nil, txnKey, hlc.Timestamp{}, nil, &txnRecord,
-			); err != nil {
-				t.Fatal(err)
+			et, etH := endTxnArgs(pushee, true)
+			et.InFlightWrites = []roachpb.SequencedWrite{
+				{Key: key, Sequence: 1},
+			}
+			if _, pErr := client.SendWrappedWith(context.Background(), tc.Sender(), etH, &et); pErr != nil {
+				t.Fatalf("%d: %s", i, pErr)
 			}
 		default:
 			t.Fatalf("unexpected status: %v", test.status)
@@ -6624,10 +6621,10 @@ func TestEntries(t *testing.T) {
 	// Disable ticks to avoid quiescence, which can result in empty
 	// entries being proposed and causing the test to flake.
 	cfg.RaftTickInterval = math.MaxInt32
+	cfg.TestingKnobs.DisableRaftLogQueue = true
 	stopper := stop.NewStopper()
 	defer stopper.Stop(context.TODO())
 	tc.StartWithStoreConfig(t, stopper, cfg)
-	tc.repl.store.SetRaftLogQueueActive(false)
 
 	repl := tc.repl
 	rangeID := repl.RangeID
@@ -6774,10 +6771,11 @@ func TestEntries(t *testing.T) {
 func TestTerm(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	tc := testContext{}
+	tsc := TestStoreConfig(nil)
+	tsc.TestingKnobs.DisableRaftLogQueue = true
 	stopper := stop.NewStopper()
 	defer stopper.Stop(context.TODO())
-	tc.Start(t, stopper)
-	tc.repl.store.SetRaftLogQueueActive(false)
+	tc.StartWithStoreConfig(t, stopper, tsc)
 
 	repl := tc.repl
 	rangeID := repl.RangeID
diff --git a/pkg/storage/store.go b/pkg/storage/store.go
index b43bf361ca3c..fe3b6be37af7 100644
--- a/pkg/storage/store.go
+++ b/pkg/storage/store.go
@@ -937,6 +937,9 @@ func NewStore(
 	if cfg.TestingKnobs.DisableMergeQueue {
 		s.setMergeQueueActive(false)
 	}
+	if cfg.TestingKnobs.DisableRaftLogQueue {
+		s.setRaftLogQueueActive(false)
+	}
 	if cfg.TestingKnobs.DisableReplicaGCQueue {
 		s.setReplicaGCQueueActive(false)
 	}
diff --git a/pkg/storage/testing_knobs.go b/pkg/storage/testing_knobs.go
index 6821f0268e69..268fc628055f 100644
--- a/pkg/storage/testing_knobs.go
+++ b/pkg/storage/testing_knobs.go
@@ -104,6 +104,8 @@ type StoreTestingKnobs struct {
 	DisableGCQueue bool
 	// DisableMergeQueue disables the merge queue.
 	DisableMergeQueue bool
+	// DisableRaftLogQueue disables the raft log queue.
+	DisableRaftLogQueue bool
 	// DisableReplicaGCQueue disables the replica GC queue.
 	DisableReplicaGCQueue bool
 	// DisableReplicateQueue disables the replication queue.
diff --git a/pkg/storage/txn_recovery_integration_test.go b/pkg/storage/txn_recovery_integration_test.go
index a064e0cefc13..41256189087a 100644
--- a/pkg/storage/txn_recovery_integration_test.go
+++ b/pkg/storage/txn_recovery_integration_test.go
@@ -71,8 +71,8 @@ func TestTxnRecoveryFromStaging(t *testing.T) {
 	// state. Include both writes as the EndTransaction's in-flight writes.
 	et, etH := endTxnArgs(txn, true)
 	et.InFlightWrites = []roachpb.SequencedWrite{
-		{Key: keyA, Sequence: 0},
-		{Key: keyB, Sequence: 1},
+		{Key: keyA, Sequence: 1},
+		{Key: keyB, Sequence: 2},
 	}
 	etReply, pErr := client.SendWrappedWith(ctx, store.TestSender(), etH, &et)
 	if pErr != nil {
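
Note on the storage-package changes above: they replace the post-start
`tc.repl.store.SetRaftLogQueueActive(false)` pattern with a
`StoreTestingKnobs.DisableRaftLogQueue` knob that is honored inside
`NewStore`, so the raft log queue is never active before a test disables
it. A minimal sketch of the resulting test pattern, assuming the
`testContext`/`TestStoreConfig`/`StartWithStoreConfig` helpers used in the
tests above (the test name itself is hypothetical):

	func TestExampleWithDisabledRaftLogQueue(t *testing.T) {
		defer leaktest.AfterTest(t)()
		// Disable the raft log queue via the testing knob before the store
		// starts, instead of deactivating it on an already-running store,
		// where the queue could truncate the log first.
		cfg := TestStoreConfig(nil)
		cfg.TestingKnobs.DisableRaftLogQueue = true
		stopper := stop.NewStopper()
		defer stopper.Stop(context.TODO())
		tc := testContext{}
		tc.StartWithStoreConfig(t, stopper, cfg)
		// From here on, log entries written by the test cannot be truncated
		// underneath it: the queue was switched off during NewStore.
	}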