Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

*: remove the storage.mvcc.range_tombstones.enabled cluster setting #96966

Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 4 additions & 1 deletion docs/tech-notes/mvcc-range-tombstones.md
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,9 @@ not yet been upgraded to support them (in the
NB: MVCC range tombstones are not yet supported in transactions, since that
would require ranged write intents.

NB: The `storage.mvcc.range_tombstones.enabled` cluster setting and the
`storage.CanUseMVCCRangeTombstones()` helper were removed in 23.1.

## Motivation

Previously, certain operations would rely on the non-MVCC methods `ClearRange`
Expand Down Expand Up @@ -638,4 +641,4 @@ completeness:
Related RFCs:

* [Streaming Replication Between Clusters RFC](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20201119_streaming_cluster_to_cluster.md)
* [RFC Draft - Import Rollbacks without MVCC Timestamps](https://docs.google.com/document/d/16TbkFznqbsu3mialSw6o1sxOEn1pKhhJ9HTxNdw0-WE/edit#heading=h.bpox0bmkz77i)
* [RFC Draft - Import Rollbacks without MVCC Timestamps](https://docs.google.com/document/d/16TbkFznqbsu3mialSw6o1sxOEn1pKhhJ9HTxNdw0-WE/edit#heading=h.bpox0bmkz77i)
7 changes: 0 additions & 7 deletions pkg/ccl/backupccl/tenant_backup_nemesis_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -71,9 +71,6 @@ func TestTenantBackupWithCanceledImport(t *testing.T) {
)
defer hostClusterCleanupFn()

hostSQLDB.Exec(t, "SET CLUSTER SETTING storage.mvcc.range_tombstones.enabled = true")
hostSQLDB.Exec(t, "ALTER TENANT ALL SET CLUSTER SETTING storage.mvcc.range_tombstones.enabled = true")

tenant10, err := tc.Servers[0].StartTenant(ctx, base.TestTenantArgs{
TenantID: roachpb.MustMakeTenantID(10),
TestingKnobs: base.TestingKnobs{
Expand Down Expand Up @@ -146,10 +143,6 @@ func TestTenantBackupNemesis(t *testing.T) {
)
defer hostClusterCleanupFn()

// Range tombstones must be enabled for tenant backups to work correctly.
hostSQLDB.Exec(t, "SET CLUSTER SETTING storage.mvcc.range_tombstones.enabled = true")
hostSQLDB.Exec(t, "ALTER TENANT ALL SET CLUSTER SETTING storage.mvcc.range_tombstones.enabled = true")

tenant10, err := tc.Servers[0].StartTenant(ctx, base.TestTenantArgs{
TenantID: roachpb.MustMakeTenantID(10),
TestingKnobs: base.TestingKnobs{
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -7,9 +7,7 @@
# - roll it back non-mvcc
# - run an inc backup and ensure we reintroduce the table spans

# disabled to run within tenant as they don't have access to the
# storage.mvcc.range_tombstones.enabled cluster setting
new-cluster name=s1 disable-tenant
new-cluster name=s1
----

###########
Expand All @@ -36,12 +34,6 @@ exec-sql
SET CLUSTER SETTING kv.bulkio.write_metadata_sst.enabled = false;
----


exec-sql
SET CLUSTER SETTING storage.mvcc.range_tombstones.enabled = false;
----


exec-sql
EXPORT INTO CSV 'nodelocal://0/export1/' FROM SELECT * FROM baz;
----
Expand Down Expand Up @@ -243,10 +235,6 @@ SET CLUSTER SETTING jobs.debug.pausepoints = 'import.after_ingest';
----


exec-sql
SET CLUSTER SETTING storage.mvcc.range_tombstones.enabled = true;
----

exec-sql
SET CLUSTER SETTING kv.bulkio.write_metadata_sst.enabled = false;
----
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,10 +19,6 @@ BACKUP INTO 'nodelocal://1/cluster_backup';
new-cluster name=s2 nodes=1 share-io-dir=s1 disable-tenant
----

exec-sql
SET CLUSTER SETTING storage.mvcc.range_tombstones.enabled = true;
----

# Restore's OnFailOrCancel deletes descriptors which requires us to wait for no
# versions of that descriptor to be leased before proceeding. Since our test fails
# the job after the descriptors have been published, it's possible for them to be leased
Expand Down
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
# disabled for tenants as they can't set storage.mvcc.range_tombstones.enabled
new-cluster name=s1 nodes=1 disable-tenant
new-cluster name=s1 nodes=1
----

subtest restore-retry
Expand All @@ -20,10 +19,6 @@ BACKUP INTO 'nodelocal://1/cluster_backup';
new-cluster name=s2 nodes=1 share-io-dir=s1 disable-tenant
----

exec-sql
SET CLUSTER SETTING storage.mvcc.range_tombstones.enabled = true;
----

exec-sql
SELECT crdb_internal.set_vmodule('lease=3');
----
Expand Down
1 change: 0 additions & 1 deletion pkg/ccl/changefeedccl/changefeed_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -3061,7 +3061,6 @@ func TestChangefeedFailOnTableOffline(t *testing.T) {
cdcTestNamedWithSystem(t, "reverted import fails changefeed with earlier cursor", func(t *testing.T, s TestServerWithSystem, f cdctest.TestFeedFactory) {
sysSQLDB := sqlutils.MakeSQLRunner(s.SystemDB)
sysSQLDB.Exec(t, "SET CLUSTER SETTING kv.bulk_io_write.small_write_size = '1'")
sysSQLDB.Exec(t, "SET CLUSTER SETTING storage.mvcc.range_tombstones.enabled = true")

sqlDB := sqlutils.MakeSQLRunner(s.DB)
sqlDB.Exec(t, `CREATE TABLE for_import (a INT PRIMARY KEY, b INT)`)
Expand Down
2 changes: 0 additions & 2 deletions pkg/cmd/roachtest/tests/backup.go
Original file line number Diff line number Diff line change
Expand Up @@ -1145,8 +1145,6 @@ func runBackupMVCCRangeTombstones(ctx context.Context, t test.Test, c cluster.Cl
t.Status("configuring cluster")
_, err := conn.Exec(`SET CLUSTER SETTING kv.bulk_ingest.max_index_buffer_size = '2gb'`)
require.NoError(t, err)
_, err = conn.Exec(`SET CLUSTER SETTING storage.mvcc.range_tombstones.enabled = 't'`)
require.NoError(t, err)
_, err = conn.Exec(`SET CLUSTER SETTING server.debug.default_vmodule = 'txn=2,sst_batcher=4,
revert=2'`)
require.NoError(t, err)
Expand Down
80 changes: 30 additions & 50 deletions pkg/cmd/roachtest/tests/clearrange.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,63 +27,43 @@ import (

func registerClearRange(r registry.Registry) {
for _, checks := range []bool{true, false} {
for _, rangeTombstones := range []bool{true, false} {
checks := checks
rangeTombstones := rangeTombstones
r.Add(registry.TestSpec{
Name: fmt.Sprintf(`clearrange/checks=%t/rangeTs=%t`, checks, rangeTombstones),
Owner: registry.OwnerStorage,
// 5h for import, 90 for the test. The import should take closer
// to <3:30h but it varies.
Timeout: 5*time.Hour + 90*time.Minute,
Cluster: r.MakeClusterSpec(10, spec.CPU(16)),
Run: func(ctx context.Context, t test.Test, c cluster.Cluster) {
runClearRange(ctx, t, c, checks, rangeTombstones)
},
})

// Using a separate clearrange test on zfs instead of randomly
// using the same test, because the Timeout might be different,
// and may need to be tweaked.
r.Add(registry.TestSpec{
Name: fmt.Sprintf(`clearrange/zfs/checks=%t/rangeTs=%t`, checks, rangeTombstones),
Skip: "Consistently failing. See #68716 context.",
Owner: registry.OwnerStorage,
// 5h for import, 120 for the test. The import should take closer
// to <3:30h but it varies.
Timeout: 5*time.Hour + 120*time.Minute,
Cluster: r.MakeClusterSpec(10, spec.CPU(16), spec.SetFileSystem(spec.Zfs)),
EncryptionSupport: registry.EncryptionMetamorphic,
Run: func(ctx context.Context, t test.Test, c cluster.Cluster) {
runClearRange(ctx, t, c, checks, rangeTombstones)
},
})
}
checks := checks
r.Add(registry.TestSpec{
Name: fmt.Sprintf(`clearrange/checks=%t`, checks),
Owner: registry.OwnerStorage,
// 5h for import, 90 for the test. The import should take closer
// to <3:30h but it varies.
Timeout: 5*time.Hour + 90*time.Minute,
Cluster: r.MakeClusterSpec(10, spec.CPU(16)),
Run: func(ctx context.Context, t test.Test, c cluster.Cluster) {
runClearRange(ctx, t, c, checks)
},
})

// Using a separate clearrange test on zfs instead of randomly
// using the same test, because the Timeout might be different,
// and may need to be tweaked.
r.Add(registry.TestSpec{
Name: fmt.Sprintf(`clearrange/zfs/checks=%t`, checks),
Skip: "Consistently failing. See #68716 context.",
Owner: registry.OwnerStorage,
// 5h for import, 120 for the test. The import should take closer
// to <3:30h but it varies.
Timeout: 5*time.Hour + 120*time.Minute,
Cluster: r.MakeClusterSpec(10, spec.CPU(16), spec.SetFileSystem(spec.Zfs)),
EncryptionSupport: registry.EncryptionMetamorphic,
Run: func(ctx context.Context, t test.Test, c cluster.Cluster) {
runClearRange(ctx, t, c, checks)
},
})
}
}

func runClearRange(
ctx context.Context,
t test.Test,
c cluster.Cluster,
aggressiveChecks bool,
useRangeTombstones bool,
) {
func runClearRange(ctx context.Context, t test.Test, c cluster.Cluster, aggressiveChecks bool) {
c.Put(ctx, t.Cockroach(), "./cockroach")

t.Status("restoring fixture")
c.Start(ctx, t.L(), option.DefaultStartOpts(), install.MakeClusterSettings())

{
conn := c.Conn(ctx, t.L(), 1)
if _, err := conn.ExecContext(ctx,
`SET CLUSTER SETTING storage.mvcc.range_tombstones.enabled = $1`,
useRangeTombstones); err != nil {
t.Fatal(err)
}
conn.Close()
}

// NB: on a 10 node cluster, this should take well below 3h.
tBegin := timeutil.Now()
c.Run(ctx, c.Node(1), "./cockroach", "workload", "fixtures", "import", "bank",
Expand Down
33 changes: 10 additions & 23 deletions pkg/cmd/roachtest/tests/import_cancellation.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,22 +29,18 @@ import (
)

func registerImportCancellation(r registry.Registry) {
for _, rangeTombstones := range []bool{true, false} {
r.Add(registry.TestSpec{
Name: fmt.Sprintf(`import-cancellation/rangeTs=%t`, rangeTombstones),
Owner: registry.OwnerDisasterRecovery,
Timeout: 4 * time.Hour,
Cluster: r.MakeClusterSpec(6, spec.CPU(32)),
Run: func(ctx context.Context, t test.Test, c cluster.Cluster) {
runImportCancellation(ctx, t, c, rangeTombstones)
},
})
}
r.Add(registry.TestSpec{
Name: `import-cancellation`,
Owner: registry.OwnerDisasterRecovery,
Timeout: 4 * time.Hour,
Cluster: r.MakeClusterSpec(6, spec.CPU(32)),
Run: func(ctx context.Context, t test.Test, c cluster.Cluster) {
runImportCancellation(ctx, t, c)
},
})
}

func runImportCancellation(
ctx context.Context, t test.Test, c cluster.Cluster, rangeTombstones bool,
) {
func runImportCancellation(ctx context.Context, t test.Test, c cluster.Cluster) {
c.Put(ctx, t.Cockroach(), "./cockroach")
c.Put(ctx, t.DeprecatedWorkload(), "./workload") // required for tpch
c.Start(ctx, t.L(), option.DefaultStartOpts(), install.MakeClusterSettings())
Expand Down Expand Up @@ -76,15 +72,6 @@ func runImportCancellation(
if _, err := conn.Exec(`SET CLUSTER SETTING kv.bulk_ingest.max_index_buffer_size = '2gb'`); err != nil {
t.Fatal(err)
}
// Enable MVCC Range tombstones, if required.
rtEnable := "f"
if rangeTombstones {
rtEnable = "t"
}
stmt := fmt.Sprintf(`SET CLUSTER SETTING storage.mvcc.range_tombstones.enabled = '%s'`, rtEnable)
if _, err := conn.Exec(stmt); err != nil {
t.Fatal(err)
}
// Increase AddSSTable concurrency to speed up the imports. Otherwise the
// lineitem (the largest tpch table) IMPORT will extend the test duration
// significantly.
Expand Down
1 change: 0 additions & 1 deletion pkg/cmd/roachtest/tests/mvcc_gc.go
Original file line number Diff line number Diff line change
Expand Up @@ -93,7 +93,6 @@ func runMVCCGC(ctx context.Context, t test.Test, c cluster.Cluster) {
execSQLOrFail(fmt.Sprintf(`SET CLUSTER SETTING %s = $1`, name), value)
}

setClusterSetting("storage.mvcc.range_tombstones.enabled", true)
setClusterSetting("kv.protectedts.poll_interval", "5s")
setClusterSetting("kv.mvcc_gc.queue_interval", "0s")

Expand Down
1 change: 1 addition & 0 deletions pkg/settings/registry.go
Original file line number Diff line number Diff line change
Expand Up @@ -159,6 +159,7 @@ var retiredSettings = map[string]struct{}{
"sql.distsql.drain.cancel_after_wait.enabled": {},
"changefeed.active_protected_timestamps.enabled": {},
"jobs.scheduler.single_node_scheduler.enabled": {},
"storage.mvcc.range_tombstones.enabled": {},
// renamed.
"spanconfig.host_coalesce_adjacent.enabled": {},
"sql.defaults.experimental_stream_replication.enabled": {},
Expand Down
1 change: 0 additions & 1 deletion pkg/sql/BUILD.bazel
Original file line number Diff line number Diff line change
Expand Up @@ -485,7 +485,6 @@ go_library(
"//pkg/sql/syntheticprivilegecache",
"//pkg/sql/types",
"//pkg/sql/vtable",
"//pkg/storage",
"//pkg/storage/enginepb",
"//pkg/testutils/serverutils",
"//pkg/upgrade",
Expand Down
1 change: 0 additions & 1 deletion pkg/sql/gcjob/BUILD.bazel
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,6 @@ go_library(
"//pkg/sql/pgwire/pgcode",
"//pkg/sql/pgwire/pgerror",
"//pkg/sql/sqlerrors",
"//pkg/storage",
"//pkg/util/admission/admissionpb",
"//pkg/util/hlc",
"//pkg/util/log",
Expand Down
19 changes: 5 additions & 14 deletions pkg/sql/gcjob/gc_job.go
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,6 @@ import (
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descs"
"github.com/cockroachdb/cockroach/pkg/sql/isql"
"github.com/cockroachdb/cockroach/pkg/storage"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/errors"
Expand Down Expand Up @@ -307,7 +306,7 @@ func (r schemaChangeGCResumer) Resume(ctx context.Context, execCtx interface{})
return err
}

if !shouldUseDelRange(ctx, details, execCfg.Settings, execCfg.GCJobTestingKnobs) {
if !shouldUseDelRange(ctx, details, execCfg.Settings) {
return r.legacyWaitAndClearTableData(ctx, execCfg, details, progress)
}
return r.deleteDataAndWaitForGC(ctx, execCfg, details, progress)
Expand Down Expand Up @@ -432,7 +431,7 @@ func (r schemaChangeGCResumer) legacyWaitAndClearTableData(

// Now that we've registered to be notified, check to see if we raced
// with the new version becoming active.
if shouldUseDelRange(ctx, details, execCfg.Settings, execCfg.GCJobTestingKnobs) {
if shouldUseDelRange(ctx, details, execCfg.Settings) {
return r.deleteDataAndWaitForGC(ctx, execCfg, details, progress)
}

Expand All @@ -448,9 +447,7 @@ func (r schemaChangeGCResumer) legacyWaitAndClearTableData(
}
// We'll be notified if the new version becomes active, so check and
// see if it's now time to change to the new protocol.
if shouldUseDelRange(
ctx, details, execCfg.Settings, execCfg.GCJobTestingKnobs,
) {
if shouldUseDelRange(ctx, details, execCfg.Settings) {
return r.deleteDataAndWaitForGC(ctx, execCfg, details, progress)
}

Expand Down Expand Up @@ -500,17 +497,11 @@ func (r schemaChangeGCResumer) legacyWaitAndClearTableData(
}

func shouldUseDelRange(
ctx context.Context,
details *jobspb.SchemaChangeGCDetails,
s *cluster.Settings,
knobs *sql.GCJobTestingKnobs,
ctx context.Context, details *jobspb.SchemaChangeGCDetails, s *cluster.Settings,
) bool {
// TODO(ajwerner): Adopt the DeleteRange protocol for tenant GC.
return details.Tenant == nil &&
s.Version.IsActive(ctx, clusterversion.TODODelete_V22_2UseDelRangeInGCJob) &&
(storage.CanUseMVCCRangeTombstones(ctx, s) ||
// Allow this testing knob to override the storage setting, for convenience.
knobs.SkipWaitingForMVCCGC)
s.Version.IsActive(ctx, clusterversion.TODODelete_V22_2UseDelRangeInGCJob)
}

// waitForWork waits until there is work to do given the gossipUpDateC, the
Expand Down
1 change: 0 additions & 1 deletion pkg/sql/gcjob/gcjobnotifier/BUILD.bazel
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,6 @@ go_library(
"//pkg/keys",
"//pkg/roachpb",
"//pkg/settings/cluster",
"//pkg/storage",
"//pkg/util/log/logcrash",
"//pkg/util/stop",
"//pkg/util/syncutil",
Expand Down
10 changes: 0 additions & 10 deletions pkg/sql/gcjob/gcjobnotifier/notifier.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,6 @@ import (
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/storage"
"github.com/cockroachdb/cockroach/pkg/util/log/logcrash"
"github.com/cockroachdb/cockroach/pkg/util/stop"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
Expand Down Expand Up @@ -155,21 +154,12 @@ func (n *Notifier) run(_ context.Context) {
versionSettingChanged <- struct{}{}
}
})
tombstonesEnableChanges := make(chan struct{}, 1)
storage.MVCCRangeTombstonesEnabled.SetOnChange(&n.settings.SV, func(ctx context.Context) {
select {
case tombstonesEnableChanges <- struct{}{}:
default:
}
})
for {
select {
case <-n.stopper.ShouldQuiesce():
return
case <-versionSettingChanged:
n.notify()
case <-tombstonesEnableChanges:
n.notify()
case <-systemConfigUpdateCh:
n.maybeNotify()
}
Expand Down
1 change: 0 additions & 1 deletion pkg/sql/gcjob_test/BUILD.bazel
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,6 @@ go_test(
"//pkg/sql/gcjob/gcjobnotifier",
"//pkg/sql/isql",
"//pkg/sql/sem/catid",
"//pkg/storage",
"//pkg/testutils",
"//pkg/testutils/jobutils",
"//pkg/testutils/serverutils",
Expand Down
Loading