diff --git a/docs/generated/settings/settings-for-tenants.txt b/docs/generated/settings/settings-for-tenants.txt
index 063c69ddee42..9d417e84938a 100644
--- a/docs/generated/settings/settings-for-tenants.txt
+++ b/docs/generated/settings/settings-for-tenants.txt
@@ -5,6 +5,7 @@ admission.sql_sql_response.enabled boolean true when true, work performed by the
bulkio.backup.file_size byte size 128 MiB target size for individual data files produced during BACKUP
bulkio.backup.read_timeout duration 5m0s amount of time after which a read attempt is considered timed out, which causes the backup to fail
bulkio.backup.read_with_priority_after duration 1m0s amount of time since the read-as-of time above which a BACKUP should use priority when retrying reads
+bulkio.backup.resolve_destination_in_job.enabled boolean false defer the interaction with the external storage used to resolve the backup destination until the job starts
bulkio.stream_ingestion.minimum_flush_interval duration 5s the minimum timestamp between flushes; flushes may still occur if internal buffers fill up
changefeed.node_throttle_config string specifies node level throttling configuration for all changefeeeds
cloudstorage.http.custom_ca string custom root CA (appended to system's default CAs) for verifying certificates when interacting with HTTPS storage
diff --git a/docs/generated/settings/settings.html b/docs/generated/settings/settings.html
index 8d535d4ea4a8..e7a5246acbee 100644
--- a/docs/generated/settings/settings.html
+++ b/docs/generated/settings/settings.html
@@ -8,6 +8,7 @@
bulkio.backup.file_size | byte size | 128 MiB | target size for individual data files produced during BACKUP |
bulkio.backup.read_timeout | duration | 5m0s | amount of time after which a read attempt is considered timed out, which causes the backup to fail |
bulkio.backup.read_with_priority_after | duration | 1m0s | amount of time since the read-as-of time above which a BACKUP should use priority when retrying reads |
+bulkio.backup.resolve_destination_in_job.enabled | boolean | false | defer the interaction with the external storage used to resolve the backup destination until the job starts |
bulkio.stream_ingestion.minimum_flush_interval | duration | 5s | the minimum timestamp between flushes; flushes may still occur if internal buffers fill up |
changefeed.node_throttle_config | string |
| specifies node level throttling configuration for all changefeeeds |
cloudstorage.http.custom_ca | string |
| custom root CA (appended to system's default CAs) for verifying certificates when interacting with HTTPS storage |
diff --git a/pkg/ccl/backupccl/backup_job.go b/pkg/ccl/backupccl/backup_job.go
index bf2e457d4104..452d20baf2e1 100644
--- a/pkg/ccl/backupccl/backup_job.go
+++ b/pkg/ccl/backupccl/backup_job.go
@@ -36,6 +36,7 @@ import (
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sessiondata"
"github.com/cockroachdb/cockroach/pkg/sql/stats"
+ "github.com/cockroachdb/cockroach/pkg/util"
"github.com/cockroachdb/cockroach/pkg/util/ctxgroup"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/mon"
@@ -376,6 +377,80 @@ func (b *backupResumer) Resume(ctx context.Context, execCtx interface{}) error {
details := b.job.Details().(jobspb.BackupDetails)
p := execCtx.(sql.JobExecContext)
+ var backupManifest *BackupManifest
+
+ // If planning didn't resolve the external destination, then we need to do
+ // so now.
+ if details.URI == "" {
+ initialDetails := details
+ backupDetails, m, err := getBackupDetailAndManifest(
+ ctx, p.ExecCfg(), p.ExtendedEvalContext().Txn, details, p.User(),
+ )
+ if err != nil {
+ return err
+ }
+ details = backupDetails
+ backupManifest = &m
+
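+ // Write a protected timestamp record covering the backup's spans so that
+ // GC cannot advance past the backup's read time while the job runs; only
+ // the system tenant writes pts records here.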
+ if len(backupManifest.Spans) > 0 && p.ExecCfg().Codec.ForSystemTenant() {
+ protectedtsID := uuid.MakeV4()
+ details.ProtectedTimestampRecord = &protectedtsID
+
+ if err := p.ExecCfg().DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
+ return protectTimestampForBackup(
+ ctx, p.ExecCfg(), txn, b.job.ID(), m, details,
+ )
+ }); err != nil {
+ return err
+ }
+ }
+
+ if err := writeBackupManifestCheckpoint(
+ ctx, b.job.ID(), backupDetails, backupManifest, p.ExecCfg(), p.User(),
+ ); err != nil {
+ return err
+ }
+
+ if err := p.ExecCfg().DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
+ return planSchedulePTSChaining(ctx, p.ExecCfg(), txn, &details, b.job.CreatedBy())
+ }); err != nil {
+ return err
+ }
+
+ // The description picked during original planning might still say "LATEST"
+ // if resolving that to the actual directory only just happened above.
+ // Ideally we'd re-render the description now that we know the subdir, but
+ // we no longer have the backup AST node to easily call the rendering func.
+ // Instead we do a bit of dirty string replacement iff there is exactly one
+ // "INTO 'LATEST' IN" (if there's more than one, someone has weird table/db
+ // names and we just leave the description as-is, since it is only for
+ // humans).
+ description := b.job.Payload().Description
+ const unresolvedText = "INTO 'LATEST' IN"
+ if initialDetails.Destination.Subdir == "LATEST" && strings.Count(description, unresolvedText) == 1 {
+ description = strings.ReplaceAll(description, unresolvedText, fmt.Sprintf("INTO '%s' IN", details.Destination.Subdir))
+ }
+
+ // Update the job payload (the non-volatile job definition) once, with the
+ // now-resolved destination, updated description, etc. If we resume again
+ // we'll skip this whole block, so the payload isn't repeatedly rewritten.
+ if err := b.job.Update(ctx, nil, func(txn *kv.Txn, md jobs.JobMetadata, ju *jobs.JobUpdater) error {
+ if err := md.CheckRunningOrReverting(); err != nil {
+ return err
+ }
+ md.Payload.Details = jobspb.WrapPayloadDetails(details)
+ md.Payload.Description = description
+ ju.UpdatePayload(md.Payload)
+ return nil
+ }); err != nil {
+ return err
+ }
+
+ // Collect telemetry, once per backup after resolving its destination.
+ lic := utilccl.CheckEnterpriseEnabled(
+ p.ExecCfg().Settings, p.ExecCfg().ClusterID(), p.ExecCfg().Organization(), "",
+ ) != nil
+ collectTelemetry(m, initialDetails, details, lic)
+ }
+
// For all backups, partitioned or not, the main BACKUP manifest is stored at
// details.URI.
defaultConf, err := cloud.ExternalStorageConfFromURI(details.URI, p.User())
@@ -424,16 +499,20 @@ func (b *backupResumer) Resume(ctx context.Context, execCtx interface{}) error {
mem := p.ExecCfg().RootMemoryMonitor.MakeBoundAccount()
defer mem.Close(ctx)
- backupManifest, memSize, err := b.readManifestOnResume(ctx, &mem, p.ExecCfg(), defaultStore, details)
- if err != nil {
- return err
- }
+ var memSize int64
defer func() {
if memSize != 0 {
mem.Shrink(ctx, memSize)
}
}()
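+
+ // If the destination was resolved in the job above, the manifest is already
+ // in memory; otherwise, read it back from external storage. The metamorphic
+ // constant lets some test builds re-read it regardless, so both paths stay
+ // exercised.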
+ if backupManifest == nil || util.ConstantWithMetamorphicTestBool("backup-read-manifest", false) {
+ backupManifest, memSize, err = b.readManifestOnResume(ctx, &mem, p.ExecCfg(), defaultStore, details)
+ if err != nil {
+ return err
+ }
+ }
+
statsCache := p.ExecCfg().TableStatsCache
// We retry on pretty generic failures -- any rpc error. If a worker node were
// to restart, it would produce this kind of error, but there may be other
diff --git a/pkg/ccl/backupccl/backup_planning.go b/pkg/ccl/backupccl/backup_planning.go
index aa7afa05cbfe..ee433efaba5b 100644
--- a/pkg/ccl/backupccl/backup_planning.go
+++ b/pkg/ccl/backupccl/backup_planning.go
@@ -47,6 +47,7 @@ import (
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
"github.com/cockroachdb/cockroach/pkg/sql/privilege"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
+ "github.com/cockroachdb/cockroach/pkg/util"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/interval"
"github.com/cockroachdb/cockroach/pkg/util/log"
@@ -88,6 +89,13 @@ var featureBackupEnabled = settings.RegisterBoolSetting(
featureflag.FeatureFlagEnabledDefault,
).WithPublic()
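+
+// resolveDuringExec, when enabled, defers resolving the backup destination
+// (for example, reading which directory "LATEST" points to in external
+// storage) from planning time to job execution time. As an illustration, an
+// operator could enable it with:
+//
+//   SET CLUSTER SETTING bulkio.backup.resolve_destination_in_job.enabled = true;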
+var resolveDuringExec = settings.RegisterBoolSetting(
+ settings.TenantWritable,
+ "bulkio.backup.resolve_destination_in_job.enabled",
+ "defer the interaction with the external storage used to resolve backup destination until the job starts",
+ util.ConstantWithMetamorphicTestBool("backup-resolve-exec", false),
+).WithPublic()
+
func (p *backupKMSEnv) ClusterSettings() *cluster.Settings {
return p.settings
}
@@ -785,6 +793,75 @@ func backupPlanHook(
initialDetails.SpecificTenantIds = []roachpb.TenantID{backupStmt.Targets.Tenant}
}
+ jobID := p.ExecCfg().JobRegistry.MakeJobID()
+
+ // TODO(dt): replace setting with cluster version gate.
+ if resolveDuringExec.Get(p.ExecCfg().SV()) {
+ description, err := backupJobDescription(p,
+ backupStmt.Backup, to, incrementalFrom,
+ encryptionParams.RawKmsUris,
+ initialDetails.Destination.Subdir,
+ initialDetails.Destination.IncrementalStorage,
+ )
+ if err != nil {
+ return err
+ }
+ jr := jobs.Record{
+ Description: description,
+ Details: initialDetails,
+ Progress: jobspb.BackupProgress{},
+ CreatedBy: backupStmt.CreatedByInfo,
+ Username: p.User(),
+ DescriptorIDs: func() (sqlDescIDs []descpb.ID) {
+ for i := range targetDescs {
+ sqlDescIDs = append(sqlDescIDs, targetDescs[i].GetID())
+ }
+ return sqlDescIDs
+ }(),
+ }
+ plannerTxn := p.ExtendedEvalContext().Txn
+
+ if backupStmt.Options.Detached {
+ // In detached mode (required when running inside an explicit
+ // transaction), we simply create the job record; we do not wait for the
+ // job to finish.
+ _, err := p.ExecCfg().JobRegistry.CreateAdoptableJobWithTxn(
+ ctx, jr, jobID, plannerTxn)
+ if err != nil {
+ return err
+ }
+ resultsCh <- tree.Datums{tree.NewDInt(tree.DInt(jobID))}
+ return nil
+ }
+ var sj *jobs.StartableJob
+ if err := func() (err error) {
+ defer func() {
+ if err == nil || sj == nil {
+ return
+ }
+ if cleanupErr := sj.CleanupOnRollback(ctx); cleanupErr != nil {
+ log.Errorf(ctx, "failed to cleanup job: %v", cleanupErr)
+ }
+ }()
+ if err := p.ExecCfg().JobRegistry.CreateStartableJobWithTxn(ctx, &sj, jobID, plannerTxn, jr); err != nil {
+ return err
+ }
+ // We commit the transaction here so that the job can be started. This
+ // is safe because we're in an implicit transaction. If we were in an
+ // explicit transaction the job would have to be run with the detached
+ // option and would have been handled above.
+ return plannerTxn.Commit(ctx)
+ }(); err != nil {
+ return err
+ }
+ if err := sj.Start(ctx); err != nil {
+ return err
+ }
+ if err := sj.AwaitCompletion(ctx); err != nil {
+ return err
+ }
+ return sj.ReportExecutionResults(ctx, resultsCh)
+ }
+
// TODO(dt): move this to job execution phase.
backupDetails, backupManifest, err := getBackupDetailAndManifest(
ctx, p.ExecCfg(), p.ExtendedEvalContext().Txn, initialDetails, p.User(),
@@ -798,6 +875,10 @@ func backupPlanHook(
return err
}
+ // We create the job record in the planner's transaction so that job
+ // record creation commits or rolls back atomically with the rest of
+ // planning.
+ plannerTxn := p.ExtendedEvalContext().Txn
+
// Write backup manifest into a temporary checkpoint file.
// This accomplishes 2 purposes:
// 1. Persists large state needed for backup job completion.
@@ -807,23 +888,7 @@ func backupPlanHook(
//
// TODO (pbardea): For partitioned backups, also add verification for other
// stores we are writing to in addition to the default.
- doWriteBackupManifestCheckpoint := func(ctx context.Context, jobID jobspb.JobID) error {
- defaultStore, err := p.ExecCfg().DistSQLSrv.ExternalStorageFromURI(ctx, backupDetails.URI, p.User())
- if err != nil {
- return err
- }
- defer defaultStore.Close()
-
- if err := writeBackupManifest(
- ctx, p.ExecCfg().Settings, defaultStore, tempCheckpointFileNameForJob(jobID),
- backupDetails.EncryptionOptions, &backupManifest,
- ); err != nil {
- return errors.Wrapf(err, "writing checkpoint file")
- }
- return nil
- }
-
- if err := planSchedulePTSChaining(ctx, p, &backupDetails, backupStmt); err != nil {
+ if err := planSchedulePTSChaining(ctx, p.ExecCfg(), plannerTxn, &backupDetails, backupStmt.CreatedByInfo); err != nil {
return err
}
@@ -852,14 +917,8 @@ func backupPlanHook(
p.ExecCfg().Settings, p.ExecCfg().ClusterID(), p.ExecCfg().Organization(), "",
) != nil
- // We create the job record in the planner's transaction to ensure that
- // the job record creation happens transactionally.
- plannerTxn := p.ExtendedEvalContext().Txn
-
- jobID := p.ExecCfg().JobRegistry.MakeJobID()
-
if err := protectTimestampForBackup(
- ctx, p, plannerTxn, jobID, backupManifest, backupDetails,
+ ctx, p.ExecCfg(), plannerTxn, jobID, backupManifest, backupDetails,
); err != nil {
return err
}
@@ -873,7 +932,9 @@ func backupPlanHook(
return err
}
- if err := doWriteBackupManifestCheckpoint(ctx, jobID); err != nil {
+ if err := writeBackupManifestCheckpoint(
+ ctx, jobID, backupDetails, &backupManifest, p.ExecCfg(), p.User(),
+ ); err != nil {
return err
}
@@ -897,7 +958,9 @@ func backupPlanHook(
if err := p.ExecCfg().JobRegistry.CreateStartableJobWithTxn(ctx, &sj, jobID, plannerTxn, jr); err != nil {
return err
}
- if err := doWriteBackupManifestCheckpoint(ctx, jobID); err != nil {
+ if err := writeBackupManifestCheckpoint(
+ ctx, jobID, backupDetails, &backupManifest, p.ExecCfg(), p.User(),
+ ); err != nil {
return err
}
@@ -1007,6 +1070,29 @@ func getScheduledBackupExecutionArgsFromSchedule(
return sj, args, nil
}
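+
+// writeBackupManifestCheckpoint writes the in-memory manifest to a temporary
+// checkpoint file at the backup's default destination. This persists large
+// state needed for job completion and, as a side effect, verifies that the
+// destination is writable before the job runs.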
+func writeBackupManifestCheckpoint(
+ ctx context.Context,
+ jobID jobspb.JobID,
+ backupDetails jobspb.BackupDetails,
+ backupManifest *BackupManifest,
+ execCfg *sql.ExecutorConfig,
+ user security.SQLUsername,
+) error {
+ defaultStore, err := execCfg.DistSQLSrv.ExternalStorageFromURI(ctx, backupDetails.URI, user)
+ if err != nil {
+ return err
+ }
+ defer defaultStore.Close()
+
+ if err := writeBackupManifest(
+ ctx, execCfg.Settings, defaultStore, tempCheckpointFileNameForJob(jobID),
+ backupDetails.EncryptionOptions, backupManifest,
+ ); err != nil {
+ return errors.Wrapf(err, "writing checkpoint file")
+ }
+ return nil
+}
+
// planSchedulePTSChaining populates backupDetails with information relevant to
// the chaining of protected timestamp records between scheduled backups.
// Depending on whether backupStmt is a full or incremental backup, we populate
@@ -1014,23 +1100,24 @@ func getScheduledBackupExecutionArgsFromSchedule(
// completion of the backup job.
func planSchedulePTSChaining(
ctx context.Context,
- p sql.PlanHookState,
+ execCfg *sql.ExecutorConfig,
+ txn *kv.Txn,
backupDetails *jobspb.BackupDetails,
- backupStmt *annotatedBackupStatement,
+ createdBy *jobs.CreatedByInfo,
) error {
env := scheduledjobs.ProdJobSchedulerEnv
- if knobs, ok := p.ExecCfg().DistSQLSrv.TestingKnobs.JobsTestingKnobs.(*jobs.TestingKnobs); ok {
+ if knobs, ok := execCfg.DistSQLSrv.TestingKnobs.JobsTestingKnobs.(*jobs.TestingKnobs); ok {
if knobs.JobSchedulerEnv != nil {
env = knobs.JobSchedulerEnv
}
}
// If this is not a scheduled backup, we do not chain pts records.
- if backupStmt.CreatedByInfo == nil || backupStmt.CreatedByInfo.Name != jobs.CreatedByScheduledJobs {
+ if createdBy == nil || createdBy.Name != jobs.CreatedByScheduledJobs {
return nil
}
- _, args, err := getScheduledBackupExecutionArgsFromSchedule(ctx, env,
- p.ExtendedEvalContext().Txn, p.ExecCfg().InternalExecutor, backupStmt.CreatedByInfo.ID)
+ _, args, err := getScheduledBackupExecutionArgsFromSchedule(
+ ctx, env, txn, execCfg.InternalExecutor, createdBy.ID)
if err != nil {
return err
}
@@ -1048,8 +1135,8 @@ func planSchedulePTSChaining(
return nil
}
- _, incArgs, err := getScheduledBackupExecutionArgsFromSchedule(ctx, env,
- p.ExtendedEvalContext().Txn, p.ExecCfg().InternalExecutor, args.DependentScheduleID)
+ _, incArgs, err := getScheduledBackupExecutionArgsFromSchedule(
+ ctx, env, txn, execCfg.InternalExecutor, args.DependentScheduleID)
if err != nil {
// If we are unable to resolve the dependent incremental schedule (it
// could have been dropped) we do not need to perform any chaining.
@@ -1240,7 +1327,7 @@ func getProtectedTimestampTargetForBackup(backupManifest BackupManifest) *ptpb.T
func protectTimestampForBackup(
ctx context.Context,
- p sql.PlanHookState,
+ execCfg *sql.ExecutorConfig,
txn *kv.Txn,
jobID jobspb.JobID,
backupManifest BackupManifest,
@@ -1260,7 +1347,7 @@ func protectTimestampForBackup(
target := getProtectedTimestampTargetForBackup(backupManifest)
rec := jobsprotectedts.MakeRecord(*backupDetails.ProtectedTimestampRecord, int64(jobID),
tsToProtect, backupManifest.Spans, jobsprotectedts.Jobs, target)
- err := p.ExecCfg().ProtectedTimestampProvider.Protect(ctx, txn, rec)
+ err := execCfg.ProtectedTimestampProvider.Protect(ctx, txn, rec)
if err != nil {
return err
}
diff --git a/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/BUILD.bazel b/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/BUILD.bazel
index 59a04956b03c..9923ad6f5f49 100644
--- a/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/BUILD.bazel
+++ b/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/BUILD.bazel
@@ -13,7 +13,6 @@ go_test(
"//pkg/ccl/partitionccl",
"//pkg/ccl/utilccl",
"//pkg/jobs",
- "//pkg/keys",
"//pkg/roachpb",
"//pkg/security",
"//pkg/security/securitytest",
diff --git a/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/datadriven_test.go b/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/datadriven_test.go
index 48f211dc7519..b728bc31b5cc 100644
--- a/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/datadriven_test.go
+++ b/pkg/ccl/spanconfigccl/spanconfigreconcilerccl/datadriven_test.go
@@ -19,7 +19,6 @@ import (
_ "github.com/cockroachdb/cockroach/pkg/ccl/kvccl/kvtenantccl"
_ "github.com/cockroachdb/cockroach/pkg/ccl/partitionccl"
"github.com/cockroachdb/cockroach/pkg/jobs"
- "github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/spanconfig"
"github.com/cockroachdb/cockroach/pkg/spanconfig/spanconfigtestutils"
@@ -192,7 +191,7 @@ func TestDataDriven(t *testing.T) {
return nil
})
records, err := kvAccessor.GetSpanConfigRecords(
- ctx, []spanconfig.Target{spanconfig.MakeTargetFromSpan(keys.EverythingSpan)},
+ ctx, spanconfig.TestingEntireSpanConfigurationStateTargets(),
)
require.NoError(t, err)
sort.Slice(records, func(i, j int) bool {
diff --git a/pkg/jobs/adopt.go b/pkg/jobs/adopt.go
index 6adf59ce3d1c..6172791e09ff 100644
--- a/pkg/jobs/adopt.go
+++ b/pkg/jobs/adopt.go
@@ -141,8 +141,8 @@ COALESCE(last_run, created) + least(
resumeQueryBaseCols = "status, payload, progress, crdb_internal.sql_liveness_is_alive(claim_session_id)"
resumeQueryWhereBase = `id = $1 AND claim_session_id = $2`
- resumeQueryWithBackoff = `SELECT ` + resumeQueryBaseCols + `, ` + canRunClause + ` AS can_run` +
- ` FROM system.jobs, ` + canRunArgs + " WHERE " + resumeQueryWhereBase
+ resumeQueryWithBackoff = `SELECT ` + resumeQueryBaseCols + `, ` + canRunClause + ` AS can_run,` +
+ ` created_by_type, created_by_id FROM system.jobs, ` + canRunArgs + " WHERE " + resumeQueryWhereBase
)
// getProcessQuery returns the query that selects the jobs that are claimed
@@ -301,7 +301,13 @@ func (r *Registry) resumeJob(ctx context.Context, jobID jobspb.JobID, s sqlliven
if err != nil {
return err
}
- job := &Job{id: jobID, registry: r}
+
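+ // Hydrate the job's creator info (e.g. the schedule that spawned it) from
+ // the two extra columns selected by resumeQueryWithBackoff, so resumers
+ // can consult job.CreatedBy() during execution.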
+ createdBy, err := unmarshalCreatedBy(row[5], row[6])
+ if err != nil {
+ return err
+ }
+
+ job := &Job{id: jobID, registry: r, createdBy: createdBy}
job.mu.payload = *payload
job.mu.progress = *progress
job.sessionID = s.ID()
diff --git a/pkg/jobs/jobs_test.go b/pkg/jobs/jobs_test.go
index 138bfdc986bf..e1a73a4ad682 100644
--- a/pkg/jobs/jobs_test.go
+++ b/pkg/jobs/jobs_test.go
@@ -1751,10 +1751,21 @@ func TestJobLifecycle(t *testing.T) {
t.Run("job with created by fields", func(t *testing.T) {
createdByType := "internal_test"
+ resumerJob := make(chan *jobs.Job, 1)
+ jobs.RegisterConstructor(
+ jobspb.TypeBackup, func(j *jobs.Job, _ *cluster.Settings) jobs.Resumer {
+ return jobs.FakeResumer{
+ OnResume: func(ctx context.Context) error {
+ resumerJob <- j
+ return nil
+ },
+ }
+ })
+
jobID := registry.MakeJobID()
record := jobs.Record{
- Details: jobspb.RestoreDetails{},
- Progress: jobspb.RestoreProgress{},
+ Details: jobspb.BackupDetails{},
+ Progress: jobspb.BackupProgress{},
CreatedBy: &jobs.CreatedByInfo{Name: createdByType, ID: 123},
}
job, err := registry.CreateAdoptableJobWithTxn(ctx, record, jobID, nil /* txn */)
@@ -1764,6 +1775,11 @@ func TestJobLifecycle(t *testing.T) {
require.NoError(t, err)
require.NotNil(t, loadedJob.CreatedBy())
require.Equal(t, job.CreatedBy(), loadedJob.CreatedBy())
+ registry.TestingNudgeAdoptionQueue()
+ resumedJob := <-resumerJob
+ require.NotNil(t, resumedJob.CreatedBy())
+ require.Equal(t, job.CreatedBy(), resumedJob.CreatedBy())
})
}
diff --git a/pkg/migration/migrations/migrate_span_configs_test.go b/pkg/migration/migrations/migrate_span_configs_test.go
index a62a5c8a1a71..956052920fa7 100644
--- a/pkg/migration/migrations/migrate_span_configs_test.go
+++ b/pkg/migration/migrations/migrate_span_configs_test.go
@@ -17,7 +17,6 @@ import (
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/clusterversion"
- "github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv/kvclient/rangefeed/rangefeedcache"
"github.com/cockroachdb/cockroach/pkg/server"
"github.com/cockroachdb/cockroach/pkg/spanconfig"
@@ -67,10 +66,11 @@ func TestEnsureSpanConfigReconciliation(t *testing.T) {
tdb.Exec(t, `SET CLUSTER SETTING spanconfig.reconciliation_job.enabled = true`)
tdb.Exec(t, `SET CLUSTER SETTING spanconfig.reconciliation_job.checkpoint_interval = '100ms'`)
- { // Ensure that no span config entries are found.
- records, err := scKVAccessor.GetSpanConfigRecords(ctx, []spanconfig.Target{
- spanconfig.MakeTargetFromSpan(keys.EverythingSpan),
- })
+ { // Ensure that no span config records are found.
+ records, err := scKVAccessor.GetSpanConfigRecords(
+ ctx,
+ spanconfig.TestingEntireSpanConfigurationStateTargets(),
+ )
require.NoError(t, err)
require.Empty(t, records)
}
@@ -90,9 +90,10 @@ func TestEnsureSpanConfigReconciliation(t *testing.T) {
require.False(t, scReconciler.Checkpoint().IsEmpty())
{ // Ensure that the host tenant's span configs are installed.
- records, err := scKVAccessor.GetSpanConfigRecords(ctx, []spanconfig.Target{
- spanconfig.MakeTargetFromSpan(keys.EverythingSpan),
- })
+ records, err := scKVAccessor.GetSpanConfigRecords(
+ ctx,
+ spanconfig.TestingEntireSpanConfigurationStateTargets(),
+ )
require.NoError(t, err)
require.NotEmpty(t, records)
}
@@ -152,10 +153,11 @@ func TestEnsureSpanConfigReconciliationMultiNode(t *testing.T) {
tdb.Exec(t, `SET CLUSTER SETTING spanconfig.reconciliation_job.enabled = true`)
tdb.Exec(t, `SET CLUSTER SETTING spanconfig.reconciliation_job.checkpoint_interval = '100ms'`)
- { // Ensure that no span config entries are to be found.
- records, err := scKVAccessor.GetSpanConfigRecords(ctx, []spanconfig.Target{
- spanconfig.MakeTargetFromSpan(keys.EverythingSpan),
- })
+ { // Ensure that no span config records are to be found.
+ records, err := scKVAccessor.GetSpanConfigRecords(
+ ctx,
+ spanconfig.TestingEntireSpanConfigurationStateTargets(),
+ )
require.NoError(t, err)
require.Empty(t, records)
}
@@ -175,9 +177,10 @@ func TestEnsureSpanConfigReconciliationMultiNode(t *testing.T) {
require.False(t, scReconciler.Checkpoint().IsEmpty())
{ // Ensure that the host tenant's span configs are installed.
- records, err := scKVAccessor.GetSpanConfigRecords(ctx, []spanconfig.Target{
- spanconfig.MakeTargetFromSpan(keys.EverythingSpan),
- })
+ records, err := scKVAccessor.GetSpanConfigRecords(
+ ctx,
+ spanconfig.TestingEntireSpanConfigurationStateTargets(),
+ )
require.NoError(t, err)
require.NotEmpty(t, records)
}
@@ -219,9 +222,10 @@ func TestEnsureSpanConfigSubscription(t *testing.T) {
tdb.Exec(t, `SET CLUSTER SETTING spanconfig.reconciliation_job.enabled = true`)
testutils.SucceedsSoon(t, func() error {
- records, err := scKVAccessor.GetSpanConfigRecords(ctx, []spanconfig.Target{
- spanconfig.MakeTargetFromSpan(keys.EverythingSpan),
- })
+ records, err := scKVAccessor.GetSpanConfigRecords(
+ ctx,
+ spanconfig.TestingEntireSpanConfigurationStateTargets(),
+ )
require.NoError(t, err)
if len(records) == 0 {
return fmt.Errorf("empty global span configuration state")
diff --git a/pkg/migration/migrations/seed_tenant_span_configs.go b/pkg/migration/migrations/seed_tenant_span_configs.go
index c0b903c7cd9e..3010ac30bacf 100644
--- a/pkg/migration/migrations/seed_tenant_span_configs.go
+++ b/pkg/migration/migrations/seed_tenant_span_configs.go
@@ -78,7 +78,7 @@ func seedTenantSpanConfigsMigration(
return err
}
if len(scRecords) != 0 {
- // This tenant already has span config entries. It was either
+ // This tenant already has span config records. It was either
// already migrated (migrations need to be idempotent) or it was
// created after PreSeedTenantSpanConfigs was activated. There's
// nothing left to do here.
diff --git a/pkg/roachpb/span_config.go b/pkg/roachpb/span_config.go
index ad3ebc198a27..9c3cc253da90 100644
--- a/pkg/roachpb/span_config.go
+++ b/pkg/roachpb/span_config.go
@@ -127,3 +127,55 @@ func TestingDatabaseSystemSpanConfig(host bool) SpanConfig {
config.GCPolicy.IgnoreStrictEnforcement = true
return config
}
+
+// IsEntireKeyspaceTarget returns true if the receiver targets the entire
+// keyspace.
+func (st SystemSpanConfigTarget) IsEntireKeyspaceTarget() bool {
+ return st.Type.GetEntireKeyspace() != nil
+}
+
+// IsSpecificTenantKeyspaceTarget returns true if the receiver targets a
+// specific tenant's keyspace.
+func (st SystemSpanConfigTarget) IsSpecificTenantKeyspaceTarget() bool {
+ return st.Type.GetSpecificTenantKeyspace() != nil
+}
+
+// IsAllTenantKeyspaceTargetsSetTarget returns true if the receiver
+// encompasses all targets that have been set on specific tenant keyspaces
+// by the source tenant.
+func (st SystemSpanConfigTarget) IsAllTenantKeyspaceTargetsSetTarget() bool {
+ return st.Type.GetAllTenantKeyspaceTargetsSet() != nil
+}
+
+// NewEntireKeyspaceTargetType returns a system span config target type that
+// targets the entire keyspace.
+func NewEntireKeyspaceTargetType() *SystemSpanConfigTarget_Type {
+ return &SystemSpanConfigTarget_Type{
+ Type: &SystemSpanConfigTarget_Type_EntireKeyspace{
+ EntireKeyspace: &SystemSpanConfigTarget_EntireKeyspace{},
+ },
+ }
+}
+
+// NewSpecificTenantKeyspaceTargetType returns a system span config target
+// type that targets the given tenant ID's keyspace.
+func NewSpecificTenantKeyspaceTargetType(tenantID TenantID) *SystemSpanConfigTarget_Type {
+ return &SystemSpanConfigTarget_Type{
+ Type: &SystemSpanConfigTarget_Type_SpecificTenantKeyspace{
+ SpecificTenantKeyspace: &SystemSpanConfigTarget_TenantKeyspace{
+ TenantID: tenantID,
+ },
+ },
+ }
+}
+
+// NewAllTenantKeyspaceTargetsSetTargetType returns a read-only system span
+// config target type that encompasses all targets that have been set on
+// specific tenant keyspaces.
+func NewAllTenantKeyspaceTargetsSetTargetType() *SystemSpanConfigTarget_Type {
+ return &SystemSpanConfigTarget_Type{
+ Type: &SystemSpanConfigTarget_Type_AllTenantKeyspaceTargetsSet{
+ AllTenantKeyspaceTargetsSet: &SystemSpanConfigTarget_AllTenantKeyspaceTargetsSet{},
+ },
+ }
+}
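+
+// As an illustrative sketch (mirroring the auth tests in this change), the
+// host tenant targeting tenant 10's keyspace would construct:
+//
+//   target := SystemSpanConfigTarget{
+//       SourceTenantID: SystemTenantID,
+//       Type:           NewSpecificTenantKeyspaceTargetType(MakeTenantID(10)),
+//   }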
diff --git a/pkg/roachpb/span_config.proto b/pkg/roachpb/span_config.proto
index b326da31cc2f..5f15ff040904 100644
--- a/pkg/roachpb/span_config.proto
+++ b/pkg/roachpb/span_config.proto
@@ -175,6 +175,19 @@ message SpanConfig {
}
// SystemSpanConfigTarget specifies the target of system span configurations.
+// System targets are designed for a few different kinds of interactions. We
+// want the ability to:
+// 1. Allow the host tenant to set a system span configuration on the entire
+// keyspace.
+// 2. Allow the host tenant to set a system span configuration on a particular
+// tenant's keyspace.
+// 3. Allow secondary tenants to set system span configurations on their
+// keyspace.
+//
+// Additionally, we want each tenant to be able to fetch all system span
+// configurations it has installed, ideally without knowing the tenant IDs of
+// all other tenants that exist. We provide a read-only system span config
+// target type to achieve exactly this.
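+//
+// For illustration: the host tenant setting a configuration over tenant 10's
+// keyspace uses a target with source_tenant_id set to the system tenant's ID
+// and a specific_tenant_keyspace type with tenant_id 10; fetching everything
+// it has installed on tenant keyspaces uses the read-only
+// all_tenant_keyspace_targets_set type.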
message SystemSpanConfigTarget {
option (gogoproto.equal) = true;
@@ -182,17 +195,46 @@ message SystemSpanConfigTarget {
// configuration.
TenantID source_tenant_id = 1 [(gogoproto.nullable) = false, (gogoproto.customname) = "SourceTenantID"];
- // TargetTenantID is the ID of the tenant that the associated system span
- // configuration applies to.
- //
- // If the host tenant is the source and the target is unspecified then the
- // associated system span configuration applies over all ranges in the system
- // (including those belonging to secondary tenants).
- //
- // Secondary tenants are only allowed to target themselves and must fill in
- // this field. The host tenant may use this field to target a specific
- // secondary tenant.
- TenantID target_tenant_id = 2 [(gogoproto.customname) = "TargetTenantID"];
+ // TenantKeyspace is a target type that targets the keyspace of a specific
+ // tenant.
+ message TenantKeyspace {
+ option (gogoproto.equal) = true;
+
+ // TenantID is the ID of the tenant whose keyspace the associated
+ // system span configuration applies to.
+ //
+ // Secondary tenants are only allowed to target their own keyspace. The
+ // host tenant may use this field to target a specific secondary tenant's
+ // keyspace.
+ TenantID tenant_id = 1 [(gogoproto.nullable) = false, (gogoproto.customname) = "TenantID"];
+ };
+
+ // EntireKeyspace is a target type that targets the entire keyspace (all
+ // ranges, including those belonging to secondary tenants). Only the host
+ // tenant is allowed to target the entire keyspace.
+ message EntireKeyspace {
+ option (gogoproto.equal) = true;
+ };
+
+ // AllTenantKeyspaceTargetsSet is a read-only system target type that
+ // encompasses all system targets that have been set by the source tenant on
+ // specific tenants' keyspaces.
+ message AllTenantKeyspaceTargetsSet {
+ option (gogoproto.equal) = true;
+ };
+
+ message Type {
+ option (gogoproto.equal) = true;
+
+ oneof type {
+ TenantKeyspace specific_tenant_keyspace = 1;
+ EntireKeyspace entire_keyspace = 2;
+ AllTenantKeyspaceTargetsSet all_tenant_keyspace_targets_set = 3;
+ }
+ }
+
+ // Type of the system target.
+ Type type = 2;
}
// SpanConfigTarget specifies the target of an associated span configuration.
diff --git a/pkg/rpc/auth_tenant.go b/pkg/rpc/auth_tenant.go
index 819f20841a3e..201bfaf95fd8 100644
--- a/pkg/rpc/auth_tenant.go
+++ b/pkg/rpc/auth_tenant.go
@@ -315,11 +315,12 @@ func validateSpanConfigTarget(
return nil
}
- if target.TargetTenantID == nil {
- return authErrorf("secondary tenants must explicitly target themselves")
+ if target.IsEntireKeyspaceTarget() {
+ return authErrorf("secondary tenants cannot target the entire keyspace")
}
- if target.SourceTenantID != *target.TargetTenantID {
+ if target.IsSpecificTenantKeyspaceTarget() &&
+ target.Type.GetSpecificTenantKeyspace().TenantID != target.SourceTenantID {
return authErrorf(
"secondary tenants cannot interact with system span configurations of other tenants",
)
diff --git a/pkg/rpc/auth_test.go b/pkg/rpc/auth_test.go
index 613e741d41b2..9bb313bdaaa9 100644
--- a/pkg/rpc/auth_test.go
+++ b/pkg/rpc/auth_test.go
@@ -187,12 +187,11 @@ func TestTenantAuthRequest(t *testing.T) {
return ru
}
makeSystemSpanConfigTarget := func(source, target uint64) roachpb.SpanConfigTarget {
- targetID := roachpb.MakeTenantID(target)
return roachpb.SpanConfigTarget{
Union: &roachpb.SpanConfigTarget_SystemSpanConfigTarget{
SystemSpanConfigTarget: &roachpb.SystemSpanConfigTarget{
SourceTenantID: roachpb.MakeTenantID(source),
- TargetTenantID: &targetID,
+ Type: roachpb.NewSpecificTenantKeyspaceTargetType(roachpb.MakeTenantID(target)),
},
},
}
@@ -536,11 +535,22 @@ func TestTenantAuthRequest(t *testing.T) {
Union: &roachpb.SpanConfigTarget_SystemSpanConfigTarget{
SystemSpanConfigTarget: &roachpb.SystemSpanConfigTarget{
SourceTenantID: roachpb.MakeTenantID(10),
- TargetTenantID: nil,
+ Type: roachpb.NewEntireKeyspaceTargetType(),
},
},
}),
- expErr: `secondary tenants must explicitly target themselves`,
+ expErr: `secondary tenants cannot target the entire keyspace`,
+ },
+ {
+ req: makeGetSpanConfigsReq(roachpb.SpanConfigTarget{
+ Union: &roachpb.SpanConfigTarget_SystemSpanConfigTarget{
+ SystemSpanConfigTarget: &roachpb.SystemSpanConfigTarget{
+ SourceTenantID: roachpb.MakeTenantID(20),
+ Type: roachpb.NewEntireKeyspaceTargetType(),
+ },
+ },
+ }),
+ expErr: `malformed source tenant field`,
},
},
"/cockroach.roachpb.Internal/UpdateSpanConfigs": {
@@ -651,11 +661,11 @@ func TestTenantAuthRequest(t *testing.T) {
Union: &roachpb.SpanConfigTarget_SystemSpanConfigTarget{
SystemSpanConfigTarget: &roachpb.SystemSpanConfigTarget{
SourceTenantID: roachpb.MakeTenantID(10),
- TargetTenantID: nil,
+ Type: roachpb.NewEntireKeyspaceTargetType(),
},
},
}, false),
- expErr: `secondary tenants must explicitly target themselves`,
+ expErr: `secondary tenants cannot target the entire keyspace`,
},
{
req: makeUpdateSpanConfigsReq(makeSystemSpanConfigTarget(10, 10), true),
@@ -676,11 +686,11 @@ func TestTenantAuthRequest(t *testing.T) {
Union: &roachpb.SpanConfigTarget_SystemSpanConfigTarget{
SystemSpanConfigTarget: &roachpb.SystemSpanConfigTarget{
SourceTenantID: roachpb.MakeTenantID(10),
- TargetTenantID: nil,
+ Type: roachpb.NewEntireKeyspaceTargetType(),
},
},
}, true),
- expErr: `secondary tenants must explicitly target themselves`,
+ expErr: `secondary tenants cannot target the entire keyspace`,
},
},
diff --git a/pkg/spanconfig/spanconfigkvaccessor/kvaccessor.go b/pkg/spanconfig/spanconfigkvaccessor/kvaccessor.go
index 581499c9f823..a1f725bc8bb2 100644
--- a/pkg/spanconfig/spanconfigkvaccessor/kvaccessor.go
+++ b/pkg/spanconfig/spanconfigkvaccessor/kvaccessor.go
@@ -309,7 +309,7 @@ func (k *KVAccessor) constructDeleteStmtAndArgs(
}
// constructUpsertStmtAndArgs constructs the statement and query arguments
-// needed to upsert the given span config entries.
+// needed to upsert the given span config records.
func (k *KVAccessor) constructUpsertStmtAndArgs(
toUpsert []spanconfig.Record,
) (string, []interface{}, error) {
@@ -429,6 +429,12 @@ func validateUpdateArgs(toDelete []spanconfig.Target, toUpsert []spanconfig.Reco
copy(targets, list)
sort.Sort(spanconfig.Targets(targets))
for i := range targets {
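+ // Read-only targets (e.g. all-tenant-keyspace-targets-set) may translate
+ // to multiple persisted targets, so they cannot be used as delete or
+ // upsert arguments.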
+ if targets[i].IsSystemTarget() && targets[i].GetSystemTarget().IsReadOnly() {
+ return errors.AssertionFailedf(
+ "cannot use read only system target %s as an update argument", targets[i],
+ )
+ }
+
if i == 0 {
continue
}
diff --git a/pkg/spanconfig/spanconfigkvaccessor/kvaccessor_test.go b/pkg/spanconfig/spanconfigkvaccessor/kvaccessor_test.go
index 55d8dc610ad4..83721614f8d4 100644
--- a/pkg/spanconfig/spanconfigkvaccessor/kvaccessor_test.go
+++ b/pkg/spanconfig/spanconfigkvaccessor/kvaccessor_test.go
@@ -34,9 +34,11 @@ import (
// span [a,e)
// span [a,b)
// span [b,c)
+// system-target {entire-keyspace}
// system-target {source=1,target=20}
// system-target {source=1,target=1}
// system-target {source=20,target=20}
+// system-target {source=1, all-tenant-keyspace-targets-set}
// ----
//
// kvaccessor-update
@@ -45,13 +47,14 @@ import (
// upsert [d,e):D
// delete {source=1,target=1}
// upsert {source=1,target=1}:A
+// upsert {entire-keyspace}:F
// ----
//
// They tie into GetSpanConfigRecords and UpdateSpanConfigRecords
// respectively. For kvaccessor-get, each listed target is added to the set of
// targets being read. For kvaccessor-update, the lines prefixed with "delete"
// count towards the targets being deleted, and for "upsert" they correspond to
-// the span config entries being upserted. See
+// the span config records being upserted. See
// spanconfigtestutils.Parse{Span,Config,SpanConfigRecord} for more details.
func TestDataDriven(t *testing.T) {
defer leaktest.AfterTest(t)()
diff --git a/pkg/spanconfig/spanconfigkvaccessor/testdata/system_span_configs b/pkg/spanconfig/spanconfigkvaccessor/testdata/system_span_configs
index 42a6e592e75f..4e18e76149f2 100644
--- a/pkg/spanconfig/spanconfigkvaccessor/testdata/system_span_configs
+++ b/pkg/spanconfig/spanconfigkvaccessor/testdata/system_span_configs
@@ -2,7 +2,7 @@
# Test with an empty slate.
kvaccessor-get
-system-target {cluster}
+system-target {entire-keyspace}
system-target {source=1,target=1}
system-target {source=1,target=10}
system-target {source=20,target=20}
@@ -10,13 +10,13 @@ system-target {source=20,target=20}
# Try deleting a system span configuration that doesn't exist.
kvaccessor-update
-delete {cluster}
+delete {entire-keyspace}
----
err: expected to delete 1 row(s), deleted 0
# Basic tests that set all possible combinations of system targets.
kvaccessor-update
-upsert {cluster}:A
+upsert {entire-keyspace}:A
upsert {source=1,target=1}:B
upsert {source=1,target=10}:C
upsert {source=20,target=20}:D
@@ -24,19 +24,19 @@ upsert {source=20,target=20}:D
ok
kvaccessor-get
-system-target {cluster}
+system-target {entire-keyspace}
system-target {source=1,target=1}
system-target {source=1,target=10}
system-target {source=20,target=20}
----
-{cluster}:A
+{entire-keyspace}:A
{source=1,target=1}:B
{source=1,target=10}:C
{source=20,target=20}:D
# Update some of the span configurations that we added and ensure that works.
kvaccessor-update
-upsert {cluster}:F
+upsert {entire-keyspace}:F
upsert {source=1,target=1}:G
upsert {source=1,target=10}:H
upsert {source=20,target=20}:I
@@ -44,20 +44,27 @@ upsert {source=20,target=20}:I
ok
kvaccessor-get
-system-target {cluster}
+system-target {entire-keyspace}
system-target {source=1,target=1}
system-target {source=1,target=10}
system-target {source=20,target=20}
----
-{cluster}:F
+{entire-keyspace}:F
{source=1,target=1}:G
{source=1,target=10}:H
{source=20,target=20}:I
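+
+# The read-only all-tenant-keyspace-targets-set target should fetch only the
+# configs the source tenant has set on specific tenant keyspaces; note that
+# the {entire-keyspace} config is not included.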
+kvaccessor-get
+system-target {source=1,all-tenant-keyspace-targets-set}
+----
+{source=1,target=1}:G
+{source=1,target=10}:H
+
# Delete all the system span configurations that we just added and ensure
# they take effect.
kvaccessor-update
-delete {cluster}
+delete {entire-keyspace}
delete {source=1,target=1}
delete {source=1,target=10}
delete {source=20,target=20}
@@ -65,17 +72,21 @@ delete {source=20,target=20}
ok
kvaccessor-get
-system-target {cluster}
+system-target {entire-keyspace}
system-target {source=1,target=1}
system-target {source=1,target=10}
system-target {source=20,target=20}
----
+kvaccessor-get
+system-target {source=1,all-tenant-keyspace-targets-set}
+----
+
# Lastly, try adding multiple system targets set by the host tenant that apply
# to distinct secondary tenants. We also add a system span configuration set by
# one of these secondary tenant's on itself for kicks.
kvaccessor-update
-upsert {cluster}:Z
+upsert {entire-keyspace}:Z
upsert {source=1,target=10}:A
upsert {source=1,target=20}:B
upsert {source=1,target=30}:C
@@ -84,14 +95,26 @@ upsert {source=10,target=10}:G
ok
kvaccessor-get
-system-target {cluster}
+system-target {entire-keyspace}
system-target {source=1,target=10}
system-target {source=1,target=20}
system-target {source=1,target=30}
system-target {source=10,target=10}
----
-{cluster}:Z
+{entire-keyspace}:Z
{source=1,target=10}:A
{source=1,target=20}:B
{source=1,target=30}:C
{source=10,target=10}:G
+
+kvaccessor-get
+system-target {source=1,all-tenant-keyspace-targets-set}
+----
+{source=1,target=10}:A
+{source=1,target=20}:B
+{source=1,target=30}:C
+
+kvaccessor-get
+system-target {source=10,all-tenant-keyspace-targets-set}
+----
+{source=10,target=10}:G
diff --git a/pkg/spanconfig/spanconfigkvaccessor/validation_test.go b/pkg/spanconfig/spanconfigkvaccessor/validation_test.go
index a8215bbdda53..040f899aa404 100644
--- a/pkg/spanconfig/spanconfigkvaccessor/validation_test.go
+++ b/pkg/spanconfig/spanconfigkvaccessor/validation_test.go
@@ -25,13 +25,12 @@ import (
func TestValidateUpdateArgs(t *testing.T) {
defer leaktest.AfterTest(t)()
- clusterTarget := spanconfig.MakeTargetFromSystemTarget(spanconfig.SystemTarget{
- SourceTenantID: roachpb.SystemTenantID,
- TargetTenantID: nil,
- })
+ entireKeyspaceTarget := spanconfig.MakeTargetFromSystemTarget(
+ spanconfig.MakeEntireKeyspaceTarget(),
+ )
makeTenantTarget := func(id uint64) spanconfig.Target {
- target, err := spanconfig.MakeTenantTarget(roachpb.MakeTenantID(id), roachpb.MakeTenantID(id))
+ target, err := spanconfig.MakeTenantKeyspaceTarget(roachpb.MakeTenantID(id), roachpb.MakeTenantID(id))
require.NoError(t, err)
return spanconfig.MakeTargetFromSystemTarget(target)
}
@@ -145,9 +144,9 @@ func TestValidateUpdateArgs(t *testing.T) {
// Duplicate in toDelete with some span targets.
toDelete: []spanconfig.Target{
spanconfig.MakeTargetFromSpan(roachpb.Span{Key: roachpb.Key("a"), EndKey: roachpb.Key("c")}),
- clusterTarget,
+ entireKeyspaceTarget,
spanconfig.MakeTargetFromSpan(roachpb.Span{Key: roachpb.Key("e"), EndKey: roachpb.Key("f")}),
- clusterTarget,
+ entireKeyspaceTarget,
spanconfig.MakeTargetFromSpan(roachpb.Span{Key: roachpb.Key("g"), EndKey: roachpb.Key("h")}),
},
expErr: "duplicate system targets .* in the same list",
@@ -156,9 +155,9 @@ func TestValidateUpdateArgs(t *testing.T) {
// Duplicate in toDelete with some span targets.
toDelete: []spanconfig.Target{
spanconfig.MakeTargetFromSpan(roachpb.Span{Key: roachpb.Key("a"), EndKey: roachpb.Key("c")}),
- clusterTarget,
+ entireKeyspaceTarget,
spanconfig.MakeTargetFromSpan(roachpb.Span{Key: roachpb.Key("e"), EndKey: roachpb.Key("f")}),
- clusterTarget,
+ entireKeyspaceTarget,
spanconfig.MakeTargetFromSpan(roachpb.Span{Key: roachpb.Key("g"), EndKey: roachpb.Key("h")}),
},
expErr: "duplicate system targets .* in the same list",
@@ -170,7 +169,7 @@ func TestValidateUpdateArgs(t *testing.T) {
spanconfig.MakeTargetFromSpan(roachpb.Span{Key: roachpb.Key("a"), EndKey: roachpb.Key("c")}),
makeTenantTarget(20),
spanconfig.MakeTargetFromSpan(roachpb.Span{Key: roachpb.Key("e"), EndKey: roachpb.Key("f")}),
- clusterTarget,
+ entireKeyspaceTarget,
spanconfig.MakeTargetFromSpan(roachpb.Span{Key: roachpb.Key("g"), EndKey: roachpb.Key("h")}),
},
toUpsert: []spanconfig.Record{
@@ -183,7 +182,38 @@ func TestValidateUpdateArgs(t *testing.T) {
},
expErr: "",
},
+ {
+ // Read only targets are not valid delete args.
+ toDelete: []spanconfig.Target{
+ spanconfig.MakeTargetFromSystemTarget(
+ spanconfig.MakeAllTenantKeyspaceTargetsSet(roachpb.SystemTenantID),
+ ),
+ },
+ expErr: "cannot use read only system target .* as an update argument",
+ },
+ {
+ // Read only targets are not valid upsert args.
+ toUpsert: []spanconfig.Record{
+ {
+ Target: spanconfig.MakeTargetFromSystemTarget(
+ spanconfig.MakeAllTenantKeyspaceTargetsSet(roachpb.SystemTenantID),
+ ),
+ },
+ },
+ expErr: "cannot use read only system target .* as an update argument",
+ },
+ {
+ // Read only target validation also applies when the source is a secondary
+ // tenant.
+ toDelete: []spanconfig.Target{
+ spanconfig.MakeTargetFromSystemTarget(
+ spanconfig.MakeAllTenantKeyspaceTargetsSet(roachpb.MakeTenantID(10)),
+ ),
+ },
+ expErr: "cannot use read only system target .* as an update argument",
+ },
} {
- require.True(t, testutils.IsError(validateUpdateArgs(tc.toDelete, tc.toUpsert), tc.expErr))
+ err := validateUpdateArgs(tc.toDelete, tc.toUpsert)
+ require.True(t, testutils.IsError(err, tc.expErr), "exp %s; got %s", tc.expErr, err)
}
}
diff --git a/pkg/spanconfig/spanconfigtestutils/utils.go b/pkg/spanconfig/spanconfigtestutils/utils.go
index 5ae42e7a398e..a8f485baf75d 100644
--- a/pkg/spanconfig/spanconfigtestutils/utils.go
+++ b/pkg/spanconfig/spanconfigtestutils/utils.go
@@ -33,8 +33,10 @@ import (
var spanRe = regexp.MustCompile(`^\[(\w+),\s??(\w+)\)$`)
// systemTargetRe matches strings of the form
-// "{source=(|system),target=(|system)}".
-var systemTargetRe = regexp.MustCompile(`^{(cluster)|(source=(\d*),\s??target=(\d*))}$`)
+// "{entire-keyspace|source=,(target=|all-tenant-keyspace-targets-set)}".
+var systemTargetRe = regexp.MustCompile(
+ `^{(entire-keyspace)|(source=(\d*),\s??((target=(\d*))|all-tenant-keyspace-targets-set))}$`,
+)
// configRe matches a single word. It's a shorthand for declaring a unique
// config.
@@ -63,15 +65,18 @@ func parseSystemTarget(t *testing.T, systemTarget string) spanconfig.SystemTarge
}
matches := systemTargetRe.FindStringSubmatch(systemTarget)
- if matches[1] == "cluster" {
- return spanconfig.MakeClusterTarget()
+ if matches[1] == "entire-keyspace" {
+ return spanconfig.MakeEntireKeyspaceTarget()
}
sourceID, err := strconv.Atoi(matches[3])
require.NoError(t, err)
- targetID, err := strconv.Atoi(matches[4])
+ if matches[4] == "all-tenant-keyspace-targets-set" {
+ return spanconfig.MakeAllTenantKeyspaceTargetsSet(roachpb.MakeTenantID(uint64(sourceID)))
+ }
+ targetID, err := strconv.Atoi(matches[6])
require.NoError(t, err)
- target, err := spanconfig.MakeTenantTarget(
+ target, err := spanconfig.MakeTenantKeyspaceTarget(
roachpb.MakeTenantID(uint64(sourceID)), roachpb.MakeTenantID(uint64(targetID)),
)
require.NoError(t, err)
diff --git a/pkg/spanconfig/systemtarget.go b/pkg/spanconfig/systemtarget.go
index be77cfcd808e..e8d8bb0df364 100644
--- a/pkg/spanconfig/systemtarget.go
+++ b/pkg/spanconfig/systemtarget.go
@@ -22,159 +22,265 @@ import (
// SystemTarget specifies the target of a system span configuration.
type SystemTarget struct {
- // SourceTenantID is the ID of the tenant that specified the system span
+ // sourceTenantID is the ID of the tenant that specified the system span
// configuration.
- // SourceTenantID is the ID of the tenant that specified the system span
- // configuration.
- SourceTenantID roachpb.TenantID
+ sourceTenantID roachpb.TenantID
- // TargetTenantID is the ID of the tenant that the associated system span
- // configuration applies to.
- //
- // If the host tenant is the source and the TargetTenantID is unspecified then
- // the associated system span configuration applies over all ranges in the
- // system (including those belonging to secondary tenants).
+ // targetTenantID is the ID of the tenant whose keyspace the associated
+ // system span configuration applies to. This field may only be set in
+ // conjunction with the type being SystemTargetTypeSpecificTenantKeyspace;
+ // it must be left unset for all other system target types.
//
- // Secondary tenants are only allowed to target themselves. The host tenant
- // may use this field to target a specific secondary tenant. We validate this
- // when constructing new system targets.
- TargetTenantID *roachpb.TenantID
+ // Secondary tenants are only allowed to target their own keyspace. The host
+ // tenant may use this field to target a specific secondary tenant.
+ targetTenantID *roachpb.TenantID
+
+ // systemTargetType indicates the type of the system target. targetTenantID
+ // may only be set if the type is SystemTargetTypeSpecificTenantKeyspace.
+ systemTargetType systemTargetType
}
-// MakeTenantTarget constructs, validates, and returns a new SystemTarget that
-// targets the physical keyspace of the targetTenantID.
-func MakeTenantTarget(
+// systemTargetType indicates the type of SystemTarget.
+type systemTargetType int
+
+const (
+ _ systemTargetType = iota
+ // SystemTargetTypeSpecificTenantKeyspace indicates that the system target is
+ // targeting a specific tenant's keyspace.
+ SystemTargetTypeSpecificTenantKeyspace
+ // SystemTargetTypeEntireKeyspace indicates that the system target is
+ // targeting the entire keyspace. Only the host tenant is allowed to do so.
+ SystemTargetTypeEntireKeyspace
+ // SystemTargetTypeAllTenantKeyspaceTargetsSet represents a system target
+ // that encompasses all system targets that have been set by the source
+ // tenant on specific tenants' keyspaces.
+ //
+ // This is a read-only system target type, as it may translate to more than
+ // one persisted system target. This target type is useful for fetching all
+ // system span configurations a tenant may have set on tenant keyspaces
+ // without knowing the tenant IDs of all other tenants in the system. It is
+ // only ever significant for the host tenant, as it can set system span
+ // configurations that target other tenants' keyspaces.
+ SystemTargetTypeAllTenantKeyspaceTargetsSet
+)
+
+// MakeTenantKeyspaceTarget constructs, validates, and returns a new
+// SystemTarget that targets the keyspace of the target tenant.
+func MakeTenantKeyspaceTarget(
sourceTenantID roachpb.TenantID, targetTenantID roachpb.TenantID,
) (SystemTarget, error) {
t := SystemTarget{
- SourceTenantID: sourceTenantID,
- TargetTenantID: &targetTenantID,
+ sourceTenantID: sourceTenantID,
+ targetTenantID: &targetTenantID,
+ systemTargetType: SystemTargetTypeSpecificTenantKeyspace,
}
return t, t.validate()
}
-// MakeSystemTargetFromProto constructs a SystemTarget from a
+// makeSystemTargetFromProto constructs a SystemTarget from a
// roachpb.SystemSpanConfigTarget and validates it.
-func MakeSystemTargetFromProto(proto *roachpb.SystemSpanConfigTarget) (SystemTarget, error) {
- if proto.SourceTenantID == roachpb.SystemTenantID && proto.TargetTenantID == nil {
- return MakeClusterTarget(), nil
- } else if proto.TargetTenantID == nil {
- return SystemTarget{},
- errors.Newf("secondary tenant %s cannot target the entire cluster", proto.SourceTenantID)
+func makeSystemTargetFromProto(proto *roachpb.SystemSpanConfigTarget) (SystemTarget, error) {
+ var t SystemTarget
+ switch {
+ case proto.IsSpecificTenantKeyspaceTarget():
+ t = SystemTarget{
+ sourceTenantID: proto.SourceTenantID,
+ targetTenantID: &proto.Type.GetSpecificTenantKeyspace().TenantID,
+ systemTargetType: SystemTargetTypeSpecificTenantKeyspace,
+ }
+ case proto.IsEntireKeyspaceTarget():
+ t = SystemTarget{
+ sourceTenantID: proto.SourceTenantID,
+ targetTenantID: nil,
+ systemTargetType: SystemTargetTypeEntireKeyspace,
+ }
+ case proto.IsAllTenantKeyspaceTargetsSetTarget():
+ t = SystemTarget{
+ sourceTenantID: proto.SourceTenantID,
+ targetTenantID: nil,
+ systemTargetType: SystemTargetTypeAllTenantKeyspaceTargetsSet,
+ }
+ default:
+ return SystemTarget{}, errors.AssertionFailedf("unknown system target type")
}
+ return t, t.validate()
+}
- return MakeTenantTarget(proto.SourceTenantID, *proto.TargetTenantID)
+func (st SystemTarget) toProto() *roachpb.SystemSpanConfigTarget {
+ var systemTargetType *roachpb.SystemSpanConfigTarget_Type
+ switch st.systemTargetType {
+ case SystemTargetTypeEntireKeyspace:
+ systemTargetType = roachpb.NewEntireKeyspaceTargetType()
+ case SystemTargetTypeAllTenantKeyspaceTargetsSet:
+ systemTargetType = roachpb.NewAllTenantKeyspaceTargetsSetTargetType()
+ case SystemTargetTypeSpecificTenantKeyspace:
+ systemTargetType = roachpb.NewSpecificTenantKeyspaceTargetType(*st.targetTenantID)
+ default:
+ panic("unknown system target type")
+ }
+ return &roachpb.SystemSpanConfigTarget{
+ SourceTenantID: st.sourceTenantID,
+ Type: systemTargetType,
+ }
}
-// MakeClusterTarget returns a new system target that targets the entire cluster.
-// Only the host tenant is allowed to target the entire cluster.
-func MakeClusterTarget() SystemTarget {
+// MakeEntireKeyspaceTarget returns a new system target that targets the entire
+// keyspace. Only the host tenant is allowed to target the entire keyspace.
+func MakeEntireKeyspaceTarget() SystemTarget {
return SystemTarget{
- SourceTenantID: roachpb.SystemTenantID,
+ sourceTenantID: roachpb.SystemTenantID,
+ systemTargetType: SystemTargetTypeEntireKeyspace,
}
}
-// targetsEntireCluster returns true if the target applies to all ranges in the
+// MakeAllTenantKeyspaceTargetsSet returns a new SystemTarget that
+// represents all system span configurations installed by the given tenant ID
+// on specific tenants' keyspaces (including its own and those of other
+// tenants).
+func MakeAllTenantKeyspaceTargetsSet(sourceID roachpb.TenantID) SystemTarget {
+ return SystemTarget{
+ sourceTenantID: sourceID,
+ systemTargetType: SystemTargetTypeAllTenantKeyspaceTargetsSet,
+ }
+}
+
+// targetsEntireKeyspace returns true if the target applies to all ranges in the
// system (including those belonging to secondary tenants).
-func (st SystemTarget) targetsEntireCluster() bool {
- return st.SourceTenantID == roachpb.SystemTenantID && st.TargetTenantID == nil
+func (st SystemTarget) targetsEntireKeyspace() bool {
+ return st.systemTargetType == SystemTargetTypeEntireKeyspace
+}
+
+// IsReadOnly returns true if the system target is read-only. Read-only
+// targets must not be persisted.
+func (st SystemTarget) IsReadOnly() bool {
+ return st.systemTargetType == SystemTargetTypeAllTenantKeyspaceTargetsSet
}
// encode returns an encoded span associated with the receiver which is suitable
-// for persistence in system.span_configurations.
+// for interaction with the system.span_configurations table.
func (st SystemTarget) encode() roachpb.Span {
var k roachpb.Key
- if st.SourceTenantID == roachpb.SystemTenantID &&
- st.TargetTenantID == nil {
+ switch st.systemTargetType {
+ case SystemTargetTypeEntireKeyspace:
k = keys.SystemSpanConfigEntireKeyspace
- } else if st.SourceTenantID == roachpb.SystemTenantID {
- k = encoding.EncodeUvarintAscending(
- keys.SystemSpanConfigHostOnTenantKeyspace, st.TargetTenantID.ToUint64(),
- )
- } else {
- k = encoding.EncodeUvarintAscending(
- keys.SystemSpanConfigSecondaryTenantOnEntireKeyspace, st.SourceTenantID.ToUint64(),
- )
+ case SystemTargetTypeSpecificTenantKeyspace:
+ if st.sourceTenantID == roachpb.SystemTenantID {
+ k = encoding.EncodeUvarintAscending(
+ keys.SystemSpanConfigHostOnTenantKeyspace, st.targetTenantID.ToUint64(),
+ )
+ } else {
+ k = encoding.EncodeUvarintAscending(
+ keys.SystemSpanConfigSecondaryTenantOnEntireKeyspace, st.sourceTenantID.ToUint64(),
+ )
+ }
+ case SystemTargetTypeAllTenantKeyspaceTargetsSet:
+ if st.sourceTenantID == roachpb.SystemTenantID {
+ k = keys.SystemSpanConfigHostOnTenantKeyspace
+ } else {
+ k = encoding.EncodeUvarintAscending(
+ keys.SystemSpanConfigSecondaryTenantOnEntireKeyspace, st.sourceTenantID.ToUint64(),
+ )
+ }
}
return roachpb.Span{Key: k, EndKey: k.PrefixEnd()}
}
// validate ensures that the receiver is well-formed.
func (st SystemTarget) validate() error {
- if st.SourceTenantID == roachpb.SystemTenantID {
- // The system tenant can target itself, other secondary tenants, and the
- // entire cluster (including secondary tenant ranges).
- return nil
- }
- if st.TargetTenantID == nil {
- return errors.AssertionFailedf(
- "secondary tenant %s cannot have unspecified target tenant ID", st.SourceTenantID,
- )
- }
- if st.SourceTenantID != *st.TargetTenantID {
- return errors.AssertionFailedf(
- "secondary tenant %s cannot target another tenant with ID %s",
- st.SourceTenantID,
- st.TargetTenantID,
- )
+ switch st.systemTargetType {
+ case SystemTargetTypeAllTenantKeyspaceTargetsSet:
+ if st.targetTenantID != nil {
+ return errors.AssertionFailedf(
+ "targetTenantID must be unset when targeting everything installed on tenants",
+ )
+ }
+ case SystemTargetTypeEntireKeyspace:
+ if st.sourceTenantID != roachpb.SystemTenantID {
+ return errors.AssertionFailedf("only the host tenant is allowed to target the entire keyspace")
+ }
+ if st.targetTenantID != nil {
+ return errors.AssertionFailedf("malformed system target for entire keyspace; targetTenantID set")
+ }
+ case SystemTargetTypeSpecificTenantKeyspace:
+ if st.targetTenantID == nil {
+ return errors.AssertionFailedf(
+ "malformed system target for specific tenant keyspace; targetTenantID unset",
+ )
+ }
+ if st.sourceTenantID != roachpb.SystemTenantID && st.sourceTenantID != *st.targetTenantID {
+ return errors.AssertionFailedf(
+ "secondary tenant %s cannot target another tenant with ID %s",
+ st.sourceTenantID,
+ st.targetTenantID,
+ )
+ }
+ default:
+ return errors.AssertionFailedf("invalid system target type")
}
return nil
}
// isEmpty returns true if the receiver is empty.
func (st SystemTarget) isEmpty() bool {
- return st.SourceTenantID.Equal(roachpb.TenantID{}) && st.TargetTenantID.Equal(nil)
+ return st.sourceTenantID.Equal(roachpb.TenantID{}) && st.targetTenantID.Equal(nil)
}
// less returns true if the receiver is considered less than the supplied
// target. The semantics are defined as follows:
-// - host installed targets that target the entire cluster come first.
-// - host installed targets that target a tenant come next (ordered by target
-// tenant ID).
-// - secondary tenant installed targets come next, ordered by secondary tenant
-// ID.
+// - read-only targets come first, ordered by source tenant ID.
+// - targets that target the entire keyspace come next.
+// - targets that target a specific tenant's keyspace come last, sorted by
+// source tenant ID; target tenant ID is used as a tiebreaker if two targets
+// have the same source.
func (st SystemTarget) less(ot SystemTarget) bool {
- if st.SourceTenantID == roachpb.SystemTenantID &&
- ot.SourceTenantID == roachpb.SystemTenantID {
- if st.targetsEntireCluster() {
- return true
- } else if ot.targetsEntireCluster() {
- return false
- }
+ if st.IsReadOnly() && ot.IsReadOnly() {
+ return st.sourceTenantID.ToUint64() < ot.sourceTenantID.ToUint64()
+ }
- return st.TargetTenantID.ToUint64() < ot.TargetTenantID.ToUint64()
+ if st.IsReadOnly() {
+ return true
+ } else if ot.IsReadOnly() {
+ return false
}
- if st.SourceTenantID == roachpb.SystemTenantID {
+ if st.targetsEntireKeyspace() {
return true
- } else if ot.SourceTenantID == roachpb.SystemTenantID {
+ } else if ot.targetsEntireKeyspace() {
return false
}
- return st.SourceTenantID.ToUint64() < ot.SourceTenantID.ToUint64()
+ if st.sourceTenantID.ToUint64() == ot.sourceTenantID.ToUint64() {
+ return st.targetTenantID.ToUint64() < ot.targetTenantID.ToUint64()
+ }
+
+ return st.sourceTenantID.ToUint64() < ot.sourceTenantID.ToUint64()
}
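
A hedged in-package sketch of the order less induces, mirroring the sorted fixture in TestTargetSortingRandomized below (assumes a sort import and a testing.T in scope):

	targets := []SystemTarget{
		makeTenantKeyspaceTargetOrFatal(t, roachpb.MakeTenantID(5), roachpb.MakeTenantID(5)),
		MakeEntireKeyspaceTarget(),
		MakeAllTenantKeyspaceTargetsSet(roachpb.MakeTenantID(10)),
		MakeAllTenantKeyspaceTargetsSet(roachpb.SystemTenantID),
	}
	sort.Slice(targets, func(i, j int) bool { return targets[i].less(targets[j]) })
	// Resulting order:
	//   read-only targets first, by source tenant ID (the system tenant is ID 1),
	//   then the entire-keyspace target,
	//   then specific tenant keyspaces by (source, target) tenant ID.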
// equal returns true iff the receiver is equal to the supplied system target.
func (st SystemTarget) equal(ot SystemTarget) bool {
- return st.SourceTenantID.Equal(ot.SourceTenantID) && st.TargetTenantID.Equal(ot.TargetTenantID)
+ return st.sourceTenantID.Equal(ot.sourceTenantID) && st.targetTenantID.Equal(ot.targetTenantID)
}
// String returns a pretty printed version of a system target.
func (st SystemTarget) String() string {
- if st.targetsEntireCluster() {
- return "{cluster}"
+ switch st.systemTargetType {
+ case SystemTargetTypeEntireKeyspace:
+ return "{entire-keyspace}"
+ case SystemTargetTypeAllTenantKeyspaceTargetsSet:
+ return fmt.Sprintf("{source=%d, all-tenant-keyspace-targets-set}", st.sourceTenantID.ToUint64())
+ case SystemTargetTypeSpecificTenantKeyspace:
+ return fmt.Sprintf(
+ "{source=%d,target=%d}",
+ st.sourceTenantID.ToUint64(),
+ st.targetTenantID.ToUint64(),
+ )
+ default:
+ panic("unreachable")
}
- return fmt.Sprintf(
- "{source=%d,target=%d}",
- st.SourceTenantID.ToUint64(),
- st.TargetTenantID.ToUint64(),
- )
}
// decodeSystemTarget converts the given span into a SystemTarget. An error is
-// returned if the supplied span does not conform to a system target's
-// encoding.
+// returned if the supplied span does not conform to a system target's encoding.
func decodeSystemTarget(span roachpb.Span) (SystemTarget, error) {
// Validate the end key is well-formed.
if !span.EndKey.Equal(span.Key.PrefixEnd()) {
@@ -182,7 +288,7 @@ func decodeSystemTarget(span roachpb.Span) (SystemTarget, error) {
}
switch {
case bytes.Equal(span.Key, keys.SystemSpanConfigEntireKeyspace):
- return MakeClusterTarget(), nil
+ return MakeEntireKeyspaceTarget(), nil
case bytes.HasPrefix(span.Key, keys.SystemSpanConfigHostOnTenantKeyspace):
// System span config was applied by the host tenant over a secondary
// tenant's entire keyspace.
@@ -192,7 +298,7 @@ func decodeSystemTarget(span roachpb.Span) (SystemTarget, error) {
return SystemTarget{}, err
}
tenID := roachpb.MakeTenantID(tenIDRaw)
- return MakeTenantTarget(roachpb.SystemTenantID, tenID)
+ return MakeTenantKeyspaceTarget(roachpb.SystemTenantID, tenID)
case bytes.HasPrefix(span.Key, keys.SystemSpanConfigSecondaryTenantOnEntireKeyspace):
// System span config was applied by a secondary tenant over its entire
// keyspace.
@@ -202,7 +308,7 @@ func decodeSystemTarget(span roachpb.Span) (SystemTarget, error) {
return SystemTarget{}, err
}
tenID := roachpb.MakeTenantID(tenIDRaw)
- return MakeTenantTarget(tenID, tenID)
+ return MakeTenantKeyspaceTarget(tenID, tenID)
default:
return SystemTarget{},
errors.AssertionFailedf("span %s did not conform to SystemTarget encoding", span)
diff --git a/pkg/spanconfig/target.go b/pkg/spanconfig/target.go
index cd69dd604f6d..d0c1703826d0 100644
--- a/pkg/spanconfig/target.go
+++ b/pkg/spanconfig/target.go
@@ -11,6 +11,7 @@
package spanconfig
import (
+ "github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/errors"
)
@@ -26,9 +27,9 @@ type Target struct {
func MakeTarget(t roachpb.SpanConfigTarget) (Target, error) {
switch t.Union.(type) {
case *roachpb.SpanConfigTarget_Span:
- return MakeTargetFromSpan(*t.GetSpan()), nil
+ return MakeSpanTargetFromProto(t)
case *roachpb.SpanConfigTarget_SystemSpanConfigTarget:
- systemTarget, err := MakeSystemTargetFromProto(t.GetSystemSpanConfigTarget())
+ systemTarget, err := makeSystemTargetFromProto(t.GetSystemSpanConfigTarget())
if err != nil {
return Target{}, err
}
@@ -38,8 +39,28 @@ func MakeTarget(t roachpb.SpanConfigTarget) (Target, error) {
}
}
-// MakeTargetFromSpan constructs and returns a span target.
+// MakeSpanTargetFromProto returns a new Target backed by an underlying span.
+// An error is returned if the proto does not contain a span or if the span
+// overlaps with the reserved system span config keyspace.
+func MakeSpanTargetFromProto(spanTarget roachpb.SpanConfigTarget) (Target, error) {
+ if spanTarget.GetSpan() == nil {
+ return Target{}, errors.AssertionFailedf("span config target did not contain a span")
+ }
+ if keys.SystemSpanConfigSpan.Overlaps(*spanTarget.GetSpan()) {
+ return Target{}, errors.AssertionFailedf(
+ "cannot target spans in reserved system span config keyspace",
+ )
+ }
+ return MakeTargetFromSpan(*spanTarget.GetSpan()), nil
+}
+
+// MakeTargetFromSpan constructs and returns a span target. Callers are not
+// allowed to target the reserved system span config keyspace (or part of it)
+// directly; system targets should be used instead.
func MakeTargetFromSpan(span roachpb.Span) Target {
+ if keys.SystemSpanConfigSpan.Overlaps(span) {
+ panic("cannot target spans in reserved system span config keyspace")
+ }
return Target{span: span}
}
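
A hedged sketch of the new contract from a caller's perspective; keys.SystemSpanConfigPrefix is assumed to sit inside keys.SystemSpanConfigSpan, as the tests below rely on:

	// Constructing a span target over the reserved keyspace panics; callers
	// must go through system targets instead.
	func() {
		defer func() {
			// recover() yields "cannot target spans in reserved system span
			// config keyspace".
			_ = recover()
		}()
		spanconfig.MakeTargetFromSpan(roachpb.Span{
			Key:    keys.SystemSpanConfigPrefix,
			EndKey: keys.SystemSpanConfigPrefix.PrefixEnd(),
		})
	}()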
@@ -76,8 +97,8 @@ func (t Target) GetSystemTarget() SystemTarget {
return t.systemTarget
}
-// Encode returns an encoded span suitable for persistence in
-// system.span_configurations.
+// Encode returns an encoded span suitable for interaction with the
+// system.span_configurations table.
func (t Target) Encode() roachpb.Span {
switch {
case t.IsSpanTarget():
@@ -155,10 +176,7 @@ func (t Target) ToProto() roachpb.SpanConfigTarget {
case t.IsSystemTarget():
return roachpb.SpanConfigTarget{
Union: &roachpb.SpanConfigTarget_SystemSpanConfigTarget{
- SystemSpanConfigTarget: &roachpb.SystemSpanConfigTarget{
- SourceTenantID: t.GetSystemTarget().SourceTenantID,
- TargetTenantID: t.GetSystemTarget().TargetTenantID,
- },
+ SystemSpanConfigTarget: t.GetSystemTarget().toProto(),
},
}
default:
@@ -248,3 +266,15 @@ func TargetsFromProtos(protoTargets []roachpb.SpanConfigTarget) ([]Target, error
}
return targets, nil
}
+
+// TestingEntireSpanConfigurationStateTargets returns a list of targets which
+// can be used to read the entire span configuration state. This includes all
+// span configurations installed by all tenants and all system span
+// configurations, including those installed by secondary tenants.
+func TestingEntireSpanConfigurationStateTargets() []Target {
+ return Targets{
+ Target{
+ span: keys.EverythingSpan,
+ },
+ }
+}
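
A hedged usage sketch for the new testing helper; getRecords below is a hypothetical stand-in for whatever reader the test has (a KVAccessor, say), ctx and t come from the enclosing test, and only the targets helper itself comes from this patch:

	var getRecords func(
		ctx context.Context, targets []spanconfig.Target,
	) ([]spanconfig.Record, error)

	records, err := getRecords(ctx, spanconfig.TestingEntireSpanConfigurationStateTargets())
	if err != nil {
		t.Fatal(err)
	}
	// records now covers every tenant's span configs plus all system span
	// configs, since the helper expands to keys.EverythingSpan.
	_ = records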
diff --git a/pkg/spanconfig/target_test.go b/pkg/spanconfig/target_test.go
index c31e8c25ff00..7d5a63598e71 100644
--- a/pkg/spanconfig/target_test.go
+++ b/pkg/spanconfig/target_test.go
@@ -26,13 +26,13 @@ import (
func TestEncodeDecodeSystemTarget(t *testing.T) {
for _, testTarget := range []SystemTarget{
// Tenant targeting its logical cluster.
- makeTenantTargetOrFatal(t, roachpb.MakeTenantID(10), roachpb.MakeTenantID(10)),
+ makeTenantKeyspaceTargetOrFatal(t, roachpb.MakeTenantID(10), roachpb.MakeTenantID(10)),
// System tenant targeting its logical cluster.
- makeTenantTargetOrFatal(t, roachpb.SystemTenantID, roachpb.SystemTenantID),
+ makeTenantKeyspaceTargetOrFatal(t, roachpb.SystemTenantID, roachpb.SystemTenantID),
// System tenant targeting a secondary tenant.
- makeTenantTargetOrFatal(t, roachpb.SystemTenantID, roachpb.MakeTenantID(10)),
- // System tenant targeting the entire cluster.
- MakeClusterTarget(),
+ makeTenantKeyspaceTargetOrFatal(t, roachpb.SystemTenantID, roachpb.MakeTenantID(10)),
+ // System tenant targeting the entire keyspace.
+ MakeEntireKeyspaceTarget(),
} {
systemTarget, err := decodeSystemTarget(testTarget.encode())
require.NoError(t, err)
@@ -45,6 +45,37 @@ func TestEncodeDecodeSystemTarget(t *testing.T) {
}
}
+// TestTargetToFromProto ensures that converting system targets to and from
+// protos round-trips correctly.
+func TestTargetToFromProto(t *testing.T) {
+ for _, testSystemTarget := range []SystemTarget{
+ // Tenant targeting its logical cluster.
+ makeTenantKeyspaceTargetOrFatal(t, roachpb.MakeTenantID(10), roachpb.MakeTenantID(10)),
+ // System tenant targeting its logical cluster.
+ makeTenantKeyspaceTargetOrFatal(t, roachpb.SystemTenantID, roachpb.SystemTenantID),
+ // System tenant targeting a secondary tenant.
+ makeTenantKeyspaceTargetOrFatal(t, roachpb.SystemTenantID, roachpb.MakeTenantID(10)),
+ // System tenant targeting the entire keyspace.
+ MakeEntireKeyspaceTarget(),
+ // System tenant's read-only target to fetch all system span configurations
+ // it has set over secondary tenant keyspaces.
+ MakeAllTenantKeyspaceTargetsSet(roachpb.SystemTenantID),
+ // A secondary tenant's read-only target to fetch all system span
+ // configurations it has set over secondary tenant keyspaces.
+ MakeAllTenantKeyspaceTargetsSet(roachpb.MakeTenantID(10)),
+ } {
+ systemTarget, err := makeSystemTargetFromProto(testSystemTarget.toProto())
+ require.NoError(t, err)
+ require.Equal(t, testSystemTarget, systemTarget)
+
+ // For good measure, let's also test at the level of a spanconfig.Target.
+ testTarget := MakeTargetFromSystemTarget(testSystemTarget)
+ target, err := MakeTarget(testTarget.ToProto())
+ require.NoError(t, err)
+ require.Equal(t, target, testTarget)
+ }
+}
+
// TestDecodeInvalidSpanAsSystemTarget ensures that decoding an invalid span
// as a system target fails.
func TestDecodeInvalidSpanAsSystemTarget(t *testing.T) {
@@ -87,45 +118,128 @@ func TestDecodeInvalidSpanAsSystemTarget(t *testing.T) {
// TestSystemTargetValidation ensures target.validate() works as expected.
func TestSystemTargetValidation(t *testing.T) {
+ tenant10 := roachpb.MakeTenantID(10)
+ tenant20 := roachpb.MakeTenantID(20)
for _, tc := range []struct {
sourceTenantID roachpb.TenantID
- targetTenantID roachpb.TenantID
+ targetTenantID *roachpb.TenantID
+ targetType systemTargetType
expErr string
}{
{
// Secondary tenants cannot target the system tenant.
- sourceTenantID: roachpb.MakeTenantID(10),
- targetTenantID: roachpb.SystemTenantID,
- expErr: "secondary tenant 10 cannot target another tenant with ID",
+ sourceTenantID: tenant10,
+ targetTenantID: &roachpb.SystemTenantID,
+ targetType: SystemTargetTypeSpecificTenantKeyspace,
+ expErr: "secondary tenant 10 cannot target another tenant with ID system",
},
{
// Secondary tenants cannot target other secondary tenants.
- sourceTenantID: roachpb.MakeTenantID(10),
- targetTenantID: roachpb.MakeTenantID(20),
- expErr: "secondary tenant 10 cannot target another tenant with ID",
+ sourceTenantID: tenant10,
+ targetTenantID: &tenant20,
+ targetType: SystemTargetTypeSpecificTenantKeyspace,
+ expErr: "secondary tenant 10 cannot target another tenant with ID 20",
+ },
+ {
+ // Secondary tenants cannot target the entire keyspace.
+ sourceTenantID: tenant10,
+ targetTenantID: nil,
+ targetType: SystemTargetTypeEntireKeyspace,
+ expErr: "only the host tenant is allowed to target the entire keyspace",
+ },
+ {
+ // Ensure secondary tenants can't target the entire keyspace even if they
+ // set targetTenantID to themselves.
+ sourceTenantID: tenant10,
+ targetTenantID: &tenant10,
+ targetType: SystemTargetTypeEntireKeyspace,
+ expErr: "only the host tenant is allowed to target the entire keyspace",
+ },
+ {
+ // The system tenant can't set a targetTenantID when targeting everything
+ // installed on tenants.
+ sourceTenantID: roachpb.SystemTenantID,
+ targetTenantID: &tenant10,
+ targetType: SystemTargetTypeAllTenantKeyspaceTargetsSet,
+ expErr: "targetTenantID must be unset when targeting everything installed",
+ },
+ {
+ // System tenant must fill in a targetTenantID when targeting a specific
+ // tenant.
+ sourceTenantID: roachpb.SystemTenantID,
+ targetTenantID: nil,
+ targetType: SystemTargetTypeSpecificTenantKeyspace,
+ expErr: "malformed system target for specific tenant keyspace; targetTenantID unset",
+ },
+ {
+ // The system tenant can't set a targetTenantID when targeting the entire
+ // keyspace.
+ sourceTenantID: roachpb.SystemTenantID,
+ targetTenantID: &tenant10,
+ targetType: SystemTargetTypeEntireKeyspace,
+ expErr: "malformed system target for entire keyspace; targetTenantID set",
+ },
+ {
+ // Secondary tenants can't set a targetTenantID when targeting everything
+ // installed on tenants.
+ sourceTenantID: tenant10,
+ targetTenantID: &tenant10,
+ targetType: SystemTargetTypeAllTenantKeyspaceTargetsSet,
+ expErr: "targetTenantID must be unset when targeting everything installed",
},
// Test some valid targets.
{
// System tenant targeting secondary tenant is allowed.
sourceTenantID: roachpb.SystemTenantID,
- targetTenantID: roachpb.MakeTenantID(20),
+ targetTenantID: &tenant20,
+ targetType: SystemTargetTypeSpecificTenantKeyspace,
+ },
+ {
+ // System tenant targeting the entire keyspace is allowed.
+ sourceTenantID: roachpb.SystemTenantID,
+ targetTenantID: nil,
+ targetType: SystemTargetTypeEntireKeyspace,
},
{
// System tenant targeting itself is allowed.
sourceTenantID: roachpb.SystemTenantID,
- targetTenantID: roachpb.SystemTenantID,
+ targetTenantID: &roachpb.SystemTenantID,
+ targetType: SystemTargetTypeSpecificTenantKeyspace,
},
{
// Secondary tenant targeting itself is allowed.
- sourceTenantID: roachpb.MakeTenantID(10),
- targetTenantID: roachpb.MakeTenantID(10),
+ sourceTenantID: tenant10,
+ targetTenantID: &tenant10,
+ targetType: SystemTargetTypeSpecificTenantKeyspace,
+ },
+ {
+ // A secondary tenant targeting everything it has installed on tenant
+ // keyspaces is allowed.
+ sourceTenantID: tenant10,
+ targetTenantID: nil,
+ targetType: SystemTargetTypeAllTenantKeyspaceTargetsSet,
+ },
+ {
+ // The system tenant targeting everything it has installed on tenant
+ // keyspaces is allowed.
+ sourceTenantID: roachpb.SystemTenantID,
+ targetTenantID: nil,
+ targetType: SystemTargetTypeAllTenantKeyspaceTargetsSet,
},
} {
target := SystemTarget{
- SourceTenantID: tc.sourceTenantID,
- TargetTenantID: &tc.targetTenantID,
+ sourceTenantID: tc.sourceTenantID,
+ targetTenantID: tc.targetTenantID,
+ systemTargetType: tc.targetType,
}
- require.True(t, testutils.IsError(target.validate(), tc.expErr))
+ err := target.validate()
+ require.True(
+ t,
+ testutils.IsError(err, tc.expErr),
+ "expected: %s got: %s ",
+ tc.expErr,
+ err,
+ )
}
}
@@ -133,12 +247,24 @@ func TestSystemTargetValidation(t *testing.T) {
func TestTargetSortingRandomized(t *testing.T) {
// Construct a set of sorted targets.
sortedTargets := Targets{
- MakeTargetFromSystemTarget(MakeClusterTarget()),
- MakeTargetFromSystemTarget(makeTenantTargetOrFatal(t, roachpb.SystemTenantID, roachpb.SystemTenantID)),
- MakeTargetFromSystemTarget(makeTenantTargetOrFatal(t, roachpb.SystemTenantID, roachpb.MakeTenantID(10))),
- MakeTargetFromSystemTarget(makeTenantTargetOrFatal(t, roachpb.SystemTenantID, roachpb.MakeTenantID(20))),
- MakeTargetFromSystemTarget(makeTenantTargetOrFatal(t, roachpb.MakeTenantID(5), roachpb.MakeTenantID(5))),
- MakeTargetFromSystemTarget(makeTenantTargetOrFatal(t, roachpb.MakeTenantID(10), roachpb.MakeTenantID(10))),
+ MakeTargetFromSystemTarget(MakeAllTenantKeyspaceTargetsSet(roachpb.SystemTenantID)),
+ MakeTargetFromSystemTarget(MakeAllTenantKeyspaceTargetsSet(roachpb.MakeTenantID(10))),
+ MakeTargetFromSystemTarget(MakeEntireKeyspaceTarget()),
+ MakeTargetFromSystemTarget(
+ makeTenantKeyspaceTargetOrFatal(t, roachpb.SystemTenantID, roachpb.SystemTenantID),
+ ),
+ MakeTargetFromSystemTarget(
+ makeTenantKeyspaceTargetOrFatal(t, roachpb.SystemTenantID, roachpb.MakeTenantID(10)),
+ ),
+ MakeTargetFromSystemTarget(
+ makeTenantKeyspaceTargetOrFatal(t, roachpb.SystemTenantID, roachpb.MakeTenantID(20)),
+ ),
+ MakeTargetFromSystemTarget(
+ makeTenantKeyspaceTargetOrFatal(t, roachpb.MakeTenantID(5), roachpb.MakeTenantID(5)),
+ ),
+ MakeTargetFromSystemTarget(
+ makeTenantKeyspaceTargetOrFatal(t, roachpb.MakeTenantID(10), roachpb.MakeTenantID(10)),
+ ),
MakeTargetFromSpan(roachpb.Span{Key: roachpb.Key("a"), EndKey: roachpb.Key("b")}),
MakeTargetFromSpan(roachpb.Span{Key: roachpb.Key("a"), EndKey: roachpb.Key("d")}),
MakeTargetFromSpan(roachpb.Span{Key: roachpb.Key("y"), EndKey: roachpb.Key("z")}),
@@ -158,10 +284,38 @@ func TestTargetSortingRandomized(t *testing.T) {
}
}
-func makeTenantTargetOrFatal(
+// TestSpanTargetsConstructedInSystemSpanConfigKeyspace ensures that
+// constructing span targets that overlap with the reserved system span config
+// keyspace panics.
+func TestSpanTargetsConstructedInSystemSpanConfigKeyspace(t *testing.T) {
+ for _, tc := range []roachpb.Span{
+ MakeEntireKeyspaceTarget().encode(),
+ makeTenantKeyspaceTargetOrFatal(t, roachpb.MakeTenantID(10), roachpb.MakeTenantID(10)).encode(),
+ makeTenantKeyspaceTargetOrFatal(t, roachpb.SystemTenantID, roachpb.SystemTenantID).encode(),
+ makeTenantKeyspaceTargetOrFatal(t, roachpb.SystemTenantID, roachpb.MakeTenantID(10)).encode(),
+ {
+ // Extends into the reserved keyspace from the left.
+ Key: keys.TimeseriesKeyMax,
+ EndKey: keys.SystemSpanConfigPrefix.Next(), // EndKey isn't inclusive.
+ },
+ {
+ // Entirely contained.
+ Key: keys.SystemSpanConfigPrefix.Next(),
+ EndKey: keys.SystemSpanConfigPrefix.Next().PrefixEnd(),
+ },
+ {
+ // Extends beyond the reserved keyspace on the right.
+ Key: keys.SystemSpanConfigPrefix.Next().PrefixEnd(),
+ EndKey: keys.SystemSpanConfigKeyMax.Next().Next(),
+ },
+ } {
+ require.Panics(t, func() { MakeTargetFromSpan(tc) })
+ }
+}
+
+func makeTenantKeyspaceTargetOrFatal(
t *testing.T, sourceID roachpb.TenantID, targetID roachpb.TenantID,
) SystemTarget {
- target, err := MakeTenantTarget(sourceID, targetID)
+ target, err := MakeTenantKeyspaceTarget(sourceID, targetID)
require.NoError(t, err)
return target
}
diff --git a/pkg/ui/workspaces/db-console/src/views/reports/containers/problemRanges/connectionsTable.tsx b/pkg/ui/workspaces/db-console/src/views/reports/containers/problemRanges/connectionsTable.tsx
index bdd7fccd8496..b9fa02ab2697 100644
--- a/pkg/ui/workspaces/db-console/src/views/reports/containers/problemRanges/connectionsTable.tsx
+++ b/pkg/ui/workspaces/db-console/src/views/reports/containers/problemRanges/connectionsTable.tsx
@@ -48,7 +48,7 @@ const connectionTableColumns: ConnectionTableColumn[] = [
extract: problem => problem.no_raft_leader_range_ids.length,
},
{
- title: "Invalid Lease",
+ title: "Expired Lease",
extract: problem => problem.no_lease_range_ids.length,
},
{
diff --git a/pkg/ui/workspaces/db-console/src/views/reports/containers/problemRanges/index.tsx b/pkg/ui/workspaces/db-console/src/views/reports/containers/problemRanges/index.tsx
index e8e733486802..5924137679ea 100644
--- a/pkg/ui/workspaces/db-console/src/views/reports/containers/problemRanges/index.tsx
+++ b/pkg/ui/workspaces/db-console/src/views/reports/containers/problemRanges/index.tsx
@@ -46,6 +46,7 @@ function ProblemRangeList(props: {
name: string;
problems: NodeProblems$Properties[];
extract: (p: NodeProblems$Properties) => Long[];
+ description?: string;
}) {
const ids = _.chain(props.problems)
.filter(problem => _.isEmpty(problem.error_message))
@@ -61,6 +62,9 @@ function ProblemRangeList(props: {
return (
{props.name}
+ {props.description && (
+ <div>{props.description}</div>
+ )}
{_.map(ids, id => {
return (
@@ -179,9 +183,10 @@ export class ProblemRanges extends React.Component
{
extract={problem => problem.no_raft_leader_range_ids}
/>
extract={problem => problem.no_lease_range_ids}
+ description="Note that having expired leases is unlikely to be a problem. They can occur after node restarts and will clear on its own in up to 24 hours."
/>