diff --git a/pkg/ccl/backupccl/restore_job.go b/pkg/ccl/backupccl/restore_job.go
index afb33731b23c..e355e941b703 100644
--- a/pkg/ccl/backupccl/restore_job.go
+++ b/pkg/ccl/backupccl/restore_job.go
@@ -1153,8 +1153,18 @@ func createImportingDescriptors(
 			return err
 		}
 		for _, tenant := range details.Tenants {
-			// Mark the tenant info as adding.
-			tenant.State = descpb.TenantInfo_ADD
+			switch tenant.State {
+			case descpb.TenantInfo_ACTIVE:
+				// If the tenant was backed up in an `ACTIVE` state then we create
+				// the restored record in an `ADD` state and mark it `ACTIVE` at
+				// the end of the restore.
+				tenant.State = descpb.TenantInfo_ADD
+			case descpb.TenantInfo_DROP, descpb.TenantInfo_ADD:
+				// If the tenant was backed up in a `DROP` or `ADD` state then we must
+				// create the restored tenant record in that state as well.
+			default:
+				return errors.AssertionFailedf("unknown tenant state %v", tenant)
+			}
 			if err := sql.CreateTenantRecord(ctx, p.ExecCfg(), txn, &tenant, initialTenantZoneConfig); err != nil {
 				return err
 			}
@@ -2042,8 +2052,20 @@ func (r *restoreResumer) publishDescriptors(
 	}
 
 	for _, tenant := range details.Tenants {
-		if err := sql.ActivateTenant(ctx, r.execCfg, txn, tenant.ID); err != nil {
-			return err
+		switch tenant.State {
+		case descpb.TenantInfo_ACTIVE:
+			// If the tenant was backed up in an `ACTIVE` state then we must activate
+			// the tenant as the final step of the restore. The tenant has already
+			// been created at an earlier stage in the restore in an `ADD` state.
+			if err := sql.ActivateTenant(ctx, r.execCfg, txn, tenant.ID); err != nil {
+				return err
+			}
+		case descpb.TenantInfo_DROP, descpb.TenantInfo_ADD:
+			// If the tenant was backed up in a `DROP` or `ADD` state then we do not
+			// want to activate the tenant.
+		default:
+			return errors.AssertionFailedf("unknown tenant state %v", tenant)
 		}
 	}
 
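Taken together, the two restore_job.go hunks implement a single round trip: `createImportingDescriptors` writes tenants that were backed up `ACTIVE` down in an `ADD` state so they stay unavailable mid-restore, and `publishDescriptors` later activates exactly those tenants. A minimal, self-contained Go sketch of that mapping, using a stand-in enum rather than the real `descpb` protobuf types (`TenantState`, `stateAtCreate`, and `activateAtPublish` are illustrative names, not part of the patch):

```go
package main

import (
	"errors"
	"fmt"
)

// TenantState stands in for the descpb.TenantInfo state enum; the real code
// switches over the protobuf values directly.
type TenantState int

const (
	Add TenantState = iota
	Active
	Drop
)

// stateAtCreate mirrors the createImportingDescriptors hunk: a tenant backed
// up as ACTIVE is created in ADD state until the restore completes, while ADD
// and DROP tenants keep their backed-up state.
func stateAtCreate(backedUp TenantState) (TenantState, error) {
	switch backedUp {
	case Active:
		return Add, nil
	case Add, Drop:
		return backedUp, nil
	default:
		return 0, errors.New("unknown tenant state")
	}
}

// activateAtPublish mirrors the publishDescriptors hunk: only tenants that
// were backed up as ACTIVE are activated when descriptors are published.
func activateAtPublish(backedUp TenantState) bool {
	return backedUp == Active
}

func main() {
	for _, s := range []TenantState{Active, Add, Drop} {
		created, _ := stateAtCreate(s)
		fmt.Printf("backed up %d -> created as %d, activated at publish: %t\n",
			s, created, activateAtPublish(s))
	}
}
```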
diff --git a/pkg/ccl/backupccl/targets.go b/pkg/ccl/backupccl/targets.go
index ff2ec09d6d87..5a5450461be2 100644
--- a/pkg/ccl/backupccl/targets.go
+++ b/pkg/ccl/backupccl/targets.go
@@ -38,6 +38,7 @@ import (
 	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
 	"github.com/cockroachdb/cockroach/pkg/util/hlc"
 	"github.com/cockroachdb/cockroach/pkg/util/timeutil"
+	"github.com/cockroachdb/cockroach/pkg/util/tracing"
 	"github.com/cockroachdb/errors"
 )
 
@@ -276,7 +277,7 @@ func fullClusterTargets(
 }
 
 func fullClusterTargetsRestore(
-	allDescs []catalog.Descriptor, lastBackupManifest backuppb.BackupManifest,
+	ctx context.Context, allDescs []catalog.Descriptor, lastBackupManifest backuppb.BackupManifest,
 ) (
 	[]catalog.Descriptor,
 	[]catalog.DatabaseDescriptor,
@@ -284,6 +285,10 @@ func fullClusterTargetsRestore(
 	[]descpb.TenantInfoWithUsage,
 	error,
 ) {
+	ctx, span := tracing.ChildSpan(ctx, "backupccl.fullClusterTargetsRestore")
+	defer span.Finish()
+	_ = ctx // ctx is currently unused, but this new ctx should be used below in the future.
+
 	fullClusterDescs, fullClusterDBs, err := fullClusterTargets(allDescs)
 	var filteredDescs []catalog.Descriptor
 	var filteredDBs []catalog.DatabaseDescriptor
@@ -354,10 +359,12 @@ func selectTargets(
 	[]descpb.TenantInfoWithUsage,
 	error,
 ) {
+	ctx, span := tracing.ChildSpan(ctx, "backupccl.selectTargets")
+	defer span.Finish()
 	allDescs, lastBackupManifest := backupinfo.LoadSQLDescsFromBackupsAtTime(backupManifests, asOf)
 
 	if descriptorCoverage == tree.AllDescriptors {
-		return fullClusterTargetsRestore(allDescs, lastBackupManifest)
+		return fullClusterTargetsRestore(ctx, allDescs, lastBackupManifest)
 	}
 
 	if descriptorCoverage == tree.SystemUsers {
diff --git a/pkg/ccl/backupccl/testdata/backup-restore/restore-tenants b/pkg/ccl/backupccl/testdata/backup-restore/restore-tenants
new file mode 100644
index 000000000000..f4c4c7a76684
--- /dev/null
+++ b/pkg/ccl/backupccl/testdata/backup-restore/restore-tenants
@@ -0,0 +1,60 @@
+new-server name=s1
+----
+
+# Create a few tenants.
+exec-sql
+SELECT crdb_internal.create_tenant(5);
+----
+
+exec-sql
+SELECT crdb_internal.create_tenant(6);
+----
+
+# Drop one of them.
+exec-sql
+SELECT crdb_internal.destroy_tenant(5);
+----
+
+query-sql
+SELECT id,active,crdb_internal.pb_to_json('cockroach.sql.sqlbase.TenantInfo', info, true) FROM system.tenants;
+----
+5 false {"id": "5", "state": "DROP"}
+6 true {"id": "6", "state": "ACTIVE"}
+
+exec-sql
+BACKUP INTO 'nodelocal://1/cluster'
+----
+
+exec-sql expect-error-regex=(tenant 5 is not active)
+BACKUP TENANT 5 INTO 'nodelocal://1/tenant5'
+----
+regex matches error
+
+exec-sql
+BACKUP TENANT 6 INTO 'nodelocal://1/tenant6'
+----
+
+new-server name=s2 share-io-dir=s1
+----
+
+exec-sql
+RESTORE FROM LATEST IN 'nodelocal://1/cluster'
+----
+
+# A dropped tenant should be restored as an inactive tenant.
+query-sql
+SELECT id,active,crdb_internal.pb_to_json('cockroach.sql.sqlbase.TenantInfo', info, true) FROM system.tenants;
+----
+5 false {"id": "5", "state": "DROP"}
+6 true {"id": "6", "state": "ACTIVE"}
+
+exec-sql
+RESTORE TENANT 6 FROM LATEST IN 'nodelocal://1/tenant6' WITH tenant = '7';
+----
+
+query-sql
+SELECT id,active,crdb_internal.pb_to_json('cockroach.sql.sqlbase.TenantInfo', info, true) FROM system.tenants;
+----
+5 false {"id": "5", "state": "DROP"}
+6 true {"id": "6", "state": "ACTIVE"}
+7 true {"id": "7", "state": "ACTIVE"}
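The new restore-tenants file is in CockroachDB's datadriven test format: each directive (`new-server`, `exec-sql`, `query-sql`, ...) is followed by its input, a `----` separator, and the expected output. As a rough, hypothetical sketch of how such a file is consumed with the `cockroachdb/datadriven` package (the handler bodies below elide the real server plumbing, and backupccl's actual driver also implements `new-server`, `share-io-dir`, and `expect-error-regex`):

```go
package backupccl_test

import (
	"testing"

	"github.com/cockroachdb/datadriven"
)

// TestRestoreTenantsSketch is a stripped-down, illustrative driver; it shows
// the datadriven control flow only and would need a real test cluster wired
// in before the SQL directives could do anything.
func TestRestoreTenantsSketch(t *testing.T) {
	datadriven.RunTest(t, "testdata/backup-restore/restore-tenants",
		func(t *testing.T, d *datadriven.TestData) string {
			switch d.Cmd {
			case "exec-sql":
				// Execute d.Input against the test cluster; an empty return
				// value matches an empty expected block after "----".
				return ""
			case "query-sql":
				// Run d.Input and return the result rows as a string, which
				// datadriven compares against the block below "----".
				return ""
			default:
				t.Fatalf("unknown directive: %s", d.Cmd)
				return ""
			}
		})
}
```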