diff --git a/docs/generated/sql/bnf/show_backup.bnf b/docs/generated/sql/bnf/show_backup.bnf index 8a08abd80324..ce065f1b09d4 100644 --- a/docs/generated/sql/bnf/show_backup.bnf +++ b/docs/generated/sql/bnf/show_backup.bnf @@ -1,23 +1,9 @@ show_backup_stmt ::= 'SHOW' 'BACKUPS' 'IN' location_opt_list - | 'SHOW' 'BACKUP' show_backup_details 'FROM' string_or_placeholder 'IN' string_or_placeholder_opt_list 'WITH' kv_option_list - | 'SHOW' 'BACKUP' show_backup_details 'FROM' string_or_placeholder 'IN' string_or_placeholder_opt_list 'WITH' 'OPTIONS' '(' kv_option_list ')' - | 'SHOW' 'BACKUP' show_backup_details 'FROM' string_or_placeholder 'IN' string_or_placeholder_opt_list - | 'SHOW' 'BACKUP' subdirectory 'IN' location_opt_list 'WITH' kv_option_list - | 'SHOW' 'BACKUP' subdirectory 'IN' location_opt_list 'WITH' 'OPTIONS' '(' kv_option_list ')' - | 'SHOW' 'BACKUP' subdirectory 'IN' location_opt_list - | 'SHOW' 'BACKUP' string_or_placeholder 'WITH' kv_option_list - | 'SHOW' 'BACKUP' string_or_placeholder 'WITH' 'OPTIONS' '(' kv_option_list ')' - | 'SHOW' 'BACKUP' string_or_placeholder - | 'SHOW' 'BACKUP' 'SCHEMAS' location 'WITH' kv_option_list - | 'SHOW' 'BACKUP' 'SCHEMAS' location 'WITH' 'OPTIONS' '(' kv_option_list ')' - | 'SHOW' 'BACKUP' 'SCHEMAS' location - | 'SHOW' 'BACKUP' 'FILES' string_or_placeholder 'WITH' kv_option_list - | 'SHOW' 'BACKUP' 'FILES' string_or_placeholder 'WITH' 'OPTIONS' '(' kv_option_list ')' - | 'SHOW' 'BACKUP' 'FILES' string_or_placeholder - | 'SHOW' 'BACKUP' 'RANGES' string_or_placeholder 'WITH' kv_option_list - | 'SHOW' 'BACKUP' 'RANGES' string_or_placeholder 'WITH' 'OPTIONS' '(' kv_option_list ')' - | 'SHOW' 'BACKUP' 'RANGES' string_or_placeholder - | 'SHOW' 'BACKUP' 'VALIDATE' string_or_placeholder 'WITH' kv_option_list - | 'SHOW' 'BACKUP' 'VALIDATE' string_or_placeholder 'WITH' 'OPTIONS' '(' kv_option_list ')' - | 'SHOW' 'BACKUP' 'VALIDATE' string_or_placeholder + | 'SHOW' 'BACKUP' show_backup_details 'FROM' string_or_placeholder 'IN' string_or_placeholder_opt_list opt_with_show_backup_options + | 'SHOW' 'BACKUP' subdirectory 'IN' location_opt_list opt_with_show_backup_options + | 'SHOW' 'BACKUP' string_or_placeholder opt_with_show_backup_options + | 'SHOW' 'BACKUP' 'SCHEMAS' location opt_with_show_backup_options + | 'SHOW' 'BACKUP' 'FILES' string_or_placeholder opt_with_show_backup_options + | 'SHOW' 'BACKUP' 'RANGES' string_or_placeholder opt_with_show_backup_options + | 'SHOW' 'BACKUP' 'VALIDATE' string_or_placeholder opt_with_show_backup_options diff --git a/docs/generated/sql/bnf/stmt_block.bnf b/docs/generated/sql/bnf/stmt_block.bnf index 3a7b05157a97..55e94ecb0024 100644 --- a/docs/generated/sql/bnf/stmt_block.bnf +++ b/docs/generated/sql/bnf/stmt_block.bnf @@ -771,13 +771,13 @@ use_stmt ::= show_backup_stmt ::= 'SHOW' 'BACKUPS' 'IN' string_or_placeholder_opt_list - | 'SHOW' 'BACKUP' show_backup_details 'FROM' string_or_placeholder 'IN' string_or_placeholder_opt_list opt_with_options - | 'SHOW' 'BACKUP' string_or_placeholder 'IN' string_or_placeholder_opt_list opt_with_options - | 'SHOW' 'BACKUP' string_or_placeholder opt_with_options - | 'SHOW' 'BACKUP' 'SCHEMAS' string_or_placeholder opt_with_options - | 'SHOW' 'BACKUP' 'FILES' string_or_placeholder opt_with_options - | 'SHOW' 'BACKUP' 'RANGES' string_or_placeholder opt_with_options - | 'SHOW' 'BACKUP' 'VALIDATE' string_or_placeholder opt_with_options + | 'SHOW' 'BACKUP' show_backup_details 'FROM' string_or_placeholder 'IN' string_or_placeholder_opt_list opt_with_show_backup_options + | 'SHOW' 'BACKUP' 
string_or_placeholder 'IN' string_or_placeholder_opt_list opt_with_show_backup_options + | 'SHOW' 'BACKUP' string_or_placeholder opt_with_show_backup_options + | 'SHOW' 'BACKUP' 'SCHEMAS' string_or_placeholder opt_with_show_backup_options + | 'SHOW' 'BACKUP' 'FILES' string_or_placeholder opt_with_show_backup_options + | 'SHOW' 'BACKUP' 'RANGES' string_or_placeholder opt_with_show_backup_options + | 'SHOW' 'BACKUP' 'VALIDATE' string_or_placeholder opt_with_show_backup_options show_columns_stmt ::= 'SHOW' 'COLUMNS' 'FROM' table_name with_comment @@ -1010,6 +1010,7 @@ unreserved_keyword ::= | 'ALTER' | 'ALWAYS' | 'ASENSITIVE' + | 'AS_JSON' | 'AT' | 'ATOMIC' | 'ATTRIBUTE' @@ -1032,6 +1033,7 @@ unreserved_keyword ::= | 'CAPABILITY' | 'CASCADE' | 'CHANGEFEED' + | 'CHECK_FILES' | 'CLOSE' | 'CLUSTER' | 'COLUMNS' @@ -1069,7 +1071,9 @@ unreserved_keyword ::= | 'DATABASES' | 'DAY' | 'DEALLOCATE' + | 'DEBUG_IDS' | 'DEBUG_PAUSE_ON' + | 'DEBUG_DUMP_METADATA_SST' | 'DECLARE' | 'DELETE' | 'DEFAULTS' @@ -1087,6 +1091,7 @@ unreserved_keyword ::= | 'ENCODING' | 'ENCRYPTED' | 'ENCRYPTION_PASSPHRASE' + | 'ENCRYPTION_INFO_DIR' | 'ENUM' | 'ENUMS' | 'ESCAPE' @@ -1901,6 +1906,11 @@ show_backup_details ::= | 'RANGES' | 'VALIDATE' +opt_with_show_backup_options ::= + 'WITH' show_backup_options_list + | 'WITH' 'OPTIONS' '(' show_backup_options_list ')' + | + with_comment ::= 'WITH' 'COMMENT' | @@ -2642,6 +2652,9 @@ extra_var_value ::= 'ON' | cockroachdb_extra_reserved_keyword +show_backup_options_list ::= + ( show_backup_options ) ( ( ',' show_backup_options ) )* + targets_roles ::= 'ROLE' role_spec_list | 'SCHEMA' schema_name_list @@ -3181,6 +3194,17 @@ for_locking_item ::= var_list ::= ( var_value ) ( ( ',' var_value ) )* +show_backup_options ::= + 'AS_JSON' + | 'CHECK_FILES' + | 'DEBUG_IDS' + | 'INCREMENTAL_LOCATION' '=' string_or_placeholder_opt_list + | 'KMS' '=' string_or_placeholder_opt_list + | 'ENCRYPTION_PASSPHRASE' '=' string_or_placeholder + | 'PRIVILEGES' + | 'ENCRYPTION_INFO_DIR' '=' string_or_placeholder + | 'DEBUG_DUMP_METADATA_SST' + schema_wildcard ::= wildcard_pattern @@ -3382,11 +3406,16 @@ family_name ::= name bare_label_keywords ::= - 'ATOMIC' + 'AS_JSON' + | 'ATOMIC' | 'CALLED' | 'COST' + | 'CHECK_FILES' + | 'DEBUG_IDS' + | 'DEBUG_DUMP_METADATA_SST' | 'DEFINER' | 'DEPENDS' + | 'ENCRYPTION_INFO_DIR' | 'EXTERNAL' | 'IMMUTABLE' | 'INPUT' diff --git a/pkg/ccl/backupccl/backup_planning.go b/pkg/ccl/backupccl/backup_planning.go index 6d4020734c90..268ddd8c9a5f 100644 --- a/pkg/ccl/backupccl/backup_planning.go +++ b/pkg/ccl/backupccl/backup_planning.go @@ -62,14 +62,6 @@ import ( ) const ( - backupOptRevisionHistory = "revision_history" - backupOptWithPrivileges = "privileges" - backupOptAsJSON = "as_json" - backupOptWithDebugIDs = "debug_ids" - backupOptIncStorage = "incremental_location" - backupOptDebugMetadataSST = "debug_dump_metadata_sst" - backupOptEncDir = "encryption_info_dir" - backupOptCheckFiles = "check_files" // backupPartitionDescriptorPrefix is the file name prefix for serialized // BackupPartitionDescriptor protos. 
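
The backupOpt* string constants removed above now exist only as SQL keywords (as_json, check_files, debug_ids, debug_dump_metadata_sst, encryption_info_dir, incremental_location, kms, encryption_passphrase, privileges) wired into the grammar changes shown elsewhere in this diff. A minimal, self-contained sketch of exercising several of the new typed options together; the connection string and both nodelocal paths are stand-ins and assume a locally running cluster that already has such a backup collection:

    package main

    import (
        "database/sql"
        "fmt"
        "log"

        _ "github.com/lib/pq" // CockroachDB speaks the Postgres wire protocol.
    )

    func main() {
        // Stand-in connection string; adjust for a real environment.
        db, err := sql.Open("postgres",
            "postgresql://root@localhost:26257/defaultdb?sslmode=disable")
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()

        // The option names are the new keywords; results are discarded here.
        if _, err := db.Exec(`SHOW BACKUP FROM LATEST IN 'nodelocal://1/collection'
            WITH check_files, privileges, debug_ids,
                 incremental_location = 'nodelocal://1/collection-inc'`); err != nil {
            log.Fatal(err)
        }
        fmt.Println("SHOW BACKUP executed")
    }
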
backupPartitionDescriptorPrefix = "BACKUP_PART" diff --git a/pkg/ccl/backupccl/backup_test.go b/pkg/ccl/backupccl/backup_test.go index f0e0e480d94a..2aee64a65e11 100644 --- a/pkg/ccl/backupccl/backup_test.go +++ b/pkg/ccl/backupccl/backup_test.go @@ -1226,19 +1226,16 @@ func TestEncryptedBackupRestoreSystemJobs(t *testing.T) { } { t.Run(tc.name, func(t *testing.T) { var encryptionOption string - var sanitizedEncryptionOption1 string - var sanitizedEncryptionOption2 string + var sanitizedEncryptionOption string if tc.useKMS { correctKMSURI, _ := getAWSKMSURI(t, regionEnvVariable, keyIDEnvVariable) encryptionOption = fmt.Sprintf("kms = '%s'", correctKMSURI) sanitizedURI, err := redactTestKMSURI(correctKMSURI) require.NoError(t, err) - sanitizedEncryptionOption1 = fmt.Sprintf("kms = '%s'", sanitizedURI) - sanitizedEncryptionOption2 = sanitizedEncryptionOption1 + sanitizedEncryptionOption = fmt.Sprintf("kms = '%s'", sanitizedURI) } else { encryptionOption = "encryption_passphrase = 'abcdefg'" - sanitizedEncryptionOption1 = "encryption_passphrase = '*****'" - sanitizedEncryptionOption2 = "encryption_passphrase = 'redacted'" + sanitizedEncryptionOption = "encryption_passphrase = '*****'" } _, sqlDB, _, cleanupFn := backupRestoreTestSetup(t, multiNode, 3, InitManualReplication) conn := sqlDB.DB.(*gosql.DB) @@ -1261,7 +1258,7 @@ func TestEncryptedBackupRestoreSystemJobs(t *testing.T) { Username: username.RootUserName(), Description: fmt.Sprintf( `BACKUP DATABASE data TO '%s' WITH %s`, - backupLoc1, sanitizedEncryptionOption1), + backupLoc1, sanitizedEncryptionOption), DescriptorIDs: descpb.IDs{ descpb.ID(backupDatabaseID), descpb.ID(backupSchemaID), @@ -1280,7 +1277,7 @@ into_db='restoredb', %s)`, encryptionOption), backupLoc1) Username: username.RootUserName(), Description: fmt.Sprintf( `RESTORE TABLE data.bank FROM '%s' WITH %s, into_db = 'restoredb'`, - backupLoc1, sanitizedEncryptionOption2, + backupLoc1, sanitizedEncryptionOption, ), DescriptorIDs: descpb.IDs{ descpb.ID(restoreDatabaseID + 1), @@ -4256,9 +4253,8 @@ func TestEncryptedBackup(t *testing.T) { sqlDB.Exec(t, fmt.Sprintf(`SHOW BACKUP $1 WITH %s`, encryptionOption), backupLoc1) - sqlDB.Exec(t, fmt.Sprintf(`SHOW BACKUP $1 WITH %s,encryption_info_dir='%s'`, - encryptionOption, - backupLoc1), + sqlDB.Exec(t, fmt.Sprintf(`SHOW BACKUP $1 WITH %s, encryption_info_dir = '%s'`, + encryptionOption, backupLoc1), backupLoc1inc) var expectedShowError string @@ -4330,7 +4326,7 @@ func TestRegionalKMSEncryptedBackup(t *testing.T) { backupLoc1 := localFoo + "/x?COCKROACH_LOCALITY=default" backupLoc2 := localFoo + "/x2?COCKROACH_LOCALITY=" + url.QueryEscape("dc=dc1") - sqlDB.Exec(t, fmt.Sprintf(`BACKUP TO ($1, $2) WITH %s`, + sqlDB.Exec(t, fmt.Sprintf(`BACKUP INTO ($1, $2) WITH %s`, concatMultiRegionKMSURIs(multiRegionKMSURIs)), backupLoc1, backupLoc2) @@ -4342,14 +4338,18 @@ func TestRegionalKMSEncryptedBackup(t *testing.T) { checkBackupFilesEncrypted(t, rawDir) - sqlDB.Exec(t, fmt.Sprintf(`SHOW BACKUP $1 WITH KMS='%s'`, multiRegionKMSURIs[0]), + // check that SHOW BACKUP is valid when a single kms uri is provided and when multiple are + sqlDB.Exec(t, fmt.Sprintf(`SHOW BACKUP FROM LATEST IN $1 WITH KMS='%s'`, multiRegionKMSURIs[0]), backupLoc1) + sqlDB.Exec(t, fmt.Sprintf(`SHOW BACKUP FROM LATEST IN ($1, $2) WITH %s`, + concatMultiRegionKMSURIs(multiRegionKMSURIs)), backupLoc1, backupLoc2) + // Attempt to RESTORE using each of the regional KMSs independently. 
for _, uri := range multiRegionKMSURIs { sqlDB.Exec(t, `DROP DATABASE neverappears CASCADE`) - sqlDB.Exec(t, fmt.Sprintf(`RESTORE DATABASE neverappears FROM ($1, $2) WITH %s`, + sqlDB.Exec(t, fmt.Sprintf(`RESTORE DATABASE neverappears FROM LATEST IN ($1, $2) WITH %s`, concatMultiRegionKMSURIs([]string{uri})), backupLoc1, backupLoc2) sqlDB.CheckQueryResults(t, `SHOW EXPERIMENTAL_FINGERPRINTS FROM TABLE neverappears.neverappears`, before) @@ -9899,18 +9899,13 @@ func TestBackupRestoreSeparateExplicitIsDefault(t *testing.T) { sqlDB.Exec(t, fmt.Sprintf("SHOW BACKUPS IN %s", dest)) sqlDB.Exec(t, fmt.Sprintf("SHOW BACKUP FROM LATEST IN %s", dest)) + // Locality aware show backups will eventually fail if not all localities are provided, + // but for now, they're ok. if len(br.dest) > 1 { - // Locality aware show backups will eventually fail if not all localities are provided, - // but for now, they're ok. sqlDB.Exec(t, fmt.Sprintf("SHOW BACKUP FROM LATEST IN %s", br.dest[1])) - - errorMsg := "SHOW BACKUP on locality aware backups using incremental_location is not" + - " supported yet" - sqlDB.ExpectErr(t, errorMsg, fmt.Sprintf("SHOW BACKUP FROM LATEST IN %s WITH incremental_location= %s", dest, br.inc[0])) - } else { - // non locality aware show backup with incremental_location should work! - sqlDB.Exec(t, fmt.Sprintf("SHOW BACKUP FROM LATEST IN %s WITH incremental_location= %s", dest, inc)) } + + sqlDB.Exec(t, fmt.Sprintf("SHOW BACKUP FROM LATEST IN %s WITH incremental_location= %s", dest, inc)) } sir := fmt.Sprintf("RESTORE DATABASE fkdb FROM LATEST IN %s WITH new_db_name = 'inc_fkdb', incremental_location = %s", dest, inc) sqlDB.Exec(t, sir) diff --git a/pkg/ccl/backupccl/show.go b/pkg/ccl/backupccl/show.go index a185c3ca7f52..133555a33c6d 100644 --- a/pkg/ccl/backupccl/show.go +++ b/pkg/ccl/backupccl/show.go @@ -187,11 +187,15 @@ func showBackupTypeCheck( } if err := exprutil.TypeCheck( ctx, "SHOW BACKUP", p.SemaCtx(), - exprutil.Strings{backup.Path}, - exprutil.StringArrays{tree.Exprs(backup.InCollection)}, - exprutil.KVOptions{ - KVOptions: backup.Options, - Validation: showBackupOptions, + exprutil.Strings{ + backup.Path, + backup.Options.EncryptionPassphrase, + backup.Options.EncryptionInfoDir, + }, + exprutil.StringArrays{ + tree.Exprs(backup.InCollection), + tree.Exprs(backup.Options.IncrementalStorage), + tree.Exprs(backup.Options.DecryptionKMSURI), }, ); err != nil { return false, nil, err @@ -200,52 +204,40 @@ func showBackupTypeCheck( return true, infoReader.header(), nil } -var showBackupOptions = exprutil.KVOptionValidationMap{ - backupencryption.BackupOptEncPassphrase: exprutil.KVStringOptRequireValue, - backupencryption.BackupOptEncKMS: exprutil.KVStringOptRequireValue, - backupOptWithPrivileges: exprutil.KVStringOptRequireNoValue, - backupOptAsJSON: exprutil.KVStringOptRequireNoValue, - backupOptWithDebugIDs: exprutil.KVStringOptRequireNoValue, - backupOptIncStorage: exprutil.KVStringOptRequireValue, - backupOptDebugMetadataSST: exprutil.KVStringOptRequireNoValue, - backupOptEncDir: exprutil.KVStringOptRequireValue, - backupOptCheckFiles: exprutil.KVStringOptRequireNoValue, -} - // showBackupPlanHook implements PlanHookFn. 
func showBackupPlanHook( ctx context.Context, stmt tree.Statement, p sql.PlanHookState, ) (sql.PlanHookRowFn, colinfo.ResultColumns, []sql.PlanNode, bool, error) { - backup, ok := stmt.(*tree.ShowBackup) + showStmt, ok := stmt.(*tree.ShowBackup) if !ok { return nil, nil, nil, false, nil } exprEval := p.ExprEvaluator("SHOW BACKUP") - if backup.Path == nil && backup.InCollection != nil { + if showStmt.Path == nil && showStmt.InCollection != nil { collection, err := exprEval.StringArray( - ctx, tree.Exprs(backup.InCollection), + ctx, tree.Exprs(showStmt.InCollection), ) if err != nil { return nil, nil, nil, false, err } - return showBackupsInCollectionPlanHook(ctx, collection, backup, p) + return showBackupsInCollectionPlanHook(ctx, collection, showStmt, p) } - to, err := exprEval.String(ctx, backup.Path) + to, err := exprEval.String(ctx, showStmt.Path) if err != nil { return nil, nil, nil, false, err } var inCol []string - if backup.InCollection != nil { - inCol, err = exprEval.StringArray(ctx, tree.Exprs(backup.InCollection)) + if showStmt.InCollection != nil { + inCol, err = exprEval.StringArray(ctx, tree.Exprs(showStmt.InCollection)) if err != nil { return nil, nil, nil, false, err } } - infoReader := getBackupInfoReader(p, backup) - opts, err := exprEval.KVOptions(ctx, backup.Options, showBackupOptions) + infoReader := getBackupInfoReader(p, showStmt) + if err != nil { return nil, nil, nil, false, err } @@ -305,12 +297,13 @@ func showBackupPlanHook( } // TODO(msbutler): put encryption resolution in helper function, hopefully shared with RESTORE - // A user that calls SHOW BACKUP on an encrypted incremental - // backup will need to pass their full backup's directory to the - // encryption_info_dir parameter because the `ENCRYPTION-INFO` file - // necessary to decode the incremental backup lives in the full backup dir. 
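
The comment removed here still describes the behavior that the showEncErr hint below enforces: when SHOW BACKUP is pointed only at an incremental backup, the ENCRYPTION-INFO file lives in the full backup's directory, so encryption_info_dir must name that directory. A minimal usage sketch in the style of TestEncryptedBackup earlier in this diff (sqlDB, t, backupLoc1, and backupLoc1inc are that test's fixtures, not new API):

    // Point SHOW BACKUP at the incremental location, and tell it where the
    // full backup (and therefore the ENCRYPTION-INFO file) lives.
    sqlDB.Exec(t, fmt.Sprintf(
        `SHOW BACKUP $1 WITH encryption_passphrase = 'abcdefg', encryption_info_dir = '%s'`,
        backupLoc1),
        backupLoc1inc)
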
+ encStore := baseStores[0] - if encDir, ok := opts[backupOptEncDir]; ok { + if showStmt.Options.EncryptionInfoDir != nil { + encDir, err := exprEval.String(ctx, showStmt.Options.EncryptionInfoDir) + if err != nil { + return err + } encStore, err = p.ExecCfg().DistSQLSrv.ExternalStorageFromURI(ctx, encDir, p.User()) if err != nil { return errors.Wrap(err, "make storage") @@ -326,7 +319,11 @@ func showBackupPlanHook( ) showEncErr := `If you are running SHOW BACKUP exclusively on an incremental backup, you must pass the 'encryption_info_dir' parameter that points to the directory of your full backup` - if passphrase, ok := opts[backupencryption.BackupOptEncPassphrase]; ok { + if showStmt.Options.EncryptionPassphrase != nil { + passphrase, err := exprEval.String(ctx, showStmt.Options.EncryptionPassphrase) + if err != nil { + return err + } opts, err := backupencryption.ReadEncryptionOptions(ctx, encStore) if errors.Is(err, backupencryption.ErrEncryptionInfoRead) { return errors.WithHint(err, showEncErr) @@ -339,7 +336,11 @@ you must pass the 'encryption_info_dir' parameter that points to the directory o Mode: jobspb.EncryptionMode_Passphrase, Key: encryptionKey, } - } else if kms, ok := opts[backupencryption.BackupOptEncKMS]; ok { + } else if showStmt.Options.DecryptionKMSURI != nil { + kms, err := exprEval.StringArray(ctx, tree.Exprs(showStmt.Options.DecryptionKMSURI)) + if err != nil { + return err + } opts, err := backupencryption.ReadEncryptionOptions(ctx, encStore) if errors.Is(err, backupencryption.ErrEncryptionInfoRead) { return errors.WithHint(err, showEncErr) @@ -347,12 +348,11 @@ you must pass the 'encryption_info_dir' parameter that points to the directory o if err != nil { return err } - var defaultKMSInfo *jobspb.BackupEncryptionOptions_KMSInfo for _, encFile := range opts { defaultKMSInfo, err = backupencryption.ValidateKMSURIsAgainstFullBackup( ctx, - []string{kms}, + kms, backupencryption.NewEncryptedDataKeyMapFromProtoMap(encFile.EncryptedDataKeyByKMSMasterKeyID), &kmsEnv, ) @@ -368,16 +368,13 @@ you must pass the 'encryption_info_dir' parameter that points to the directory o KMSInfo: defaultKMSInfo, } } - explicitIncPaths := make([]string, 0) - explicitIncPath := opts[backupOptIncStorage] - if len(explicitIncPath) > 0 { - explicitIncPaths = append(explicitIncPaths, explicitIncPath) - if len(dest) > 1 { - return errors.New("SHOW BACKUP on locality aware backups using incremental_location is" + - " not supported yet") + var explicitIncPaths []string + if showStmt.Options.IncrementalStorage != nil { + explicitIncPaths, err = exprEval.StringArray(ctx, tree.Exprs(showStmt.Options.IncrementalStorage)) + if err != nil { + return err } } - collection, computedSubdir := backupdest.CollectionAndSubdir(dest[0], subdir) fullyResolvedIncrementalsDirectory, err := backupdest.ResolveIncrementalsBackupLocation( ctx, @@ -465,7 +462,7 @@ you must pass the 'encryption_info_dir' parameter that points to the directory o len(dest))) } } - if _, ok := opts[backupOptCheckFiles]; ok { + if showStmt.Options.CheckFiles { fileSizes, err := checkBackupFiles(ctx, info, p.ExecCfg(), p.User(), encryption, &kmsEnv) if err != nil { return err @@ -475,7 +472,7 @@ you must pass the 'encryption_info_dir' parameter that points to the directory o if err := infoReader.showBackup(ctx, &mem, mkStore, info, p.User(), &kmsEnv, resultsCh); err != nil { return err } - if backup.InCollection == nil { + if showStmt.InCollection == nil { telemetry.Count("show-backup.deprecated-subdir-syntax") } else { 
telemetry.Count("show-backup.collection") @@ -486,26 +483,26 @@ you must pass the 'encryption_info_dir' parameter that points to the directory o return fn, infoReader.header(), nil, false, nil } -func getBackupInfoReader(p sql.PlanHookState, backup *tree.ShowBackup) backupInfoReader { +func getBackupInfoReader(p sql.PlanHookState, showStmt *tree.ShowBackup) backupInfoReader { var infoReader backupInfoReader - if dumpSST := backup.Options.HasKey(backupOptDebugMetadataSST); dumpSST { + if showStmt.Options.DebugMetadataSST { infoReader = metadataSSTInfoReader{} - } else if asJSON := backup.Options.HasKey(backupOptAsJSON); asJSON { + } else if showStmt.Options.AsJson { infoReader = manifestInfoReader{shower: jsonShower} } else { var shower backupShower - switch backup.Details { + switch showStmt.Details { case tree.BackupRangeDetails: shower = backupShowerRanges case tree.BackupFileDetails: - shower = backupShowerFileSetup(p, backup.InCollection) + shower = backupShowerFileSetup(p, showStmt.InCollection) case tree.BackupSchemaDetails: - shower = backupShowerDefault(p, true, backup.Options) + shower = backupShowerDefault(p, true, showStmt.Options) case tree.BackupValidateDetails: shower = backupShowerDoctor default: - shower = backupShowerDefault(p, false, backup.Options) + shower = backupShowerDefault(p, false, showStmt.Options) } infoReader = manifestInfoReader{shower: shower} } @@ -659,7 +656,7 @@ type backupShower struct { } // backupShowerHeaders defines the schema for the table presented to the user. -func backupShowerHeaders(showSchemas bool, opts tree.KVOptions) colinfo.ResultColumns { +func backupShowerHeaders(showSchemas bool, opts tree.ShowBackupOptions) colinfo.ResultColumns { baseHeaders := colinfo.ResultColumns{ {Name: "database_name", Typ: types.String}, {Name: "parent_schema_name", Typ: types.String}, @@ -676,14 +673,14 @@ func backupShowerHeaders(showSchemas bool, opts tree.KVOptions) colinfo.ResultCo if showSchemas { baseHeaders = append(baseHeaders, colinfo.ResultColumn{Name: "create_statement", Typ: types.String}) } - if shouldShowPrivleges := opts.HasKey(backupOptWithPrivileges); shouldShowPrivleges { + if opts.Privileges { baseHeaders = append(baseHeaders, colinfo.ResultColumn{Name: "privileges", Typ: types.String}) baseHeaders = append(baseHeaders, colinfo.ResultColumn{Name: "owner", Typ: types.String}) } - if checkFiles := opts.HasKey(backupOptCheckFiles); checkFiles { + if opts.CheckFiles { baseHeaders = append(baseHeaders, colinfo.ResultColumn{Name: "file_bytes", Typ: types.Int}) } - if shouldShowIDs := opts.HasKey(backupOptWithDebugIDs); shouldShowIDs { + if opts.DebugIDs { baseHeaders = append( colinfo.ResultColumns{ baseHeaders[0], @@ -699,10 +696,9 @@ func backupShowerHeaders(showSchemas bool, opts tree.KVOptions) colinfo.ResultCo return baseHeaders } -func backupShowerDefault(p sql.PlanHookState, showSchemas bool, opts tree.KVOptions) backupShower { - shouldShowPrivileges := opts.HasKey(backupOptWithPrivileges) - checkFiles := opts.HasKey(backupOptCheckFiles) - shouldShowIDs := opts.HasKey(backupOptWithDebugIDs) +func backupShowerDefault( + p sql.PlanHookState, showSchemas bool, opts tree.ShowBackupOptions, +) backupShower { return backupShower{ header: backupShowerHeaders(showSchemas, opts), fn: func(ctx context.Context, info backupInfo) ([]tree.Datums, error) { @@ -871,15 +867,15 @@ func backupShowerDefault(p sql.PlanHookState, showSchemas bool, opts tree.KVOpti if showSchemas { row = append(row, createStmtDatum) } - if shouldShowPrivileges { + if 
opts.Privileges { row = append(row, tree.NewDString(showPrivileges(ctx, desc))) owner := desc.GetPrivileges().Owner().SQLIdentifier() row = append(row, tree.NewDString(owner)) } - if checkFiles { + if opts.CheckFiles { row = append(row, fileSizeDatum) } - if shouldShowIDs { + if opts.DebugIDs { // If showing debug IDs, interleave the IDs with the corresponding object names. row = append( tree.Datums{ @@ -912,13 +908,13 @@ func backupShowerDefault(p sql.PlanHookState, showSchemas bool, opts tree.KVOpti if showSchemas { row = append(row, tree.DNull) } - if shouldShowPrivileges { + if opts.Privileges { row = append(row, tree.DNull) } - if checkFiles { + if opts.CheckFiles { row = append(row, tree.DNull) } - if shouldShowIDs { + if opts.DebugIDs { // If showing debug IDs, interleave the IDs with the corresponding object names. row = append( tree.Datums{ @@ -1433,7 +1429,7 @@ var showBackupsInCollectionHeader = colinfo.ResultColumns{ // showBackupPlanHook implements PlanHookFn. func showBackupsInCollectionPlanHook( - ctx context.Context, collection []string, backup *tree.ShowBackup, p sql.PlanHookState, + ctx context.Context, collection []string, showStmt *tree.ShowBackup, p sql.PlanHookState, ) (sql.PlanHookRowFn, colinfo.ResultColumns, []sql.PlanNode, bool, error) { if err := cloudprivilege.CheckDestinationPrivileges(ctx, p, collection); err != nil { @@ -1441,7 +1437,7 @@ func showBackupsInCollectionPlanHook( } fn := func(ctx context.Context, _ []sql.PlanNode, resultsCh chan<- tree.Datums) error { - ctx, span := tracing.ChildSpan(ctx, backup.StatementTag()) + ctx, span := tracing.ChildSpan(ctx, showStmt.StatementTag()) defer span.Finish() store, err := p.ExecCfg().DistSQLSrv.ExternalStorageFromURI(ctx, collection[0], p.User()) diff --git a/pkg/ccl/backupccl/show_test.go b/pkg/ccl/backupccl/show_test.go index 2b7e1dc9f77d..efde7c884a37 100644 --- a/pkg/ccl/backupccl/show_test.go +++ b/pkg/ccl/backupccl/show_test.go @@ -523,7 +523,7 @@ func TestShowBackups(t *testing.T) { // check that full and remote incremental backups appear b3 := sqlDBRestore.QueryStr(t, - `SELECT * FROM [SHOW BACKUP LATEST IN $1 WITH incremental_location= 'nodelocal://0/foo/inc'] WHERE object_type='table'`, full) + `SELECT * FROM [SHOW BACKUP LATEST IN $1 WITH incremental_location = $2 ] WHERE object_type ='table'`, full, remoteInc) require.Equal(t, 3, len(b3)) } diff --git a/pkg/ccl/serverccl/statusccl/tenant_status_test.go b/pkg/ccl/serverccl/statusccl/tenant_status_test.go index 28bb645b3695..5859b8b4015a 100644 --- a/pkg/ccl/serverccl/statusccl/tenant_status_test.go +++ b/pkg/ccl/serverccl/statusccl/tenant_status_test.go @@ -943,30 +943,22 @@ func testTenantStatusCancelSessionErrorMessages(t *testing.T, helper serverccl.T testCases := []struct { sessionID string expectedError string - - // This is a temporary assertion. We should always show the following "not found" error messages, - // regardless of admin status, but our current behavior is slightly broken and will be fixed in #77676. - nonAdminSeesError bool }{ { - sessionID: "", - expectedError: "session ID 00000000000000000000000000000000 not found", - nonAdminSeesError: true, + sessionID: "", + expectedError: "session ID 00000000000000000000000000000000 not found", }, { - sessionID: "01", // This query ID claims to have SQL instance ID 1, different from the one we're talking to. 
- expectedError: "session ID 00000000000000000000000000000001 not found", - nonAdminSeesError: false, + sessionID: "01", // This query ID claims to have SQL instance ID 1, different from the one we're talking to. + expectedError: "session ID 00000000000000000000000000000001 not found", }, { - sessionID: "02", // This query ID claims to have SQL instance ID 2, the instance we're talking to. - expectedError: "session ID 00000000000000000000000000000002 not found", - nonAdminSeesError: false, + sessionID: "02", // This query ID claims to have SQL instance ID 2, the instance we're talking to. + expectedError: "session ID 00000000000000000000000000000002 not found", }, { - sessionID: "42", // This query ID claims to have SQL instance ID 42, which does not exist. - expectedError: "session ID 00000000000000000000000000000042 not found", - nonAdminSeesError: true, + sessionID: "42", // This query ID claims to have SQL instance ID 42, which does not exist. + expectedError: "session ID 00000000000000000000000000000042 not found", }, } @@ -982,12 +974,8 @@ func testTenantStatusCancelSessionErrorMessages(t *testing.T, helper serverccl.T err = client.PostJSONChecked("/_status/cancel_session/0", &serverpb.CancelSessionRequest{ SessionID: sessionID.GetBytes(), }, &resp) - if isAdmin || testCase.nonAdminSeesError { - require.NoError(t, err) - require.Equal(t, testCase.expectedError, resp.Error) - } else { - require.Error(t, err) - } + require.NoError(t, err) + require.Equal(t, testCase.expectedError, resp.Error) }) } }) @@ -1072,36 +1060,27 @@ func testTenantStatusCancelQueryErrorMessages(t *testing.T, helper serverccl.Ten testCases := []struct { queryID string expectedError string - - // This is a temporary assertion. We should always show the following "not found" error messages, - // regardless of admin status, but our current behavior is slightly broken and will be fixed in #77676. - nonAdminSeesError bool }{ { queryID: "BOGUS_QUERY_ID", expectedError: "query ID 00000000000000000000000000000000 malformed: " + "could not decode BOGUS_QUERY_ID as hex: encoding/hex: invalid byte: U+004F 'O'", - nonAdminSeesError: true, }, { - queryID: "", - expectedError: "query ID 00000000000000000000000000000000 not found", - nonAdminSeesError: true, + queryID: "", + expectedError: "query ID 00000000000000000000000000000000 not found", }, { - queryID: "01", // This query ID claims to have SQL instance ID 1, different from the one we're talking to. - expectedError: "query ID 00000000000000000000000000000001 not found", - nonAdminSeesError: false, + queryID: "01", // This query ID claims to have SQL instance ID 1, different from the one we're talking to. + expectedError: "query ID 00000000000000000000000000000001 not found", }, { - queryID: "02", // This query ID claims to have SQL instance ID 2, the instance we're talking to. - expectedError: "query ID 00000000000000000000000000000002 not found", - nonAdminSeesError: false, + queryID: "02", // This query ID claims to have SQL instance ID 2, the instance we're talking to. + expectedError: "query ID 00000000000000000000000000000002 not found", }, { - queryID: "42", // This query ID claims to have SQL instance ID 42, which does not exist. - expectedError: "query ID 00000000000000000000000000000042 not found", - nonAdminSeesError: true, + queryID: "42", // This query ID claims to have SQL instance ID 42, which does not exist. 
+ expectedError: "query ID 00000000000000000000000000000042 not found", }, } @@ -1115,12 +1094,8 @@ func testTenantStatusCancelQueryErrorMessages(t *testing.T, helper serverccl.Ten err := client.PostJSONChecked("/_status/cancel_query/0", &serverpb.CancelQueryRequest{ QueryID: testCase.queryID, }, &resp) - if isAdmin || testCase.nonAdminSeesError { - require.NoError(t, err) - require.Equal(t, testCase.expectedError, resp.Error) - } else { - require.Error(t, err) - } + require.NoError(t, err) + require.Equal(t, testCase.expectedError, resp.Error) }) } }) diff --git a/pkg/server/status.go b/pkg/server/status.go index dfc669db4947..b1e214f6edd0 100644 --- a/pkg/server/status.go +++ b/pkg/server/status.go @@ -270,90 +270,42 @@ func (b *baseStatusServer) getLocalSessions( return userSessions, nil } -type sessionFinder func(sessions []serverpb.Session) (serverpb.Session, error) - -func findSessionBySessionID(sessionID []byte) sessionFinder { - return func(sessions []serverpb.Session) (serverpb.Session, error) { - var session serverpb.Session - for _, s := range sessions { - if bytes.Equal(sessionID, s.ID) { - session = s - break - } - } - if len(session.ID) == 0 { - return session, fmt.Errorf("session ID %s not found", clusterunique.IDFromBytes(sessionID)) - } - return session, nil - } -} - -func findSessionByQueryID(queryID string) sessionFinder { - return func(sessions []serverpb.Session) (serverpb.Session, error) { - var session serverpb.Session - for _, s := range sessions { - for _, q := range s.ActiveQueries { - if queryID == q.ID { - session = s - break - } - } - } - if len(session.ID) == 0 { - return session, fmt.Errorf("query ID %s not found", queryID) - } - return session, nil - } -} - // checkCancelPrivilege returns nil if the user has the necessary cancel action // privileges for a session. This function returns a proper gRPC error status. func (b *baseStatusServer) checkCancelPrivilege( - ctx context.Context, userName username.SQLUsername, findSession sessionFinder, + ctx context.Context, reqUsername username.SQLUsername, sessionUsername username.SQLUsername, ) error { ctx = propagateGatewayMetadata(ctx) ctx = b.AnnotateCtx(ctx) - // reqUser is the user who made the cancellation request. - var reqUser username.SQLUsername - { - sessionUser, isAdmin, err := b.privilegeChecker.getUserAndRole(ctx) - if err != nil { - return serverError(ctx, err) - } - if userName.Undefined() || userName == sessionUser { - reqUser = sessionUser - } else { - // When CANCEL QUERY is run as a SQL statement, sessionUser is always root - // and the user who ran the statement is passed as req.Username. - if !isAdmin { - return errRequiresAdmin - } - reqUser = userName - } + + ctxUsername, isAdmin, err := b.privilegeChecker.getUserAndRole(ctx) + if err != nil { + return serverError(ctx, err) + } + if reqUsername.Undefined() { + reqUsername = ctxUsername + } else if reqUsername != ctxUsername && !isAdmin { + // When CANCEL QUERY is run as a SQL statement, sessionUser is always root + // and the user who ran the statement is passed as req.Username. + return errRequiresAdmin } - hasAdmin, err := b.privilegeChecker.hasAdminRole(ctx, reqUser) + hasAdmin, err := b.privilegeChecker.hasAdminRole(ctx, reqUsername) if err != nil { return serverError(ctx, err) } if !hasAdmin { // Check if the user has permission to see the session. 
- session, err := findSession(b.sessionRegistry.SerializeAll()) - if err != nil { - return serverError(ctx, err) - } - - sessionUser := username.MakeSQLUsernameFromPreNormalizedString(session.Username) - if sessionUser != reqUser { + if sessionUsername != reqUsername { // Must have CANCELQUERY privilege to cancel other users' // sessions/queries. - hasCancelQuery, err := b.privilegeChecker.hasGlobalPrivilege(ctx, reqUser, privilege.CANCELQUERY) + hasCancelQuery, err := b.privilegeChecker.hasGlobalPrivilege(ctx, reqUsername, privilege.CANCELQUERY) if err != nil { return serverError(ctx, err) } if !hasCancelQuery { - ok, err := b.privilegeChecker.hasRoleOption(ctx, reqUser, roleoption.CANCELQUERY) + ok, err := b.privilegeChecker.hasRoleOption(ctx, reqUsername, roleoption.CANCELQUERY) if err != nil { return serverError(ctx, err) } @@ -362,7 +314,7 @@ func (b *baseStatusServer) checkCancelPrivilege( } } // Non-admins cannot cancel admins' sessions/queries. - isAdminSession, err := b.privilegeChecker.hasAdminRole(ctx, sessionUser) + isAdminSession, err := b.privilegeChecker.hasAdminRole(ctx, sessionUsername) if err != nil { return serverError(ctx, err) } @@ -3063,7 +3015,13 @@ func (s *statusServer) CancelSession( ctx = propagateGatewayMetadata(ctx) ctx = s.AnnotateCtx(ctx) - sessionID := clusterunique.IDFromBytes(req.SessionID) + sessionIDBytes := req.SessionID + if len(sessionIDBytes) != 16 { + return &serverpb.CancelSessionResponse{ + Error: fmt.Sprintf("session ID %v malformed", sessionIDBytes), + }, nil + } + sessionID := clusterunique.IDFromBytes(sessionIDBytes) nodeID := sessionID.GetNodeID() local := nodeID == int32(s.serverIterator.getID()) if !local { @@ -3071,8 +3029,7 @@ func (s *statusServer) CancelSession( if err != nil { if errors.Is(err, sqlinstance.NonExistentInstanceError) { return &serverpb.CancelSessionResponse{ - Canceled: false, - Error: fmt.Sprintf("session ID %s not found", sessionID), + Error: fmt.Sprintf("session ID %s not found", sessionID), }, nil } return nil, serverError(ctx, err) @@ -3085,17 +3042,21 @@ func (s *statusServer) CancelSession( return nil, status.Errorf(codes.InvalidArgument, err.Error()) } - if err := s.checkCancelPrivilege(ctx, reqUsername, findSessionBySessionID(req.SessionID)); err != nil { + session, ok := s.sessionRegistry.GetSessionByID(sessionID) + if !ok { + return &serverpb.CancelSessionResponse{ + Error: fmt.Sprintf("session ID %s not found", sessionID), + }, nil + } + + if err := s.checkCancelPrivilege(ctx, reqUsername, session.BaseSessionUser()); err != nil { // NB: not using serverError() here since the priv checker // already returns a proper gRPC error status. 
return nil, err } - r, err := s.sessionRegistry.CancelSession(req.SessionID) - if err != nil { - return nil, serverError(ctx, err) - } - return r, nil + session.CancelSession() + return &serverpb.CancelSessionResponse{Canceled: true}, nil } // CancelQuery responds to a query cancellation request, and cancels @@ -3109,8 +3070,7 @@ func (s *statusServer) CancelQuery( queryID, err := clusterunique.IDFromString(req.QueryID) if err != nil { return &serverpb.CancelQueryResponse{ - Canceled: false, - Error: errors.Wrapf(err, "query ID %s malformed", queryID).Error(), + Error: errors.Wrapf(err, "query ID %s malformed", queryID).Error(), }, nil } @@ -3122,8 +3082,7 @@ func (s *statusServer) CancelQuery( if err != nil { if errors.Is(err, sqlinstance.NonExistentInstanceError) { return &serverpb.CancelQueryResponse{ - Canceled: false, - Error: fmt.Sprintf("query ID %s not found", queryID), + Error: fmt.Sprintf("query ID %s not found", queryID), }, nil } return nil, serverError(ctx, err) @@ -3136,18 +3095,23 @@ func (s *statusServer) CancelQuery( return nil, status.Errorf(codes.InvalidArgument, err.Error()) } - if err := s.checkCancelPrivilege(ctx, reqUsername, findSessionByQueryID(req.QueryID)); err != nil { + session, ok := s.sessionRegistry.GetSessionByQueryID(queryID) + if !ok { + return &serverpb.CancelQueryResponse{ + Error: fmt.Sprintf("query ID %s not found", queryID), + }, nil + } + + if err := s.checkCancelPrivilege(ctx, reqUsername, session.BaseSessionUser()); err != nil { // NB: not using serverError() here since the priv checker // already returns a proper gRPC error status. return nil, err } - output := &serverpb.CancelQueryResponse{} - output.Canceled, err = s.sessionRegistry.CancelQuery(req.QueryID) - if err != nil { - output.Error = err.Error() - } - return output, nil + isCanceled := session.CancelQuery(queryID) + return &serverpb.CancelQueryResponse{ + Canceled: isCanceled, + }, nil } // CancelQueryByKey responds to a pgwire query cancellation request, and cancels @@ -3184,12 +3148,18 @@ func (s *statusServer) CancelQueryByKey( }() if local { - resp = &serverpb.CancelQueryByKeyResponse{} - resp.Canceled, err = s.sessionRegistry.CancelQueryByKey(req.CancelQueryKey) - if err != nil { - resp.Error = err.Error() + cancelQueryKey := req.CancelQueryKey + session, ok := s.sessionRegistry.GetSessionByCancelKey(cancelQueryKey) + if !ok { + return &serverpb.CancelQueryByKeyResponse{ + Error: fmt.Sprintf("session for cancel key %d not found", cancelQueryKey), + }, nil } - return resp, nil + + isCanceled := session.CancelActiveQueries() + return &serverpb.CancelQueryByKeyResponse{ + Canceled: isCanceled, + }, nil } // This request needs to be forwarded to another node. diff --git a/pkg/sql/conn_executor.go b/pkg/sql/conn_executor.go index 0e51778ee8a4..8cc4ed2c0350 100644 --- a/pkg/sql/conn_executor.go +++ b/pkg/sql/conn_executor.go @@ -3134,8 +3134,16 @@ func (ex *connExecutor) initStatementResult( return nil } -// cancelQuery is part of the registrySession interface. -func (ex *connExecutor) cancelQuery(queryID clusterunique.ID) bool { +// hasQuery is part of the RegistrySession interface. +func (ex *connExecutor) hasQuery(queryID clusterunique.ID) bool { + ex.mu.RLock() + defer ex.mu.RUnlock() + _, exists := ex.mu.ActiveQueries[queryID] + return exists +} + +// CancelQuery is part of the RegistrySession interface. 
+func (ex *connExecutor) CancelQuery(queryID clusterunique.ID) bool { ex.mu.Lock() defer ex.mu.Unlock() if queryMeta, exists := ex.mu.ActiveQueries[queryID]; exists { @@ -3145,8 +3153,8 @@ func (ex *connExecutor) cancelQuery(queryID clusterunique.ID) bool { return false } -// cancelCurrentQueries is part of the registrySession interface. -func (ex *connExecutor) cancelCurrentQueries() bool { +// CancelActiveQueries is part of the RegistrySession interface. +func (ex *connExecutor) CancelActiveQueries() bool { ex.mu.Lock() defer ex.mu.Unlock() canceled := false @@ -3157,8 +3165,8 @@ func (ex *connExecutor) cancelCurrentQueries() bool { return canceled } -// cancelSession is part of the registrySession interface. -func (ex *connExecutor) cancelSession() { +// CancelSession is part of the RegistrySession interface. +func (ex *connExecutor) CancelSession() { if ex.onCancelSession == nil { return } @@ -3166,12 +3174,17 @@ func (ex *connExecutor) cancelSession() { ex.onCancelSession() } -// user is part of the registrySession interface. +// user is part of the RegistrySession interface. func (ex *connExecutor) user() username.SQLUsername { return ex.sessionData().User() } -// serialize is part of the registrySession interface. +// BaseSessionUser is part of the RegistrySession interface. +func (ex *connExecutor) BaseSessionUser() username.SQLUsername { + return ex.sessionDataStack.Base().SessionUser() +} + +// serialize is part of the RegistrySession interface. func (ex *connExecutor) serialize() serverpb.Session { ex.mu.RLock() defer ex.mu.RUnlock() diff --git a/pkg/sql/conn_executor_exec.go b/pkg/sql/conn_executor_exec.go index c0d8bdf33ada..3cd46ad7ab23 100644 --- a/pkg/sql/conn_executor_exec.go +++ b/pkg/sql/conn_executor_exec.go @@ -149,7 +149,7 @@ func (ex *connExecutor) execStmt( // Cancel the session if the idle time exceeds the idle in session timeout. ex.mu.IdleInSessionTimeout = timeout{time.AfterFunc( ex.sessionData().IdleInSessionTimeout, - ex.cancelSession, + ex.CancelSession, )} } @@ -162,7 +162,7 @@ func (ex *connExecutor) execStmt( default: ex.mu.IdleInTransactionSessionTimeout = timeout{time.AfterFunc( ex.sessionData().IdleInTransactionSessionTimeout, - ex.cancelSession, + ex.CancelSession, )} } } diff --git a/pkg/sql/exec_util.go b/pkg/sql/exec_util.go index adcd27f79640..a6e0e8081de3 100644 --- a/pkg/sql/exec_util.go +++ b/pkg/sql/exec_util.go @@ -2067,8 +2067,8 @@ type SessionArgs struct { type SessionRegistry struct { mu struct { syncutil.RWMutex - sessionsByID map[clusterunique.ID]registrySession - sessionsByCancelKey map[pgwirecancel.BackendKeyData]registrySession + sessionsByID map[clusterunique.ID]RegistrySession + sessionsByCancelKey map[pgwirecancel.BackendKeyData]RegistrySession } } @@ -2076,31 +2076,40 @@ type SessionRegistry struct { // of sessions. 
func NewSessionRegistry() *SessionRegistry { r := SessionRegistry{} - r.mu.sessionsByID = make(map[clusterunique.ID]registrySession) - r.mu.sessionsByCancelKey = make(map[pgwirecancel.BackendKeyData]registrySession) + r.mu.sessionsByID = make(map[clusterunique.ID]RegistrySession) + r.mu.sessionsByCancelKey = make(map[pgwirecancel.BackendKeyData]RegistrySession) return &r } -func (r *SessionRegistry) getSessionByID(id clusterunique.ID) (registrySession, bool) { +func (r *SessionRegistry) GetSessionByID(sessionID clusterunique.ID) (RegistrySession, bool) { r.mu.RLock() defer r.mu.RUnlock() - session, ok := r.mu.sessionsByID[id] + session, ok := r.mu.sessionsByID[sessionID] return session, ok } -func (r *SessionRegistry) getSessionByCancelKey( +func (r *SessionRegistry) GetSessionByQueryID(queryID clusterunique.ID) (RegistrySession, bool) { + for _, session := range r.getSessions() { + if session.hasQuery(queryID) { + return session, true + } + } + return nil, false +} + +func (r *SessionRegistry) GetSessionByCancelKey( cancelKey pgwirecancel.BackendKeyData, -) (registrySession, bool) { +) (RegistrySession, bool) { r.mu.RLock() defer r.mu.RUnlock() session, ok := r.mu.sessionsByCancelKey[cancelKey] return session, ok } -func (r *SessionRegistry) getSessions() []registrySession { +func (r *SessionRegistry) getSessions() []RegistrySession { r.mu.RLock() defer r.mu.RUnlock() - sessions := make([]registrySession, 0, len(r.mu.sessionsByID)) + sessions := make([]RegistrySession, 0, len(r.mu.sessionsByID)) for _, session := range r.mu.sessionsByID { sessions = append(sessions, session) } @@ -2108,7 +2117,7 @@ func (r *SessionRegistry) getSessions() []registrySession { } func (r *SessionRegistry) register( - id clusterunique.ID, queryCancelKey pgwirecancel.BackendKeyData, s registrySession, + id clusterunique.ID, queryCancelKey pgwirecancel.BackendKeyData, s RegistrySession, ) { r.mu.Lock() defer r.mu.Unlock() @@ -2125,65 +2134,22 @@ func (r *SessionRegistry) deregister( delete(r.mu.sessionsByCancelKey, queryCancelKey) } -type registrySession interface { +type RegistrySession interface { user() username.SQLUsername - cancelQuery(queryID clusterunique.ID) bool - cancelCurrentQueries() bool - cancelSession() + // BaseSessionUser returns the base session's username. + BaseSessionUser() username.SQLUsername + hasQuery(queryID clusterunique.ID) bool + // CancelQuery cancels the query specified by queryID if it exists. + CancelQuery(queryID clusterunique.ID) bool + // CancelActiveQueries cancels all currently active queries. + CancelActiveQueries() bool + // CancelSession cancels the session. + CancelSession() // serialize serializes a Session into a serverpb.Session // that can be served over RPC. serialize() serverpb.Session } -// CancelQuery looks up the associated query in the session registry and cancels -// it. The caller is responsible for all permission checks. -func (r *SessionRegistry) CancelQuery(queryIDStr string) (bool, error) { - queryID, err := clusterunique.IDFromString(queryIDStr) - if err != nil { - return false, errors.Wrapf(err, "query ID %s malformed", queryID) - } - - for _, session := range r.getSessions() { - if session.cancelQuery(queryID) { - return true, nil - } - } - - return false, fmt.Errorf("query ID %s not found", queryID) -} - -// CancelQueryByKey looks up the associated query in the session registry and -// cancels it. 
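
With the CancelQuery, CancelQueryByKey, and CancelSession convenience wrappers removed from SessionRegistry below, callers now look the session up first and then act on it directly. A condensed sketch of the pattern pkg/server/status.go adopts earlier in this diff (error handling trimmed; s, ctx, reqUsername, and queryID come from that caller):

    // Resolve the target session, check cancel privileges against its user,
    // then cancel through the RegistrySession methods.
    session, ok := s.sessionRegistry.GetSessionByQueryID(queryID)
    if !ok {
        return &serverpb.CancelQueryResponse{
            Error: fmt.Sprintf("query ID %s not found", queryID),
        }, nil
    }
    if err := s.checkCancelPrivilege(ctx, reqUsername, session.BaseSessionUser()); err != nil {
        return nil, err
    }
    return &serverpb.CancelQueryResponse{Canceled: session.CancelQuery(queryID)}, nil
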
-func (r *SessionRegistry) CancelQueryByKey( - queryCancelKey pgwirecancel.BackendKeyData, -) (canceled bool, err error) { - session, ok := r.getSessionByCancelKey(queryCancelKey) - if !ok { - return false, fmt.Errorf("session for cancel key %d not found", queryCancelKey) - } - return session.cancelCurrentQueries(), nil -} - -// CancelSession looks up the specified session in the session registry and -// cancels it. The caller is responsible for all permission checks. -func (r *SessionRegistry) CancelSession( - sessionIDBytes []byte, -) (*serverpb.CancelSessionResponse, error) { - if len(sessionIDBytes) != 16 { - return nil, errors.Errorf("invalid non-16-byte UUID %v", sessionIDBytes) - } - sessionID := clusterunique.IDFromBytes(sessionIDBytes) - - session, ok := r.getSessionByID(sessionID) - if !ok { - return &serverpb.CancelSessionResponse{ - Error: fmt.Sprintf("session ID %s not found", sessionID), - }, nil - } - session.cancelSession() - return &serverpb.CancelSessionResponse{Canceled: true}, nil -} - // SerializeAll returns a slice of all sessions in the registry converted to // serverpb.Sessions. func (r *SessionRegistry) SerializeAll() []serverpb.Session { diff --git a/pkg/sql/parser/sql.y b/pkg/sql/parser/sql.y index 9d2d5df4043c..ca9b733e201d 100644 --- a/pkg/sql/parser/sql.y +++ b/pkg/sql/parser/sql.y @@ -704,6 +704,9 @@ func (u *sqlSymUnion) copyOptions() *tree.CopyOptions { func (u *sqlSymUnion) showBackupDetails() tree.ShowBackupDetails { return u.val.(tree.ShowBackupDetails) } +func (u *sqlSymUnion) showBackupOptions() *tree.ShowBackupOptions { + return u.val.(*tree.ShowBackupOptions) +} func (u *sqlSymUnion) restoreOptions() *tree.RestoreOptions { return u.val.(*tree.RestoreOptions) } @@ -875,7 +878,7 @@ func (u *sqlSymUnion) showTenantOpts() tree.ShowTenantOptions { // Ordinary key words in alphabetical order. 
%token ABORT ABSOLUTE ACCESS ACTION ADD ADMIN AFTER AGGREGATE -%token ALL ALTER ALWAYS ANALYSE ANALYZE AND AND_AND ANY ANNOTATE_TYPE ARRAY AS ASC AT_AT +%token ALL ALTER ALWAYS ANALYSE ANALYZE AND AND_AND ANY ANNOTATE_TYPE ARRAY AS ASC AS_JSON AT_AT %token ASENSITIVE ASYMMETRIC AT ATOMIC ATTRIBUTE AUTHORIZATION AUTOMATIC AVAILABILITY %token BACKUP BACKUPS BACKWARD BEFORE BEGIN BETWEEN BIGINT BIGSERIAL BINARY BIT @@ -883,7 +886,7 @@ func (u *sqlSymUnion) showTenantOpts() tree.ShowTenantOptions { %token BOOLEAN BOTH BOX2D BUNDLE BY %token CACHE CALLED CANCEL CANCELQUERY CAPABILITIES CAPABILITY CASCADE CASE CAST CBRT CHANGEFEED CHAR -%token CHARACTER CHARACTERISTICS CHECK CLOSE +%token CHARACTER CHARACTERISTICS CHECK CHECK_FILES CLOSE %token CLUSTER COALESCE COLLATE COLLATION COLUMN COLUMNS COMMENT COMMENTS COMMIT %token COMMITTED COMPACT COMPLETE COMPLETIONS CONCAT CONCURRENTLY CONFIGURATION CONFIGURATIONS CONFIGURE %token CONFLICT CONNECTION CONNECTIONS CONSTRAINT CONSTRAINTS CONTAINS CONTROLCHANGEFEED CONTROLJOB @@ -892,11 +895,11 @@ func (u *sqlSymUnion) showTenantOpts() tree.ShowTenantOptions { %token CURRENT_ROLE CURRENT_TIME CURRENT_TIMESTAMP %token CURRENT_USER CURSOR CYCLE -%token DATA DATABASE DATABASES DATE DAY DEBUG_PAUSE_ON DEC DECIMAL DEFAULT DEFAULTS DEFINER +%token DATA DATABASE DATABASES DATE DAY DEBUG_IDS DEBUG_PAUSE_ON DEC DEBUG_DUMP_METADATA_SST DECIMAL DEFAULT DEFAULTS DEFINER %token DEALLOCATE DECLARE DEFERRABLE DEFERRED DELETE DELIMITER DEPENDS DESC DESTINATION DETACHED DETAILS %token DISCARD DISTINCT DO DOMAIN DOUBLE DROP -%token ELSE ENCODING ENCRYPTED ENCRYPTION_PASSPHRASE END ENUM ENUMS ESCAPE EXCEPT EXCLUDE EXCLUDING +%token ELSE ENCODING ENCRYPTED ENCRYPTION_INFO_DIR ENCRYPTION_PASSPHRASE END ENUM ENUMS ESCAPE EXCEPT EXCLUDE EXCLUDING %token EXISTS EXECUTE EXECUTION EXPERIMENTAL %token EXPERIMENTAL_FINGERPRINTS EXPERIMENTAL_REPLICA %token EXPERIMENTAL_AUDIT EXPERIMENTAL_RELOCATE @@ -1307,6 +1310,7 @@ func (u *sqlSymUnion) showTenantOpts() tree.ShowTenantOptions { %type <*tree.RestoreOptions> opt_with_restore_options restore_options restore_options_list %type <*tree.TenantReplicationOptions> opt_with_tenant_replication_options tenant_replication_options tenant_replication_options_list %type show_backup_details +%type <*tree.ShowBackupOptions> opt_with_show_backup_options show_backup_options show_backup_options_list %type <*tree.CopyOptions> opt_with_copy_options copy_options copy_options_list %type import_format %type storage_parameter_key @@ -7095,63 +7099,63 @@ show_backup_stmt: InCollection: $4.stringOrPlaceholderOptList(), } } -| SHOW BACKUP show_backup_details FROM string_or_placeholder IN string_or_placeholder_opt_list opt_with_options +| SHOW BACKUP show_backup_details FROM string_or_placeholder IN string_or_placeholder_opt_list opt_with_show_backup_options { $$.val = &tree.ShowBackup{ From: true, Details: $3.showBackupDetails(), Path: $5.expr(), InCollection: $7.stringOrPlaceholderOptList(), - Options: $8.kvOptions(), + Options: *$8.showBackupOptions(), } } -| SHOW BACKUP string_or_placeholder IN string_or_placeholder_opt_list opt_with_options +| SHOW BACKUP string_or_placeholder IN string_or_placeholder_opt_list opt_with_show_backup_options { $$.val = &tree.ShowBackup{ Details: tree.BackupDefaultDetails, Path: $3.expr(), InCollection: $5.stringOrPlaceholderOptList(), - Options: $6.kvOptions(), + Options: *$6.showBackupOptions(), } } -| SHOW BACKUP string_or_placeholder opt_with_options +| SHOW BACKUP string_or_placeholder opt_with_show_backup_options { 
$$.val = &tree.ShowBackup{ Details: tree.BackupDefaultDetails, Path: $3.expr(), - Options: $4.kvOptions(), + Options: *$4.showBackupOptions(), } } -| SHOW BACKUP SCHEMAS string_or_placeholder opt_with_options +| SHOW BACKUP SCHEMAS string_or_placeholder opt_with_show_backup_options { $$.val = &tree.ShowBackup{ Details: tree.BackupSchemaDetails, Path: $4.expr(), - Options: $5.kvOptions(), + Options: *$5.showBackupOptions(), } } -| SHOW BACKUP FILES string_or_placeholder opt_with_options +| SHOW BACKUP FILES string_or_placeholder opt_with_show_backup_options { $$.val = &tree.ShowBackup{ Details: tree.BackupFileDetails, Path: $4.expr(), - Options: $5.kvOptions(), + Options: *$5.showBackupOptions(), } } -| SHOW BACKUP RANGES string_or_placeholder opt_with_options +| SHOW BACKUP RANGES string_or_placeholder opt_with_show_backup_options { $$.val = &tree.ShowBackup{ Details: tree.BackupRangeDetails, Path: $4.expr(), - Options: $5.kvOptions(), + Options: *$5.showBackupOptions(), } } -| SHOW BACKUP VALIDATE string_or_placeholder opt_with_options +| SHOW BACKUP VALIDATE string_or_placeholder opt_with_show_backup_options { $$.val = &tree.ShowBackup{ Details: tree.BackupValidateDetails, Path: $4.expr(), - Options: $5.kvOptions(), + Options: *$5.showBackupOptions(), } } | SHOW BACKUP error // SHOW HELP: SHOW BACKUP @@ -7178,6 +7182,71 @@ show_backup_details: $$.val = tree.BackupValidateDetails } +opt_with_show_backup_options: + WITH show_backup_options_list + { + $$.val = $2.showBackupOptions() + } +| WITH OPTIONS '(' show_backup_options_list ')' + { + $$.val = $4.showBackupOptions() + } +| /* EMPTY */ + { + $$.val = &tree.ShowBackupOptions{} + } + +show_backup_options_list: + // Require at least one option + show_backup_options + { + $$.val = $1.showBackupOptions() + } +| show_backup_options_list ',' show_backup_options + { + if err := $1.showBackupOptions().CombineWith($3.showBackupOptions()); err != nil { + return setErr(sqllex, err) + } + } + +show_backup_options: + AS_JSON + { + $$.val = &tree.ShowBackupOptions{AsJson: true} + } + | CHECK_FILES + { + $$.val = &tree.ShowBackupOptions{CheckFiles: true} + } + | DEBUG_IDS + { + $$.val = &tree.ShowBackupOptions{DebugIDs: true} + } + | INCREMENTAL_LOCATION '=' string_or_placeholder_opt_list + { + $$.val = &tree.ShowBackupOptions{IncrementalStorage: $3.stringOrPlaceholderOptList()} + } + | KMS '=' string_or_placeholder_opt_list + { + $$.val = &tree.ShowBackupOptions{DecryptionKMSURI: $3.stringOrPlaceholderOptList()} + } + | ENCRYPTION_PASSPHRASE '=' string_or_placeholder + { + $$.val = &tree.ShowBackupOptions{EncryptionPassphrase: $3.expr()} + } + | PRIVILEGES + { + $$.val = &tree.ShowBackupOptions{Privileges: true} + } + | ENCRYPTION_INFO_DIR '=' string_or_placeholder + { + $$.val = &tree.ShowBackupOptions{EncryptionInfoDir: $3.expr()} + } + | DEBUG_DUMP_METADATA_SST + { + $$.val = &tree.ShowBackupOptions{DebugMetadataSST: true} + } + // %Help: SHOW CLUSTER SETTING - display cluster settings // %Category: Cfg // %Text: @@ -15853,6 +15922,7 @@ unreserved_keyword: | ALTER | ALWAYS | ASENSITIVE +| AS_JSON | AT | ATOMIC | ATTRIBUTE @@ -15875,6 +15945,7 @@ unreserved_keyword: | CAPABILITY | CASCADE | CHANGEFEED +| CHECK_FILES | CLOSE | CLUSTER | COLUMNS @@ -15912,7 +15983,9 @@ unreserved_keyword: | DATABASES | DAY | DEALLOCATE +| DEBUG_IDS | DEBUG_PAUSE_ON +| DEBUG_DUMP_METADATA_SST | DECLARE | DELETE | DEFAULTS @@ -15930,6 +16003,7 @@ unreserved_keyword: | ENCODING | ENCRYPTED | ENCRYPTION_PASSPHRASE +| ENCRYPTION_INFO_DIR | ENUM | ENUMS | ESCAPE @@ 
-16279,11 +16353,16 @@ unreserved_keyword: // query like "SELECT col label FROM table" where "label" is a new keyword. // Any new keyword should be added to this list. bare_label_keywords: - ATOMIC + AS_JSON +| ATOMIC | CALLED | COST +| CHECK_FILES +| DEBUG_IDS +| DEBUG_DUMP_METADATA_SST | DEFINER | DEPENDS +| ENCRYPTION_INFO_DIR | EXTERNAL | IMMUTABLE | INPUT diff --git a/pkg/sql/parser/testdata/backup_restore b/pkg/sql/parser/testdata/backup_restore index 950df44ffaa1..01fea821f897 100644 --- a/pkg/sql/parser/testdata/backup_restore +++ b/pkg/sql/parser/testdata/backup_restore @@ -103,12 +103,30 @@ SHOW BACKUP '_' -- literals removed SHOW BACKUP 'bar' -- identifiers removed parse -SHOW BACKUP 'bar' WITH foo = 'bar' +SHOW BACKUP 'bar' WITH ENCRYPTION_PASSPHRASE = 'secret', CHECK_FILES ---- -SHOW BACKUP 'bar' WITH foo = 'bar' -SHOW BACKUP ('bar') WITH foo = ('bar') -- fully parenthesized -SHOW BACKUP '_' WITH foo = '_' -- literals removed -SHOW BACKUP 'bar' WITH _ = 'bar' -- identifiers removed +SHOW BACKUP 'bar' WITH check_files, encryption_passphrase = '*****' -- normalized! +SHOW BACKUP ('bar') WITH check_files, encryption_passphrase = '*****' -- fully parenthesized +SHOW BACKUP '_' WITH check_files, encryption_passphrase = '*****' -- literals removed +SHOW BACKUP 'bar' WITH check_files, encryption_passphrase = '*****' -- identifiers removed +SHOW BACKUP 'bar' WITH check_files, encryption_passphrase = 'secret' -- passwords exposed + +parse +SHOW BACKUP FROM LATEST IN 'bar' WITH incremental_location = 'baz' +---- +SHOW BACKUP FROM 'latest' IN 'bar' WITH incremental_location = 'baz' -- normalized! +SHOW BACKUP FROM ('latest') IN ('bar') WITH incremental_location = ('baz') -- fully parenthesized +SHOW BACKUP FROM '_' IN '_' WITH incremental_location = '_' -- literals removed +SHOW BACKUP FROM 'latest' IN 'bar' WITH incremental_location = 'baz' -- identifiers removed + +parse +SHOW BACKUP FROM LATEST IN ('bar','bar1') WITH KMS = ('foo', 'bar'), incremental_location=('hi','hello') +---- +SHOW BACKUP FROM 'latest' IN ('bar', 'bar1') WITH incremental_location = ('hi', 'hello'), kms = ('foo', 'bar') -- normalized! 
+SHOW BACKUP FROM ('latest') IN (('bar'), ('bar1')) WITH incremental_location = (('hi'), ('hello')), kms = (('foo'), ('bar')) -- fully parenthesized +SHOW BACKUP FROM '_' IN ('_', '_') WITH incremental_location = ('_', '_'), kms = ('_', '_') -- literals removed +SHOW BACKUP FROM 'latest' IN ('bar', 'bar1') WITH incremental_location = ('hi', 'hello'), kms = ('foo', 'bar') -- identifiers removed + parse EXPLAIN SHOW BACKUP 'bar' @@ -134,14 +152,6 @@ SHOW BACKUP FILES ('bar') -- fully parenthesized SHOW BACKUP FILES '_' -- literals removed SHOW BACKUP FILES 'bar' -- identifiers removed -parse -SHOW BACKUP FILES 'bar' WITH foo = 'bar' ----- -SHOW BACKUP FILES 'bar' WITH foo = 'bar' -SHOW BACKUP FILES ('bar') WITH foo = ('bar') -- fully parenthesized -SHOW BACKUP FILES '_' WITH foo = '_' -- literals removed -SHOW BACKUP FILES 'bar' WITH _ = 'bar' -- identifiers removed - parse SHOW BACKUPS IN 'bar' ---- @@ -167,12 +177,12 @@ SHOW BACKUP '_' IN '_' -- literals removed SHOW BACKUP 'foo' IN 'bar' -- identifiers removed parse -SHOW BACKUP FROM $1 IN $2 WITH foo = 'bar' +SHOW BACKUP FROM $1 IN $2 WITH privileges ---- -SHOW BACKUP FROM $1 IN $2 WITH foo = 'bar' -SHOW BACKUP FROM ($1) IN ($2) WITH foo = ('bar') -- fully parenthesized -SHOW BACKUP FROM $1 IN $1 WITH foo = '_' -- literals removed -SHOW BACKUP FROM $1 IN $2 WITH _ = 'bar' -- identifiers removed +SHOW BACKUP FROM $1 IN $2 WITH privileges +SHOW BACKUP FROM ($1) IN ($2) WITH privileges -- fully parenthesized +SHOW BACKUP FROM $1 IN $1 WITH privileges -- literals removed +SHOW BACKUP FROM $1 IN $2 WITH privileges -- identifiers removed parse SHOW BACKUP FILES FROM 'foo' IN 'bar' @@ -199,12 +209,13 @@ SHOW BACKUP SCHEMAS FROM '_' IN '_' -- literals removed SHOW BACKUP SCHEMAS FROM 'foo' IN 'bar' -- identifiers removed parse -SHOW BACKUP $1 IN $2 WITH foo = 'bar' +SHOW BACKUP $1 IN $2 WITH ENCRYPTION_PASSPHRASE = 'secret', ENCRYPTION_INFO_DIR = 'long_live_backupper' ---- -SHOW BACKUP $1 IN $2 WITH foo = 'bar' -SHOW BACKUP ($1) IN ($2) WITH foo = ('bar') -- fully parenthesized -SHOW BACKUP $1 IN $1 WITH foo = '_' -- literals removed -SHOW BACKUP $1 IN $2 WITH _ = 'bar' -- identifiers removed +SHOW BACKUP $1 IN $2 WITH encryption_passphrase = '*****', encryption_info_dir = 'long_live_backupper' -- normalized! +SHOW BACKUP ($1) IN ($2) WITH encryption_passphrase = '*****', encryption_info_dir = ('long_live_backupper') -- fully parenthesized +SHOW BACKUP $1 IN $1 WITH encryption_passphrase = '*****', encryption_info_dir = '_' -- literals removed +SHOW BACKUP $1 IN $2 WITH encryption_passphrase = '*****', encryption_info_dir = 'long_live_backupper' -- identifiers removed +SHOW BACKUP $1 IN $2 WITH encryption_passphrase = 'secret', encryption_info_dir = 'long_live_backupper' -- passwords exposed parse BACKUP TABLE foo TO 'bar' AS OF SYSTEM TIME '1' INCREMENTAL FROM 'baz' @@ -692,20 +703,21 @@ parse RESTORE foo FROM 'bar' WITH OPTIONS (encryption_passphrase='secret', into_db='baz', debug_pause_on='error', skip_missing_foreign_keys, skip_missing_sequences, skip_missing_sequence_owners, skip_missing_views, detached, skip_localities_check) ---- -RESTORE TABLE foo FROM 'bar' WITH encryption_passphrase = 'secret', into_db = 'baz', debug_pause_on = 'error', skip_missing_foreign_keys, skip_missing_sequence_owners, skip_missing_sequences, skip_missing_views, detached, skip_localities_check -- normalized! 
-RESTORE TABLE (foo) FROM ('bar') WITH encryption_passphrase = ('secret'), into_db = ('baz'), debug_pause_on = ('error'), skip_missing_foreign_keys, skip_missing_sequence_owners, skip_missing_sequences, skip_missing_views, detached, skip_localities_check -- fully parenthesized
-RESTORE TABLE foo FROM '_' WITH encryption_passphrase = '_', into_db = '_', debug_pause_on = '_', skip_missing_foreign_keys, skip_missing_sequence_owners, skip_missing_sequences, skip_missing_views, detached, skip_localities_check -- literals removed
-RESTORE TABLE _ FROM 'bar' WITH encryption_passphrase = 'secret', into_db = 'baz', debug_pause_on = 'error', skip_missing_foreign_keys, skip_missing_sequence_owners, skip_missing_sequences, skip_missing_views, detached, skip_localities_check -- identifiers removed
+RESTORE TABLE foo FROM 'bar' WITH encryption_passphrase = '*****', into_db = 'baz', debug_pause_on = 'error', skip_missing_foreign_keys, skip_missing_sequence_owners, skip_missing_sequences, skip_missing_views, detached, skip_localities_check -- normalized!
+RESTORE TABLE (foo) FROM ('bar') WITH encryption_passphrase = '*****', into_db = ('baz'), debug_pause_on = ('error'), skip_missing_foreign_keys, skip_missing_sequence_owners, skip_missing_sequences, skip_missing_views, detached, skip_localities_check -- fully parenthesized
+RESTORE TABLE foo FROM '_' WITH encryption_passphrase = '*****', into_db = '_', debug_pause_on = '_', skip_missing_foreign_keys, skip_missing_sequence_owners, skip_missing_sequences, skip_missing_views, detached, skip_localities_check -- literals removed
+RESTORE TABLE _ FROM 'bar' WITH encryption_passphrase = '*****', into_db = 'baz', debug_pause_on = 'error', skip_missing_foreign_keys, skip_missing_sequence_owners, skip_missing_sequences, skip_missing_views, detached, skip_localities_check -- identifiers removed
+RESTORE TABLE foo FROM 'bar' WITH encryption_passphrase = 'secret', into_db = 'baz', debug_pause_on = 'error', skip_missing_foreign_keys, skip_missing_sequence_owners, skip_missing_sequences, skip_missing_views, detached, skip_localities_check -- passwords exposed
 
 parse
 RESTORE foo FROM 'bar' WITH ENCRYPTION_PASSPHRASE = 'secret', INTO_DB=baz, DEBUG_PAUSE_ON='error', SKIP_MISSING_FOREIGN_KEYS, SKIP_MISSING_SEQUENCES, SKIP_MISSING_SEQUENCE_OWNERS, SKIP_MISSING_VIEWS, SKIP_LOCALITIES_CHECK
 ----
-RESTORE TABLE foo FROM 'bar' WITH encryption_passphrase = 'secret', into_db = 'baz', debug_pause_on = 'error', skip_missing_foreign_keys, skip_missing_sequence_owners, skip_missing_sequences, skip_missing_views, skip_localities_check -- normalized!
-RESTORE TABLE (foo) FROM ('bar') WITH encryption_passphrase = ('secret'), into_db = ('baz'), debug_pause_on = ('error'), skip_missing_foreign_keys, skip_missing_sequence_owners, skip_missing_sequences, skip_missing_views, skip_localities_check -- fully parenthesized
-RESTORE TABLE foo FROM '_' WITH encryption_passphrase = '_', into_db = '_', debug_pause_on = '_', skip_missing_foreign_keys, skip_missing_sequence_owners, skip_missing_sequences, skip_missing_views, skip_localities_check -- literals removed
-RESTORE TABLE _ FROM 'bar' WITH encryption_passphrase = 'secret', into_db = 'baz', debug_pause_on = 'error', skip_missing_foreign_keys, skip_missing_sequence_owners, skip_missing_sequences, skip_missing_views, skip_localities_check -- identifiers removed
-
+RESTORE TABLE foo FROM 'bar' WITH encryption_passphrase = '*****', into_db = 'baz', debug_pause_on = 'error', skip_missing_foreign_keys, skip_missing_sequence_owners, skip_missing_sequences, skip_missing_views, skip_localities_check -- normalized!
+RESTORE TABLE (foo) FROM ('bar') WITH encryption_passphrase = '*****', into_db = ('baz'), debug_pause_on = ('error'), skip_missing_foreign_keys, skip_missing_sequence_owners, skip_missing_sequences, skip_missing_views, skip_localities_check -- fully parenthesized
+RESTORE TABLE foo FROM '_' WITH encryption_passphrase = '*****', into_db = '_', debug_pause_on = '_', skip_missing_foreign_keys, skip_missing_sequence_owners, skip_missing_sequences, skip_missing_views, skip_localities_check -- literals removed
+RESTORE TABLE _ FROM 'bar' WITH encryption_passphrase = '*****', into_db = 'baz', debug_pause_on = 'error', skip_missing_foreign_keys, skip_missing_sequence_owners, skip_missing_sequences, skip_missing_views, skip_localities_check -- identifiers removed
+RESTORE TABLE foo FROM 'bar' WITH encryption_passphrase = 'secret', into_db = 'baz', debug_pause_on = 'error', skip_missing_foreign_keys, skip_missing_sequence_owners, skip_missing_sequences, skip_missing_views, skip_localities_check -- passwords exposed
 
 parse
 RESTORE TENANT 36 FROM ($1, $2) AS OF SYSTEM TIME '1'
diff --git a/pkg/sql/sem/tree/backup.go b/pkg/sql/sem/tree/backup.go
index 0d4355ffb576..e4e17367dc86 100644
--- a/pkg/sql/sem/tree/backup.go
+++ b/pkg/sql/sem/tree/backup.go
@@ -356,7 +356,11 @@ func (o *RestoreOptions) Format(ctx *FmtCtx) {
 	if o.EncryptionPassphrase != nil {
 		addSep = true
 		ctx.WriteString("encryption_passphrase = ")
-		ctx.FormatNode(o.EncryptionPassphrase)
+		if ctx.flags.HasFlags(FmtShowPasswords) {
+			ctx.FormatNode(o.EncryptionPassphrase)
+		} else {
+			ctx.WriteString(PasswordSubstitution)
+		}
 	}
 
 	if o.DecryptionKMSURI != nil {
diff --git a/pkg/sql/sem/tree/show.go b/pkg/sql/sem/tree/show.go
index a4677a8271e7..3a8d515b8bc8 100644
--- a/pkg/sql/sem/tree/show.go
+++ b/pkg/sql/sem/tree/show.go
@@ -23,6 +23,8 @@ import (
 	"fmt"
 
 	"github.com/cockroachdb/cockroach/pkg/sql/lexbase"
+	"github.com/cockroachdb/errors"
+	"github.com/google/go-cmp/cmp"
 )
 
 // ShowVar represents a SHOW statement.
@@ -94,12 +96,14 @@ const (
 // Path to Subdir and InCollection to Dest.
 
 // ShowBackup represents a SHOW BACKUP statement.
+//
+// TODO(msbutler): implement a walkableStmt for ShowBackup.
 type ShowBackup struct {
 	Path         Expr
 	InCollection StringOrPlaceholderOptList
 	From         bool
 	Details      ShowBackupDetails
-	Options      KVOptions
+	Options      ShowBackupOptions
 }
 
 // Format implements the NodeFormatter interface.
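Editor's note: the two Format hunks in this file pair (RestoreOptions above, ShowBackupOptions in the next hunk) gate the passphrase literal behind the FmtShowPasswords flag and otherwise emit PasswordSubstitution, which is what produces the '*****' output and the separate "-- passwords exposed" lines in the parser testdata. The following is a minimal standalone sketch of that branch; fmtCtx, fmtShowPasswords, and passwordSubstitution are illustrative stand-ins, not the tree package's real identifiers.

package main

import (
	"fmt"
	"strings"
)

// Illustrative stand-ins for tree.FmtFlags, tree.FmtCtx, and
// tree.PasswordSubstitution; the real types live in pkg/sql/sem/tree.
type fmtFlags int

const fmtShowPasswords fmtFlags = 1 << 0

const passwordSubstitution = "'*****'"

type fmtCtx struct {
	flags fmtFlags
	sb    strings.Builder
}

func (ctx *fmtCtx) hasFlags(f fmtFlags) bool { return ctx.flags&f == f }

// formatPassphrase mirrors the branch added to RestoreOptions.Format and
// ShowBackupOptions.Format: print the literal only when the caller asked
// for passwords, otherwise emit the substitution marker.
func formatPassphrase(ctx *fmtCtx, passphrase string) {
	ctx.sb.WriteString("encryption_passphrase = ")
	if ctx.hasFlags(fmtShowPasswords) {
		ctx.sb.WriteString("'" + passphrase + "'")
	} else {
		ctx.sb.WriteString(passwordSubstitution)
	}
}

func main() {
	redacted := &fmtCtx{}
	formatPassphrase(redacted, "secret")
	fmt.Println(redacted.sb.String()) // encryption_passphrase = '*****'

	exposed := &fmtCtx{flags: fmtShowPasswords}
	formatPassphrase(exposed, "secret")
	fmt.Println(exposed.sb.String()) // encryption_passphrase = 'secret'
}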
@@ -129,12 +133,179 @@ func (node *ShowBackup) Format(ctx *FmtCtx) {
 		ctx.WriteString(" IN ")
 		ctx.FormatNode(&node.InCollection)
 	}
-	if len(node.Options) > 0 {
+	if !node.Options.IsDefault() {
 		ctx.WriteString(" WITH ")
 		ctx.FormatNode(&node.Options)
 	}
 }
 
+type ShowBackupOptions struct {
+	AsJson               bool
+	CheckFiles           bool
+	DebugIDs             bool
+	IncrementalStorage   StringOrPlaceholderOptList
+	DecryptionKMSURI     StringOrPlaceholderOptList
+	EncryptionPassphrase Expr
+	Privileges           bool
+
+	// EncryptionInfoDir is a hidden option for users who want to run the
+	// deprecated
+	//
+	//   SHOW BACKUP
+	//
+	// form on an encrypted incremental backup: they must pass the full backup's
+	// directory via encryption_info_dir, because the `ENCRYPTION-INFO` file
+	// needed to decode the incremental backup lives in the full backup dir.
+	EncryptionInfoDir Expr
+	DebugMetadataSST  bool
+}
+
+var _ NodeFormatter = &ShowBackupOptions{}
+
+func (o *ShowBackupOptions) Format(ctx *FmtCtx) {
+	var addSep bool
+	maybeAddSep := func() {
+		if addSep {
+			ctx.WriteString(", ")
+		}
+		addSep = true
+	}
+	if o.AsJson {
+		ctx.WriteString("as_json")
+		addSep = true
+	}
+	if o.CheckFiles {
+		maybeAddSep()
+		ctx.WriteString("check_files")
+	}
+	if o.DebugIDs {
+		maybeAddSep()
+		ctx.WriteString("debug_ids")
+	}
+	if o.EncryptionPassphrase != nil {
+		maybeAddSep()
+		ctx.WriteString("encryption_passphrase = ")
+		if ctx.flags.HasFlags(FmtShowPasswords) {
+			ctx.FormatNode(o.EncryptionPassphrase)
+		} else {
+			ctx.WriteString(PasswordSubstitution)
+		}
+	}
+	if o.IncrementalStorage != nil {
+		maybeAddSep()
+		ctx.WriteString("incremental_location = ")
+		ctx.FormatNode(&o.IncrementalStorage)
+	}
+
+	if o.Privileges {
+		maybeAddSep()
+		ctx.WriteString("privileges")
+	}
+
+	if o.EncryptionInfoDir != nil {
+		maybeAddSep()
+		ctx.WriteString("encryption_info_dir = ")
+		ctx.FormatNode(o.EncryptionInfoDir)
+	}
+	if o.DecryptionKMSURI != nil {
+		maybeAddSep()
+		ctx.WriteString("kms = ")
+		ctx.FormatNode(&o.DecryptionKMSURI)
+	}
+	if o.DebugMetadataSST {
+		maybeAddSep()
+		ctx.WriteString("debug_dump_metadata_sst")
+	}
+}
+
+func (o ShowBackupOptions) IsDefault() bool {
+	options := ShowBackupOptions{}
+	return o.AsJson == options.AsJson &&
+		o.CheckFiles == options.CheckFiles &&
+		o.DebugIDs == options.DebugIDs &&
+		cmp.Equal(o.IncrementalStorage, options.IncrementalStorage) &&
+		cmp.Equal(o.DecryptionKMSURI, options.DecryptionKMSURI) &&
+		o.EncryptionPassphrase == options.EncryptionPassphrase &&
+		o.Privileges == options.Privileges &&
+		o.DebugMetadataSST == options.DebugMetadataSST &&
+		o.EncryptionInfoDir == options.EncryptionInfoDir
+}
+
+func combineBools(v1 bool, v2 bool, label string) (bool, error) {
+	if v2 && v1 {
+		return false, errors.Newf("%s option specified multiple times", label)
+	}
+	return v2 || v1, nil
+}
+func combineExpr(v1 Expr, v2 Expr, label string) (Expr, error) {
+	if v1 != nil {
+		if v2 != nil {
+			return v1, errors.Newf("%s option specified multiple times", label)
+		}
+		return v1, nil
+	}
+	return v2, nil
+}
+func combineStringOrPlaceholderOptList(
+	v1 StringOrPlaceholderOptList, v2 StringOrPlaceholderOptList, label string,
+) (StringOrPlaceholderOptList, error) {
+	if v1 != nil {
+		if v2 != nil {
+			return v1, errors.Newf("%s option specified multiple times", label)
+		}
+		return v1, nil
+	}
+	return v2, nil
+}
+
+// CombineWith merges other backup options into this backup options struct.
+// An error is returned if the same option is specified multiple times.
+func (o *ShowBackupOptions) CombineWith(other *ShowBackupOptions) error {
+	var err error
+	o.AsJson, err = combineBools(o.AsJson, other.AsJson, "as_json")
+	if err != nil {
+		return err
+	}
+	o.CheckFiles, err = combineBools(o.CheckFiles, other.CheckFiles, "check_files")
+	if err != nil {
+		return err
+	}
+	o.DebugIDs, err = combineBools(o.DebugIDs, other.DebugIDs, "debug_ids")
+	if err != nil {
+		return err
+	}
+	o.EncryptionPassphrase, err = combineExpr(o.EncryptionPassphrase, other.EncryptionPassphrase,
+		"encryption_passphrase")
+	if err != nil {
+		return err
+	}
+	o.IncrementalStorage, err = combineStringOrPlaceholderOptList(o.IncrementalStorage,
+		other.IncrementalStorage, "incremental_location")
+	if err != nil {
+		return err
+	}
+	o.DecryptionKMSURI, err = combineStringOrPlaceholderOptList(o.DecryptionKMSURI,
+		other.DecryptionKMSURI, "kms")
+	if err != nil {
+		return err
+	}
+	o.Privileges, err = combineBools(o.Privileges, other.Privileges, "privileges")
+	if err != nil {
+		return err
+	}
+	o.DebugMetadataSST, err = combineBools(o.DebugMetadataSST, other.DebugMetadataSST,
+		"debug_dump_metadata_sst")
+	if err != nil {
+		return err
+	}
+	o.EncryptionInfoDir, err = combineExpr(o.EncryptionInfoDir, other.EncryptionInfoDir,
+		"encryption_info_dir")
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
 // ShowColumns represents a SHOW COLUMNS statement.
 type ShowColumns struct {
 	Table *UnresolvedObjectName
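Editor's note: ShowBackupOptions.Format leans on two small conventions: a maybeAddSep closure so ", " is written only between options that are actually set, and an IsDefault comparison against the zero value (via cmp.Equal, since some fields are slices) so ShowBackup.Format can omit the WITH clause entirely. Below is a condensed, self-contained sketch of both ideas using illustrative types, not the tree package's.

package main

import (
	"fmt"
	"strings"
)

// opts is an illustrative stand-in for a WITH-options struct.
type opts struct {
	CheckFiles bool
	DebugIDs   bool
	KMS        string
}

// isDefault reports whether no option was set, mirroring the
// compare-against-the-zero-value idea behind ShowBackupOptions.IsDefault
// (all fields here are comparable, so == suffices).
func (o opts) isDefault() bool { return o == opts{} }

// format writes the option list, inserting ", " only between options that
// are present -- the same maybeAddSep pattern used in the diff above.
func (o opts) format(sb *strings.Builder) {
	addSep := false
	maybeAddSep := func() {
		if addSep {
			sb.WriteString(", ")
		}
		addSep = true
	}
	if o.CheckFiles {
		maybeAddSep()
		sb.WriteString("check_files")
	}
	if o.DebugIDs {
		maybeAddSep()
		sb.WriteString("debug_ids")
	}
	if o.KMS != "" {
		maybeAddSep()
		sb.WriteString("kms = '" + o.KMS + "'")
	}
}

func main() {
	var sb strings.Builder
	o := opts{CheckFiles: true, KMS: "foo"}
	if !o.isDefault() {
		sb.WriteString("SHOW BACKUP 'bar' WITH ")
		o.format(&sb)
	}
	fmt.Println(sb.String()) // SHOW BACKUP 'bar' WITH check_files, kms = 'foo'
}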
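Editor's note: CombineWith exists because the revised grammar can deliver SHOW BACKUP options from more than one WITH production; each partially populated ShowBackupOptions is folded into a single struct, and naming the same option twice is an error. A cut-down sketch of that merge rule follows; options, combineBool, and combineString are illustrative stand-ins rather than the real API.

package main

import "fmt"

// options is a cut-down stand-in for tree.ShowBackupOptions.
type options struct {
	CheckFiles           bool
	EncryptionPassphrase string
}

// combineBool rejects a flag that was set in both operands.
func combineBool(a, b bool, label string) (bool, error) {
	if a && b {
		return false, fmt.Errorf("%s option specified multiple times", label)
	}
	return a || b, nil
}

// combineString keeps whichever value is set and rejects duplicates.
func combineString(a, b, label string) (string, error) {
	if a != "" && b != "" {
		return "", fmt.Errorf("%s option specified multiple times", label)
	}
	if a != "" {
		return a, nil
	}
	return b, nil
}

// combineWith merges other into o, erroring on duplicates, in the same
// spirit as ShowBackupOptions.CombineWith in the diff above.
func (o *options) combineWith(other *options) error {
	var err error
	if o.CheckFiles, err = combineBool(o.CheckFiles, other.CheckFiles, "check_files"); err != nil {
		return err
	}
	o.EncryptionPassphrase, err = combineString(
		o.EncryptionPassphrase, other.EncryptionPassphrase, "encryption_passphrase")
	return err
}

func main() {
	a := &options{CheckFiles: true}
	b := &options{EncryptionPassphrase: "secret"}
	if err := a.combineWith(b); err != nil {
		fmt.Println("unexpected:", err)
	}
	fmt.Printf("%+v\n", *a) // {CheckFiles:true EncryptionPassphrase:secret}

	// Passing check_files a second time is rejected.
	if err := a.combineWith(&options{CheckFiles: true}); err != nil {
		fmt.Println(err) // check_files option specified multiple times
	}
}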