diff --git a/pkg/ccl/backupccl/alter_backup_schedule.go b/pkg/ccl/backupccl/alter_backup_schedule.go
index ae1f928bb5b3..c1b38b0c591a 100644
--- a/pkg/ccl/backupccl/alter_backup_schedule.go
+++ b/pkg/ccl/backupccl/alter_backup_schedule.go
@@ -10,6 +10,7 @@ package backupccl
 
 import (
 	"context"
+	"strconv"
 
 	"github.com/cockroachdb/cockroach/pkg/ccl/backupccl/backuppb"
 	"github.com/cockroachdb/cockroach/pkg/ccl/utilccl"
@@ -31,8 +32,10 @@ const alterBackupScheduleOp = "ALTER BACKUP SCHEDULE"
 type scheduleDetails struct {
 	fullJob  *jobs.ScheduledJob
 	fullArgs *backuppb.ScheduledBackupExecutionArgs
+	fullStmt *tree.Backup
 	incJob   *jobs.ScheduledJob
 	incArgs  *backuppb.ScheduledBackupExecutionArgs
+	incStmt  *tree.Backup
 }
 
 func loadSchedules(
@@ -56,26 +59,44 @@ func loadSchedules(
 		return s, errors.Wrap(err, "un-marshaling args")
 	}
 
+	node, err := parser.ParseOne(args.BackupStatement)
+	if err != nil {
+		return scheduleDetails{}, err
+	}
+	stmt, ok := node.AST.(*tree.Backup)
+	if !ok {
+		return scheduleDetails{}, errors.Newf("unexpected node type %T", node)
+	}
+
 	var dependentSchedule *jobs.ScheduledJob
 	var dependentArgs *backuppb.ScheduledBackupExecutionArgs
+	var dependentStmt *tree.Backup
 	if args.DependentScheduleID != 0 {
 		dependentSchedule, err = jobs.LoadScheduledJob(ctx, env, args.DependentScheduleID,
 			execCfg.InternalExecutor, p.Txn())
 		if err != nil {
-			return s, err
+			return scheduleDetails{}, err
 		}
 		dependentArgs = &backuppb.ScheduledBackupExecutionArgs{}
 		if err := pbtypes.UnmarshalAny(dependentSchedule.ExecutionArgs().Args, dependentArgs); err != nil {
 			return s, errors.Wrap(err, "un-marshaling args")
 		}
+		node, err := parser.ParseOne(dependentArgs.BackupStatement)
+		if err != nil {
+			return scheduleDetails{}, err
+		}
+		dependentStmt, ok = node.AST.(*tree.Backup)
+		if !ok {
+			return scheduleDetails{}, errors.Newf("unexpected node type %T", node)
+		}
 	}
 	if args.BackupType == backuppb.ScheduledBackupExecutionArgs_FULL {
-		s.fullJob, s.fullArgs = schedule, args
-		s.incJob, s.incArgs = dependentSchedule, dependentArgs
+		s.fullJob, s.fullArgs, s.fullStmt = schedule, args, stmt
+		s.incJob, s.incArgs, s.incStmt = dependentSchedule, dependentArgs, dependentStmt
 	} else {
-		s.fullJob, s.fullArgs = dependentSchedule, dependentArgs
-		s.incJob, s.incArgs = schedule, args
+		s.fullJob, s.fullArgs, s.fullStmt = dependentSchedule, dependentArgs, dependentStmt
+		s.incJob, s.incArgs, s.incStmt = schedule, args, stmt
 	}
 	return s, nil
 }
@@ -104,30 +125,48 @@ func doAlterBackupSchedules(
 		return pgerror.Newf(pgcode.InsufficientPrivilege, "only the OWNER of a schedule may alter it")
 	}
 
-	s, err = processFullBackupRecurrence(
+	if s, err = processFullBackupRecurrence(
 		ctx,
 		p,
 		eval.fullBackupAlways,
 		eval.fullBackupRecurrence,
 		eval.isEnterpriseUser,
 		s,
-	)
-	if err != nil {
+	); err != nil {
 		return err
 	}
 
-	s.fullJob, s.incJob, err = processRecurrence(
+	if err := processRecurrence(
 		eval.recurrence,
 		s.fullJob,
 		s.incJob,
-	)
-	if err != nil {
+	); err != nil {
+		return err
+	}
+
+	if err := validateFullIncrementalFrequencies(p, s); err != nil {
 		return err
 	}
 
-	// TODO(benbardin): Verify backup statement. Not needed yet since we can't
-	// modify that statement yet.
+	if err := processLabel(eval, s); err != nil {
+		return err
+	}
+
+	if err := processInto(p, eval, s); err != nil {
+		return err
+	}
+
+	if err := processOptions(eval, s); err != nil {
+		return err
+	}
+
+	if err := processScheduleOptions(ctx, p, eval, s); err != nil {
+		return err
+	}
+
+	// TODO(benbardin): Verify backup statement.
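+	// Re-serialize the (possibly modified) BACKUP AST into the stored
+	// backup_statement so the schedule runs the altered statement.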
+	s.fullArgs.BackupStatement = tree.AsStringWithFlags(s.fullStmt, tree.FmtParsable|tree.FmtShowPasswords)
 	fullAny, err := pbtypes.MarshalAny(s.fullArgs)
 	if err != nil {
 		return err
 	}
@@ -140,6 +179,7 @@ func doAlterBackupSchedules(
 	}
 	if s.incJob != nil {
+		s.incArgs.BackupStatement = tree.AsStringWithFlags(s.incStmt, tree.FmtParsable|tree.FmtShowPasswords)
 		incAny, err := pbtypes.MarshalAny(s.incArgs)
 		if err != nil {
 			return err
 		}
@@ -155,26 +195,139 @@ func doAlterBackupSchedules(
 	return nil
 }
 
+func processScheduleOptions(
+	ctx context.Context, p sql.PlanHookState, eval *alterBackupScheduleEval, s scheduleDetails,
+) error {
+	if eval.scheduleOptions == nil {
+		return nil
+	}
+	scheduleOptions, err := eval.scheduleOptions()
+	if err != nil {
+		return err
+	}
+	fullDetails := s.fullJob.ScheduleDetails()
+	var incDetails *jobspb.ScheduleDetails
+	if s.incJob != nil {
+		incDetails = s.incJob.ScheduleDetails()
+	}
+	for k, v := range scheduleOptions {
+		switch k {
+		case optOnExecFailure:
+			if err := parseOnError(v, fullDetails); err != nil {
+				return err
+			}
+			// Set the schedule to mark the column as dirty.
+			s.fullJob.SetScheduleDetails(*fullDetails)
+			if incDetails == nil {
+				continue
+			}
+			if err := parseOnError(v, incDetails); err != nil {
+				return err
+			}
+			s.incJob.SetScheduleDetails(*incDetails)
+		case optOnPreviousRunning:
+			if err := parseWaitBehavior(v, fullDetails); err != nil {
+				return err
+			}
+
+			s.fullJob.SetScheduleDetails(*fullDetails)
+			if incDetails == nil {
+				continue
+			}
+			if err := parseWaitBehavior(v, incDetails); err != nil {
+				return err
+			}
+			s.incJob.SetScheduleDetails(*incDetails)
+		case optUpdatesLastBackupMetric:
+			// NB: as of 20.2, schedule creation requires admin, so this check is
+			// duplicative. In the future we might relax this so you can schedule
+			// anything you can back up, but this cluster-wide metric should then
+			// remain admin-only.
+			if err := p.RequireAdminRole(ctx, optUpdatesLastBackupMetric); err != nil {
+				return pgerror.Wrap(err, pgcode.InsufficientPrivilege, "")
+			}
+
+			updatesLastBackupMetric, err := strconv.ParseBool(v)
+			if err != nil {
+				return errors.Wrapf(err, "unexpected value for %s: %s", k, v)
+			}
+			s.fullArgs.UpdatesLastBackupMetric = updatesLastBackupMetric
+			if s.incArgs == nil {
+				continue
+			}
+			s.incArgs.UpdatesLastBackupMetric = updatesLastBackupMetric
+		default:
+			return errors.Newf("unexpected schedule option: %s = %s", k, v)
+		}
+	}
+	return nil
+}
+
+func processOptions(eval *alterBackupScheduleEval, s scheduleDetails) error {
+	opts := eval.backupOptions
+	fullOpts := &s.fullStmt.Options
+	if err := processOptionsForArgs(opts, fullOpts); err != nil {
+		return err
+	}
+	if s.incStmt == nil {
+		return nil
+	}
+	incOpts := &s.incStmt.Options
+	if err := processOptionsForArgs(opts, incOpts); err != nil {
+		return err
+	}
+	return nil
+}
+
+func processOptionsForArgs(inOpts tree.BackupOptions, outOpts *tree.BackupOptions) error {
+	if inOpts.CaptureRevisionHistory != nil {
+		outOpts.CaptureRevisionHistory = inOpts.CaptureRevisionHistory
+	}
+
+	// If a string-valued option is set to the empty string, interpret that as "unset."
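+	// For example, SET WITH encryption_passphrase = '' clears a previously
+	// configured passphrase instead of storing an empty one.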
+	if inOpts.EncryptionPassphrase != nil {
+		if tree.AsStringWithFlags(inOpts.EncryptionPassphrase, tree.FmtBareStrings) == "" {
+			outOpts.EncryptionPassphrase = nil
+		} else {
+			outOpts.EncryptionPassphrase = inOpts.EncryptionPassphrase
+		}
+	}
+	if inOpts.EncryptionKMSURI != nil {
+		if tree.AsStringWithFlags(&inOpts.EncryptionKMSURI, tree.FmtBareStrings) == "" {
+			outOpts.EncryptionKMSURI = nil
+		} else {
+			outOpts.EncryptionKMSURI = inOpts.EncryptionKMSURI
+		}
+	}
+	if inOpts.IncrementalStorage != nil {
+		if tree.AsStringWithFlags(&inOpts.IncrementalStorage, tree.FmtBareStrings) == "" {
+			outOpts.IncrementalStorage = nil
+		} else {
+			outOpts.IncrementalStorage = inOpts.IncrementalStorage
+		}
+	}
+	return nil
+}
+
 func processRecurrence(
 	recurrence func() (string, error), fullJob *jobs.ScheduledJob, incJob *jobs.ScheduledJob,
-) (*jobs.ScheduledJob, *jobs.ScheduledJob, error) {
+) error {
 	if recurrence == nil {
-		return fullJob, incJob, nil
+		return nil
 	}
 	recurrenceStr, err := recurrence()
 	if err != nil {
-		return nil, nil, err
+		return err
 	}
 	if incJob != nil {
 		if err := incJob.SetSchedule(recurrenceStr); err != nil {
-			return nil, nil, err
+			return err
 		}
 	} else {
 		if err := fullJob.SetSchedule(recurrenceStr); err != nil {
-			return nil, nil, err
+			return err
 		}
 	}
-	return fullJob, incJob, nil
+	return nil
 }
 
 func processFullBackupRecurrence(
@@ -222,15 +375,9 @@ func processFullBackupRecurrence(
 	if s.incJob == nil {
 		// No existing incremental job, so we need to create it, copying details
 		// from the full.
-		node, err := parser.ParseOne(s.fullArgs.BackupStatement)
-		if err != nil {
-			return scheduleDetails{}, err
-		}
-		stmt, ok := node.AST.(*tree.Backup)
-		if !ok {
-			return scheduleDetails{}, errors.Newf("unexpected node type %T", node)
-		}
-		stmt.AppendToLatest = true
+		s.incStmt = &tree.Backup{}
+		*s.incStmt = *s.fullStmt
+		s.incStmt.AppendToLatest = true
 
 		scheduleExprFn := func() (string, error) {
 			return s.fullJob.ScheduleExpr(), nil
@@ -248,7 +395,7 @@ func processFullBackupRecurrence(
 			*s.fullJob.ScheduleDetails(),
 			jobs.InvalidScheduleID,
 			s.fullArgs.UpdatesLastBackupMetric,
-			stmt,
+			s.incStmt,
 			s.fullArgs.ChainProtectedTimestampRecords,
 		)
 
@@ -298,14 +445,90 @@ func processFullBackupRecurrence(
 	return s, nil
 }
 
+func validateFullIncrementalFrequencies(p sql.PlanHookState, s scheduleDetails) error {
+	if s.incJob == nil {
+		return nil
+	}
+	env := sql.JobSchedulerEnv(p.ExecCfg())
+	now := env.Now()
+
+	fullFreq, err := frequencyFromCron(now, s.fullJob.ScheduleExpr())
+	if err != nil {
+		return err
+	}
+	incFreq, err := frequencyFromCron(now, s.incJob.ScheduleExpr())
+	if err != nil {
+		return err
+	}
+	if fullFreq-incFreq < 0 {
+		return errors.Newf("incremental backups must occur more often than full backups")
+	}
+	return nil
+}
+
+func processLabel(eval *alterBackupScheduleEval, s scheduleDetails) error {
+	if eval.label == nil {
+		return nil
+	}
+	label, err := eval.label()
+	if err != nil {
+		return err
+	}
+	s.fullJob.SetScheduleLabel(label)
+	if s.incJob == nil {
+		return nil
+	}
+	s.incJob.SetScheduleLabel(label)
+	return nil
+}
+
+func processInto(p sql.PlanHookState, eval *alterBackupScheduleEval, s scheduleDetails) error {
+	if eval.into == nil {
+		return nil
+	}
+	into, err := eval.into()
+	if err != nil {
+		return err
+	}
+	s.fullStmt.To = make([]tree.Expr, len(into))
+	for i, dest := range into {
+		s.fullStmt.To[i] = tree.NewStrVal(dest)
+	}
+
+	if s.incJob == nil {
+		return nil
+	}
+
+	s.incStmt.To = make([]tree.Expr, len(into))
+	for i, dest := range into {
+		s.incStmt.To[i] = tree.NewStrVal(dest)
+	}
+
+	// With a new destination, no full backup has completed yet.
+	// Pause incrementals until a full backup completes.
+	s.incJob.Pause()
+	s.incJob.SetScheduleStatus("Waiting for initial backup to complete")
+	s.fullArgs.UnpauseOnSuccess = s.incJob.ScheduleID()
+
+	// Kick off a full backup immediately so we can unpause incrementals.
+	// This mirrors the behavior of CREATE SCHEDULE FOR BACKUP.
+	env := sql.JobSchedulerEnv(p.ExecCfg())
+	s.fullJob.SetNextRun(env.Now())
+
+	return nil
+}
+
 type alterBackupScheduleEval struct {
 	// Schedule specific properties that get evaluated.
-	stmt                 *tree.AlterBackupSchedule
 	scheduleID           uint64
 	recurrence           func() (string, error)
 	fullBackupRecurrence func() (string, error)
 	fullBackupAlways     bool
 	isEnterpriseUser     bool
+	label                func() (string, error)
+	into                 func() ([]string, error)
+	backupOptions        tree.BackupOptions
+	scheduleOptions      func() (map[string]string, error)
 }
 
 // makeScheduleBackupEval prepares helper scheduledBackupEval struct to assist in evaluation
@@ -314,9 +537,8 @@ func makeAlterBackupScheduleEval(
 	ctx context.Context, p sql.PlanHookState, alterStmt *tree.AlterBackupSchedule,
 ) (*alterBackupScheduleEval, error) {
 	eval := &alterBackupScheduleEval{
-		stmt: alterStmt,
+		scheduleID: alterStmt.ScheduleID,
 	}
-	eval.scheduleID = alterStmt.ScheduleID
 	var err error
 	observed := make(map[string]interface{})
 	empty := struct{}{}
@@ -327,6 +549,7 @@ func makeAlterBackupScheduleEval(
 		observed[key] = empty
 		return nil
 	}
+	scheduleOptions := make([]tree.KVOption, 0)
 	for _, cmd := range alterStmt.Cmds {
 		switch typedCmd := cmd.(type) {
 		case *tree.AlterBackupScheduleSetFullBackup:
@@ -343,12 +566,41 @@ func makeAlterBackupScheduleEval(
 				return nil, err
 			}
 			eval.recurrence, err = p.TypeAsString(ctx, typedCmd.Recurrence, alterBackupScheduleOp)
+		case *tree.AlterBackupScheduleSetLabel:
+			if err := observe("SET LABEL"); err != nil {
+				return nil, err
+			}
+			eval.label, err = p.TypeAsString(ctx, typedCmd.Label, alterBackupScheduleOp)
+		case *tree.AlterBackupScheduleSetInto:
+			if err := observe("SET INTO"); err != nil {
+				return nil, err
+			}
+			eval.into, err = p.TypeAsStringArray(ctx, tree.Exprs(typedCmd.Into), alterBackupScheduleOp)
+		case *tree.AlterBackupScheduleSetWith:
+			if typedCmd.With.Detached != nil {
+				err = errors.Newf("DETACHED is required for scheduled backups and cannot be altered")
+			} else {
+				err = eval.backupOptions.CombineWith(typedCmd.With)
+			}
+		case *tree.AlterBackupScheduleSetScheduleOption:
+			scheduleOptions = append(scheduleOptions, typedCmd.Option)
 		default:
 			return nil, errors.Newf("not yet implemented: %v", tree.AsString(typedCmd))
 		}
 		if err != nil {
 			return nil, err
 		}
+		// TODO(benbardin): Block duplicate schedule options if possible.
+		eval.scheduleOptions, err = p.TypeAsStringOpts(ctx, scheduleOptions, map[string]sql.KVStringOptValidate{
+			// optFirstRun and optIgnoreExistingBackups excluded here, as they don't
+			// make much sense in the context of ALTER.
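+			// Values are accepted here as arbitrary strings and validated later in
+			// processScheduleOptions (parseOnError, parseWaitBehavior, strconv.ParseBool).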
+			optOnExecFailure:           sql.KVStringOptAny,
+			optOnPreviousRunning:       sql.KVStringOptAny,
+			optUpdatesLastBackupMetric: sql.KVStringOptAny,
+		})
+		if err != nil {
+			return nil, err
+		}
 	}
 
 	enterpriseCheckErr := utilccl.CheckEnterpriseEnabled(
diff --git a/pkg/ccl/backupccl/create_scheduled_backup.go b/pkg/ccl/backupccl/create_scheduled_backup.go
index e3e878ffc4e1..a6b42e144ad2 100644
--- a/pkg/ccl/backupccl/create_scheduled_backup.go
+++ b/pkg/ccl/backupccl/create_scheduled_backup.go
@@ -188,6 +188,17 @@ type scheduleRecurrence struct {
 // A sentinel value indicating the schedule never recurs.
 var neverRecurs *scheduleRecurrence
 
+func frequencyFromCron(now time.Time, cronStr string) (time.Duration, error) {
+	expr, err := cron.ParseStandard(cronStr)
+	if err != nil {
+		return 0, errors.Newf(
+			`error parsing schedule expression: %q; it must be a valid cron expression`,
+			cronStr)
+	}
+	nextRun := expr.Next(now)
+	return expr.Next(nextRun).Sub(nextRun), nil
+}
+
 func computeScheduleRecurrence(
 	now time.Time, evalFn func() (string, error),
 ) (*scheduleRecurrence, error) {
@@ -198,14 +209,12 @@ func computeScheduleRecurrence(
 	if err != nil {
 		return nil, err
 	}
-	expr, err := cron.ParseStandard(cronStr)
+
+	frequency, err := frequencyFromCron(now, cronStr)
 	if err != nil {
-		return nil, errors.Newf(
-			`error parsing schedule expression: %q; it must be a valid cron expression`,
-			cronStr)
+		return nil, err
 	}
-	nextRun := expr.Next(now)
-	frequency := expr.Next(nextRun).Sub(nextRun)
+
 	return &scheduleRecurrence{cronStr, frequency}, nil
 }
 
@@ -271,13 +280,16 @@ func doCreateBackupSchedules(
 	if err != nil {
 		return err
 	}
-	origFullRecurrence, err := computeScheduleRecurrence(env.Now(), eval.fullBackupRecurrence)
+	fullRecurrence, err := computeScheduleRecurrence(env.Now(), eval.fullBackupRecurrence)
 	if err != nil {
 		return err
 	}
 
+	if fullRecurrence != nil && incRecurrence != nil && incRecurrence.frequency > fullRecurrence.frequency {
+		return errors.Newf("incremental backups must occur more often than full backups")
+	}
+
 	fullRecurrencePicked := false
-	fullRecurrence := origFullRecurrence
 	if incRecurrence != nil && fullRecurrence == nil {
 		// It's an enterprise user; let's see if we can pick a reasonable
 		// full backup recurrence based on requested incremental recurrence.
diff --git a/pkg/ccl/backupccl/testdata/backup-restore/alter-schedule/backup-options b/pkg/ccl/backupccl/testdata/backup-restore/alter-schedule/backup-options
new file mode 100644
index 000000000000..04f1cc3c7fe7
--- /dev/null
+++ b/pkg/ccl/backupccl/testdata/backup-restore/alter-schedule/backup-options
@@ -0,0 +1,96 @@
+new-server name=s1 allow-implicit-access
+----
+
+# Create test schedules.
+
+exec-sql
+create schedule datatest for backup into 'nodelocal://1/example-schedule' recurring '@daily' full backup '@weekly';
+----
+
+let $fullID $incID
+with schedules as (show schedules) select id from schedules where label='datatest' order by command->>'backup_type' asc;
+----
+
+query-sql
+with schedules as (show schedules) select id, command->'backup_statement' from schedules where label='datatest' order by command->>'backup_type' asc;
+----
+$fullID "BACKUP INTO 'nodelocal://1/example-schedule' WITH detached"
+$incID "BACKUP INTO LATEST IN 'nodelocal://1/example-schedule' WITH detached"
+
+# Can't use the same command twice.
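+# Each subcommand (e.g. SET RECURRING, SET FULL BACKUP) may appear at most once per statement.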
+
+exec-sql expect-error-ignore
+alter backup schedule $fullID set recurring '0 0 1 * *', set recurring '@weekly';
+----
+ignoring expected error
+
+exec-sql expect-error-ignore
+alter backup schedule $fullID set full backup '0 0 1 * *', set recurring '0 0 1 * *', set full backup '@weekly';
+----
+ignoring expected error
+
+# Set an option
+
+exec-sql
+alter backup schedule $incID set with revision_history = false;
+----
+
+query-sql
+with schedules as (show schedules) select id, command->'backup_statement' from schedules where label='datatest' order by command->>'backup_type' asc;
+----
+$fullID "BACKUP INTO 'nodelocal://1/example-schedule' WITH revision_history = false, detached"
+$incID "BACKUP INTO LATEST IN 'nodelocal://1/example-schedule' WITH revision_history = false, detached"
+
+# Change an option and set another.
+
+exec-sql
+alter backup schedule $incID set with revision_history = true, set with encryption_passphrase = 'abc';
+----
+
+query-sql
+with schedules as (show schedules) select id, command->'backup_statement' from schedules where label='datatest' order by command->>'backup_type' asc;
+----
+$fullID "BACKUP INTO 'nodelocal://1/example-schedule' WITH revision_history = true, encryption_passphrase = '*****', detached"
+$incID "BACKUP INTO LATEST IN 'nodelocal://1/example-schedule' WITH revision_history = true, encryption_passphrase = '*****', detached"
+
+# Add a list-option
+
+exec-sql
+alter backup schedule $incID set with kms = ('aws:///key1?region=r1', 'aws:///key2?region=r2'), set with incremental_location = 'inc';
+----
+
+query-sql
+with schedules as (show schedules) select id, command->'backup_statement' from schedules where label='datatest' order by command->>'backup_type' asc;
+----
+$fullID "BACKUP INTO 'nodelocal://1/example-schedule' WITH revision_history = true, encryption_passphrase = '*****', detached, kms = ('aws:///redacted?region=r1', 'aws:///redacted?region=r2'), incremental_location = 'inc'"
+$incID "BACKUP INTO LATEST IN 'nodelocal://1/example-schedule' WITH revision_history = true, encryption_passphrase = '*****', detached, kms = ('aws:///redacted?region=r1', 'aws:///redacted?region=r2'), incremental_location = 'inc'"
+
+# Set options to empty (unset).
+
+exec-sql
+alter backup schedule $incID set with kms = '', set with incremental_location = (''), set with encryption_passphrase = '';
+----
+
+query-sql
+with schedules as (show schedules) select id, command->'backup_statement' from schedules where label='datatest' order by command->>'backup_type' asc;
+----
+$fullID "BACKUP INTO 'nodelocal://1/example-schedule' WITH revision_history = true, detached"
+$incID "BACKUP INTO LATEST IN 'nodelocal://1/example-schedule' WITH revision_history = true, detached"
+
+# Setting DETACHED throws an error.
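+# Scheduled backups always run detached, so the option cannot be changed here.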
+
+exec-sql expect-error-ignore
+alter backup schedule $incID set with detached = true;
+----
+ignoring expected error
+
+exec-sql expect-error-ignore
+alter backup schedule $incID set with detached = false;
+----
+ignoring expected error
+
+query-sql
+with schedules as (show schedules) select id, command->'backup_statement' from schedules where label='datatest' order by command->>'backup_type' asc;
+----
+$fullID "BACKUP INTO 'nodelocal://1/example-schedule' WITH revision_history = true, detached"
+$incID "BACKUP INTO LATEST IN 'nodelocal://1/example-schedule' WITH revision_history = true, detached"
diff --git a/pkg/ccl/backupccl/testdata/backup-restore/alter-schedule b/pkg/ccl/backupccl/testdata/backup-restore/alter-schedule/recurrence
similarity index 97%
rename from pkg/ccl/backupccl/testdata/backup-restore/alter-schedule
rename to pkg/ccl/backupccl/testdata/backup-restore/alter-schedule/recurrence
index 7c1563f7de40..005618d720f1 100644
--- a/pkg/ccl/backupccl/testdata/backup-restore/alter-schedule
+++ b/pkg/ccl/backupccl/testdata/backup-restore/alter-schedule/recurrence
@@ -175,6 +175,13 @@ with schedules as (show schedules) select id, state, recurrence, owner, command
 $fullID 0 0 1 * * root {"backup_statement": "BACKUP INTO 'nodelocal://1/example-schedule' WITH detached", "chain_protected_timestamp_records": true, "dependent_schedule_id": $incID, "unpause_on_success": $incID}
 $incID Waiting for initial backup to complete @weekly root {"backup_statement": "BACKUP INTO LATEST IN 'nodelocal://1/example-schedule' WITH detached", "backup_type": 1, "chain_protected_timestamp_records": true, "dependent_schedule_id": $fullID}
 
+# Can't set the incremental schedule to be less frequent than the full schedule.
+
+exec-sql expect-error-ignore
+alter backup schedule $fullID set recurring '@weekly', set full backup '@daily';
+----
+ignoring expected error
+
 # Remove incremental backup and change full cadence in the same command.
 
 exec-sql
@@ -198,15 +205,3 @@ query-sql
 with schedules as (show schedules) select id, state, recurrence, owner, command from schedules where label='datatest' order by command->>'backup_type' asc;
 ----
 $fullID @daily root {"backup_statement": "BACKUP INTO 'nodelocal://1/example-schedule' WITH detached", "chain_protected_timestamp_records": true}
-
-# Can't use the same command twice.
-
-exec-sql expect-error-ignore
-alter backup schedule $incID set recurring '0 0 1 * *', set recurring '@weekly';
-----
-ignoring expected error
-
-exec-sql expect-error-ignore
-alter backup schedule $incID set full backup '0 0 1 * *', set recurring '0 0 1 * *', set full backup '@weekly';
-----
-ignoring expected error
diff --git a/pkg/ccl/backupccl/testdata/backup-restore/alter-schedule/schedule-options b/pkg/ccl/backupccl/testdata/backup-restore/alter-schedule/schedule-options
new file mode 100644
index 000000000000..d27981110ff8
--- /dev/null
+++ b/pkg/ccl/backupccl/testdata/backup-restore/alter-schedule/schedule-options
@@ -0,0 +1,81 @@
+new-server name=s1 allow-implicit-access
+----
+
+# Create test schedules.
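+# This creates a full/incremental schedule pair sharing the label 'datatest'.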
+
+exec-sql
+create schedule datatest for backup into 'nodelocal://1/example-schedule' recurring '@daily' full backup '@weekly';
+----
+
+let $fullID $incID
+with schedules as (show schedules) select id from schedules where label='datatest' order by command->>'backup_type' asc;
+----
+
+query-sql
+with schedules as (show schedules) select id, label from schedules where id in ($fullID, $incID) order by command->>'backup_type' asc;
+----
+$fullID datatest
+$incID datatest
+
+exec-sql
+alter backup schedule $fullID set label 'datatest2'
+----
+
+query-sql
+with schedules as (show schedules) select id, label from schedules where id in ($fullID, $incID) order by command->>'backup_type' asc;
+----
+$fullID datatest2
+$incID datatest2
+
+exec-sql
+alter backup schedule $fullID set into 'nodelocal://1/example-schedule-2'
+----
+
+query-sql
+with schedules as (show schedules) select id, command->'backup_statement' from schedules where id in ($fullID, $incID) order by command->>'backup_type' asc;
+----
+$fullID "BACKUP INTO 'nodelocal://1/example-schedule-2' WITH detached"
+$incID "BACKUP INTO LATEST IN 'nodelocal://1/example-schedule-2' WITH detached"
+
+# Hard to validate these, so settle for checking they execute without errors.
+
+exec-sql
+alter backup schedule $fullID set schedule option on_previous_running='skip', set schedule option on_execution_failure = 'pause';
+----
+
+exec-sql
+alter backup schedule $fullID set schedule option updates_cluster_last_backup_time_metric = '1';
+alter backup schedule $fullID set schedule option updates_cluster_last_backup_time_metric = '0';
+alter backup schedule $fullID set schedule option updates_cluster_last_backup_time_metric = 'TRUE';
+alter backup schedule $fullID set schedule option updates_cluster_last_backup_time_metric = 'False';
+alter backup schedule $fullID set schedule option updates_cluster_last_backup_time_metric = 't';
+----
+
+exec-sql expect-error-ignore
+alter backup schedule $fullID set schedule option updates_cluster_last_backup_time_metric = 'yeah for sure true';
+----
+ignoring expected error
+
+exec-sql
+create user testuser;
+grant admin to testuser;
+----
+
+# Create a schedule as testuser while it has admin, then revoke admin: setting the cluster-wide metric option should then fail for the non-admin user.
+exec-sql user=testuser
+create schedule datatest3 for backup into 'nodelocal://1/example-schedule' recurring '@daily' full backup '@weekly';
+----
+
+exec-sql
+revoke admin from testuser;
+----
+
+let $fullID $incID
+with schedules as (show schedules) select id from schedules where label='datatest3' order by command->>'backup_type' asc;
+----
+
+exec-sql user=testuser expect-error-ignore
+alter backup schedule $fullID set schedule option updates_cluster_last_backup_time_metric = '1';
+----
+ignoring expected error