From 2468ed7bd0e0788441d5fa82526b36cfce4e341b Mon Sep 17 00:00:00 2001 From: Chengxiong Ruan Date: Mon, 31 Jul 2023 11:10:19 -0400 Subject: [PATCH] sql: fix formatting of import, backup and create tenant Informs: #99185 This commit cherry-picks changes from #107723 and adds fixes for formatting `EXPORT` options. Release note: None --- .../backupccl/alter_backup_schedule_test.go | 4 +- pkg/ccl/backupccl/backup_test.go | 16 +- .../backupccl/create_scheduled_backup_test.go | 72 +- .../alter-schedule/backup-options | 37 +- .../alter-schedule/missing-schedule | 12 +- .../backup-restore/alter-schedule/recurrence | 48 +- .../alter-schedule/schedule-options | 4 +- .../backup-restore/schedule-privileges | 8 +- .../backup-restore/show-schedules-old | 2 +- pkg/ccl/changefeedccl/changefeed_test.go | 6 +- .../scheduled_changefeed_test.go | 10 +- .../show_changefeed_jobs_test.go | 2 +- .../testdata/logic_test/changefeed | 4 +- .../streamingccl/streamingest/testdata/simple | 2 +- .../telemetryccl/telemetry_logging_test.go | 6 +- .../import_csv_mark_redaction_test.go | 4 +- pkg/sql/importer/import_stmt_test.go | 50 +- pkg/sql/parser/testdata/backup_restore | 1031 +---------------- pkg/sql/parser/testdata/changefeed | 8 +- pkg/sql/parser/testdata/import_export | 80 +- pkg/sql/parser/testdata/prepared_stmts | 16 +- pkg/sql/parser/testdata/show | 8 + pkg/sql/sem/tree/backup.go | 26 +- pkg/sql/sem/tree/changefeed.go | 3 +- pkg/sql/sem/tree/create.go | 15 + pkg/sql/sem/tree/export.go | 3 +- pkg/sql/sem/tree/expr.go | 16 +- pkg/sql/sem/tree/import.go | 3 +- pkg/sql/sem/tree/show.go | 13 +- 29 files changed, 271 insertions(+), 1238 deletions(-) diff --git a/pkg/ccl/backupccl/alter_backup_schedule_test.go b/pkg/ccl/backupccl/alter_backup_schedule_test.go index 5621c5ac80dc..2656b807da44 100644 --- a/pkg/ccl/backupccl/alter_backup_schedule_test.go +++ b/pkg/ccl/backupccl/alter_backup_schedule_test.go @@ -133,8 +133,8 @@ INSERT INTO t1 values (1), (10), (100); require.Equal(t, []string{"PAUSED: Waiting for initial backup to complete", "ACTIVE"}, statuses) require.Equal(t, []string{"@daily", "@weekly"}, schedules) require.Equal(t, []string{ - "BACKUP TABLE mydb.public.t1 INTO LATEST IN 'nodelocal://1/backup/alter-schedule' WITH detached", - "BACKUP TABLE mydb.public.t1 INTO 'nodelocal://1/backup/alter-schedule' WITH detached", + "BACKUP TABLE mydb.public.t1 INTO LATEST IN 'nodelocal://1/backup/alter-schedule' WITH OPTIONS (detached)", + "BACKUP TABLE mydb.public.t1 INTO 'nodelocal://1/backup/alter-schedule' WITH OPTIONS (detached)", }, backupStmts) diff --git a/pkg/ccl/backupccl/backup_test.go b/pkg/ccl/backupccl/backup_test.go index 3c6c0fbedf7a..c0f483dd9e8c 100644 --- a/pkg/ccl/backupccl/backup_test.go +++ b/pkg/ccl/backupccl/backup_test.go @@ -808,7 +808,7 @@ func TestBackupAndRestoreJobDescription(t *testing.T) { sqlDB.Exec(t, "BACKUP TO ($1,$2,$3) INCREMENTAL FROM $4", append(incrementals, backups[0])...) sqlDB.Exec(t, "BACKUP INTO ($1, $2, $3)", collections...) sqlDB.Exec(t, "BACKUP INTO LATEST IN ($1, $2, $3)", collections...) - sqlDB.Exec(t, "BACKUP INTO LATEST IN ($1, $2, $3) WITH incremental_location = ($4, $5, $6)", + sqlDB.Exec(t, "BACKUP INTO LATEST IN ($1, $2, $3) WITH OPTIONS (incremental_location = ($4, $5, $6))", append(collections, incrementals...)...) 
sqlDB.ExpectErr(t, "the incremental_location option must contain the same number of locality", @@ -848,7 +848,7 @@ func TestBackupAndRestoreJobDescription(t *testing.T) { collections[1], collections[2])}, {fmt.Sprintf("BACKUP INTO '%s' IN ('%s', '%s', '%s')", full1, collections[0], collections[1], collections[2])}, - {fmt.Sprintf("BACKUP INTO '%s' IN ('%s', '%s', '%s') WITH incremental_location = ('%s', '%s', '%s')", + {fmt.Sprintf("BACKUP INTO '%s' IN ('%s', '%s', '%s') WITH OPTIONS (incremental_location = ('%s', '%s', '%s'))", full1, collections[0], collections[1], collections[2], incrementals[0], incrementals[1], incrementals[2])}, {fmt.Sprintf("BACKUP INTO '%s' IN ('%s', '%s', '%s') AS OF SYSTEM TIME '-1s'", asOf1, collections[0], @@ -904,7 +904,7 @@ func TestBackupAndRestoreJobDescription(t *testing.T) { {fmt.Sprintf("RESTORE DATABASE data FROM ('%s', '%s', '%s')", resolvedCollectionURIs[0], resolvedCollectionURIs[1], resolvedCollectionURIs[2])}, - {fmt.Sprintf("RESTORE DATABASE data FROM ('%s', '%s', '%s') WITH incremental_location = ('%s', '%s', '%s')", + {fmt.Sprintf("RESTORE DATABASE data FROM ('%s', '%s', '%s') WITH OPTIONS (incremental_location = ('%s', '%s', '%s'))", resolvedCollectionURIs[0], resolvedCollectionURIs[1], resolvedCollectionURIs[2], resolvedIncURIs[0], resolvedIncURIs[1], resolvedIncURIs[2])}, {fmt.Sprintf("RESTORE DATABASE data FROM ('%s', '%s', '%s')", @@ -1308,7 +1308,7 @@ func TestBackupRestoreSystemJobs(t *testing.T) { if err := jobutils.VerifySystemJob(t, sqlDB, 0, jobspb.TypeRestore, jobs.StatusSucceeded, jobs.Record{ Username: username.RootUserName(), Description: fmt.Sprintf( - `RESTORE TABLE bank FROM '%s', '%s' WITH into_db = 'restoredb'`, + `RESTORE TABLE bank FROM '%s', '%s' WITH OPTIONS (into_db = 'restoredb')`, sanitizedFullDir+"redacted", sanitizedIncDir+"redacted", ), DescriptorIDs: descpb.IDs{ @@ -1395,7 +1395,7 @@ func TestEncryptedBackupRestoreSystemJobs(t *testing.T) { jobs.Record{ Username: username.RootUserName(), Description: fmt.Sprintf( - `BACKUP DATABASE data TO '%s' WITH %s`, + `BACKUP DATABASE data TO '%s' WITH OPTIONS (%s)`, backupLoc1, sanitizedEncryptionOption), DescriptorIDs: descpb.IDs{ descpb.ID(backupDatabaseID), @@ -1414,7 +1414,7 @@ into_db='restoredb', %s)`, encryptionOption), backupLoc1) if err := jobutils.VerifySystemJob(t, sqlDB, 0, jobspb.TypeRestore, jobs.StatusSucceeded, jobs.Record{ Username: username.RootUserName(), Description: fmt.Sprintf( - `RESTORE TABLE data.bank FROM '%s' WITH %s, into_db = 'restoredb'`, + `RESTORE TABLE data.bank FROM '%s' WITH OPTIONS (%s, into_db = 'restoredb')`, backupLoc1, sanitizedEncryptionOption, ), DescriptorIDs: descpb.IDs{ @@ -5700,8 +5700,8 @@ func TestBackupRestoreShowJob(t *testing.T) { sqlDB.CheckQueryResults( t, "SELECT description FROM [SHOW JOBS] WHERE job_type != 'MIGRATION' AND description != 'updating privileges' ORDER BY description", [][]string{ - {"BACKUP DATABASE data TO 'nodelocal://1/foo' WITH revision_history = true"}, - {"RESTORE TABLE data.bank FROM 'nodelocal://1/foo' WITH into_db = 'data 2', skip_missing_foreign_keys"}, + {"BACKUP DATABASE data TO 'nodelocal://1/foo' WITH OPTIONS (revision_history = true)"}, + {"RESTORE TABLE data.bank FROM 'nodelocal://1/foo' WITH OPTIONS (into_db = 'data 2', skip_missing_foreign_keys)"}, }, ) } diff --git a/pkg/ccl/backupccl/create_scheduled_backup_test.go b/pkg/ccl/backupccl/create_scheduled_backup_test.go index 06c83f85c527..9b78cf679b9b 100644 --- a/pkg/ccl/backupccl/create_scheduled_backup_test.go +++ 
b/pkg/ccl/backupccl/create_scheduled_backup_test.go @@ -251,57 +251,57 @@ CREATE TABLE other_db.t1(a int); { name: "fully-qualified-table-name", query: "CREATE SCHEDULE FOR BACKUP mydb.public.t1 INTO $1 RECURRING '@hourly'", - expectedBackupStmt: "BACKUP TABLE mydb.public.t1 INTO '%s' WITH detached", + expectedBackupStmt: "BACKUP TABLE mydb.public.t1 INTO '%s' WITH OPTIONS (detached)", }, { name: "schema-qualified-table-name", query: "CREATE SCHEDULE FOR BACKUP public.t1 INTO $1 RECURRING '@hourly'", - expectedBackupStmt: "BACKUP TABLE mydb.public.t1 INTO '%s' WITH detached", + expectedBackupStmt: "BACKUP TABLE mydb.public.t1 INTO '%s' WITH OPTIONS (detached)", }, { name: "uds-qualified-table-name", query: "CREATE SCHEDULE FOR BACKUP myschema.mytbl INTO $1 RECURRING '@hourly'", - expectedBackupStmt: "BACKUP TABLE mydb.myschema.mytbl INTO '%s' WITH detached", + expectedBackupStmt: "BACKUP TABLE mydb.myschema.mytbl INTO '%s' WITH OPTIONS (detached)", }, { name: "db-qualified-table-name", query: "CREATE SCHEDULE FOR BACKUP mydb.t1 INTO $1 RECURRING '@hourly'", - expectedBackupStmt: "BACKUP TABLE mydb.public.t1 INTO '%s' WITH detached", + expectedBackupStmt: "BACKUP TABLE mydb.public.t1 INTO '%s' WITH OPTIONS (detached)", }, { name: "unqualified-table-name", query: "CREATE SCHEDULE FOR BACKUP t1 INTO $1 RECURRING '@hourly'", - expectedBackupStmt: "BACKUP TABLE mydb.public.t1 INTO '%s' WITH detached", + expectedBackupStmt: "BACKUP TABLE mydb.public.t1 INTO '%s' WITH OPTIONS (detached)", }, { name: "unqualified-table-name-with-symbols", query: `CREATE SCHEDULE FOR BACKUP "my.tbl" INTO $1 RECURRING '@hourly'`, - expectedBackupStmt: `BACKUP TABLE mydb.public."my.tbl" INTO '%s' WITH detached`, + expectedBackupStmt: `BACKUP TABLE mydb.public."my.tbl" INTO '%s' WITH OPTIONS (detached)`, }, { name: "table-names-from-different-db", query: "CREATE SCHEDULE FOR BACKUP t1, other_db.t1 INTO $1 RECURRING '@hourly'", - expectedBackupStmt: "BACKUP TABLE mydb.public.t1, other_db.public.t1 INTO '%s' WITH detached", + expectedBackupStmt: "BACKUP TABLE mydb.public.t1, other_db.public.t1 INTO '%s' WITH OPTIONS (detached)", }, { name: "unqualified-all-tables-selectors", query: "CREATE SCHEDULE FOR BACKUP * INTO $1 RECURRING '@hourly'", - expectedBackupStmt: "BACKUP TABLE mydb.public.* INTO '%s' WITH detached", + expectedBackupStmt: "BACKUP TABLE mydb.public.* INTO '%s' WITH OPTIONS (detached)", }, { name: "all-tables-selectors-with-user-defined-schema", query: "CREATE SCHEDULE FOR BACKUP myschema.* INTO $1 RECURRING '@hourly'", - expectedBackupStmt: "BACKUP TABLE mydb.myschema.* INTO '%s' WITH detached", + expectedBackupStmt: "BACKUP TABLE mydb.myschema.* INTO '%s' WITH OPTIONS (detached)", }, { name: "partially-qualified-all-tables-selectors-with-different-db", query: "CREATE SCHEDULE FOR BACKUP other_db.* INTO $1 RECURRING '@hourly'", - expectedBackupStmt: "BACKUP TABLE other_db.public.* INTO '%s' WITH detached", + expectedBackupStmt: "BACKUP TABLE other_db.public.* INTO '%s' WITH OPTIONS (detached)", }, { name: "fully-qualified-all-tables-selectors-with-multiple-dbs", query: "CREATE SCHEDULE FOR BACKUP *, other_db.* INTO $1 RECURRING '@hourly'", - expectedBackupStmt: "BACKUP TABLE mydb.public.*, other_db.public.* INTO '%s' WITH detached", + expectedBackupStmt: "BACKUP TABLE mydb.public.*, other_db.public.* INTO '%s' WITH OPTIONS (detached)", }, } @@ -359,8 +359,8 @@ func TestSerializesScheduledBackupExecutionArgs(t *testing.T) { expectedSchedules: []expectedSchedule{ { nameRe: "BACKUP .+", - backupStmt: "BACKUP 
INTO 'nodelocal://1/backup?AWS_SECRET_ACCESS_KEY=neverappears' WITH detached", - shownStmt: "BACKUP INTO 'nodelocal://1/backup?AWS_SECRET_ACCESS_KEY=redacted' WITH detached", + backupStmt: "BACKUP INTO 'nodelocal://1/backup?AWS_SECRET_ACCESS_KEY=neverappears' WITH OPTIONS (detached)", + shownStmt: "BACKUP INTO 'nodelocal://1/backup?AWS_SECRET_ACCESS_KEY=redacted' WITH OPTIONS (detached)", period: time.Hour, }, }, @@ -372,7 +372,7 @@ func TestSerializesScheduledBackupExecutionArgs(t *testing.T) { expectedSchedules: []expectedSchedule{ { nameRe: "my-backup", - backupStmt: "BACKUP INTO 'nodelocal://1/backup' WITH detached", + backupStmt: "BACKUP INTO 'nodelocal://1/backup' WITH OPTIONS (detached)", period: time.Hour, }, }, @@ -384,7 +384,7 @@ func TestSerializesScheduledBackupExecutionArgs(t *testing.T) { expectedSchedules: []expectedSchedule{ { nameRe: "BACKUP .+", - backupStmt: "BACKUP INTO 'nodelocal://1/backup' WITH detached", + backupStmt: "BACKUP INTO 'nodelocal://1/backup' WITH OPTIONS (detached)", period: time.Hour, }, }, @@ -396,14 +396,14 @@ func TestSerializesScheduledBackupExecutionArgs(t *testing.T) { expectedSchedules: []expectedSchedule{ { nameRe: "BACKUP .*", - backupStmt: "BACKUP INTO LATEST IN 'nodelocal://1/backup' WITH detached", + backupStmt: "BACKUP INTO LATEST IN 'nodelocal://1/backup' WITH OPTIONS (detached)", period: time.Hour, paused: true, chainProtectedTimestampRecord: true, }, { nameRe: "BACKUP .+", - backupStmt: "BACKUP INTO 'nodelocal://1/backup' WITH detached", + backupStmt: "BACKUP INTO 'nodelocal://1/backup' WITH OPTIONS (detached)", period: 24 * time.Hour, runsNow: true, chainProtectedTimestampRecord: true, @@ -417,14 +417,14 @@ func TestSerializesScheduledBackupExecutionArgs(t *testing.T) { expectedSchedules: []expectedSchedule{ { nameRe: "my-backup", - backupStmt: "BACKUP INTO LATEST IN 'nodelocal://1/backup' WITH detached", + backupStmt: "BACKUP INTO LATEST IN 'nodelocal://1/backup' WITH OPTIONS (detached)", period: time.Hour, paused: true, chainProtectedTimestampRecord: true, }, { nameRe: "my-backup", - backupStmt: "BACKUP INTO 'nodelocal://1/backup' WITH detached", + backupStmt: "BACKUP INTO 'nodelocal://1/backup' WITH OPTIONS (detached)", period: 24 * time.Hour, runsNow: true, chainProtectedTimestampRecord: true, @@ -438,7 +438,7 @@ func TestSerializesScheduledBackupExecutionArgs(t *testing.T) { expectedSchedules: []expectedSchedule{ { nameRe: "BACKUP .+", - backupStmt: "BACKUP INTO 'nodelocal://1/backup' WITH revision_history = true, detached", + backupStmt: "BACKUP INTO 'nodelocal://1/backup' WITH OPTIONS (revision_history = true, detached)", period: time.Hour, }, }, @@ -450,14 +450,14 @@ func TestSerializesScheduledBackupExecutionArgs(t *testing.T) { expectedSchedules: []expectedSchedule{ { nameRe: "BACKUP .*", - backupStmt: "BACKUP INTO LATEST IN 'nodelocal://1/backup' WITH detached, incremental_location = 'nodelocal://1/incremental'", + backupStmt: "BACKUP INTO LATEST IN 'nodelocal://1/backup' WITH OPTIONS (detached, incremental_location = 'nodelocal://1/incremental')", period: time.Hour, paused: true, chainProtectedTimestampRecord: true, }, { nameRe: "BACKUP .+", - backupStmt: "BACKUP INTO 'nodelocal://1/backup' WITH detached", + backupStmt: "BACKUP INTO 'nodelocal://1/backup' WITH OPTIONS (detached)", period: 24 * time.Hour, runsNow: true, chainProtectedTimestampRecord: true, @@ -475,7 +475,7 @@ func TestSerializesScheduledBackupExecutionArgs(t *testing.T) { nameRe: "BACKUP .*", backupStmt: "BACKUP TABLE system.public.jobs, " + 
"system.public.scheduled_jobs INTO LATEST IN 'nodelocal://1/backup' WITH" + - " revision_history = true, detached", + " OPTIONS (revision_history = true, detached)", period: time.Hour, paused: true, chainProtectedTimestampRecord: true, @@ -483,7 +483,7 @@ func TestSerializesScheduledBackupExecutionArgs(t *testing.T) { { nameRe: "BACKUP .+", backupStmt: "BACKUP TABLE system.public.jobs, " + - "system.public.scheduled_jobs INTO 'nodelocal://1/backup' WITH revision_history = true, detached", + "system.public.scheduled_jobs INTO 'nodelocal://1/backup' WITH OPTIONS (revision_history = true, detached)", period: 24 * time.Hour, runsNow: true, chainProtectedTimestampRecord: true, @@ -499,14 +499,14 @@ func TestSerializesScheduledBackupExecutionArgs(t *testing.T) { expectedSchedules: []expectedSchedule{ { nameRe: "BACKUP .*", - backupStmt: "BACKUP DATABASE system INTO LATEST IN 'nodelocal://1/backup' WITH revision_history = true, detached", + backupStmt: "BACKUP DATABASE system INTO LATEST IN 'nodelocal://1/backup' WITH OPTIONS (revision_history = true, detached)", period: time.Hour, paused: true, chainProtectedTimestampRecord: true, }, { nameRe: "BACKUP .+", - backupStmt: "BACKUP DATABASE system INTO 'nodelocal://1/backup' WITH revision_history = true, detached", + backupStmt: "BACKUP DATABASE system INTO 'nodelocal://1/backup' WITH OPTIONS (revision_history = true, detached)", period: 24 * time.Hour, runsNow: true, chainProtectedTimestampRecord: true, @@ -522,14 +522,14 @@ func TestSerializesScheduledBackupExecutionArgs(t *testing.T) { expectedSchedules: []expectedSchedule{ { nameRe: "BACKUP .*", - backupStmt: "BACKUP TABLE system.public.* INTO LATEST IN 'nodelocal://1/backup' WITH revision_history = true, detached", + backupStmt: "BACKUP TABLE system.public.* INTO LATEST IN 'nodelocal://1/backup' WITH OPTIONS (revision_history = true, detached)", period: time.Hour, paused: true, chainProtectedTimestampRecord: true, }, { nameRe: "BACKUP .+", - backupStmt: "BACKUP TABLE system.public.* INTO 'nodelocal://1/backup' WITH revision_history = true, detached", + backupStmt: "BACKUP TABLE system.public.* INTO 'nodelocal://1/backup' WITH OPTIONS (revision_history = true, detached)", period: 24 * time.Hour, runsNow: true, chainProtectedTimestampRecord: true, @@ -562,14 +562,14 @@ func TestSerializesScheduledBackupExecutionArgs(t *testing.T) { expectedSchedules: []expectedSchedule{ { nameRe: "my_backup_name", - backupStmt: "BACKUP INTO LATEST IN 'nodelocal://1/backup' WITH revision_history = true, detached", + backupStmt: "BACKUP INTO LATEST IN 'nodelocal://1/backup' WITH OPTIONS (revision_history = true, detached)", period: time.Hour, paused: true, chainProtectedTimestampRecord: true, }, { nameRe: "my_backup_name", - backupStmt: "BACKUP INTO 'nodelocal://1/backup' WITH revision_history = true, detached", + backupStmt: "BACKUP INTO 'nodelocal://1/backup' WITH OPTIONS (revision_history = true, detached)", period: 24 * time.Hour, runsNow: true, chainProtectedTimestampRecord: true, @@ -584,14 +584,14 @@ func TestSerializesScheduledBackupExecutionArgs(t *testing.T) { expectedSchedules: []expectedSchedule{ { nameRe: "my_backup_name", - backupStmt: "BACKUP INTO LATEST IN 'nodelocal://1/backup' WITH revision_history = true, detached", + backupStmt: "BACKUP INTO LATEST IN 'nodelocal://1/backup' WITH OPTIONS (revision_history = true, detached)", period: time.Hour, paused: true, chainProtectedTimestampRecord: true, }, { nameRe: "my_backup_name", - backupStmt: "BACKUP INTO 'nodelocal://1/backup' WITH revision_history = 
true, detached", + backupStmt: "BACKUP INTO 'nodelocal://1/backup' WITH OPTIONS (revision_history = true, detached)", period: 24 * time.Hour, runsNow: true, chainProtectedTimestampRecord: true, @@ -609,10 +609,10 @@ func TestSerializesScheduledBackupExecutionArgs(t *testing.T) { nameRe: "BACKUP .*", backupStmt: "BACKUP TABLE system.public.jobs, " + "system.public.scheduled_jobs INTO 'nodelocal://1/backup' WITH" + - " revision_history = true, encryption_passphrase = 'secret', detached", + " OPTIONS (revision_history = true, encryption_passphrase = 'secret', detached)", shownStmt: "BACKUP TABLE system.public.jobs, " + "system.public.scheduled_jobs INTO 'nodelocal://1/backup' WITH" + - " revision_history = true, encryption_passphrase = '*****', detached", + " OPTIONS (revision_history = true, encryption_passphrase = '*****', detached)", period: 7 * 24 * time.Hour, }, }, @@ -634,7 +634,7 @@ func TestSerializesScheduledBackupExecutionArgs(t *testing.T) { nameRe: "BACKUP .+", backupStmt: "BACKUP DATABASE system INTO " + "('nodelocal://1/backup?COCKROACH_LOCALITY=x%3Dy', 'nodelocal://1/backup2?COCKROACH_LOCALITY=default') " + - "WITH revision_history = true, detached", + "WITH OPTIONS (revision_history = true, detached)", period: 24 * time.Hour, }, }, @@ -655,7 +655,7 @@ func TestSerializesScheduledBackupExecutionArgs(t *testing.T) { { nameRe: "BACKUP .+", backupStmt: "BACKUP DATABASE system INTO 'nodelocal://1/backup' " + - "WITH revision_history = true, detached, execution locality = 'region=of-france'", + "WITH OPTIONS (revision_history = true, detached, execution locality = 'region=of-france')", period: 24 * time.Hour, }, }, diff --git a/pkg/ccl/backupccl/testdata/backup-restore/alter-schedule/backup-options b/pkg/ccl/backupccl/testdata/backup-restore/alter-schedule/backup-options index f61620598af2..45da0fd9833b 100644 --- a/pkg/ccl/backupccl/testdata/backup-restore/alter-schedule/backup-options +++ b/pkg/ccl/backupccl/testdata/backup-restore/alter-schedule/backup-options @@ -14,8 +14,8 @@ with schedules as (show schedules for backup) select id from schedules where lab query-sql with schedules as (show schedules for backup) select command from schedules where label='datatest' order by backup_type asc; ---- -BACKUP INTO 'nodelocal://1/example-schedule' WITH detached -BACKUP INTO LATEST IN 'nodelocal://1/example-schedule' WITH detached +BACKUP INTO 'nodelocal://1/example-schedule' WITH OPTIONS (detached) +BACKUP INTO LATEST IN 'nodelocal://1/example-schedule' WITH OPTIONS (detached) # Can't use the same command twice. @@ -42,8 +42,8 @@ alter backup schedule $incID set with execution locality = 'region=us-east-1' query-sql with schedules as (show schedules for backup) select command from schedules where label='datatest' order by backup_type asc; ---- -BACKUP INTO 'nodelocal://1/example-schedule' WITH revision_history = false, detached, execution locality = 'region=us-east-1' -BACKUP INTO LATEST IN 'nodelocal://1/example-schedule' WITH revision_history = false, detached, execution locality = 'region=us-east-1' +BACKUP INTO 'nodelocal://1/example-schedule' WITH OPTIONS (revision_history = false, detached, execution locality = 'region=us-east-1') +BACKUP INTO LATEST IN 'nodelocal://1/example-schedule' WITH OPTIONS (revision_history = false, detached, execution locality = 'region=us-east-1') # Change an option and set another. 
@@ -54,8 +54,8 @@ alter backup schedule $incID set with revision_history = true, set with executio query-sql with schedules as (show schedules for backup) select command from schedules where label='datatest' order by backup_type asc; ---- -BACKUP INTO 'nodelocal://1/example-schedule' WITH revision_history = true, encryption_passphrase = '*****', detached -BACKUP INTO LATEST IN 'nodelocal://1/example-schedule' WITH revision_history = true, encryption_passphrase = '*****', detached +BACKUP INTO 'nodelocal://1/example-schedule' WITH OPTIONS (revision_history = true, encryption_passphrase = '*****', detached) +BACKUP INTO LATEST IN 'nodelocal://1/example-schedule' WITH OPTIONS (revision_history = true, encryption_passphrase = '*****', detached) # Add an incompatible option @@ -74,8 +74,8 @@ alter backup schedule $incID set with kms = ('aws:///key1?region=r1', 'aws:///ke query-sql with schedules as (show schedules for backup) select command from schedules where label='datatest' order by backup_type asc; ---- -BACKUP INTO 'nodelocal://1/example-schedule' WITH revision_history = true, detached, kms = ('aws:///redacted?region=r1', 'aws:///redacted?region=r2'), incremental_location = 'inc' -BACKUP INTO LATEST IN 'nodelocal://1/example-schedule' WITH revision_history = true, detached, kms = ('aws:///redacted?region=r1', 'aws:///redacted?region=r2'), incremental_location = 'inc' +BACKUP INTO 'nodelocal://1/example-schedule' WITH OPTIONS (revision_history = true, detached, kms = ('aws:///redacted?region=r1', 'aws:///redacted?region=r2'), incremental_location = 'inc') +BACKUP INTO LATEST IN 'nodelocal://1/example-schedule' WITH OPTIONS (revision_history = true, detached, kms = ('aws:///redacted?region=r1', 'aws:///redacted?region=r2'), incremental_location = 'inc') # Set options to empty (unset). @@ -86,8 +86,8 @@ alter backup schedule $incID set with kms = '', set with incremental_location = query-sql with schedules as (show schedules for backup) select command from schedules where label='datatest' order by backup_type asc; ---- -BACKUP INTO 'nodelocal://1/example-schedule' WITH revision_history = true, detached -BACKUP INTO LATEST IN 'nodelocal://1/example-schedule' WITH revision_history = true, detached +BACKUP INTO 'nodelocal://1/example-schedule' WITH OPTIONS (revision_history = true, detached) +BACKUP INTO LATEST IN 'nodelocal://1/example-schedule' WITH OPTIONS (revision_history = true, detached) # Setting DETACHED throws an error. 
@@ -104,8 +104,8 @@ regex matches error query-sql with schedules as (show schedules for backup) select command from schedules where label='datatest' order by backup_type asc; ---- -BACKUP INTO 'nodelocal://1/example-schedule' WITH revision_history = true, detached -BACKUP INTO LATEST IN 'nodelocal://1/example-schedule' WITH revision_history = true, detached +BACKUP INTO 'nodelocal://1/example-schedule' WITH OPTIONS (revision_history = true, detached) +BACKUP INTO LATEST IN 'nodelocal://1/example-schedule' WITH OPTIONS (revision_history = true, detached) exec-sql alter backup schedule $incID set with include_all_virtual_clusters = true @@ -114,9 +114,8 @@ alter backup schedule $incID set with include_all_virtual_clusters = true query-sql with schedules as (show schedules for backup) select command from schedules where label='datatest' order by backup_type asc; ---- -BACKUP INTO 'nodelocal://1/example-schedule' WITH revision_history = true, detached, include_all_virtual_clusters = true -BACKUP INTO LATEST IN 'nodelocal://1/example-schedule' WITH revision_history = true, detached, include_all_virtual_clusters = true - +BACKUP INTO 'nodelocal://1/example-schedule' WITH OPTIONS (revision_history = true, detached, include_all_virtual_clusters = true) +BACKUP INTO LATEST IN 'nodelocal://1/example-schedule' WITH OPTIONS (revision_history = true, detached, include_all_virtual_clusters = true) exec-sql create schedule 'with-secondary' for backup into 'nodelocal://1/example-schedule-with-secondary' WITH include_all_virtual_clusters recurring '@daily' full backup '@weekly'; @@ -129,8 +128,8 @@ with schedules as (show schedules for backup) select id from schedules where lab query-sql with schedules as (show schedules for backup) select command from schedules where label='with-secondary' order by backup_type asc; ---- -BACKUP INTO 'nodelocal://1/example-schedule-with-secondary' WITH detached, include_all_virtual_clusters = true -BACKUP INTO LATEST IN 'nodelocal://1/example-schedule-with-secondary' WITH detached, include_all_virtual_clusters = true +BACKUP INTO 'nodelocal://1/example-schedule-with-secondary' WITH OPTIONS (detached, include_all_virtual_clusters = true) +BACKUP INTO LATEST IN 'nodelocal://1/example-schedule-with-secondary' WITH OPTIONS (detached, include_all_virtual_clusters = true) exec-sql alter backup schedule $withSecondaryIncID set with include_all_virtual_clusters = false @@ -139,5 +138,5 @@ alter backup schedule $withSecondaryIncID set with include_all_virtual_clusters query-sql with schedules as (show schedules for backup) select command from schedules where label='with-secondary' order by backup_type asc; ---- -BACKUP INTO 'nodelocal://1/example-schedule-with-secondary' WITH detached, include_all_virtual_clusters = false -BACKUP INTO LATEST IN 'nodelocal://1/example-schedule-with-secondary' WITH detached, include_all_virtual_clusters = false +BACKUP INTO 'nodelocal://1/example-schedule-with-secondary' WITH OPTIONS (detached, include_all_virtual_clusters = false) +BACKUP INTO LATEST IN 'nodelocal://1/example-schedule-with-secondary' WITH OPTIONS (detached, include_all_virtual_clusters = false) diff --git a/pkg/ccl/backupccl/testdata/backup-restore/alter-schedule/missing-schedule b/pkg/ccl/backupccl/testdata/backup-restore/alter-schedule/missing-schedule index 3f388b9e0b8b..2aa884a3e306 100644 --- a/pkg/ccl/backupccl/testdata/backup-restore/alter-schedule/missing-schedule +++ b/pkg/ccl/backupccl/testdata/backup-restore/alter-schedule/missing-schedule @@ -14,8 +14,8 @@ with schedules 
as (show schedules) select id from schedules where label='datates query-sql with schedules as (show schedules) select id, state, recurrence, owner, command from schedules where label='datatest' order by command->>'backup_type' asc; ---- -$fullID @weekly root {"backup_statement": "BACKUP INTO 'nodelocal://1/example-schedule' WITH detached", "chain_protected_timestamp_records": true, "dependent_schedule_id": $incID, "unpause_on_success": $incID} -$incID Waiting for initial backup to complete @daily root {"backup_statement": "BACKUP INTO LATEST IN 'nodelocal://1/example-schedule' WITH detached", "backup_type": 1, "chain_protected_timestamp_records": true, "dependent_schedule_id": $fullID} +$fullID @weekly root {"backup_statement": "BACKUP INTO 'nodelocal://1/example-schedule' WITH OPTIONS (detached)", "chain_protected_timestamp_records": true, "dependent_schedule_id": $incID, "unpause_on_success": $incID} +$incID Waiting for initial backup to complete @daily root {"backup_statement": "BACKUP INTO LATEST IN 'nodelocal://1/example-schedule' WITH OPTIONS (detached)", "backup_type": 1, "chain_protected_timestamp_records": true, "dependent_schedule_id": $fullID} exec-sql drop schedule $fullID @@ -34,8 +34,8 @@ with schedules as (show schedules) select id from schedules where label='datates query-sql with schedules as (show schedules) select id, state, recurrence, owner, command from schedules where label='datatest' order by command->>'backup_type' asc; ---- -$fullID @weekly root {"backup_statement": "BACKUP INTO 'nodelocal://1/example-schedule' WITH detached", "chain_protected_timestamp_records": true, "dependent_schedule_id": $incID, "unpause_on_success": $incID} -$incID Waiting for initial backup to complete @daily root {"backup_statement": "BACKUP INTO LATEST IN 'nodelocal://1/example-schedule' WITH detached", "backup_type": 1, "chain_protected_timestamp_records": true, "dependent_schedule_id": $fullID} +$fullID @weekly root {"backup_statement": "BACKUP INTO 'nodelocal://1/example-schedule' WITH OPTIONS (detached)", "chain_protected_timestamp_records": true, "dependent_schedule_id": $incID, "unpause_on_success": $incID} +$incID Waiting for initial backup to complete @daily root {"backup_statement": "BACKUP INTO LATEST IN 'nodelocal://1/example-schedule' WITH OPTIONS (detached)", "backup_type": 1, "chain_protected_timestamp_records": true, "dependent_schedule_id": $fullID} exec-sql drop schedule $incID @@ -46,7 +46,7 @@ drop schedule $incID query-sql with schedules as (show schedules) select id, state, recurrence, owner, command from schedules where label='datatest' order by command->>'backup_type' asc; ---- -$fullID @weekly root {"backup_statement": "BACKUP INTO 'nodelocal://1/example-schedule' WITH detached", "chain_protected_timestamp_records": true} +$fullID @weekly root {"backup_statement": "BACKUP INTO 'nodelocal://1/example-schedule' WITH OPTIONS (detached)", "chain_protected_timestamp_records": true} exec-sql alter backup schedule $fullID set recurring '@daily'; @@ -55,4 +55,4 @@ alter backup schedule $fullID set recurring '@daily'; query-sql with schedules as (show schedules) select id, state, recurrence, owner, command from schedules where label='datatest' order by command->>'backup_type' asc; ---- -$fullID @daily root {"backup_statement": "BACKUP INTO 'nodelocal://1/example-schedule' WITH detached", "chain_protected_timestamp_records": true} +$fullID @daily root {"backup_statement": "BACKUP INTO 'nodelocal://1/example-schedule' WITH OPTIONS (detached)", 
"chain_protected_timestamp_records": true} diff --git a/pkg/ccl/backupccl/testdata/backup-restore/alter-schedule/recurrence b/pkg/ccl/backupccl/testdata/backup-restore/alter-schedule/recurrence index eac1f9529bdf..04f5f841aa82 100644 --- a/pkg/ccl/backupccl/testdata/backup-restore/alter-schedule/recurrence +++ b/pkg/ccl/backupccl/testdata/backup-restore/alter-schedule/recurrence @@ -14,8 +14,8 @@ with schedules as (show schedules) select id from schedules where label='datates query-sql with schedules as (show schedules) select id, state, recurrence, owner, command from schedules where label='datatest' order by command->>'backup_type' asc; ---- -$fullID @weekly root {"backup_statement": "BACKUP INTO 'nodelocal://1/example-schedule' WITH detached", "chain_protected_timestamp_records": true, "dependent_schedule_id": $incID, "unpause_on_success": $incID} -$incID Waiting for initial backup to complete @daily root {"backup_statement": "BACKUP INTO LATEST IN 'nodelocal://1/example-schedule' WITH detached", "backup_type": 1, "chain_protected_timestamp_records": true, "dependent_schedule_id": $fullID} +$fullID @weekly root {"backup_statement": "BACKUP INTO 'nodelocal://1/example-schedule' WITH OPTIONS (detached)", "chain_protected_timestamp_records": true, "dependent_schedule_id": $incID, "unpause_on_success": $incID} +$incID Waiting for initial backup to complete @daily root {"backup_statement": "BACKUP INTO LATEST IN 'nodelocal://1/example-schedule' WITH OPTIONS (detached)", "backup_type": 1, "chain_protected_timestamp_records": true, "dependent_schedule_id": $fullID} # Disable incremental backup. @@ -26,7 +26,7 @@ alter backup schedule $fullID set full backup always; query-sql with schedules as (show schedules) select id, state, recurrence, owner, command from schedules where label='datatest' order by command->>'backup_type' asc; ---- -$fullID @daily root {"backup_statement": "BACKUP INTO 'nodelocal://1/example-schedule' WITH detached", "chain_protected_timestamp_records": true} +$fullID @daily root {"backup_statement": "BACKUP INTO 'nodelocal://1/example-schedule' WITH OPTIONS (detached)", "chain_protected_timestamp_records": true} # Verify idempotency. @@ -39,7 +39,7 @@ alter backup schedule $fullID set full backup always; query-sql with schedules as (show schedules) select id, state, recurrence, owner, command from schedules where label='datatest' order by command->>'backup_type' asc; ---- -$fullID @daily root {"backup_statement": "BACKUP INTO 'nodelocal://1/example-schedule' WITH detached", "chain_protected_timestamp_records": true} +$fullID @daily root {"backup_statement": "BACKUP INTO 'nodelocal://1/example-schedule' WITH OPTIONS (detached)", "chain_protected_timestamp_records": true} # Change cadence (of full backup.) @@ -50,7 +50,7 @@ alter backup schedule $fullID set recurring '@weekly'; query-sql with schedules as (show schedules) select id, state, recurrence, owner, command from schedules where label='datatest' order by command->>'backup_type' asc; ---- -$fullID @weekly root {"backup_statement": "BACKUP INTO 'nodelocal://1/example-schedule' WITH detached", "chain_protected_timestamp_records": true} +$fullID @weekly root {"backup_statement": "BACKUP INTO 'nodelocal://1/example-schedule' WITH OPTIONS (detached)", "chain_protected_timestamp_records": true} # Verify idempotency. 
@@ -63,7 +63,7 @@ alter backup schedule $fullID set recurring '@weekly'; query-sql with schedules as (show schedules) select id, state, recurrence, owner, command from schedules where label='datatest' order by command->>'backup_type' asc; ---- -$fullID @weekly root {"backup_statement": "BACKUP INTO 'nodelocal://1/example-schedule' WITH detached", "chain_protected_timestamp_records": true} +$fullID @weekly root {"backup_statement": "BACKUP INTO 'nodelocal://1/example-schedule' WITH OPTIONS (detached)", "chain_protected_timestamp_records": true} # Add incremental backup. @@ -78,8 +78,8 @@ with schedules as (show schedules) select id from schedules where label='datates query-sql with schedules as (show schedules) select id, state, recurrence, owner, command from schedules where label='datatest' order by command->>'backup_type' asc; ---- -$fullID 0 0 1 * * root {"backup_statement": "BACKUP INTO 'nodelocal://1/example-schedule' WITH detached", "chain_protected_timestamp_records": true, "dependent_schedule_id": $incID, "unpause_on_success": $incID} -$incID Waiting for initial backup to complete @weekly root {"backup_statement": "BACKUP INTO LATEST IN 'nodelocal://1/example-schedule' WITH detached", "backup_type": 1, "chain_protected_timestamp_records": true, "dependent_schedule_id": $fullID} +$fullID 0 0 1 * * root {"backup_statement": "BACKUP INTO 'nodelocal://1/example-schedule' WITH OPTIONS (detached)", "chain_protected_timestamp_records": true, "dependent_schedule_id": $incID, "unpause_on_success": $incID} +$incID Waiting for initial backup to complete @weekly root {"backup_statement": "BACKUP INTO LATEST IN 'nodelocal://1/example-schedule' WITH OPTIONS (detached)", "backup_type": 1, "chain_protected_timestamp_records": true, "dependent_schedule_id": $fullID} # Verify idempotency. @@ -92,8 +92,8 @@ alter backup schedule $fullID set full backup '0 0 1 * *'; query-sql with schedules as (show schedules) select id, state, recurrence, owner, command from schedules where label='datatest' order by command->>'backup_type' asc; ---- -$fullID 0 0 1 * * root {"backup_statement": "BACKUP INTO 'nodelocal://1/example-schedule' WITH detached", "chain_protected_timestamp_records": true, "dependent_schedule_id": $incID, "unpause_on_success": $incID} -$incID Waiting for initial backup to complete @weekly root {"backup_statement": "BACKUP INTO LATEST IN 'nodelocal://1/example-schedule' WITH detached", "backup_type": 1, "chain_protected_timestamp_records": true, "dependent_schedule_id": $fullID} +$fullID 0 0 1 * * root {"backup_statement": "BACKUP INTO 'nodelocal://1/example-schedule' WITH OPTIONS (detached)", "chain_protected_timestamp_records": true, "dependent_schedule_id": $incID, "unpause_on_success": $incID} +$incID Waiting for initial backup to complete @weekly root {"backup_statement": "BACKUP INTO LATEST IN 'nodelocal://1/example-schedule' WITH OPTIONS (detached)", "backup_type": 1, "chain_protected_timestamp_records": true, "dependent_schedule_id": $fullID} # Change cadence (of incremental backup.) 
@@ -105,8 +105,8 @@ alter backup schedule $fullID set recurring '@daily'; query-sql with schedules as (show schedules) select id, state, recurrence, owner, command from schedules where label='datatest' order by command->>'backup_type' asc; ---- -$fullID 0 0 1 * * root {"backup_statement": "BACKUP INTO 'nodelocal://1/example-schedule' WITH detached", "chain_protected_timestamp_records": true, "dependent_schedule_id": $incID, "unpause_on_success": $incID} -$incID Waiting for initial backup to complete @daily root {"backup_statement": "BACKUP INTO LATEST IN 'nodelocal://1/example-schedule' WITH detached", "backup_type": 1, "chain_protected_timestamp_records": true, "dependent_schedule_id": $fullID} +$fullID 0 0 1 * * root {"backup_statement": "BACKUP INTO 'nodelocal://1/example-schedule' WITH OPTIONS (detached)", "chain_protected_timestamp_records": true, "dependent_schedule_id": $incID, "unpause_on_success": $incID} +$incID Waiting for initial backup to complete @daily root {"backup_statement": "BACKUP INTO LATEST IN 'nodelocal://1/example-schedule' WITH OPTIONS (detached)", "backup_type": 1, "chain_protected_timestamp_records": true, "dependent_schedule_id": $fullID} # Verify idempotency @@ -119,8 +119,8 @@ alter backup schedule $fullID set recurring '@daily'; query-sql with schedules as (show schedules) select id, state, recurrence, owner, command from schedules where label='datatest' order by command->>'backup_type' asc; ---- -$fullID 0 0 1 * * root {"backup_statement": "BACKUP INTO 'nodelocal://1/example-schedule' WITH detached", "chain_protected_timestamp_records": true, "dependent_schedule_id": $incID, "unpause_on_success": $incID} -$incID Waiting for initial backup to complete @daily root {"backup_statement": "BACKUP INTO LATEST IN 'nodelocal://1/example-schedule' WITH detached", "backup_type": 1, "chain_protected_timestamp_records": true, "dependent_schedule_id": $fullID} +$fullID 0 0 1 * * root {"backup_statement": "BACKUP INTO 'nodelocal://1/example-schedule' WITH OPTIONS (detached)", "chain_protected_timestamp_records": true, "dependent_schedule_id": $incID, "unpause_on_success": $incID} +$incID Waiting for initial backup to complete @daily root {"backup_statement": "BACKUP INTO LATEST IN 'nodelocal://1/example-schedule' WITH OPTIONS (detached)", "backup_type": 1, "chain_protected_timestamp_records": true, "dependent_schedule_id": $fullID} # Change cadence (of full backup, while incremental exists.) 
@@ -132,8 +132,8 @@ alter backup schedule $incID set full backup '@weekly'; query-sql with schedules as (show schedules) select id, state, recurrence, owner, command from schedules where label='datatest' order by command->>'backup_type' asc; ---- -$fullID @weekly root {"backup_statement": "BACKUP INTO 'nodelocal://1/example-schedule' WITH detached", "chain_protected_timestamp_records": true, "dependent_schedule_id": $incID, "unpause_on_success": $incID} -$incID Waiting for initial backup to complete @daily root {"backup_statement": "BACKUP INTO LATEST IN 'nodelocal://1/example-schedule' WITH detached", "backup_type": 1, "chain_protected_timestamp_records": true, "dependent_schedule_id": $fullID} +$fullID @weekly root {"backup_statement": "BACKUP INTO 'nodelocal://1/example-schedule' WITH OPTIONS (detached)", "chain_protected_timestamp_records": true, "dependent_schedule_id": $incID, "unpause_on_success": $incID} +$incID Waiting for initial backup to complete @daily root {"backup_statement": "BACKUP INTO LATEST IN 'nodelocal://1/example-schedule' WITH OPTIONS (detached)", "backup_type": 1, "chain_protected_timestamp_records": true, "dependent_schedule_id": $fullID} # Verify idempotency. @@ -146,8 +146,8 @@ alter backup schedule $incID set full backup '@weekly'; query-sql with schedules as (show schedules) select id, state, recurrence, owner, command from schedules where label='datatest' order by command->>'backup_type' asc; ---- -$fullID @weekly root {"backup_statement": "BACKUP INTO 'nodelocal://1/example-schedule' WITH detached", "chain_protected_timestamp_records": true, "dependent_schedule_id": $incID, "unpause_on_success": $incID} -$incID Waiting for initial backup to complete @daily root {"backup_statement": "BACKUP INTO LATEST IN 'nodelocal://1/example-schedule' WITH detached", "backup_type": 1, "chain_protected_timestamp_records": true, "dependent_schedule_id": $fullID} +$fullID @weekly root {"backup_statement": "BACKUP INTO 'nodelocal://1/example-schedule' WITH OPTIONS (detached)", "chain_protected_timestamp_records": true, "dependent_schedule_id": $incID, "unpause_on_success": $incID} +$incID Waiting for initial backup to complete @daily root {"backup_statement": "BACKUP INTO LATEST IN 'nodelocal://1/example-schedule' WITH OPTIONS (detached)", "backup_type": 1, "chain_protected_timestamp_records": true, "dependent_schedule_id": $fullID} # Alter full and incremental cadence in the same command. 
@@ -158,8 +158,8 @@ alter backup schedule $incID set full backup '0 0 1 * *', set recurring '@weekly query-sql with schedules as (show schedules) select id, state, recurrence, owner, command from schedules where label='datatest' order by command->>'backup_type' asc; ---- -$fullID 0 0 1 * * root {"backup_statement": "BACKUP INTO 'nodelocal://1/example-schedule' WITH detached", "chain_protected_timestamp_records": true, "dependent_schedule_id": $incID, "unpause_on_success": $incID} -$incID Waiting for initial backup to complete @weekly root {"backup_statement": "BACKUP INTO LATEST IN 'nodelocal://1/example-schedule' WITH detached", "backup_type": 1, "chain_protected_timestamp_records": true, "dependent_schedule_id": $fullID} +$fullID 0 0 1 * * root {"backup_statement": "BACKUP INTO 'nodelocal://1/example-schedule' WITH OPTIONS (detached)", "chain_protected_timestamp_records": true, "dependent_schedule_id": $incID, "unpause_on_success": $incID} +$incID Waiting for initial backup to complete @weekly root {"backup_statement": "BACKUP INTO LATEST IN 'nodelocal://1/example-schedule' WITH OPTIONS (detached)", "backup_type": 1, "chain_protected_timestamp_records": true, "dependent_schedule_id": $fullID} # Verify idempotency. @@ -172,8 +172,8 @@ alter backup schedule $incID set full backup '0 0 1 * *', set recurring '@weekly query-sql with schedules as (show schedules) select id, state, recurrence, owner, command from schedules where label='datatest' order by command->>'backup_type' asc; ---- -$fullID 0 0 1 * * root {"backup_statement": "BACKUP INTO 'nodelocal://1/example-schedule' WITH detached", "chain_protected_timestamp_records": true, "dependent_schedule_id": $incID, "unpause_on_success": $incID} -$incID Waiting for initial backup to complete @weekly root {"backup_statement": "BACKUP INTO LATEST IN 'nodelocal://1/example-schedule' WITH detached", "backup_type": 1, "chain_protected_timestamp_records": true, "dependent_schedule_id": $fullID} +$fullID 0 0 1 * * root {"backup_statement": "BACKUP INTO 'nodelocal://1/example-schedule' WITH OPTIONS (detached)", "chain_protected_timestamp_records": true, "dependent_schedule_id": $incID, "unpause_on_success": $incID} +$incID Waiting for initial backup to complete @weekly root {"backup_statement": "BACKUP INTO LATEST IN 'nodelocal://1/example-schedule' WITH OPTIONS (detached)", "backup_type": 1, "chain_protected_timestamp_records": true, "dependent_schedule_id": $fullID} # Can't set incremental schedule to be slower than full. @@ -191,7 +191,7 @@ alter backup schedule $fullID set full backup always, set recurring '@daily'; query-sql with schedules as (show schedules) select id, state, recurrence, owner, command from schedules where label='datatest' order by command->>'backup_type' asc; ---- -$fullID @daily root {"backup_statement": "BACKUP INTO 'nodelocal://1/example-schedule' WITH detached", "chain_protected_timestamp_records": true} +$fullID @daily root {"backup_statement": "BACKUP INTO 'nodelocal://1/example-schedule' WITH OPTIONS (detached)", "chain_protected_timestamp_records": true} # Verify idempotency. 
@@ -204,4 +204,4 @@ alter backup schedule $fullID set full backup always, set recurring '@daily'; query-sql with schedules as (show schedules) select id, state, recurrence, owner, command from schedules where label='datatest' order by command->>'backup_type' asc; ---- -$fullID @daily root {"backup_statement": "BACKUP INTO 'nodelocal://1/example-schedule' WITH detached", "chain_protected_timestamp_records": true} +$fullID @daily root {"backup_statement": "BACKUP INTO 'nodelocal://1/example-schedule' WITH OPTIONS (detached)", "chain_protected_timestamp_records": true} diff --git a/pkg/ccl/backupccl/testdata/backup-restore/alter-schedule/schedule-options b/pkg/ccl/backupccl/testdata/backup-restore/alter-schedule/schedule-options index c630ee4c954d..9988ec160bd1 100644 --- a/pkg/ccl/backupccl/testdata/backup-restore/alter-schedule/schedule-options +++ b/pkg/ccl/backupccl/testdata/backup-restore/alter-schedule/schedule-options @@ -34,8 +34,8 @@ alter backup schedule $fullID set into 'nodelocal://1/example-schedule-2' query-sql with schedules as (show schedules for backup) select command, backup_type from schedules where id in ($fullID, $incID) order by backup_type asc; ---- -BACKUP INTO 'nodelocal://1/example-schedule-2' WITH detached FULL -BACKUP INTO LATEST IN 'nodelocal://1/example-schedule-2' WITH detached INCREMENTAL +BACKUP INTO 'nodelocal://1/example-schedule-2' WITH OPTIONS (detached) FULL +BACKUP INTO LATEST IN 'nodelocal://1/example-schedule-2' WITH OPTIONS (detached) INCREMENTAL # Alter the `on_previous_running` schedule option to test that incremental # schedules always have their configuration set to wait. diff --git a/pkg/ccl/backupccl/testdata/backup-restore/schedule-privileges b/pkg/ccl/backupccl/testdata/backup-restore/schedule-privileges index e0d8a31d65b5..0c9f14e7e4a6 100644 --- a/pkg/ccl/backupccl/testdata/backup-restore/schedule-privileges +++ b/pkg/ccl/backupccl/testdata/backup-restore/schedule-privileges @@ -104,8 +104,8 @@ query-sql with schedules as (SHOW SCHEDULES FOR BACKUP) SELECT label, command FROM schedules WHERE id IN ($fullID, $incID) ORDER BY next_run; ---- -foocluster_admin BACKUP INTO LATEST IN 'external://foo/cluster' WITH detached -foocluster_admin BACKUP INTO 'external://foo/cluster' WITH detached +foocluster_admin BACKUP INTO LATEST IN 'external://foo/cluster' WITH OPTIONS (detached) +foocluster_admin BACKUP INTO 'external://foo/cluster' WITH OPTIONS (detached) # nonadmin testuser is not allowed to drop a schedule they do not own. exec-sql expect-error-regex=(must be admin or owner of the schedule [0-9]+ to DROP it) user=testuser @@ -155,8 +155,8 @@ query-sql with schedules as (SHOW SCHEDULES FOR BACKUP) SELECT label, command, owner FROM schedules WHERE id IN ($testuserFullID, $testuserIncID) ORDER BY next_run; ---- -foocluster_testuser BACKUP INTO LATEST IN 'external://foo/cluster' WITH detached testuser -foocluster_testuser BACKUP INTO 'external://foo/cluster' WITH detached testuser +foocluster_testuser BACKUP INTO LATEST IN 'external://foo/cluster' WITH OPTIONS (detached) testuser +foocluster_testuser BACKUP INTO 'external://foo/cluster' WITH OPTIONS (detached) testuser # testuser owns these schedules so should be able to pause, resume, drop, alter # them without admin. 
diff --git a/pkg/ccl/backupccl/testdata/backup-restore/show-schedules-old b/pkg/ccl/backupccl/testdata/backup-restore/show-schedules-old index e097f125fff4..41501027a6cb 100644 --- a/pkg/ccl/backupccl/testdata/backup-restore/show-schedules-old +++ b/pkg/ccl/backupccl/testdata/backup-restore/show-schedules-old @@ -20,4 +20,4 @@ query-sql SELECT crdb_internal.pb_to_json('cockroach.jobs.jobspb.ExecutionArguments', execution_args, false, true)->'args' from scratch; ---- {"@type": "type.googleapis.com/cockroach.sql.ScheduledSQLStatsCompactorExecutionArgs"} -{"@type": "type.googleapis.com/cockroach.ccl.backupccl.ScheduledBackupExecutionArgs", "backup_statement": "BACKUP INTO 'nodelocal://1/foo' WITH detached"} +{"@type": "type.googleapis.com/cockroach.ccl.backupccl.ScheduledBackupExecutionArgs", "backup_statement": "BACKUP INTO 'nodelocal://1/foo' WITH OPTIONS (detached)"} diff --git a/pkg/ccl/changefeedccl/changefeed_test.go b/pkg/ccl/changefeedccl/changefeed_test.go index 04ca32d0ff2a..dd2cd926aebd 100644 --- a/pkg/ccl/changefeedccl/changefeed_test.go +++ b/pkg/ccl/changefeedccl/changefeed_test.go @@ -4954,15 +4954,15 @@ func TestChangefeedDescription(t *testing.T) { }{ { create: "CREATE CHANGEFEED FOR foo INTO $1 WITH updated, envelope = $2", - descr: `CREATE CHANGEFEED FOR TABLE foo INTO '` + redactedSink + `' WITH envelope = 'wrapped', updated`, + descr: `CREATE CHANGEFEED FOR TABLE foo INTO '` + redactedSink + `' WITH OPTIONS (envelope = 'wrapped', updated)`, }, { create: "CREATE CHANGEFEED FOR public.foo INTO $1 WITH updated, envelope = $2", - descr: `CREATE CHANGEFEED FOR TABLE public.foo INTO '` + redactedSink + `' WITH envelope = 'wrapped', updated`, + descr: `CREATE CHANGEFEED FOR TABLE public.foo INTO '` + redactedSink + `' WITH OPTIONS (envelope = 'wrapped', updated)`, }, { create: "CREATE CHANGEFEED FOR d.public.foo INTO $1 WITH updated, envelope = $2", - descr: `CREATE CHANGEFEED FOR TABLE d.public.foo INTO '` + redactedSink + `' WITH envelope = 'wrapped', updated`, + descr: `CREATE CHANGEFEED FOR TABLE d.public.foo INTO '` + redactedSink + `' WITH OPTIONS (envelope = 'wrapped', updated)`, }, { // TODO(#85143): remove schema_change_policy='stop' from this test. 
diff --git a/pkg/ccl/changefeedccl/scheduled_changefeed_test.go b/pkg/ccl/changefeedccl/scheduled_changefeed_test.go index f0b390923db7..0296bee083d4 100644 --- a/pkg/ccl/changefeedccl/scheduled_changefeed_test.go +++ b/pkg/ccl/changefeedccl/scheduled_changefeed_test.go @@ -193,8 +193,8 @@ func TestSerializesScheduledChangefeedExecutionArgs(t *testing.T) { query: "CREATE SCHEDULE FOR CHANGEFEED d.public.foo INTO 'webhook-https://0/changefeed?AWS_SECRET_ACCESS_KEY=nevershown' WITH initial_scan='only' RECURRING '@hourly'", es: expectedSchedule{ nameRe: "CHANGEFEED .+", - changefeedStmt: "CREATE CHANGEFEED FOR TABLE d.public.foo INTO 'webhook-https://0/changefeed?AWS_SECRET_ACCESS_KEY=nevershown' WITH initial_scan = 'only'", - shownStmt: "CREATE CHANGEFEED FOR TABLE d.public.foo INTO 'webhook-https://0/changefeed?AWS_SECRET_ACCESS_KEY=redacted' WITH initial_scan = 'only'", + changefeedStmt: "CREATE CHANGEFEED FOR TABLE d.public.foo INTO 'webhook-https://0/changefeed?AWS_SECRET_ACCESS_KEY=nevershown' WITH OPTIONS (initial_scan = 'only')", + shownStmt: "CREATE CHANGEFEED FOR TABLE d.public.foo INTO 'webhook-https://0/changefeed?AWS_SECRET_ACCESS_KEY=redacted' WITH OPTIONS (initial_scan = 'only')", period: time.Hour, }, }, @@ -203,7 +203,7 @@ func TestSerializesScheduledChangefeedExecutionArgs(t *testing.T) { query: "CREATE SCHEDULE FOR CHANGEFEED foo INTO 'webhook-https://0/changefeed' WITH initial_scan = 'only' RECURRING '@hourly'", es: expectedSchedule{ nameRe: "CHANGEFEED .+", - changefeedStmt: "CREATE CHANGEFEED FOR TABLE d.public.foo INTO 'webhook-https://0/changefeed' WITH initial_scan = 'only'", + changefeedStmt: "CREATE CHANGEFEED FOR TABLE d.public.foo INTO 'webhook-https://0/changefeed' WITH OPTIONS (initial_scan = 'only')", period: time.Hour, }, }, @@ -219,8 +219,8 @@ func TestSerializesScheduledChangefeedExecutionArgs(t *testing.T) { queryArgs: []interface{}{th.env.Now()}, es: expectedSchedule{ nameRe: "foo-changefeed", - changefeedStmt: "CREATE CHANGEFEED FOR TABLE d.public.foo INTO 'webhook-https://0/changefeed?AWS_SECRET_ACCESS_KEY=nevershown' WITH format = 'JSON', initial_scan = 'only'", - shownStmt: "CREATE CHANGEFEED FOR TABLE d.public.foo INTO 'webhook-https://0/changefeed?AWS_SECRET_ACCESS_KEY=redacted' WITH format = 'JSON', initial_scan = 'only'", + changefeedStmt: "CREATE CHANGEFEED FOR TABLE d.public.foo INTO 'webhook-https://0/changefeed?AWS_SECRET_ACCESS_KEY=nevershown' WITH OPTIONS (format = 'JSON', initial_scan = 'only')", + shownStmt: "CREATE CHANGEFEED FOR TABLE d.public.foo INTO 'webhook-https://0/changefeed?AWS_SECRET_ACCESS_KEY=redacted' WITH OPTIONS (format = 'JSON', initial_scan = 'only')", period: time.Hour, runsNow: true, }, diff --git a/pkg/ccl/changefeedccl/show_changefeed_jobs_test.go b/pkg/ccl/changefeedccl/show_changefeed_jobs_test.go index 90530860856c..2da7bdbf0c66 100644 --- a/pkg/ccl/changefeedccl/show_changefeed_jobs_test.go +++ b/pkg/ccl/changefeedccl/show_changefeed_jobs_test.go @@ -415,7 +415,7 @@ func TestShowChangefeedJobsAlterChangefeed(t *testing.T) { out = obtainJobRowFn() require.Equal(t, jobID, out.id, "Expected id:%d but found id:%d", jobID, out.id) - require.Equal(t, "CREATE CHANGEFEED FOR TABLE d.public.bar INTO 'kafka://does.not.matter/' WITH resolved = '5s'", out.description, "Expected description:%s but found description:%s", "CREATE CHANGEFEED FOR TABLE bar INTO 'kafka://does.not.matter/ WITH resolved = '5s''", out.description) + require.Equal(t, "CREATE CHANGEFEED FOR TABLE d.public.bar INTO 'kafka://does.not.matter/' WITH OPTIONS 
(resolved = '5s')", out.description, "Expected description:%s but found description:%s", "CREATE CHANGEFEED FOR TABLE bar INTO 'kafka://does.not.matter/ WITH resolved = '5s''", out.description) require.Equal(t, sinkURI, out.SinkURI, "Expected sinkUri:%s but found sinkUri:%s", sinkURI, out.SinkURI) require.Equal(t, "bar", out.topics, "Expected topics:%s but found topics:%s", "bar", sortedTopics) require.Equal(t, "{d.public.bar}", string(out.FullTableNames), "Expected fullTableNames:%s but found fullTableNames:%s", "{d.public.bar}", string(out.FullTableNames)) diff --git a/pkg/ccl/logictestccl/testdata/logic_test/changefeed b/pkg/ccl/logictestccl/testdata/logic_test/changefeed index e0a07c061eb2..1f7b6f64608f 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/changefeed +++ b/pkg/ccl/logictestccl/testdata/logic_test/changefeed @@ -59,9 +59,9 @@ SELECT job_id FROM [SHOW CHANGEFEED JOBS] WHERE user_name = 'testuser' query TT SELECT user_name, description FROM [SHOW CHANGEFEED JOB $job_id] ---- -testuser CREATE CHANGEFEED FOR TABLE t INTO 'null://sink' WITH initial_scan = 'only' +testuser CREATE CHANGEFEED FOR TABLE t INTO 'null://sink' WITH OPTIONS (initial_scan = 'only') query TT SELECT user_name, description FROM [SHOW CHANGEFEED JOBS] ---- -testuser CREATE CHANGEFEED FOR TABLE t INTO 'null://sink' WITH initial_scan = 'only' \ No newline at end of file +testuser CREATE CHANGEFEED FOR TABLE t INTO 'null://sink' WITH OPTIONS (initial_scan = 'only') diff --git a/pkg/ccl/streamingccl/streamingest/testdata/simple b/pkg/ccl/streamingccl/streamingest/testdata/simple index 25d552b39a82..dff65a262e10 100644 --- a/pkg/ccl/streamingccl/streamingest/testdata/simple +++ b/pkg/ccl/streamingccl/streamingest/testdata/simple @@ -23,7 +23,7 @@ CREATE FUNCTION strip_host(s string) returns string language sql AS $$ select co query-sql as=destination-system SELECT strip_host(description) FROM [SHOW JOBS] WHERE job_type='STREAM INGESTION' ---- -CREATE VIRTUAL CLUSTER destination FROM REPLICATION OF source ON 'postgres://root@?sslcert=redacted&sslkey=redacted&sslmode=verify-full&sslrootcert=redacted' +CREATE VIRTUAL CLUSTER destination FROM REPLICATION OF source ON ('postgres://root@?sslcert=redacted&sslkey=redacted&sslmode=verify-full&sslrootcert=redacted') query-sql as=destination-system SELECT strip_host(source_cluster_uri) FROM [SHOW TENANT destination WITH REPLICATION STATUS] diff --git a/pkg/ccl/telemetryccl/telemetry_logging_test.go b/pkg/ccl/telemetryccl/telemetry_logging_test.go index 8a4b9fe654e1..d020f8c1c903 100644 --- a/pkg/ccl/telemetryccl/telemetry_logging_test.go +++ b/pkg/ccl/telemetryccl/telemetry_logging_test.go @@ -234,7 +234,7 @@ func TestBulkJobTelemetryLogging(t *testing.T) { query: fmt.Sprintf(`IMPORT INTO a CSV DATA ('%s') WITH detached`, srv.URL), sampleQueryEvent: expectedSampleQueryEvent{ eventType: "import", - stmt: fmt.Sprintf(`IMPORT INTO defaultdb.public.a CSV DATA ('%s') WITH detached`, srv.URL), + stmt: fmt.Sprintf(`IMPORT INTO defaultdb.public.a CSV DATA ('%s') WITH OPTIONS (detached)`, srv.URL), }, recoveryEvent: expectedRecoveryEvent{ numRows: 3, @@ -258,7 +258,7 @@ func TestBulkJobTelemetryLogging(t *testing.T) { query: fmt.Sprintf(`BACKUP DATABASE mydb INTO '%s' WITH detached`, nodelocal.MakeLocalStorageURI("test1")), sampleQueryEvent: expectedSampleQueryEvent{ eventType: "backup", - stmt: fmt.Sprintf(`BACKUP DATABASE mydb INTO '%s' WITH detached`, nodelocal.MakeLocalStorageURI("test1")), + stmt: fmt.Sprintf(`BACKUP DATABASE mydb INTO '%s' WITH OPTIONS (detached)`, 
nodelocal.MakeLocalStorageURI("test1")), }, recoveryEvent: expectedRecoveryEvent{ numRows: 3, @@ -282,7 +282,7 @@ func TestBulkJobTelemetryLogging(t *testing.T) { query: fmt.Sprintf(`RESTORE DATABASE mydb FROM LATEST IN '%s' WITH detached`, nodelocal.MakeLocalStorageURI("test1")), sampleQueryEvent: expectedSampleQueryEvent{ eventType: "restore", - stmt: fmt.Sprintf(`RESTORE DATABASE mydb FROM 'latest' IN '%s' WITH detached`, nodelocal.MakeLocalStorageURI("test1")), + stmt: fmt.Sprintf(`RESTORE DATABASE mydb FROM 'latest' IN '%s' WITH OPTIONS (detached)`, nodelocal.MakeLocalStorageURI("test1")), }, recoveryEvent: expectedRecoveryEvent{ numRows: 3, diff --git a/pkg/sql/importer/import_csv_mark_redaction_test.go b/pkg/sql/importer/import_csv_mark_redaction_test.go index a17337ed8f12..b2be497c4f74 100644 --- a/pkg/sql/importer/import_csv_mark_redaction_test.go +++ b/pkg/sql/importer/import_csv_mark_redaction_test.go @@ -30,8 +30,8 @@ func TestMarkRedactionCCLStatement(t *testing.T) { expected string }{ { - "IMPORT CSV 'file' WITH delimiter = 'foo'", - "IMPORT CSV ‹'file'› WITH delimiter = ‹'foo'›", + "IMPORT CSV 'file' WITH OPTIONS (delimiter = 'foo')", + "IMPORT CSV ‹'file'› WITH OPTIONS (delimiter = ‹'foo'›)", }, } diff --git a/pkg/sql/importer/import_stmt_test.go b/pkg/sql/importer/import_stmt_test.go index f95f5aa61987..c7f2257bcc7d 100644 --- a/pkg/sql/importer/import_stmt_test.go +++ b/pkg/sql/importer/import_stmt_test.go @@ -2254,7 +2254,7 @@ func TestImportCSVStmt(t *testing.T) { `CREATE TABLE t (a int8 primary key, b string, index (b), index (a, b))`, `IMPORT INTO t CSV DATA (%s) WITH delimiter = '|', comment = '#', nullif='', skip = '2'`, testFiles.filesWithOpts, - ` WITH comment = '#', delimiter = '|', "nullif" = '', skip = '2'`, + ` WITH OPTIONS (comment = '#', delimiter = '|', "nullif" = '', skip = '2')`, "", }, { @@ -2263,7 +2263,7 @@ func TestImportCSVStmt(t *testing.T) { `CREATE TABLE t (a int8 primary key, b string, index (b), index (a, b))`, `IMPORT INTO t CSV DATA (%s) WITH sstsize = '10K'`, testFiles.files, - ` WITH sstsize = '10K'`, + ` WITH OPTIONS (sstsize = '10K')`, "", }, { @@ -2287,7 +2287,7 @@ func TestImportCSVStmt(t *testing.T) { `CREATE TABLE t (a int8 primary key, b string, index (b), index (a, b))`, `IMPORT INTO t CSV DATA (%s) WITH decompress = 'auto'`, testFiles.files, - ` WITH decompress = 'auto'`, + ` WITH OPTIONS (decompress = 'auto')`, "", }, { @@ -2295,7 +2295,7 @@ func TestImportCSVStmt(t *testing.T) { `CREATE TABLE t (a int8 primary key, b string, index (b), index (a, b))`, `IMPORT INTO t CSV DATA (%s) WITH decompress = 'none'`, testFiles.files, - ` WITH decompress = 'none'`, + ` WITH OPTIONS (decompress = 'none')`, "", }, { @@ -2303,7 +2303,7 @@ func TestImportCSVStmt(t *testing.T) { `CREATE TABLE t (a int8 primary key, b string, index (b), index (a, b))`, `IMPORT INTO t CSV DATA (%s) WITH decompress = 'gzip'`, testFiles.gzipFiles, - ` WITH decompress = 'gzip'`, + ` WITH OPTIONS (decompress = 'gzip')`, "", }, { @@ -2311,7 +2311,7 @@ func TestImportCSVStmt(t *testing.T) { `CREATE TABLE t (a int8 primary key, b string, index (b), index (a, b))`, `IMPORT INTO t CSV DATA (%s) WITH decompress = 'auto'`, testFiles.bzipFiles, - ` WITH decompress = 'auto'`, + ` WITH OPTIONS (decompress = 'auto')`, "", }, { @@ -2327,7 +2327,7 @@ func TestImportCSVStmt(t *testing.T) { `CREATE TABLE t (a int8 primary key, b string, index (b), index (a, b))`, `IMPORT INTO t CSV DATA (%s) WITH decompress = 'bzip'`, testFiles.bzipFiles, - ` WITH decompress = 'bzip'`, + ` WITH 
OPTIONS (decompress = 'bzip')`, "", }, { @@ -2335,7 +2335,7 @@ func TestImportCSVStmt(t *testing.T) { `CREATE TABLE t (a int8 primary key, b string, index (b), index (a, b))`, `IMPORT INTO t CSV DATA (%s) WITH decompress = 'auto'`, testFiles.bzipFiles, - ` WITH decompress = 'auto'`, + ` WITH OPTIONS (decompress = 'auto')`, "", }, { @@ -2384,7 +2384,7 @@ func TestImportCSVStmt(t *testing.T) { `CREATE TABLE t (a int8 primary key, b string, index (b), index (a, b))`, `IMPORT INTO t CSV DATA (%s) WITH decompress = 'none'`, testFiles.gzipFiles, - ` WITH decompress = 'none'`, + ` WITH OPTIONS (decompress = 'none')`, // This returns different errors for `make test` and `make testrace` but // field is in both error messages. `field`, @@ -2394,7 +2394,7 @@ func TestImportCSVStmt(t *testing.T) { `CREATE TABLE t (a int8 primary key, b string, index (b), index (a, b))`, `IMPORT INTO t CSV DATA (%s) WITH decompress = 'gzip'`, testFiles.files, - ` WITH decompress = 'gzip'`, + ` WITH OPTIONS (decompress = 'gzip')`, "gzip: invalid header", }, { @@ -3177,7 +3177,7 @@ func TestImportIntoCSV(t *testing.T) { "import-into-with-opts", `IMPORT INTO t (a, b) CSV DATA (%s) WITH delimiter = '|', comment = '#', nullif='', skip = '2'`, testFiles.filesWithOpts, - ` WITH comment = '#', delimiter = '|', "nullif" = '', skip = '2'`, + ` WITH OPTIONS (comment = '#', delimiter = '|', "nullif" = '', skip = '2')`, "", }, { @@ -3185,7 +3185,7 @@ func TestImportIntoCSV(t *testing.T) { "import-into-sstsize", `IMPORT INTO t (a, b) CSV DATA (%s) WITH sstsize = '10K'`, testFiles.files, - ` WITH sstsize = '10K'`, + ` WITH OPTIONS (sstsize = '10K')`, "", }, { @@ -3206,28 +3206,28 @@ func TestImportIntoCSV(t *testing.T) { "import-into-auto-decompress", `IMPORT INTO t (a, b) CSV DATA (%s) WITH decompress = 'auto'`, testFiles.files, - ` WITH decompress = 'auto'`, + ` WITH OPTIONS (decompress = 'auto')`, "", }, { "import-into-no-decompress", `IMPORT INTO t (a, b) CSV DATA (%s) WITH decompress = 'none'`, testFiles.files, - ` WITH decompress = 'none'`, + ` WITH OPTIONS (decompress = 'none')`, "", }, { "import-into-explicit-gzip", `IMPORT INTO t (a, b) CSV DATA (%s) WITH decompress = 'gzip'`, testFiles.gzipFiles, - ` WITH decompress = 'gzip'`, + ` WITH OPTIONS (decompress = 'gzip')`, "", }, { "import-into-auto-gzip", `IMPORT INTO t (a, b) CSV DATA (%s) WITH decompress = 'auto'`, testFiles.gzipFiles, - ` WITH decompress = 'auto'`, + ` WITH OPTIONS (decompress = 'auto')`, "", }, { @@ -3241,14 +3241,14 @@ func TestImportIntoCSV(t *testing.T) { "import-into-explicit-bzip", `IMPORT INTO t (a, b) CSV DATA (%s) WITH decompress = 'bzip'`, testFiles.bzipFiles, - ` WITH decompress = 'bzip'`, + ` WITH OPTIONS (decompress = 'bzip')`, "", }, { "import-into-auto-bzip", `IMPORT INTO t (a, b) CSV DATA (%s) WITH decompress = 'auto'`, testFiles.bzipFiles, - ` WITH decompress = 'auto'`, + ` WITH OPTIONS (decompress = 'auto')`, "", }, { @@ -3262,21 +3262,21 @@ func TestImportIntoCSV(t *testing.T) { "import-into-no-decompress-wildcard", `IMPORT INTO t (a, b) CSV DATA (%s) WITH decompress = 'none'`, testFiles.filesUsingWildcard, - ` WITH decompress = 'none'`, + ` WITH OPTIONS (decompress = 'none')`, "", }, { "import-into-explicit-gzip-wildcard", `IMPORT INTO t (a, b) CSV DATA (%s) WITH decompress = 'gzip'`, testFiles.gzipFilesUsingWildcard, - ` WITH decompress = 'gzip'`, + ` WITH OPTIONS (decompress = 'gzip')`, "", }, { "import-into-auto-bzip-wildcard", `IMPORT INTO t (a, b) CSV DATA (%s) WITH decompress = 'auto'`, testFiles.gzipFilesUsingWildcard, - ` WITH 
decompress = 'auto'`, + ` WITH OPTIONS (decompress = 'auto')`, "", }, // NB: successes above, failures below, because we check the i-th job. @@ -3305,7 +3305,7 @@ func TestImportIntoCSV(t *testing.T) { "import-into-no-decompress-gzip", `IMPORT INTO t (a, b) CSV DATA (%s) WITH decompress = 'none'`, testFiles.gzipFiles, - ` WITH decompress = 'none'`, + ` WITH OPTIONS (decompress = 'none')`, // This returns different errors for `make test` and `make testrace` but // field is in both error messages. "field", @@ -3314,21 +3314,21 @@ func TestImportIntoCSV(t *testing.T) { "import-into-no-decompress-gzip", `IMPORT INTO t (a, b) CSV DATA (%s) WITH decompress = 'gzip'`, testFiles.files, - ` WITH decompress = 'gzip'`, + ` WITH OPTIONS (decompress = 'gzip')`, "gzip: invalid header", }, { "import-no-files-match-wildcard", `IMPORT INTO t (a, b) CSV DATA (%s) WITH decompress = 'auto'`, []string{`'nodelocal://1/data-[0-9][0-9]*'`}, - ` WITH decompress = 'auto'`, + ` WITH OPTIONS (decompress = 'auto')`, `pq: no files matched`, }, { "import-into-no-glob-wildcard", `IMPORT INTO t (a, b) CSV DATA (%s) WITH disable_glob_matching`, testFiles.filesUsingWildcard, - ` WITH disable_glob_matching`, + ` WITH OPTIONS (disable_glob_matching)`, "pq: (.+)no such file or directory: nodelocal storage file does not exist:", }, } { diff --git a/pkg/sql/parser/testdata/backup_restore b/pkg/sql/parser/testdata/backup_restore index c65222777778..ff4045613463 100644 --- a/pkg/sql/parser/testdata/backup_restore +++ b/pkg/sql/parser/testdata/backup_restore @@ -1,1030 +1,7 @@ -parse -BACKUP TABLE foo TO 'bar' ----- -BACKUP TABLE foo TO 'bar' -BACKUP TABLE (foo) TO ('bar') -- fully parenthesized -BACKUP TABLE foo TO '_' -- literals removed -BACKUP TABLE _ TO 'bar' -- identifiers removed - -parse -BACKUP foo TO 'bar' ----- -BACKUP TABLE foo TO 'bar' -- normalized! -BACKUP TABLE (foo) TO ('bar') -- fully parenthesized -BACKUP TABLE foo TO '_' -- literals removed -BACKUP TABLE _ TO 'bar' -- identifiers removed - -parse -BACKUP TO 'bar' ----- -BACKUP TO 'bar' -BACKUP TO ('bar') -- fully parenthesized -BACKUP TO '_' -- literals removed -BACKUP TO 'bar' -- identifiers removed - -parse -BACKUP role TO 'bar' ----- -BACKUP TABLE "role" TO 'bar' -- normalized! 
-BACKUP TABLE ("role") TO ('bar') -- fully parenthesized -BACKUP TABLE "role" TO '_' -- literals removed -BACKUP TABLE _ TO 'bar' -- identifiers removed - -parse -BACKUP TABLE foo INTO 'bar' ----- -BACKUP TABLE foo INTO 'bar' -BACKUP TABLE (foo) INTO ('bar') -- fully parenthesized -BACKUP TABLE foo INTO '_' -- literals removed -BACKUP TABLE _ INTO 'bar' -- identifiers removed - -parse -BACKUP TABLE foo INTO LATEST IN 'bar' ----- -BACKUP TABLE foo INTO LATEST IN 'bar' -BACKUP TABLE (foo) INTO LATEST IN ('bar') -- fully parenthesized -BACKUP TABLE foo INTO LATEST IN '_' -- literals removed -BACKUP TABLE _ INTO LATEST IN 'bar' -- identifiers removed - -parse -BACKUP TABLE foo INTO LATEST IN 'bar' WITH incremental_location = 'baz' ----- -BACKUP TABLE foo INTO LATEST IN 'bar' WITH incremental_location = 'baz' -BACKUP TABLE (foo) INTO LATEST IN ('bar') WITH incremental_location = ('baz') -- fully parenthesized -BACKUP TABLE foo INTO LATEST IN '_' WITH incremental_location = '_' -- literals removed -BACKUP TABLE _ INTO LATEST IN 'bar' WITH incremental_location = 'baz' -- identifiers removed - -parse -BACKUP TABLE foo INTO 'subdir' IN 'bar' ----- -BACKUP TABLE foo INTO 'subdir' IN 'bar' -BACKUP TABLE (foo) INTO ('subdir') IN ('bar') -- fully parenthesized -BACKUP TABLE foo INTO '_' IN '_' -- literals removed -BACKUP TABLE _ INTO 'subdir' IN 'bar' -- identifiers removed - -parse -BACKUP TABLE foo INTO $1 IN $2 ----- -BACKUP TABLE foo INTO $1 IN $2 -BACKUP TABLE (foo) INTO ($1) IN ($2) -- fully parenthesized -BACKUP TABLE foo INTO $1 IN $1 -- literals removed -BACKUP TABLE _ INTO $1 IN $2 -- identifiers removed - -parse -EXPLAIN BACKUP TABLE foo TO 'bar' ----- -EXPLAIN BACKUP TABLE foo TO 'bar' -EXPLAIN BACKUP TABLE (foo) TO ('bar') -- fully parenthesized -EXPLAIN BACKUP TABLE foo TO '_' -- literals removed -EXPLAIN BACKUP TABLE _ TO 'bar' -- identifiers removed - -parse -BACKUP TABLE foo.foo, baz.baz TO 'bar' ----- -BACKUP TABLE foo.foo, baz.baz TO 'bar' -BACKUP TABLE (foo.foo), (baz.baz) TO ('bar') -- fully parenthesized -BACKUP TABLE foo.foo, baz.baz TO '_' -- literals removed -BACKUP TABLE _._, _._ TO 'bar' -- identifiers removed - -parse -BACKUP foo.foo, baz.baz TO 'bar' ----- -BACKUP TABLE foo.foo, baz.baz TO 'bar' -- normalized! -BACKUP TABLE (foo.foo), (baz.baz) TO ('bar') -- fully parenthesized -BACKUP TABLE foo.foo, baz.baz TO '_' -- literals removed -BACKUP TABLE _._, _._ TO 'bar' -- identifiers removed - -parse -SHOW BACKUP 'bar' ----- -SHOW BACKUP 'bar' -SHOW BACKUP ('bar') -- fully parenthesized -SHOW BACKUP '_' -- literals removed -SHOW BACKUP 'bar' -- identifiers removed - -parse -SHOW BACKUP 'bar' WITH ENCRYPTION_PASSPHRASE = 'secret', CHECK_FILES ----- -SHOW BACKUP 'bar' WITH check_files, encryption_passphrase = '*****' -- normalized! -SHOW BACKUP ('bar') WITH check_files, encryption_passphrase = '*****' -- fully parenthesized -SHOW BACKUP '_' WITH check_files, encryption_passphrase = '*****' -- literals removed -SHOW BACKUP 'bar' WITH check_files, encryption_passphrase = '*****' -- identifiers removed -SHOW BACKUP 'bar' WITH check_files, encryption_passphrase = 'secret' -- passwords exposed - -parse -SHOW BACKUP FROM LATEST IN 'bar' WITH incremental_location = 'baz', skip size ----- -SHOW BACKUP FROM 'latest' IN 'bar' WITH incremental_location = 'baz', skip size -- normalized! 
-SHOW BACKUP FROM ('latest') IN ('bar') WITH incremental_location = ('baz'), skip size -- fully parenthesized -SHOW BACKUP FROM '_' IN '_' WITH incremental_location = '_', skip size -- literals removed -SHOW BACKUP FROM 'latest' IN 'bar' WITH incremental_location = 'baz', skip size -- identifiers removed - -parse -SHOW BACKUP FROM LATEST IN ('bar','bar1') WITH KMS = ('foo', 'bar'), incremental_location=('hi','hello') ----- -SHOW BACKUP FROM 'latest' IN ('bar', 'bar1') WITH incremental_location = ('hi', 'hello'), kms = ('foo', 'bar') -- normalized! -SHOW BACKUP FROM ('latest') IN (('bar'), ('bar1')) WITH incremental_location = (('hi'), ('hello')), kms = (('foo'), ('bar')) -- fully parenthesized -SHOW BACKUP FROM '_' IN ('_', '_') WITH incremental_location = ('_', '_'), kms = ('_', '_') -- literals removed -SHOW BACKUP FROM 'latest' IN ('bar', 'bar1') WITH incremental_location = ('hi', 'hello'), kms = ('foo', 'bar') -- identifiers removed - - -parse -EXPLAIN SHOW BACKUP 'bar' ----- -EXPLAIN SHOW BACKUP 'bar' -EXPLAIN SHOW BACKUP ('bar') -- fully parenthesized -EXPLAIN SHOW BACKUP '_' -- literals removed -EXPLAIN SHOW BACKUP 'bar' -- identifiers removed - -parse -SHOW BACKUP RANGES 'bar' ----- -SHOW BACKUP RANGES 'bar' -SHOW BACKUP RANGES ('bar') -- fully parenthesized -SHOW BACKUP RANGES '_' -- literals removed -SHOW BACKUP RANGES 'bar' -- identifiers removed - -parse -SHOW BACKUP FILES 'bar' ----- -SHOW BACKUP FILES 'bar' -SHOW BACKUP FILES ('bar') -- fully parenthesized -SHOW BACKUP FILES '_' -- literals removed -SHOW BACKUP FILES 'bar' -- identifiers removed - -parse -SHOW BACKUP CONNECTION 'bar' ----- -SHOW BACKUP CONNECTION 'bar' -SHOW BACKUP CONNECTION ('bar') -- fully parenthesized -SHOW BACKUP CONNECTION '_' -- literals removed -SHOW BACKUP CONNECTION 'bar' -- identifiers removed - -parse -SHOW BACKUP CONNECTION 'bar' WITH TRANSFER = '1KiB', TIME = '1h', CONCURRENTLY = 3 ----- -SHOW BACKUP CONNECTION 'bar' WITH CONCURRENTLY = 3, TIME = '1h', TRANSFER = '1KiB' -- normalized! -SHOW BACKUP CONNECTION ('bar') WITH CONCURRENTLY = (3), TIME = ('1h'), TRANSFER = ('1KiB') -- fully parenthesized -SHOW BACKUP CONNECTION '_' WITH CONCURRENTLY = _, TIME = '_', TRANSFER = '_' -- literals removed -SHOW BACKUP CONNECTION 'bar' WITH CONCURRENTLY = 3, TIME = '1h', TRANSFER = '1KiB' -- identifiers removed - -parse -SHOW BACKUP CONNECTION 'bar' WITH TRANSFER = $1, CONCURRENTLY = $2, TIME = $3 ----- -SHOW BACKUP CONNECTION 'bar' WITH CONCURRENTLY = $2, TIME = $3, TRANSFER = $1 -- normalized! 
-SHOW BACKUP CONNECTION ('bar') WITH CONCURRENTLY = ($2), TIME = ($3), TRANSFER = ($1) -- fully parenthesized -SHOW BACKUP CONNECTION '_' WITH CONCURRENTLY = $1, TIME = $1, TRANSFER = $1 -- literals removed -SHOW BACKUP CONNECTION 'bar' WITH CONCURRENTLY = $2, TIME = $3, TRANSFER = $1 -- identifiers removed - -parse -SHOW BACKUPS IN 'bar' ----- -SHOW BACKUPS IN 'bar' -SHOW BACKUPS IN ('bar') -- fully parenthesized -SHOW BACKUPS IN '_' -- literals removed -SHOW BACKUPS IN 'bar' -- identifiers removed - -parse -SHOW BACKUPS IN $1 ----- -SHOW BACKUPS IN $1 -SHOW BACKUPS IN ($1) -- fully parenthesized -SHOW BACKUPS IN $1 -- literals removed -SHOW BACKUPS IN $1 -- identifiers removed - -parse -SHOW BACKUP 'foo' IN 'bar' ----- -SHOW BACKUP 'foo' IN 'bar' -SHOW BACKUP ('foo') IN ('bar') -- fully parenthesized -SHOW BACKUP '_' IN '_' -- literals removed -SHOW BACKUP 'foo' IN 'bar' -- identifiers removed - -parse -SHOW BACKUP FROM $1 IN $2 WITH privileges ----- -SHOW BACKUP FROM $1 IN $2 WITH privileges -SHOW BACKUP FROM ($1) IN ($2) WITH privileges -- fully parenthesized -SHOW BACKUP FROM $1 IN $1 WITH privileges -- literals removed -SHOW BACKUP FROM $1 IN $2 WITH privileges -- identifiers removed - -parse -SHOW BACKUP FILES FROM 'foo' IN 'bar' ----- -SHOW BACKUP FILES FROM 'foo' IN 'bar' -SHOW BACKUP FILES FROM ('foo') IN ('bar') -- fully parenthesized -SHOW BACKUP FILES FROM '_' IN '_' -- literals removed -SHOW BACKUP FILES FROM 'foo' IN 'bar' -- identifiers removed - -parse -SHOW BACKUP RANGES FROM 'foo' IN 'bar' ----- -SHOW BACKUP RANGES FROM 'foo' IN 'bar' -SHOW BACKUP RANGES FROM ('foo') IN ('bar') -- fully parenthesized -SHOW BACKUP RANGES FROM '_' IN '_' -- literals removed -SHOW BACKUP RANGES FROM 'foo' IN 'bar' -- identifiers removed - -parse -SHOW BACKUP SCHEMAS FROM 'foo' IN 'bar' ----- -SHOW BACKUP SCHEMAS FROM 'foo' IN 'bar' -SHOW BACKUP SCHEMAS FROM ('foo') IN ('bar') -- fully parenthesized -SHOW BACKUP SCHEMAS FROM '_' IN '_' -- literals removed -SHOW BACKUP SCHEMAS FROM 'foo' IN 'bar' -- identifiers removed - -parse -SHOW BACKUP $1 IN $2 WITH ENCRYPTION_PASSPHRASE = 'secret', ENCRYPTION_INFO_DIR = 'long_live_backupper' ----- -SHOW BACKUP $1 IN $2 WITH encryption_passphrase = '*****', encryption_info_dir = 'long_live_backupper' -- normalized! -SHOW BACKUP ($1) IN ($2) WITH encryption_passphrase = '*****', encryption_info_dir = ('long_live_backupper') -- fully parenthesized -SHOW BACKUP $1 IN $1 WITH encryption_passphrase = '*****', encryption_info_dir = '_' -- literals removed -SHOW BACKUP $1 IN $2 WITH encryption_passphrase = '*****', encryption_info_dir = 'long_live_backupper' -- identifiers removed -SHOW BACKUP $1 IN $2 WITH encryption_passphrase = 'secret', encryption_info_dir = 'long_live_backupper' -- passwords exposed - -parse -BACKUP TABLE foo TO 'bar' AS OF SYSTEM TIME '1' INCREMENTAL FROM 'baz' ----- -BACKUP TABLE foo TO 'bar' AS OF SYSTEM TIME '1' INCREMENTAL FROM 'baz' -BACKUP TABLE (foo) TO ('bar') AS OF SYSTEM TIME ('1') INCREMENTAL FROM ('baz') -- fully parenthesized -BACKUP TABLE foo TO '_' AS OF SYSTEM TIME '_' INCREMENTAL FROM '_' -- literals removed -BACKUP TABLE _ TO 'bar' AS OF SYSTEM TIME '1' INCREMENTAL FROM 'baz' -- identifiers removed - -parse -BACKUP foo TO 'bar' AS OF SYSTEM TIME '1' INCREMENTAL FROM 'baz' ----- -BACKUP TABLE foo TO 'bar' AS OF SYSTEM TIME '1' INCREMENTAL FROM 'baz' -- normalized! 
-BACKUP TABLE (foo) TO ('bar') AS OF SYSTEM TIME ('1') INCREMENTAL FROM ('baz') -- fully parenthesized -BACKUP TABLE foo TO '_' AS OF SYSTEM TIME '_' INCREMENTAL FROM '_' -- literals removed -BACKUP TABLE _ TO 'bar' AS OF SYSTEM TIME '1' INCREMENTAL FROM 'baz' -- identifiers removed - -parse -BACKUP TABLE foo TO $1 INCREMENTAL FROM 'bar', $2, 'baz' ----- -BACKUP TABLE foo TO $1 INCREMENTAL FROM 'bar', $2, 'baz' -BACKUP TABLE (foo) TO ($1) INCREMENTAL FROM ('bar'), ($2), ('baz') -- fully parenthesized -BACKUP TABLE foo TO $1 INCREMENTAL FROM '_', $1, '_' -- literals removed -BACKUP TABLE _ TO $1 INCREMENTAL FROM 'bar', $2, 'baz' -- identifiers removed - -parse -BACKUP foo TO $1 INCREMENTAL FROM 'bar', $2, 'baz' ----- -BACKUP TABLE foo TO $1 INCREMENTAL FROM 'bar', $2, 'baz' -- normalized! -BACKUP TABLE (foo) TO ($1) INCREMENTAL FROM ('bar'), ($2), ('baz') -- fully parenthesized -BACKUP TABLE foo TO $1 INCREMENTAL FROM '_', $1, '_' -- literals removed -BACKUP TABLE _ TO $1 INCREMENTAL FROM 'bar', $2, 'baz' -- identifiers removed - -parse -BACKUP DATABASE foo TO 'bar' ----- -BACKUP DATABASE foo TO 'bar' -BACKUP DATABASE foo TO ('bar') -- fully parenthesized -BACKUP DATABASE foo TO '_' -- literals removed -BACKUP DATABASE _ TO 'bar' -- identifiers removed - -parse -BACKUP DATABASE foo TO ($1) ----- -BACKUP DATABASE foo TO $1 -- normalized! -BACKUP DATABASE foo TO ($1) -- fully parenthesized -BACKUP DATABASE foo TO $1 -- literals removed -BACKUP DATABASE _ TO $1 -- identifiers removed - -parse -EXPLAIN BACKUP DATABASE foo TO 'bar' ----- -EXPLAIN BACKUP DATABASE foo TO 'bar' -EXPLAIN BACKUP DATABASE foo TO ('bar') -- fully parenthesized -EXPLAIN BACKUP DATABASE foo TO '_' -- literals removed -EXPLAIN BACKUP DATABASE _ TO 'bar' -- identifiers removed - -parse -BACKUP DATABASE foo TO bar ----- -BACKUP DATABASE foo TO 'bar' -- normalized! -BACKUP DATABASE foo TO ('bar') -- fully parenthesized -BACKUP DATABASE foo TO '_' -- literals removed -BACKUP DATABASE _ TO 'bar' -- identifiers removed - - -parse -BACKUP DATABASE foo, baz TO 'bar' ----- -BACKUP DATABASE foo, baz TO 'bar' -BACKUP DATABASE foo, baz TO ('bar') -- fully parenthesized -BACKUP DATABASE foo, baz TO '_' -- literals removed -BACKUP DATABASE _, _ TO 'bar' -- identifiers removed - -parse -BACKUP DATABASE foo TO "bar.12" INCREMENTAL FROM "baz.34" ----- -BACKUP DATABASE foo TO 'bar.12' INCREMENTAL FROM 'baz.34' -- normalized! 
-BACKUP DATABASE foo TO ('bar.12') INCREMENTAL FROM ('baz.34') -- fully parenthesized -BACKUP DATABASE foo TO '_' INCREMENTAL FROM '_' -- literals removed -BACKUP DATABASE _ TO 'bar.12' INCREMENTAL FROM 'baz.34' -- identifiers removed - - -parse -BACKUP DATABASE foo TO 'bar' AS OF SYSTEM TIME '1' INCREMENTAL FROM 'baz' ----- -BACKUP DATABASE foo TO 'bar' AS OF SYSTEM TIME '1' INCREMENTAL FROM 'baz' -BACKUP DATABASE foo TO ('bar') AS OF SYSTEM TIME ('1') INCREMENTAL FROM ('baz') -- fully parenthesized -BACKUP DATABASE foo TO '_' AS OF SYSTEM TIME '_' INCREMENTAL FROM '_' -- literals removed -BACKUP DATABASE _ TO 'bar' AS OF SYSTEM TIME '1' INCREMENTAL FROM 'baz' -- identifiers removed - -parse -BACKUP DATABASE foo TO ($1, $2) ----- -BACKUP DATABASE foo TO ($1, $2) -BACKUP DATABASE foo TO (($1), ($2)) -- fully parenthesized -BACKUP DATABASE foo TO ($1, $1) -- literals removed -BACKUP DATABASE _ TO ($1, $2) -- identifiers removed - -parse -BACKUP DATABASE foo TO ($1, $2) INCREMENTAL FROM 'baz' ----- -BACKUP DATABASE foo TO ($1, $2) INCREMENTAL FROM 'baz' -BACKUP DATABASE foo TO (($1), ($2)) INCREMENTAL FROM ('baz') -- fully parenthesized -BACKUP DATABASE foo TO ($1, $1) INCREMENTAL FROM '_' -- literals removed -BACKUP DATABASE _ TO ($1, $2) INCREMENTAL FROM 'baz' -- identifiers removed - -parse -BACKUP foo TO 'bar' WITH ENCRYPTION_PASSPHRASE = 'secret', revision_history, execution locality = 'a=b' ----- -BACKUP TABLE foo TO 'bar' WITH revision_history = true, encryption_passphrase = '*****', execution locality = 'a=b' -- normalized! -BACKUP TABLE (foo) TO ('bar') WITH revision_history = (true), encryption_passphrase = '*****', execution locality = ('a=b') -- fully parenthesized -BACKUP TABLE foo TO '_' WITH revision_history = _, encryption_passphrase = '*****', execution locality = '_' -- literals removed -BACKUP TABLE _ TO 'bar' WITH revision_history = true, encryption_passphrase = '*****', execution locality = 'a=b' -- identifiers removed -BACKUP TABLE foo TO 'bar' WITH revision_history = true, encryption_passphrase = 'secret', execution locality = 'a=b' -- passwords exposed - -parse -BACKUP foo TO 'bar' WITH KMS = ('foo', 'bar'), revision_history ----- -BACKUP TABLE foo TO 'bar' WITH revision_history = true, kms = ('foo', 'bar') -- normalized! -BACKUP TABLE (foo) TO ('bar') WITH revision_history = (true), kms = (('foo'), ('bar')) -- fully parenthesized -BACKUP TABLE foo TO '_' WITH revision_history = _, kms = ('_', '_') -- literals removed -BACKUP TABLE _ TO 'bar' WITH revision_history = true, kms = ('foo', 'bar') -- identifiers removed - -parse -BACKUP foo TO 'bar' WITH OPTIONS (detached, ENCRYPTION_PASSPHRASE = 'secret', revision_history) ----- -BACKUP TABLE foo TO 'bar' WITH revision_history = true, encryption_passphrase = '*****', detached -- normalized! -BACKUP TABLE (foo) TO ('bar') WITH revision_history = (true), encryption_passphrase = '*****', detached -- fully parenthesized -BACKUP TABLE foo TO '_' WITH revision_history = _, encryption_passphrase = '*****', detached -- literals removed -BACKUP TABLE _ TO 'bar' WITH revision_history = true, encryption_passphrase = '*****', detached -- identifiers removed -BACKUP TABLE foo TO 'bar' WITH revision_history = true, encryption_passphrase = 'secret', detached -- passwords exposed - -parse -BACKUP foo TO 'bar' WITH OPTIONS (detached, KMS = ('foo', 'bar'), revision_history) ----- -BACKUP TABLE foo TO 'bar' WITH revision_history = true, detached, kms = ('foo', 'bar') -- normalized! 
-BACKUP TABLE (foo) TO ('bar') WITH revision_history = (true), detached, kms = (('foo'), ('bar')) -- fully parenthesized -BACKUP TABLE foo TO '_' WITH revision_history = _, detached, kms = ('_', '_') -- literals removed -BACKUP TABLE _ TO 'bar' WITH revision_history = true, detached, kms = ('foo', 'bar') -- identifiers removed - - -# Regression test for #95235. -parse -BACKUP foo TO 'bar' WITH OPTIONS (detached = false) ----- -BACKUP TABLE foo TO 'bar' WITH detached = FALSE -- normalized! -BACKUP TABLE (foo) TO ('bar') WITH detached = FALSE -- fully parenthesized -BACKUP TABLE foo TO '_' WITH detached = FALSE -- literals removed -BACKUP TABLE _ TO 'bar' WITH detached = FALSE -- identifiers removed - -parse -BACKUP VIRTUAL CLUSTER 36 TO 'bar' ----- -BACKUP VIRTUAL CLUSTER 36 TO 'bar' -BACKUP VIRTUAL CLUSTER 36 TO ('bar') -- fully parenthesized -BACKUP VIRTUAL CLUSTER _ TO '_' -- literals removed -BACKUP VIRTUAL CLUSTER 36 TO 'bar' -- identifiers removed - -parse -BACKUP TENANT 36 TO 'bar' ----- -BACKUP VIRTUAL CLUSTER 36 TO 'bar' -- normalized! -BACKUP VIRTUAL CLUSTER 36 TO ('bar') -- fully parenthesized -BACKUP VIRTUAL CLUSTER _ TO '_' -- literals removed -BACKUP VIRTUAL CLUSTER 36 TO 'bar' -- identifiers removed - -parse -RESTORE TABLE foo FROM 'bar' ----- -RESTORE TABLE foo FROM 'bar' -RESTORE TABLE (foo) FROM ('bar') -- fully parenthesized -RESTORE TABLE foo FROM '_' -- literals removed -RESTORE TABLE _ FROM 'bar' -- identifiers removed - -parse -EXPLAIN RESTORE TABLE foo FROM 'bar' ----- -EXPLAIN RESTORE TABLE foo FROM 'bar' -EXPLAIN RESTORE TABLE (foo) FROM ('bar') -- fully parenthesized -EXPLAIN RESTORE TABLE foo FROM '_' -- literals removed -EXPLAIN RESTORE TABLE _ FROM 'bar' -- identifiers removed - -parse -RESTORE foo FROM 'bar' ----- -RESTORE TABLE foo FROM 'bar' -- normalized! -RESTORE TABLE (foo) FROM ('bar') -- fully parenthesized -RESTORE TABLE foo FROM '_' -- literals removed -RESTORE TABLE _ FROM 'bar' -- identifiers removed - -parse -RESTORE TABLE foo FROM $1 ----- -RESTORE TABLE foo FROM $1 -RESTORE TABLE (foo) FROM ($1) -- fully parenthesized -RESTORE TABLE foo FROM $1 -- literals removed -RESTORE TABLE _ FROM $1 -- identifiers removed - - -parse -RESTORE foo FROM $1 ----- -RESTORE TABLE foo FROM $1 -- normalized! -RESTORE TABLE (foo) FROM ($1) -- fully parenthesized -RESTORE TABLE foo FROM $1 -- literals removed -RESTORE TABLE _ FROM $1 -- identifiers removed - -parse -RESTORE TABLE foo FROM $2 IN $1 ----- -RESTORE TABLE foo FROM $2 IN $1 -RESTORE TABLE (foo) FROM ($2) IN ($1) -- fully parenthesized -RESTORE TABLE foo FROM $1 IN $1 -- literals removed -RESTORE TABLE _ FROM $2 IN $1 -- identifiers removed - -parse -RESTORE TABLE foo FROM $1, $2, 'bar' ----- -RESTORE TABLE foo FROM $1, $2, 'bar' -RESTORE TABLE (foo) FROM ($1), ($2), ('bar') -- fully parenthesized -RESTORE TABLE foo FROM $1, $1, '_' -- literals removed -RESTORE TABLE _ FROM $1, $2, 'bar' -- identifiers removed - -parse -RESTORE foo FROM $1, $2, 'bar' ----- -RESTORE TABLE foo FROM $1, $2, 'bar' -- normalized! 
-RESTORE TABLE (foo) FROM ($1), ($2), ('bar') -- fully parenthesized -RESTORE TABLE foo FROM $1, $1, '_' -- literals removed -RESTORE TABLE _ FROM $1, $2, 'bar' -- identifiers removed - -parse -RESTORE TABLE foo FROM 'abc' IN $1, $2, 'bar' ----- -RESTORE TABLE foo FROM 'abc' IN $1, $2, 'bar' -RESTORE TABLE (foo) FROM ('abc') IN ($1), ($2), ('bar') -- fully parenthesized -RESTORE TABLE foo FROM '_' IN $1, $1, '_' -- literals removed -RESTORE TABLE _ FROM 'abc' IN $1, $2, 'bar' -- identifiers removed - -parse -RESTORE TABLE foo FROM $4 IN $1, $2, 'bar' ----- -RESTORE TABLE foo FROM $4 IN $1, $2, 'bar' -RESTORE TABLE (foo) FROM ($4) IN ($1), ($2), ('bar') -- fully parenthesized -RESTORE TABLE foo FROM $1 IN $1, $1, '_' -- literals removed -RESTORE TABLE _ FROM $4 IN $1, $2, 'bar' -- identifiers removed - -parse -RESTORE TABLE foo, baz FROM 'bar' ----- -RESTORE TABLE foo, baz FROM 'bar' -RESTORE TABLE (foo), (baz) FROM ('bar') -- fully parenthesized -RESTORE TABLE foo, baz FROM '_' -- literals removed -RESTORE TABLE _, _ FROM 'bar' -- identifiers removed - - -parse -RESTORE foo, baz FROM 'bar' ----- -RESTORE TABLE foo, baz FROM 'bar' -- normalized! -RESTORE TABLE (foo), (baz) FROM ('bar') -- fully parenthesized -RESTORE TABLE foo, baz FROM '_' -- literals removed -RESTORE TABLE _, _ FROM 'bar' -- identifiers removed - -parse -RESTORE TABLE foo, baz FROM 'bar' AS OF SYSTEM TIME '1' ----- -RESTORE TABLE foo, baz FROM 'bar' AS OF SYSTEM TIME '1' -RESTORE TABLE (foo), (baz) FROM ('bar') AS OF SYSTEM TIME ('1') -- fully parenthesized -RESTORE TABLE foo, baz FROM '_' AS OF SYSTEM TIME '_' -- literals removed -RESTORE TABLE _, _ FROM 'bar' AS OF SYSTEM TIME '1' -- identifiers removed - - -parse -RESTORE foo, baz FROM 'bar' AS OF SYSTEM TIME '1' ----- -RESTORE TABLE foo, baz FROM 'bar' AS OF SYSTEM TIME '1' -- normalized! -RESTORE TABLE (foo), (baz) FROM ('bar') AS OF SYSTEM TIME ('1') -- fully parenthesized -RESTORE TABLE foo, baz FROM '_' AS OF SYSTEM TIME '_' -- literals removed -RESTORE TABLE _, _ FROM 'bar' AS OF SYSTEM TIME '1' -- identifiers removed - -parse -RESTORE DATABASE foo FROM 'bar' ----- -RESTORE DATABASE foo FROM 'bar' -RESTORE DATABASE foo FROM ('bar') -- fully parenthesized -RESTORE DATABASE foo FROM '_' -- literals removed -RESTORE DATABASE _ FROM 'bar' -- identifiers removed - -parse -RESTORE DATABASE foo FROM ($1) ----- -RESTORE DATABASE foo FROM $1 -- normalized! -RESTORE DATABASE foo FROM ($1) -- fully parenthesized -RESTORE DATABASE foo FROM $1 -- literals removed -RESTORE DATABASE _ FROM $1 -- identifiers removed - -parse -EXPLAIN RESTORE DATABASE foo FROM 'bar' ----- -EXPLAIN RESTORE DATABASE foo FROM 'bar' -EXPLAIN RESTORE DATABASE foo FROM ('bar') -- fully parenthesized -EXPLAIN RESTORE DATABASE foo FROM '_' -- literals removed -EXPLAIN RESTORE DATABASE _ FROM 'bar' -- identifiers removed - -parse -RESTORE DATABASE foo FROM bar ----- -RESTORE DATABASE foo FROM 'bar' -- normalized! 
-RESTORE DATABASE foo FROM ('bar') -- fully parenthesized -RESTORE DATABASE foo FROM '_' -- literals removed -RESTORE DATABASE _ FROM 'bar' -- identifiers removed - - -parse -RESTORE DATABASE foo, baz FROM 'bar' ----- -RESTORE DATABASE foo, baz FROM 'bar' -RESTORE DATABASE foo, baz FROM ('bar') -- fully parenthesized -RESTORE DATABASE foo, baz FROM '_' -- literals removed -RESTORE DATABASE _, _ FROM 'bar' -- identifiers removed - -parse -RESTORE DATABASE foo FROM 'bar' WITH new_db_name = 'baz' ----- -RESTORE DATABASE foo FROM 'bar' WITH new_db_name = 'baz' -RESTORE DATABASE foo FROM ('bar') WITH new_db_name = ('baz') -- fully parenthesized -RESTORE DATABASE foo FROM '_' WITH new_db_name = '_' -- literals removed -RESTORE DATABASE _ FROM 'bar' WITH new_db_name = 'baz' -- identifiers removed - -parse -RESTORE DATABASE foo FROM 'bar' WITH schema_only ----- -RESTORE DATABASE foo FROM 'bar' WITH schema_only -RESTORE DATABASE foo FROM ('bar') WITH schema_only -- fully parenthesized -RESTORE DATABASE foo FROM '_' WITH schema_only -- literals removed -RESTORE DATABASE _ FROM 'bar' WITH schema_only -- identifiers removed - -parse -RESTORE DATABASE foo FROM 'bar' IN LATEST WITH incremental_location = 'baz' ----- -RESTORE DATABASE foo FROM 'bar' IN 'latest' WITH incremental_location = 'baz' -- normalized! -RESTORE DATABASE foo FROM ('bar') IN ('latest') WITH incremental_location = ('baz') -- fully parenthesized -RESTORE DATABASE foo FROM '_' IN '_' WITH incremental_location = '_' -- literals removed -RESTORE DATABASE _ FROM 'bar' IN 'latest' WITH incremental_location = 'baz' -- identifiers removed - -parse -RESTORE DATABASE foo, baz FROM 'bar' AS OF SYSTEM TIME '1' ----- -RESTORE DATABASE foo, baz FROM 'bar' AS OF SYSTEM TIME '1' -RESTORE DATABASE foo, baz FROM ('bar') AS OF SYSTEM TIME ('1') -- fully parenthesized -RESTORE DATABASE foo, baz FROM '_' AS OF SYSTEM TIME '_' -- literals removed -RESTORE DATABASE _, _ FROM 'bar' AS OF SYSTEM TIME '1' -- identifiers removed - -parse -RESTORE DATABASE foo FROM ($1, $2) ----- -RESTORE DATABASE foo FROM ($1, $2) -RESTORE DATABASE foo FROM (($1), ($2)) -- fully parenthesized -RESTORE DATABASE foo FROM ($1, $1) -- literals removed -RESTORE DATABASE _ FROM ($1, $2) -- identifiers removed - -parse -RESTORE DATABASE foo FROM ($1), ($2) ----- -RESTORE DATABASE foo FROM $1, $2 -- normalized! -RESTORE DATABASE foo FROM ($1), ($2) -- fully parenthesized -RESTORE DATABASE foo FROM $1, $1 -- literals removed -RESTORE DATABASE _ FROM $1, $2 -- identifiers removed - -parse -RESTORE DATABASE foo FROM ($1), ($2, $3) ----- -RESTORE DATABASE foo FROM $1, ($2, $3) -- normalized! 
-RESTORE DATABASE foo FROM ($1), (($2), ($3)) -- fully parenthesized -RESTORE DATABASE foo FROM $1, ($1, $1) -- literals removed -RESTORE DATABASE _ FROM $1, ($2, $3) -- identifiers removed - -parse -RESTORE DATABASE foo FROM ($1, $2), $3 ----- -RESTORE DATABASE foo FROM ($1, $2), $3 -RESTORE DATABASE foo FROM (($1), ($2)), ($3) -- fully parenthesized -RESTORE DATABASE foo FROM ($1, $1), $1 -- literals removed -RESTORE DATABASE _ FROM ($1, $2), $3 -- identifiers removed - -parse -RESTORE DATABASE foo FROM $1, ($2, $3) ----- -RESTORE DATABASE foo FROM $1, ($2, $3) -RESTORE DATABASE foo FROM ($1), (($2), ($3)) -- fully parenthesized -RESTORE DATABASE foo FROM $1, ($1, $1) -- literals removed -RESTORE DATABASE _ FROM $1, ($2, $3) -- identifiers removed - -parse -RESTORE DATABASE foo FROM ($1, $2), ($3, $4) ----- -RESTORE DATABASE foo FROM ($1, $2), ($3, $4) -RESTORE DATABASE foo FROM (($1), ($2)), (($3), ($4)) -- fully parenthesized -RESTORE DATABASE foo FROM ($1, $1), ($1, $1) -- literals removed -RESTORE DATABASE _ FROM ($1, $2), ($3, $4) -- identifiers removed - -parse -RESTORE DATABASE foo FROM ($1, $2), ($3, $4) AS OF SYSTEM TIME '1' ----- -RESTORE DATABASE foo FROM ($1, $2), ($3, $4) AS OF SYSTEM TIME '1' -RESTORE DATABASE foo FROM (($1), ($2)), (($3), ($4)) AS OF SYSTEM TIME ('1') -- fully parenthesized -RESTORE DATABASE foo FROM ($1, $1), ($1, $1) AS OF SYSTEM TIME '_' -- literals removed -RESTORE DATABASE _ FROM ($1, $2), ($3, $4) AS OF SYSTEM TIME '1' -- identifiers removed - -parse -RESTORE FROM ($1, $2) ----- -RESTORE FROM ($1, $2) -RESTORE FROM (($1), ($2)) -- fully parenthesized -RESTORE FROM ($1, $1) -- literals removed -RESTORE FROM ($1, $2) -- identifiers removed - -parse -RESTORE FROM ($1, $2), $3 ----- -RESTORE FROM ($1, $2), $3 -RESTORE FROM (($1), ($2)), ($3) -- fully parenthesized -RESTORE FROM ($1, $1), $1 -- literals removed -RESTORE FROM ($1, $2), $3 -- identifiers removed - -parse -RESTORE FROM $1, ($2, $3) ----- -RESTORE FROM $1, ($2, $3) -RESTORE FROM ($1), (($2), ($3)) -- fully parenthesized -RESTORE FROM $1, ($1, $1) -- literals removed -RESTORE FROM $1, ($2, $3) -- identifiers removed - -parse -RESTORE FROM ($1, $2), ($3, $4) ----- -RESTORE FROM ($1, $2), ($3, $4) -RESTORE FROM (($1), ($2)), (($3), ($4)) -- fully parenthesized -RESTORE FROM ($1, $1), ($1, $1) -- literals removed -RESTORE FROM ($1, $2), ($3, $4) -- identifiers removed - -parse -RESTORE FROM ($1, $2), ($3, $4) AS OF SYSTEM TIME '1' ----- -RESTORE FROM ($1, $2), ($3, $4) AS OF SYSTEM TIME '1' -RESTORE FROM (($1), ($2)), (($3), ($4)) AS OF SYSTEM TIME ('1') -- fully parenthesized -RESTORE FROM ($1, $1), ($1, $1) AS OF SYSTEM TIME '_' -- literals removed -RESTORE FROM ($1, $2), ($3, $4) AS OF SYSTEM TIME '1' -- identifiers removed - -parse -RESTORE FROM $1, $2, 'bar' ----- -RESTORE FROM $1, $2, 'bar' -RESTORE FROM ($1), ($2), ('bar') -- fully parenthesized -RESTORE FROM $1, $1, '_' -- literals removed -RESTORE FROM $1, $2, 'bar' -- identifiers removed - -parse -RESTORE FROM $4 IN $1, $2, 'bar' ----- -RESTORE FROM $4 IN $1, $2, 'bar' -RESTORE FROM ($4) IN ($1), ($2), ('bar') -- fully parenthesized -RESTORE FROM $1 IN $1, $1, '_' -- literals removed -RESTORE FROM $4 IN $1, $2, 'bar' -- identifiers removed - -parse -RESTORE FROM $4 IN $1, $2, 'bar' AS OF SYSTEM TIME '1' WITH skip_missing_foreign_keys ----- -RESTORE FROM $4 IN $1, $2, 'bar' AS OF SYSTEM TIME '1' WITH skip_missing_foreign_keys -RESTORE FROM ($4) IN ($1), ($2), ('bar') AS OF SYSTEM TIME ('1') WITH skip_missing_foreign_keys -- fully 
parenthesized -RESTORE FROM $1 IN $1, $1, '_' AS OF SYSTEM TIME '_' WITH skip_missing_foreign_keys -- literals removed -RESTORE FROM $4 IN $1, $2, 'bar' AS OF SYSTEM TIME '1' WITH skip_missing_foreign_keys -- identifiers removed - -parse -RESTORE abc.xzy FROM 'a' WITH into_db = 'foo', skip_missing_foreign_keys ----- -RESTORE TABLE abc.xzy FROM 'a' WITH into_db = 'foo', skip_missing_foreign_keys -- normalized! -RESTORE TABLE (abc.xzy) FROM ('a') WITH into_db = ('foo'), skip_missing_foreign_keys -- fully parenthesized -RESTORE TABLE abc.xzy FROM '_' WITH into_db = '_', skip_missing_foreign_keys -- literals removed -RESTORE TABLE _._ FROM 'a' WITH into_db = 'foo', skip_missing_foreign_keys -- identifiers removed - -parse -RESTORE FROM 'a' WITH into_db = 'foo', skip_missing_foreign_keys, skip_localities_check ----- -RESTORE FROM 'a' WITH into_db = 'foo', skip_missing_foreign_keys, skip_localities_check -RESTORE FROM ('a') WITH into_db = ('foo'), skip_missing_foreign_keys, skip_localities_check -- fully parenthesized -RESTORE FROM '_' WITH into_db = '_', skip_missing_foreign_keys, skip_localities_check -- literals removed -RESTORE FROM 'a' WITH into_db = 'foo', skip_missing_foreign_keys, skip_localities_check -- identifiers removed - -parse -RESTORE foo FROM 'bar' WITH OPTIONS (encryption_passphrase='secret', into_db='baz', debug_pause_on='error', -skip_missing_foreign_keys, skip_missing_sequences, skip_missing_sequence_owners, skip_missing_views, skip_missing_udfs, detached, skip_localities_check) ----- -RESTORE TABLE foo FROM 'bar' WITH encryption_passphrase = '*****', into_db = 'baz', debug_pause_on = 'error', skip_missing_foreign_keys, skip_missing_sequence_owners, skip_missing_sequences, skip_missing_views, skip_missing_udfs, detached, skip_localities_check -- normalized! -RESTORE TABLE (foo) FROM ('bar') WITH encryption_passphrase = '*****', into_db = ('baz'), debug_pause_on = ('error'), skip_missing_foreign_keys, skip_missing_sequence_owners, skip_missing_sequences, skip_missing_views, skip_missing_udfs, detached, skip_localities_check -- fully parenthesized -RESTORE TABLE foo FROM '_' WITH encryption_passphrase = '*****', into_db = '_', debug_pause_on = '_', skip_missing_foreign_keys, skip_missing_sequence_owners, skip_missing_sequences, skip_missing_views, skip_missing_udfs, detached, skip_localities_check -- literals removed -RESTORE TABLE _ FROM 'bar' WITH encryption_passphrase = '*****', into_db = 'baz', debug_pause_on = 'error', skip_missing_foreign_keys, skip_missing_sequence_owners, skip_missing_sequences, skip_missing_views, skip_missing_udfs, detached, skip_localities_check -- identifiers removed -RESTORE TABLE foo FROM 'bar' WITH encryption_passphrase = 'secret', into_db = 'baz', debug_pause_on = 'error', skip_missing_foreign_keys, skip_missing_sequence_owners, skip_missing_sequences, skip_missing_views, skip_missing_udfs, detached, skip_localities_check -- passwords exposed - -parse -RESTORE foo FROM 'bar' WITH ENCRYPTION_PASSPHRASE = 'secret', INTO_DB=baz, DEBUG_PAUSE_ON='error', -SKIP_MISSING_FOREIGN_KEYS, SKIP_MISSING_SEQUENCES, SKIP_MISSING_SEQUENCE_OWNERS, SKIP_MISSING_VIEWS, SKIP_LOCALITIES_CHECK, SKIP_MISSING_UDFS ----- -RESTORE TABLE foo FROM 'bar' WITH encryption_passphrase = '*****', into_db = 'baz', debug_pause_on = 'error', skip_missing_foreign_keys, skip_missing_sequence_owners, skip_missing_sequences, skip_missing_views, skip_missing_udfs, skip_localities_check -- normalized! 
-RESTORE TABLE (foo) FROM ('bar') WITH encryption_passphrase = '*****', into_db = ('baz'), debug_pause_on = ('error'), skip_missing_foreign_keys, skip_missing_sequence_owners, skip_missing_sequences, skip_missing_views, skip_missing_udfs, skip_localities_check -- fully parenthesized -RESTORE TABLE foo FROM '_' WITH encryption_passphrase = '*****', into_db = '_', debug_pause_on = '_', skip_missing_foreign_keys, skip_missing_sequence_owners, skip_missing_sequences, skip_missing_views, skip_missing_udfs, skip_localities_check -- literals removed -RESTORE TABLE _ FROM 'bar' WITH encryption_passphrase = '*****', into_db = 'baz', debug_pause_on = 'error', skip_missing_foreign_keys, skip_missing_sequence_owners, skip_missing_sequences, skip_missing_views, skip_missing_udfs, skip_localities_check -- identifiers removed -RESTORE TABLE foo FROM 'bar' WITH encryption_passphrase = 'secret', into_db = 'baz', debug_pause_on = 'error', skip_missing_foreign_keys, skip_missing_sequence_owners, skip_missing_sequences, skip_missing_views, skip_missing_udfs, skip_localities_check -- passwords exposed - -parse -RESTORE TENANT 36 FROM ($1, $2) AS OF SYSTEM TIME '1' ----- -RESTORE VIRTUAL CLUSTER 36 FROM ($1, $2) AS OF SYSTEM TIME '1' -- normalized! -RESTORE VIRTUAL CLUSTER 36 FROM (($1), ($2)) AS OF SYSTEM TIME ('1') -- fully parenthesized -RESTORE VIRTUAL CLUSTER _ FROM ($1, $1) AS OF SYSTEM TIME '_' -- literals removed -RESTORE VIRTUAL CLUSTER 36 FROM ($1, $2) AS OF SYSTEM TIME '1' -- identifiers removed - -parse -RESTORE TENANT 36 FROM ($1, $2) WITH virtual_cluster_name = 'tenant-5' ----- -RESTORE VIRTUAL CLUSTER 36 FROM ($1, $2) WITH virtual_cluster_name = 'tenant-5' -- normalized! -RESTORE VIRTUAL CLUSTER 36 FROM (($1), ($2)) WITH virtual_cluster_name = ('tenant-5') -- fully parenthesized -RESTORE VIRTUAL CLUSTER _ FROM ($1, $1) WITH virtual_cluster_name = '_' -- literals removed -RESTORE VIRTUAL CLUSTER 36 FROM ($1, $2) WITH virtual_cluster_name = 'tenant-5' -- identifiers removed - -parse -RESTORE TENANT 36 FROM ($1, $2) WITH tenant_name = 'tenant-5' ----- -RESTORE VIRTUAL CLUSTER 36 FROM ($1, $2) WITH virtual_cluster_name = 'tenant-5' -- normalized! -RESTORE VIRTUAL CLUSTER 36 FROM (($1), ($2)) WITH virtual_cluster_name = ('tenant-5') -- fully parenthesized -RESTORE VIRTUAL CLUSTER _ FROM ($1, $1) WITH virtual_cluster_name = '_' -- literals removed -RESTORE VIRTUAL CLUSTER 36 FROM ($1, $2) WITH virtual_cluster_name = 'tenant-5' -- identifiers removed - -parse -RESTORE TENANT 36 FROM ($1, $2) WITH virtual_cluster = '5' ----- -RESTORE VIRTUAL CLUSTER 36 FROM ($1, $2) WITH virtual_cluster = '5' -- normalized! -RESTORE VIRTUAL CLUSTER 36 FROM (($1), ($2)) WITH virtual_cluster = ('5') -- fully parenthesized -RESTORE VIRTUAL CLUSTER _ FROM ($1, $1) WITH virtual_cluster = '_' -- literals removed -RESTORE VIRTUAL CLUSTER 36 FROM ($1, $2) WITH virtual_cluster = '5' -- identifiers removed - -parse -RESTORE TENANT 36 FROM ($1, $2) WITH tenant = '5' ----- -RESTORE VIRTUAL CLUSTER 36 FROM ($1, $2) WITH virtual_cluster = '5' -- normalized! -RESTORE VIRTUAL CLUSTER 36 FROM (($1), ($2)) WITH virtual_cluster = ('5') -- fully parenthesized -RESTORE VIRTUAL CLUSTER _ FROM ($1, $1) WITH virtual_cluster = '_' -- literals removed -RESTORE VIRTUAL CLUSTER 36 FROM ($1, $2) WITH virtual_cluster = '5' -- identifiers removed - -parse -BACKUP TABLE foo TO 'bar' WITH revision_history, detached ----- -BACKUP TABLE foo TO 'bar' WITH revision_history = true, detached -- normalized! 
-BACKUP TABLE (foo) TO ('bar') WITH revision_history = (true), detached -- fully parenthesized -BACKUP TABLE foo TO '_' WITH revision_history = _, detached -- literals removed -BACKUP TABLE _ TO 'bar' WITH revision_history = true, detached -- identifiers removed - -parse -BACKUP TABLE foo TO 'bar' WITH revision_history = $1, detached, execution locality = $2 ----- -BACKUP TABLE foo TO 'bar' WITH revision_history = $1, detached, execution locality = $2 -BACKUP TABLE (foo) TO ('bar') WITH revision_history = ($1), detached, execution locality = ($2) -- fully parenthesized -BACKUP TABLE foo TO '_' WITH revision_history = $1, detached, execution locality = $1 -- literals removed -BACKUP TABLE _ TO 'bar' WITH revision_history = $1, detached, execution locality = $2 -- identifiers removed - -parse -RESTORE TABLE foo FROM 'bar' WITH skip_missing_foreign_keys, skip_missing_sequences, detached ----- -RESTORE TABLE foo FROM 'bar' WITH skip_missing_foreign_keys, skip_missing_sequences, detached -RESTORE TABLE (foo) FROM ('bar') WITH skip_missing_foreign_keys, skip_missing_sequences, detached -- fully parenthesized -RESTORE TABLE foo FROM '_' WITH skip_missing_foreign_keys, skip_missing_sequences, detached -- literals removed -RESTORE TABLE _ FROM 'bar' WITH skip_missing_foreign_keys, skip_missing_sequences, detached -- identifiers removed - -parse -BACKUP INTO 'bar' WITH include_all_virtual_clusters = $1, detached ----- -BACKUP INTO 'bar' WITH detached, include_all_virtual_clusters = $1 -- normalized! -BACKUP INTO ('bar') WITH detached, include_all_virtual_clusters = ($1) -- fully parenthesized -BACKUP INTO '_' WITH detached, include_all_virtual_clusters = $1 -- literals removed -BACKUP INTO 'bar' WITH detached, include_all_virtual_clusters = $1 -- identifiers removed - -parse -BACKUP INTO 'bar' WITH include_all_secondary_tenants = $1, detached ----- -BACKUP INTO 'bar' WITH detached, include_all_virtual_clusters = $1 -- normalized! -BACKUP INTO ('bar') WITH detached, include_all_virtual_clusters = ($1) -- fully parenthesized -BACKUP INTO '_' WITH detached, include_all_virtual_clusters = $1 -- literals removed -BACKUP INTO 'bar' WITH detached, include_all_virtual_clusters = $1 -- identifiers removed - -parse -BACKUP INTO 'bar' WITH include_all_virtual_clusters, detached ----- -BACKUP INTO 'bar' WITH detached, include_all_virtual_clusters = true -- normalized! -BACKUP INTO ('bar') WITH detached, include_all_virtual_clusters = (true) -- fully parenthesized -BACKUP INTO '_' WITH detached, include_all_virtual_clusters = _ -- literals removed -BACKUP INTO 'bar' WITH detached, include_all_virtual_clusters = true -- identifiers removed - -parse -BACKUP INTO 'bar' WITH include_all_secondary_tenants, detached ----- -BACKUP INTO 'bar' WITH detached, include_all_virtual_clusters = true -- normalized! -BACKUP INTO ('bar') WITH detached, include_all_virtual_clusters = (true) -- fully parenthesized -BACKUP INTO '_' WITH detached, include_all_virtual_clusters = _ -- literals removed -BACKUP INTO 'bar' WITH detached, include_all_virtual_clusters = true -- identifiers removed - -parse -RESTORE FROM LATEST IN 'bar' WITH include_all_virtual_clusters = $1, detached ----- -RESTORE FROM 'latest' IN 'bar' WITH detached, include_all_virtual_clusters = $1 -- normalized! 
-RESTORE FROM ('latest') IN ('bar') WITH detached, include_all_virtual_clusters = ($1) -- fully parenthesized -RESTORE FROM '_' IN '_' WITH detached, include_all_virtual_clusters = $1 -- literals removed -RESTORE FROM 'latest' IN 'bar' WITH detached, include_all_virtual_clusters = $1 -- identifiers removed - parse RESTORE FROM LATEST IN 'bar' WITH include_all_virtual_clusters = $1, execution locality = $2, detached ---- -RESTORE FROM 'latest' IN 'bar' WITH detached, include_all_virtual_clusters = $1, execution locality = $2 -- normalized! -RESTORE FROM ('latest') IN ('bar') WITH detached, include_all_virtual_clusters = ($1), execution locality = ($2) -- fully parenthesized -RESTORE FROM '_' IN '_' WITH detached, include_all_virtual_clusters = $1, execution locality = $1 -- literals removed -RESTORE FROM 'latest' IN 'bar' WITH detached, include_all_virtual_clusters = $1, execution locality = $2 -- identifiers removed - -parse -RESTORE FROM LATEST IN 'bar' WITH include_all_virtual_clusters, detached ----- -RESTORE FROM 'latest' IN 'bar' WITH detached, include_all_virtual_clusters = true -- normalized! -RESTORE FROM ('latest') IN ('bar') WITH detached, include_all_virtual_clusters = (true) -- fully parenthesized -RESTORE FROM '_' IN '_' WITH detached, include_all_virtual_clusters = _ -- literals removed -RESTORE FROM 'latest' IN 'bar' WITH detached, include_all_virtual_clusters = true -- identifiers removed - -parse -RESTORE FROM LATEST IN 'bar' WITH include_all_secondary_tenants, detached ----- -RESTORE FROM 'latest' IN 'bar' WITH detached, include_all_virtual_clusters = true -- normalized! -RESTORE FROM ('latest') IN ('bar') WITH detached, include_all_virtual_clusters = (true) -- fully parenthesized -RESTORE FROM '_' IN '_' WITH detached, include_all_virtual_clusters = _ -- literals removed -RESTORE FROM 'latest' IN 'bar' WITH detached, include_all_virtual_clusters = true -- identifiers removed - -parse -RESTORE FROM LATEST IN 'bar' WITH unsafe_restore_incompatible_version, execution locality = 'abc', detached ----- -RESTORE FROM 'latest' IN 'bar' WITH detached, unsafe_restore_incompatible_version, execution locality = 'abc' -- normalized! 
-RESTORE FROM ('latest') IN ('bar') WITH detached, unsafe_restore_incompatible_version, execution locality = ('abc') -- fully parenthesized -RESTORE FROM '_' IN '_' WITH detached, unsafe_restore_incompatible_version, execution locality = '_' -- literals removed -RESTORE FROM 'latest' IN 'bar' WITH detached, unsafe_restore_incompatible_version, execution locality = 'abc' -- identifiers removed - -error -BACKUP foo TO 'bar' WITH key1, key2 = 'value' ----- -at or near "key1": syntax error -DETAIL: source SQL: -BACKUP foo TO 'bar' WITH key1, key2 = 'value' - ^ -HINT: try \h BACKUP - -error -BACKUP foo TO 'bar' WITH revision_history, revision_history ----- -at or near "EOF": syntax error: revision_history option specified multiple times -DETAIL: source SQL: -BACKUP foo TO 'bar' WITH revision_history, revision_history - ^ - -error -BACKUP foo TO 'bar' WITH detached, revision_history, detached ----- -at or near "EOF": syntax error: detached option specified multiple times -DETAIL: source SQL: -BACKUP foo TO 'bar' WITH detached, revision_history, detached - ^ - -error -BACKUP foo TO 'bar' WITH revision_history=false, revision_history, detached ----- -at or near ",": syntax error: revision_history option specified multiple times -DETAIL: source SQL: -BACKUP foo TO 'bar' WITH revision_history=false, revision_history, detached - ^ - -error -BACKUP foo TO 'bar' WITH detached=true, revision_history, detached=true ----- -at or near "true": syntax error: detached option specified multiple times -DETAIL: source SQL: -BACKUP foo TO 'bar' WITH detached=true, revision_history, detached=true - ^ - -error -BACKUP TO 'bar' WITH include_all_virtual_clusters=false, include_all_secondary_tenants ----- -at or near "EOF": syntax error: include_all_virtual_clusters specified multiple times -DETAIL: source SQL: -BACKUP TO 'bar' WITH include_all_virtual_clusters=false, include_all_secondary_tenants - ^ - -error -BACKUP foo TO 'bar' WITH detached=$1, revision_history ----- -at or near "1": syntax error -DETAIL: source SQL: -BACKUP foo TO 'bar' WITH detached=$1, revision_history - ^ -HINT: try \h BACKUP - -error -RESTORE foo FROM 'bar' WITH key1, key2 = 'value' ----- -at or near "key1": syntax error -DETAIL: source SQL: -RESTORE foo FROM 'bar' WITH key1, key2 = 'value' - ^ -HINT: try \h RESTORE - -error -RESTORE foo FROM 'bar' WITH skip_missing_foreign_keys, skip_missing_foreign_keys ----- -at or near "skip_missing_foreign_keys": syntax error: skip_missing_foreign_keys specified multiple times -DETAIL: source SQL: -RESTORE foo FROM 'bar' WITH skip_missing_foreign_keys, skip_missing_foreign_keys - ^ - -error -RESTORE foo FROM 'bar' WITH skip_missing_sequences, skip_missing_views, skip_missing_sequences ----- -at or near "skip_missing_sequences": syntax error: skip_missing_sequences specified multiple times -DETAIL: source SQL: -RESTORE foo FROM 'bar' WITH skip_missing_sequences, skip_missing_views, skip_missing_sequences - ^ - -error -RESTORE foo FROM 'bar' WITH detached, skip_missing_views, detached ----- -at or near "detached": syntax error: detached option specified multiple times -DETAIL: source SQL: -RESTORE foo FROM 'bar' WITH detached, skip_missing_views, detached - ^ - -error -RESTORE foo FROM 'bar' WITH skip_missing_udfs, skip_missing_views, skip_missing_udfs ----- -at or near "skip_missing_udfs": syntax error: skip_missing_udfs specified multiple times -DETAIL: source SQL: -RESTORE foo FROM 'bar' WITH skip_missing_udfs, skip_missing_views, skip_missing_udfs - ^ - -error -RESTORE FROM 'bar' WITH 
include_all_virtual_clusters=false, include_all_secondary_tenants ----- -at or near "EOF": syntax error: include_all_virtual_clusters specified multiple times -DETAIL: source SQL: -RESTORE FROM 'bar' WITH include_all_virtual_clusters=false, include_all_secondary_tenants - ^ - -error -RESTORE FROM 'bar' WITH unsafe_restore_incompatible_version, unsafe_restore_incompatible_version ----- -at or near "unsafe_restore_incompatible_version": syntax error: unsafe_restore_incompatible_version specified multiple times -DETAIL: source SQL: -RESTORE FROM 'bar' WITH unsafe_restore_incompatible_version, unsafe_restore_incompatible_version - ^ - -error -BACKUP ROLE foo, bar TO 'baz' ----- -at or near "foo": syntax error -DETAIL: source SQL: -BACKUP ROLE foo, bar TO 'baz' - ^ -HINT: try \h BACKUP - -error -RESTORE ROLE foo, bar FROM 'baz' ----- -at or near "foo": syntax error -DETAIL: source SQL: -RESTORE ROLE foo, bar FROM 'baz' - ^ -HINT: try \h RESTORE - -# Regression test for #95612 -parse -BACKUP INTO LATEST IN UNLOGGED WITH OPTIONS ( DETACHED = FALSE ) ----- -BACKUP INTO LATEST IN 'unlogged' WITH detached = FALSE -- normalized! -BACKUP INTO LATEST IN ('unlogged') WITH detached = FALSE -- fully parenthesized -BACKUP INTO LATEST IN '_' WITH detached = FALSE -- literals removed -BACKUP INTO LATEST IN 'unlogged' WITH detached = FALSE -- identifiers removed +RESTORE FROM 'latest' IN 'bar' WITH OPTIONS (detached, include_all_virtual_clusters = $1, execution locality = $2) -- normalized! +RESTORE FROM ('latest') IN ('bar') WITH OPTIONS (detached, include_all_virtual_clusters = ($1), execution locality = ($2)) -- fully parenthesized +RESTORE FROM '_' IN '_' WITH OPTIONS (detached, include_all_virtual_clusters = $1, execution locality = $1) -- literals removed +RESTORE FROM 'latest' IN 'bar' WITH OPTIONS (detached, include_all_virtual_clusters = $1, execution locality = $2) -- identifiers removed diff --git a/pkg/sql/parser/testdata/changefeed b/pkg/sql/parser/testdata/changefeed index 96e3ccec16ba..284ce341fc18 100644 --- a/pkg/sql/parser/testdata/changefeed +++ b/pkg/sql/parser/testdata/changefeed @@ -62,10 +62,10 @@ CREATE CHANGEFEED FOR TABLE _ INTO 'sink' -- identifiers removed parse CREATE CHANGEFEED FOR TABLE foo INTO 'sink' WITH bar = 'baz' ---- -CREATE CHANGEFEED FOR TABLE foo INTO 'sink' WITH bar = 'baz' -CREATE CHANGEFEED FOR TABLE (foo) INTO ('sink') WITH bar = ('baz') -- fully parenthesized -CREATE CHANGEFEED FOR TABLE foo INTO '_' WITH bar = '_' -- literals removed -CREATE CHANGEFEED FOR TABLE _ INTO 'sink' WITH _ = 'baz' -- identifiers removed +CREATE CHANGEFEED FOR TABLE foo INTO 'sink' WITH OPTIONS (bar = 'baz') -- normalized! 
+CREATE CHANGEFEED FOR TABLE (foo) INTO ('sink') WITH OPTIONS (bar = ('baz')) -- fully parenthesized +CREATE CHANGEFEED FOR TABLE foo INTO '_' WITH OPTIONS (bar = '_') -- literals removed +CREATE CHANGEFEED FOR TABLE _ INTO 'sink' WITH OPTIONS (_ = 'baz') -- identifiers removed parse CREATE CHANGEFEED AS SELECT * FROM foo diff --git a/pkg/sql/parser/testdata/import_export b/pkg/sql/parser/testdata/import_export index f584c96ee4c5..a1863dd52b63 100644 --- a/pkg/sql/parser/testdata/import_export +++ b/pkg/sql/parser/testdata/import_export @@ -1,58 +1,66 @@ parse IMPORT TABLE foo FROM PGDUMPCREATE 'nodelocal://0/foo/bar' WITH temp = 'path/to/temp' ---- -IMPORT TABLE foo FROM PGDUMPCREATE 'nodelocal://0/foo/bar' WITH temp = 'path/to/temp' -IMPORT TABLE foo FROM PGDUMPCREATE ('nodelocal://0/foo/bar') WITH temp = ('path/to/temp') -- fully parenthesized -IMPORT TABLE foo FROM PGDUMPCREATE '_' WITH temp = '_' -- literals removed -IMPORT TABLE _ FROM PGDUMPCREATE 'nodelocal://0/foo/bar' WITH _ = 'path/to/temp' -- identifiers removed +IMPORT TABLE foo FROM PGDUMPCREATE 'nodelocal://0/foo/bar' WITH OPTIONS (temp = 'path/to/temp') -- normalized! +IMPORT TABLE foo FROM PGDUMPCREATE ('nodelocal://0/foo/bar') WITH OPTIONS (temp = ('path/to/temp')) -- fully parenthesized +IMPORT TABLE foo FROM PGDUMPCREATE '_' WITH OPTIONS (temp = '_') -- literals removed +IMPORT TABLE _ FROM PGDUMPCREATE 'nodelocal://0/foo/bar' WITH OPTIONS (_ = 'path/to/temp') -- identifiers removed parse IMPORT TABLE foo FROM PGDUMPCREATE ('nodelocal://0/foo/bar') WITH temp = 'path/to/temp' ---- -IMPORT TABLE foo FROM PGDUMPCREATE 'nodelocal://0/foo/bar' WITH temp = 'path/to/temp' -- normalized! -IMPORT TABLE foo FROM PGDUMPCREATE ('nodelocal://0/foo/bar') WITH temp = ('path/to/temp') -- fully parenthesized -IMPORT TABLE foo FROM PGDUMPCREATE '_' WITH temp = '_' -- literals removed -IMPORT TABLE _ FROM PGDUMPCREATE 'nodelocal://0/foo/bar' WITH _ = 'path/to/temp' -- identifiers removed +IMPORT TABLE foo FROM PGDUMPCREATE 'nodelocal://0/foo/bar' WITH OPTIONS (temp = 'path/to/temp') -- normalized! +IMPORT TABLE foo FROM PGDUMPCREATE ('nodelocal://0/foo/bar') WITH OPTIONS (temp = ('path/to/temp')) -- fully parenthesized +IMPORT TABLE foo FROM PGDUMPCREATE '_' WITH OPTIONS (temp = '_') -- literals removed +IMPORT TABLE _ FROM PGDUMPCREATE 'nodelocal://0/foo/bar' WITH OPTIONS (_ = 'path/to/temp') -- identifiers removed parse IMPORT INTO foo(id, email) CSV DATA ('path/to/some/file', $1) WITH temp = 'path/to/temp' ---- -IMPORT INTO foo(id, email) CSV DATA ('path/to/some/file', $1) WITH temp = 'path/to/temp' -IMPORT INTO foo(id, email) CSV DATA (('path/to/some/file'), ($1)) WITH temp = ('path/to/temp') -- fully parenthesized -IMPORT INTO foo(id, email) CSV DATA ('_', $1) WITH temp = '_' -- literals removed -IMPORT INTO _(_, _) CSV DATA ('path/to/some/file', $1) WITH _ = 'path/to/temp' -- identifiers removed +IMPORT INTO foo(id, email) CSV DATA ('path/to/some/file', $1) WITH OPTIONS (temp = 'path/to/temp') -- normalized! 
+IMPORT INTO foo(id, email) CSV DATA (('path/to/some/file'), ($1)) WITH OPTIONS (temp = ('path/to/temp')) -- fully parenthesized +IMPORT INTO foo(id, email) CSV DATA ('_', $1) WITH OPTIONS (temp = '_') -- literals removed +IMPORT INTO _(_, _) CSV DATA ('path/to/some/file', $1) WITH OPTIONS (_ = 'path/to/temp') -- identifiers removed parse IMPORT INTO foo CSV DATA ('path/to/some/file', $1) WITH temp = 'path/to/temp' ---- -IMPORT INTO foo CSV DATA ('path/to/some/file', $1) WITH temp = 'path/to/temp' -IMPORT INTO foo CSV DATA (('path/to/some/file'), ($1)) WITH temp = ('path/to/temp') -- fully parenthesized -IMPORT INTO foo CSV DATA ('_', $1) WITH temp = '_' -- literals removed -IMPORT INTO _ CSV DATA ('path/to/some/file', $1) WITH _ = 'path/to/temp' -- identifiers removed +IMPORT INTO foo CSV DATA ('path/to/some/file', $1) WITH OPTIONS (temp = 'path/to/temp') -- normalized! +IMPORT INTO foo CSV DATA (('path/to/some/file'), ($1)) WITH OPTIONS (temp = ('path/to/temp')) -- fully parenthesized +IMPORT INTO foo CSV DATA ('_', $1) WITH OPTIONS (temp = '_') -- literals removed +IMPORT INTO _ CSV DATA ('path/to/some/file', $1) WITH OPTIONS (_ = 'path/to/temp') -- identifiers removed parse IMPORT PGDUMP 'nodelocal://0/foo/bar' WITH temp = 'path/to/temp' ---- -IMPORT PGDUMP 'nodelocal://0/foo/bar' WITH temp = 'path/to/temp' -IMPORT PGDUMP ('nodelocal://0/foo/bar') WITH temp = ('path/to/temp') -- fully parenthesized -IMPORT PGDUMP '_' WITH temp = '_' -- literals removed -IMPORT PGDUMP 'nodelocal://0/foo/bar' WITH _ = 'path/to/temp' -- identifiers removed +IMPORT PGDUMP 'nodelocal://0/foo/bar' WITH OPTIONS (temp = 'path/to/temp') -- normalized! +IMPORT PGDUMP ('nodelocal://0/foo/bar') WITH OPTIONS (temp = ('path/to/temp')) -- fully parenthesized +IMPORT PGDUMP '_' WITH OPTIONS (temp = '_') -- literals removed +IMPORT PGDUMP 'nodelocal://0/foo/bar' WITH OPTIONS (_ = 'path/to/temp') -- identifiers removed parse EXPLAIN IMPORT PGDUMP 'nodelocal://0/foo/bar' WITH temp = 'path/to/temp' ---- -EXPLAIN IMPORT PGDUMP 'nodelocal://0/foo/bar' WITH temp = 'path/to/temp' -EXPLAIN IMPORT PGDUMP ('nodelocal://0/foo/bar') WITH temp = ('path/to/temp') -- fully parenthesized -EXPLAIN IMPORT PGDUMP '_' WITH temp = '_' -- literals removed -EXPLAIN IMPORT PGDUMP 'nodelocal://0/foo/bar' WITH _ = 'path/to/temp' -- identifiers removed +EXPLAIN IMPORT PGDUMP 'nodelocal://0/foo/bar' WITH OPTIONS (temp = 'path/to/temp') -- normalized! +EXPLAIN IMPORT PGDUMP ('nodelocal://0/foo/bar') WITH OPTIONS (temp = ('path/to/temp')) -- fully parenthesized +EXPLAIN IMPORT PGDUMP '_' WITH OPTIONS (temp = '_') -- literals removed +EXPLAIN IMPORT PGDUMP 'nodelocal://0/foo/bar' WITH OPTIONS (_ = 'path/to/temp') -- identifiers removed parse IMPORT PGDUMP ('nodelocal://0/foo/bar') WITH temp = 'path/to/temp' ---- -IMPORT PGDUMP 'nodelocal://0/foo/bar' WITH temp = 'path/to/temp' -- normalized! -IMPORT PGDUMP ('nodelocal://0/foo/bar') WITH temp = ('path/to/temp') -- fully parenthesized -IMPORT PGDUMP '_' WITH temp = '_' -- literals removed -IMPORT PGDUMP 'nodelocal://0/foo/bar' WITH _ = 'path/to/temp' -- identifiers removed +IMPORT PGDUMP 'nodelocal://0/foo/bar' WITH OPTIONS (temp = 'path/to/temp') -- normalized! 
+IMPORT PGDUMP ('nodelocal://0/foo/bar') WITH OPTIONS (temp = ('path/to/temp')) -- fully parenthesized +IMPORT PGDUMP '_' WITH OPTIONS (temp = '_') -- literals removed +IMPORT PGDUMP 'nodelocal://0/foo/bar' WITH OPTIONS (_ = 'path/to/temp') -- identifiers removed + +parse +IMPORT PGDUMP ('nodelocal://0/foo/bar') WITH OPTIONS (temp = 'path/to/temp') +---- +IMPORT PGDUMP 'nodelocal://0/foo/bar' WITH OPTIONS (temp = 'path/to/temp') -- normalized! +IMPORT PGDUMP ('nodelocal://0/foo/bar') WITH OPTIONS (temp = ('path/to/temp')) -- fully parenthesized +IMPORT PGDUMP '_' WITH OPTIONS (temp = '_') -- literals removed +IMPORT PGDUMP 'nodelocal://0/foo/bar' WITH OPTIONS (_ = 'path/to/temp') -- identifiers removed parse EXPORT INTO CSV 'a' FROM TABLE a @@ -73,15 +81,15 @@ EXPORT INTO CSV 'a' FROM SELECT * FROM _ -- identifiers removed parse EXPORT INTO CSV 's3://my/path/%part%.csv' WITH delimiter = '|' FROM TABLE a ---- -EXPORT INTO CSV 's3://my/path/%part%.csv' WITH delimiter = '|' FROM TABLE a -EXPORT INTO CSV ('s3://my/path/%part%.csv') WITH delimiter = ('|') FROM TABLE a -- fully parenthesized -EXPORT INTO CSV '_' WITH delimiter = '_' FROM TABLE a -- literals removed -EXPORT INTO CSV 's3://my/path/%part%.csv' WITH _ = '|' FROM TABLE _ -- identifiers removed +EXPORT INTO CSV 's3://my/path/%part%.csv' WITH OPTIONS(delimiter = '|') FROM TABLE a -- normalized! +EXPORT INTO CSV ('s3://my/path/%part%.csv') WITH OPTIONS(delimiter = ('|')) FROM TABLE a -- fully parenthesized +EXPORT INTO CSV '_' WITH OPTIONS(delimiter = '_') FROM TABLE a -- literals removed +EXPORT INTO CSV 's3://my/path/%part%.csv' WITH OPTIONS(_ = '|') FROM TABLE _ -- identifiers removed parse EXPORT INTO CSV 's3://my/path/%part%.csv' WITH delimiter = '|' FROM SELECT a, sum(b) FROM c WHERE d = 1 ORDER BY sum(b) DESC LIMIT 10 ---- -EXPORT INTO CSV 's3://my/path/%part%.csv' WITH delimiter = '|' FROM SELECT a, sum(b) FROM c WHERE d = 1 ORDER BY sum(b) DESC LIMIT 10 -EXPORT INTO CSV ('s3://my/path/%part%.csv') WITH delimiter = ('|') FROM SELECT (a), (sum((b))) FROM c WHERE ((d) = (1)) ORDER BY (sum((b))) DESC LIMIT (10) -- fully parenthesized -EXPORT INTO CSV '_' WITH delimiter = '_' FROM SELECT a, sum(b) FROM c WHERE d = _ ORDER BY sum(b) DESC LIMIT _ -- literals removed -EXPORT INTO CSV 's3://my/path/%part%.csv' WITH _ = '|' FROM SELECT _, sum(_) FROM _ WHERE _ = 1 ORDER BY sum(_) DESC LIMIT 10 -- identifiers removed +EXPORT INTO CSV 's3://my/path/%part%.csv' WITH OPTIONS(delimiter = '|') FROM SELECT a, sum(b) FROM c WHERE d = 1 ORDER BY sum(b) DESC LIMIT 10 -- normalized! 
+EXPORT INTO CSV ('s3://my/path/%part%.csv') WITH OPTIONS(delimiter = ('|')) FROM SELECT (a), (sum((b))) FROM c WHERE ((d) = (1)) ORDER BY (sum((b))) DESC LIMIT (10) -- fully parenthesized +EXPORT INTO CSV '_' WITH OPTIONS(delimiter = '_') FROM SELECT a, sum(b) FROM c WHERE d = _ ORDER BY sum(b) DESC LIMIT _ -- literals removed +EXPORT INTO CSV 's3://my/path/%part%.csv' WITH OPTIONS(_ = '|') FROM SELECT _, sum(_) FROM _ WHERE _ = 1 ORDER BY sum(_) DESC LIMIT 10 -- identifiers removed diff --git a/pkg/sql/parser/testdata/prepared_stmts b/pkg/sql/parser/testdata/prepared_stmts index 0d280dbb1bee..8c62e696fc2e 100644 --- a/pkg/sql/parser/testdata/prepared_stmts +++ b/pkg/sql/parser/testdata/prepared_stmts @@ -265,18 +265,18 @@ PREPARE _ (INT8) AS RESUME JOBS SELECT $1 -- identifiers removed parse PREPARE a AS IMPORT INTO a CSV DATA ('c') WITH temp = 'd' ---- -PREPARE a AS IMPORT INTO a CSV DATA ('c') WITH temp = 'd' -PREPARE a AS IMPORT INTO a CSV DATA (('c')) WITH temp = ('d') -- fully parenthesized -PREPARE a AS IMPORT INTO a CSV DATA ('_') WITH temp = '_' -- literals removed -PREPARE _ AS IMPORT INTO _ CSV DATA ('c') WITH _ = 'd' -- identifiers removed +PREPARE a AS IMPORT INTO a CSV DATA ('c') WITH OPTIONS (temp = 'd') -- normalized! +PREPARE a AS IMPORT INTO a CSV DATA (('c')) WITH OPTIONS (temp = ('d')) -- fully parenthesized +PREPARE a AS IMPORT INTO a CSV DATA ('_') WITH OPTIONS (temp = '_') -- literals removed +PREPARE _ AS IMPORT INTO _ CSV DATA ('c') WITH OPTIONS (_ = 'd') -- identifiers removed parse PREPARE a (STRING, STRING, STRING) AS IMPORT INTO a CSV DATA ($2) WITH temp = $3 ---- -PREPARE a (STRING, STRING, STRING) AS IMPORT INTO a CSV DATA ($2) WITH temp = $3 -PREPARE a (STRING, STRING, STRING) AS IMPORT INTO a CSV DATA (($2)) WITH temp = ($3) -- fully parenthesized -PREPARE a (STRING, STRING, STRING) AS IMPORT INTO a CSV DATA ($1) WITH temp = $1 -- literals removed -PREPARE _ (STRING, STRING, STRING) AS IMPORT INTO _ CSV DATA ($2) WITH _ = $3 -- identifiers removed +PREPARE a (STRING, STRING, STRING) AS IMPORT INTO a CSV DATA ($2) WITH OPTIONS (temp = $3) -- normalized! +PREPARE a (STRING, STRING, STRING) AS IMPORT INTO a CSV DATA (($2)) WITH OPTIONS (temp = ($3)) -- fully parenthesized +PREPARE a (STRING, STRING, STRING) AS IMPORT INTO a CSV DATA ($1) WITH OPTIONS (temp = $1) -- literals removed +PREPARE _ (STRING, STRING, STRING) AS IMPORT INTO _ CSV DATA ($2) WITH OPTIONS (_ = $3) -- identifiers removed parse PREPARE a AS OPT PLAN 'some-string' diff --git a/pkg/sql/parser/testdata/show b/pkg/sql/parser/testdata/show index 96b2c84b3744..7af75e6bd06d 100644 --- a/pkg/sql/parser/testdata/show +++ b/pkg/sql/parser/testdata/show @@ -262,6 +262,14 @@ SHOW PUBLIC CLUSTER SETTINGS FOR VIRTUAL CLUSTER (123) -- fully parenthesized SHOW PUBLIC CLUSTER SETTINGS FOR VIRTUAL CLUSTER _ -- literals removed SHOW PUBLIC CLUSTER SETTINGS FOR VIRTUAL CLUSTER 123 -- identifiers removed +parse +SHOW PUBLIC CLUSTER SETTINGS FOR TENANT INTERVAL 'string' DAY TO HOUR +---- +SHOW PUBLIC CLUSTER SETTINGS FOR VIRTUAL CLUSTER ('string'::INTERVAL DAY TO HOUR) -- normalized! 
+SHOW PUBLIC CLUSTER SETTINGS FOR VIRTUAL CLUSTER ((('string')::INTERVAL DAY TO HOUR)) -- fully parenthesized
+SHOW PUBLIC CLUSTER SETTINGS FOR VIRTUAL CLUSTER ('_'::INTERVAL DAY TO HOUR) -- literals removed
+SHOW PUBLIC CLUSTER SETTINGS FOR VIRTUAL CLUSTER ('string'::INTERVAL DAY TO HOUR) -- identifiers removed
+
 parse
 SHOW CLUSTER SETTINGS FOR VIRTUAL CLUSTER (1+1)
 ----
diff --git a/pkg/sql/sem/tree/backup.go b/pkg/sql/sem/tree/backup.go
index b52b55517e63..7092d3fe26f3 100644
--- a/pkg/sql/sem/tree/backup.go
+++ b/pkg/sql/sem/tree/backup.go
@@ -111,8 +111,9 @@ func (node *Backup) Format(ctx *FmtCtx) {
 	}
 
 	if !node.Options.IsDefault() {
-		ctx.WriteString(" WITH ")
+		ctx.WriteString(" WITH OPTIONS (")
 		ctx.FormatNode(&node.Options)
+		ctx.WriteString(")")
 	}
 }
 
@@ -198,8 +199,9 @@ func (node *Restore) Format(ctx *FmtCtx) {
 		ctx.FormatNode(&node.AsOf)
 	}
 	if !node.Options.IsDefault() {
-		ctx.WriteString(" WITH ")
-		ctx.FormatNode(&node.Options)
+		ctx.WriteString(" WITH OPTIONS (")
+		ctx.FormatNode(&node.Options)
+		ctx.WriteString(")")
 	}
 }
 
@@ -496,9 +498,9 @@ func (o *RestoreOptions) Format(ctx *FmtCtx) {
 	}
 
 	if o.ExecutionLocality != nil {
 		maybeAddSep()
 		ctx.WriteString("execution locality = ")
 		ctx.FormatNode(o.ExecutionLocality)
 	}
 
 	if o.ExperimentalOnline {
diff --git a/pkg/sql/sem/tree/changefeed.go b/pkg/sql/sem/tree/changefeed.go
index c9cfbad57ed0..4a1150d6a402 100644
--- a/pkg/sql/sem/tree/changefeed.go
+++ b/pkg/sql/sem/tree/changefeed.go
@@ -47,8 +47,9 @@ func (node *CreateChangefeed) Format(ctx *FmtCtx) {
 		ctx.FormatNode(node.SinkURI)
 	}
 	if node.Options != nil {
-		ctx.WriteString(" WITH ")
+		ctx.WriteString(" WITH OPTIONS (")
 		ctx.FormatNode(&node.Options)
+		ctx.WriteString(")")
 	}
 }
 
diff --git a/pkg/sql/sem/tree/create.go b/pkg/sql/sem/tree/create.go
index 639449769359..41325b966e06 100644
--- a/pkg/sql/sem/tree/create.go
+++ b/pkg/sql/sem/tree/create.go
@@ -2255,7 +2255,15 @@ func (node *CreateTenantFromReplication) Format(ctx *FmtCtx) {
 		ctx.WriteString(" FROM REPLICATION OF ")
 		ctx.FormatNode(node.ReplicationSourceTenantName)
 		ctx.WriteString(" ON ")
+		_, canOmitParentheses := node.ReplicationSourceAddress.(alreadyDelimitedAsSyntacticDExpr)
+		if !canOmitParentheses {
+			ctx.WriteByte('(')
+		}
 		ctx.FormatNode(node.ReplicationSourceAddress)
+		if !canOmitParentheses {
+			ctx.WriteByte(')')
+		}
+
 	}
 	if !node.Options.IsDefault() {
 		ctx.WriteString(" WITH ")
@@ -2267,7 +2275,14 @@ func (node *CreateTenantFromReplication) Format(ctx *FmtCtx) {
 
 func (o *TenantReplicationOptions) Format(ctx *FmtCtx) {
 	if o.Retention != nil {
 		ctx.WriteString("RETENTION = ")
+		_, canOmitParentheses := o.Retention.(alreadyDelimitedAsSyntacticDExpr)
+		if !canOmitParentheses {
+			ctx.WriteByte('(')
+		}
 		ctx.FormatNode(o.Retention)
+		if !canOmitParentheses {
+			ctx.WriteByte(')')
+		}
 	}
 }
 
diff --git a/pkg/sql/sem/tree/export.go b/pkg/sql/sem/tree/export.go
index e5c5541ceacd..9b592ff65ad6 100644
--- a/pkg/sql/sem/tree/export.go
+++ b/pkg/sql/sem/tree/export.go
@@ -27,8 +27,9 @@ func (node *Export) Format(ctx *FmtCtx) {
 	ctx.WriteString(" ")
 	ctx.FormatNode(node.File)
 	if node.Options != nil {
-		ctx.WriteString(" WITH ")
+		ctx.WriteString(" WITH OPTIONS(")
 		ctx.FormatNode(&node.Options)
+		ctx.WriteString(")")
 	}
 	ctx.WriteString(" FROM ")
 	ctx.FormatNode(node.Query)
diff --git a/pkg/sql/sem/tree/expr.go b/pkg/sql/sem/tree/expr.go
index 7f34f2993082..35bf22aa51a1 100644
--- a/pkg/sql/sem/tree/expr.go
+++ b/pkg/sql/sem/tree/expr.go
@@ -800,11 +800,11 @@ func NewPlaceholder(name string) (*Placeholder, error) {
 
 // Format implements the NodeFormatter interface.
 func (node *Placeholder) Format(ctx *FmtCtx) {
 	if ctx.placeholderFormat != nil {
 		ctx.placeholderFormat(ctx, node)
 		return
 	}
 	ctx.Printf("$%d", node.Idx+1)
 }
 
 // ResolvedType implements the TypedExpr interface.
diff --git a/pkg/sql/sem/tree/import.go b/pkg/sql/sem/tree/import.go
index ea3c76149cd5..a1510c9de11f 100644
--- a/pkg/sql/sem/tree/import.go
+++ b/pkg/sql/sem/tree/import.go
@@ -58,7 +58,8 @@ func (node *Import) Format(ctx *FmtCtx) {
 	}
 
 	if node.Options != nil {
-		ctx.WriteString(" WITH ")
+		ctx.WriteString(" WITH OPTIONS (")
 		ctx.FormatNode(&node.Options)
+		ctx.WriteString(")")
 	}
 }
diff --git a/pkg/sql/sem/tree/show.go b/pkg/sql/sem/tree/show.go
index c0fd34042436..97009b592e28 100644
--- a/pkg/sql/sem/tree/show.go
+++ b/pkg/sql/sem/tree/show.go
@@ -237,16 +237,16 @@ func (o *ShowBackupOptions) Format(ctx *FmtCtx) {
 		ctx.WriteString("CONCURRENTLY = ")
 		ctx.FormatNode(o.CheckConnectionConcurrency)
 	}
-	if o.CheckConnectionDuration != nil {
-		maybeAddSep()
-		ctx.WriteString("TIME = ")
-		ctx.FormatNode(o.CheckConnectionDuration)
-	}
 	if o.CheckConnectionTransferSize != nil {
 		maybeAddSep()
 		ctx.WriteString("TRANSFER = ")
 		ctx.FormatNode(o.CheckConnectionTransferSize)
 	}
+	if o.CheckConnectionDuration != nil {
+		maybeAddSep()
+		ctx.WriteString("TIME = ")
+		ctx.FormatNode(o.CheckConnectionDuration)
+	}
 }
 
 func (o ShowBackupOptions) IsDefault() bool {
@@ -1125,8 +1125,9 @@ func (node *ShowTableStats) Format(ctx *FmtCtx) {
 	ctx.WriteString("FOR TABLE ")
 	ctx.FormatNode(node.Table)
 	if len(node.Options) > 0 {
-		ctx.WriteString(" WITH ")
+		ctx.WriteString(" WITH OPTIONS (")
 		ctx.FormatNode(&node.Options)
+		ctx.WriteString(")")
 	}
 }
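
The sketch below is illustrative only and is not part of the patch. It shows the output convention the Format methods above converge on: statement options are printed as a parenthesized list after WITH OPTIONS, so the formatted statement round-trips through the parser's explicit-options form. The kvOption type and formatWithOptions helper are hypothetical stand-ins for tree.KVOptions and the FmtCtx-based formatting in the real code.

package main

import (
	"fmt"
	"strings"
)

// kvOption models a single statement option; an empty value means a bare
// flag such as "detached".
type kvOption struct {
	key, value string
}

// formatWithOptions appends options in the shape the patched Format methods
// emit: " WITH OPTIONS (k1 = v1, k2, ...)". It writes nothing when there are
// no options, mirroring the Options.IsDefault()/nil checks above.
func formatWithOptions(sb *strings.Builder, opts []kvOption) {
	if len(opts) == 0 {
		return
	}
	sb.WriteString(" WITH OPTIONS (")
	for i, o := range opts {
		if i > 0 {
			sb.WriteString(", ")
		}
		sb.WriteString(o.key)
		if o.value != "" {
			sb.WriteString(" = ")
			sb.WriteString(o.value)
		}
	}
	sb.WriteString(")")
}

func main() {
	var sb strings.Builder
	sb.WriteString("BACKUP TABLE t INTO 'nodelocal://1/backup'")
	formatWithOptions(&sb, []kvOption{{key: "detached"}, {key: "revision_history", value: "true"}})
	// Prints: BACKUP TABLE t INTO 'nodelocal://1/backup' WITH OPTIONS (detached, revision_history = true)
	fmt.Println(sb.String())
}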