26450: backupccl: support SHOW BACKUP [RANGES|FILES] r=mjibson,dt a=benesch

This makes debugging a broken backup substantially easier.

Release note (enterprise change): Introduce SHOW BACKUP RANGES and SHOW
BACKUP FILES, which show details about the ranges and files,
respectively, that comprise a backup.

Co-authored-by: Nikhil Benesch <[email protected]>
craig[bot] and benesch committed Jun 6, 2018
2 parents 7c4dee1 + 74b0bf7 commit b1c30ff
Showing 10 changed files with 383 additions and 182 deletions.
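
As a quick illustration of the new statements described in the release note, a client could list the files that make up a backup from Go. This is a hedged sketch rather than code from this commit: the connection string and backup location are placeholders, and the result columns of SHOW BACKUP FILES are printed generically rather than assumed.

package main

import (
    "database/sql"
    "fmt"
    "log"

    _ "github.com/lib/pq" // CockroachDB speaks the Postgres wire protocol.
)

func main() {
    // Placeholder connection string; adjust host, port, user, and certs as needed.
    db, err := sql.Open("postgres", "postgresql://root@localhost:26257/?sslmode=disable")
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    // The grammar accepts either a string literal or a placeholder for the location.
    rows, err := db.Query("SHOW BACKUP FILES $1", "nodelocal:///foo")
    if err != nil {
        log.Fatal(err)
    }
    defer rows.Close()

    // Print the returned column names rather than assuming a particular schema.
    cols, err := rows.Columns()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("SHOW BACKUP FILES columns:", cols)
}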
2 changes: 2 additions & 0 deletions docs/generated/sql/bnf/show_backup.bnf
@@ -1,2 +1,4 @@
show_backup_stmt ::=
'SHOW' 'BACKUP' location
| 'SHOW' 'BACKUP' 'RANGES' location
| 'SHOW' 'BACKUP' 'FILES' location
8 changes: 6 additions & 2 deletions docs/generated/sql/bnf/stmt_block.bnf
@@ -440,6 +440,8 @@ use_stmt ::=

show_backup_stmt ::=
'SHOW' 'BACKUP' string_or_placeholder
| 'SHOW' 'BACKUP' 'RANGES' string_or_placeholder
| 'SHOW' 'BACKUP' 'FILES' string_or_placeholder

show_columns_stmt ::=
'SHOW' 'COLUMNS' 'FROM' table_name
@@ -680,6 +682,7 @@ unreserved_keyword ::=
| 'EXPERIMENTAL_REPLICA'
| 'EXPLAIN'
| 'EXPORT'
| 'FILES'
| 'FILTER'
| 'FIRST'
| 'FLOAT4'
@@ -756,6 +759,7 @@ unreserved_keyword ::=
| 'QUERIES'
| 'QUERY'
| 'RANGE'
| 'RANGES'
| 'READ'
| 'RECURSIVE'
| 'REF'
@@ -1769,7 +1773,7 @@ alter_table_cmd ::=
| 'ALTER' opt_column column_name 'DROP' 'STORED'
| 'DROP' opt_column 'IF' 'EXISTS' column_name opt_drop_behavior
| 'DROP' opt_column column_name opt_drop_behavior
| 'ALTER' opt_column column_name opt_set_data 'TYPE' typename opt_alter_column_collate opt_alter_column_using
| 'ALTER' opt_column column_name opt_set_data 'TYPE' typename opt_collate opt_alter_column_using
| 'ADD' table_constraint opt_validate_behavior
| 'VALIDATE' 'CONSTRAINT' constraint_name
| 'DROP' 'CONSTRAINT' 'IF' 'EXISTS' constraint_name opt_drop_behavior
@@ -1953,7 +1957,7 @@ opt_set_data ::=
'SET' 'DATA'
|

opt_alter_column_collate ::=
opt_collate ::=
'COLLATE' collation_name
|

91 changes: 0 additions & 91 deletions pkg/ccl/backupccl/backup.go
@@ -35,7 +35,6 @@ import (
"github.com/cockroachdb/cockroach/pkg/storage"
"github.com/cockroachdb/cockroach/pkg/storage/engine"
"github.com/cockroachdb/cockroach/pkg/util/ctxgroup"
"github.com/cockroachdb/cockroach/pkg/util/encoding"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/interval"
"github.com/cockroachdb/cockroach/pkg/util/log"
@@ -1180,95 +1179,6 @@ func backupResumeHook(typ jobs.Type, settings *cluster.Settings) jobs.Resumer {
}
}

// showBackupPlanHook implements PlanHookFn.
func showBackupPlanHook(
ctx context.Context, stmt tree.Statement, p sql.PlanHookState,
) (sql.PlanHookRowFn, sqlbase.ResultColumns, []sql.PlanNode, error) {
backup, ok := stmt.(*tree.ShowBackup)
if !ok {
return nil, nil, nil, nil
}

if err := utilccl.CheckEnterpriseEnabled(
p.ExecCfg().Settings, p.ExecCfg().ClusterID(), p.ExecCfg().Organization(), "SHOW BACKUP",
); err != nil {
return nil, nil, nil, err
}

if err := p.RequireSuperUser(ctx, "SHOW BACKUP"); err != nil {
return nil, nil, nil, err
}

toFn, err := p.TypeAsString(backup.Path, "SHOW BACKUP")
if err != nil {
return nil, nil, nil, err
}
header := sqlbase.ResultColumns{
{Name: "database", Typ: types.String},
{Name: "table", Typ: types.String},
{Name: "start_time", Typ: types.Timestamp},
{Name: "end_time", Typ: types.Timestamp},
{Name: "size_bytes", Typ: types.Int},
{Name: "rows", Typ: types.Int},
}
fn := func(ctx context.Context, _ []sql.PlanNode, resultsCh chan<- tree.Datums) error {
// TODO(dan): Move this span into sql.
ctx, span := tracing.ChildSpan(ctx, stmt.StatementTag())
defer tracing.FinishSpan(span)

str, err := toFn()
if err != nil {
return err
}
desc, err := ReadBackupDescriptorFromURI(ctx, str, p.ExecCfg().Settings)
if err != nil {
return err
}
descs := make(map[sqlbase.ID]string)
for _, descriptor := range desc.Descriptors {
if database := descriptor.GetDatabase(); database != nil {
if _, ok := descs[database.ID]; !ok {
descs[database.ID] = database.Name
}
}
}
descSizes := make(map[sqlbase.ID]roachpb.BulkOpSummary)
for _, file := range desc.Files {
// TODO(dan): This assumes each file in the backup only contains
// data from a single table, which is usually but not always
// correct. It does not account for interleaved tables or if a
// BACKUP happened to catch a newly created table that hadn't yet
// been split into its own range.
_, tableID, err := encoding.DecodeUvarintAscending(file.Span.Key)
if err != nil {
continue
}
s := descSizes[sqlbase.ID(tableID)]
s.Add(file.EntryCounts)
descSizes[sqlbase.ID(tableID)] = s
}
start := tree.DNull
if desc.StartTime.WallTime != 0 {
start = tree.MakeDTimestamp(timeutil.Unix(0, desc.StartTime.WallTime), time.Nanosecond)
}
for _, descriptor := range desc.Descriptors {
if table := descriptor.GetTable(); table != nil {
dbName := descs[table.ParentID]
resultsCh <- tree.Datums{
tree.NewDString(dbName),
tree.NewDString(table.Name),
start,
tree.MakeDTimestamp(timeutil.Unix(0, desc.EndTime.WallTime), time.Nanosecond),
tree.NewDInt(tree.DInt(descSizes[table.ID].DataSize)),
tree.NewDInt(tree.DInt(descSizes[table.ID].Rows)),
}
}
}
return nil
}
return fn, header, nil, nil
}

type versionedValues struct {
Key roachpb.Key
Values []roachpb.Value
@@ -1320,6 +1230,5 @@ func getAllRevisions(

func init() {
sql.AddPlanHook(backupPlanHook)
sql.AddPlanHook(showBackupPlanHook)
jobs.AddResumeHook(backupResumeHook)
}
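
For context on the size-attribution logic in the deleted showBackupPlanHook above (see its TODO about each file containing data from a single table): it relies on a table's keys beginning with the table ID in uvarint-ascending encoding, which is what DecodeUvarintAscending peels off the file's span key. Below is a minimal standalone sketch of that prefix round trip using the same util/encoding package the old code imported; the table ID and key suffix are made up for illustration.

package main

import (
    "fmt"

    "github.com/cockroachdb/cockroach/pkg/util/encoding"
)

func main() {
    // Build a key whose prefix is a uvarint-encoded table ID, the shape the
    // removed code relied on when attributing a backup file's rows and bytes
    // to a table.
    const tableID = 53                         // arbitrary example ID
    key := encoding.EncodeUvarintAscending(nil, tableID)
    key = append(key, "rest-of-the-key"...)    // stand-in for the real index/row suffix

    rest, id, err := encoding.DecodeUvarintAscending(key)
    if err != nil {
        panic(err)
    }
    fmt.Printf("table ID %d, remaining key %q\n", id, rest)
}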
64 changes: 0 additions & 64 deletions pkg/ccl/backupccl/backup_test.go
@@ -12,7 +12,6 @@ import (
"bytes"
"context"
gosql "database/sql"
"database/sql/driver"
"fmt"
"hash/crc32"
"io"
@@ -2448,69 +2447,6 @@ func TestRestoreDatabaseVersusTable(t *testing.T) {
})
}

func TestShowBackup(t *testing.T) {
defer leaktest.AfterTest(t)()

const numAccounts = 11
_, _, sqlDB, _, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, initNone)
defer cleanupFn()

full, inc := localFoo+"/full", localFoo+"/inc"

beforeFull := timeutil.Now()
sqlDB.Exec(t, `BACKUP data.bank TO $1`, full)

var unused driver.Value
var start, end *time.Time
var dataSize, rows uint64
sqlDB.QueryRow(t, `SELECT * FROM [SHOW BACKUP $1] WHERE "table" = 'bank'`, full).Scan(
&unused, &unused, &start, &end, &dataSize, &rows,
)
if start != nil {
t.Errorf("expected null start time on full backup, got %v", *start)
}
if !(*end).After(beforeFull) {
t.Errorf("expected now (%s) to be in (%s, %s)", beforeFull, start, end)
}
if dataSize <= 0 {
t.Errorf("expected dataSize to be >0 got : %d", dataSize)
}
if rows != numAccounts {
t.Errorf("expected %d got: %d", numAccounts, rows)
}

// Mess with half the rows.
affectedRows, err := sqlDB.Exec(t,
`UPDATE data.bank SET id = -1 * id WHERE id > $1`, numAccounts/2,
).RowsAffected()
if err != nil {
t.Fatal(err)
} else if affectedRows != numAccounts/2 {
t.Fatalf("expected to update %d rows, got %d", numAccounts/2, affectedRows)
}

beforeInc := timeutil.Now()
sqlDB.Exec(t, `BACKUP data.bank TO $1 INCREMENTAL FROM $2`, inc, full)

sqlDB.QueryRow(t, `SELECT * FROM [SHOW BACKUP $1] WHERE "table" = 'bank'`, inc).Scan(
&unused, &unused, &start, &end, &dataSize, &rows,
)
if start == nil {
t.Errorf("expected start time on inc backup, got %v", *start)
}
if !(*end).After(beforeInc) {
t.Errorf("expected now (%s) to be in (%s, %s)", beforeInc, start, end)
}
if dataSize <= 0 {
t.Errorf("expected dataSize to be >0 got : %d", dataSize)
}
// We added affectedRows and removed affectedRows, so there should be 2*
// affectedRows in the backup.
if expected := affectedRows * 2; rows != uint64(expected) {
t.Errorf("expected %d got: %d", expected, rows)
}
}

func TestBackupAzureAccountName(t *testing.T) {
defer leaktest.AfterTest(t)()

(The remaining 6 changed files are not shown.)
