40793: sql,sqlbase,client: bootstrap TableDescriptor timestamps from MVCC r=ajwerner a=ajwerner

This PR adds back the changes from cockroachdb#40581 and, in the third commit, deals with the fallout that led to cockroachdb#40781.

Release Justification: Fixes a bug formerly considered to be a release blocker. 
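
For context, a minimal sketch of the mechanism the callers in this diff rely on; the real implementation lives in pkg/sql/sqlbase, outside the hunks shown here, so the signature and body below are an approximation rather than the committed code. The new Descriptor.Table(ts) accessor replaces Descriptor.GetTable() and, when a descriptor predates 19.1 and has no ModificationTime, bootstraps it from the MVCC timestamp at which the descriptor was read:

// Approximate sketch (package sqlbase); the committed method may differ in detail.
func (d *Descriptor) Table(ts hlc.Timestamp) *TableDescriptor {
	t := d.GetTable()
	if t == nil {
		return nil
	}
	// Descriptors written before 19.1 can carry an empty ModificationTime;
	// fall back to the MVCC timestamp of the key-value pair they were read from.
	if t.ModificationTime.IsEmpty() && !ts.IsEmpty() {
		t.ModificationTime = ts
	}
	return t
}

Callers that have no MVCC timestamp at hand (for example when walking descriptors already decoded from a backup) pass hlc.Timestamp{}, which leaves the descriptor as-is.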


Co-authored-by: Andrew Werner <[email protected]>
craig[bot] and ajwerner committed Sep 17, 2019
2 parents faaa699 + 05a000a commit 0c6fcf0
Showing 79 changed files with 861 additions and 230 deletions.
2 changes: 1 addition & 1 deletion docs/generated/settings/settings.html
@@ -132,6 +132,6 @@
<tr><td><code>trace.debug.enable</code></td><td>boolean</td><td><code>false</code></td><td>if set, traces for recent requests can be seen in the /debug page</td></tr>
<tr><td><code>trace.lightstep.token</code></td><td>string</td><td><code></code></td><td>if set, traces go to Lightstep using this token</td></tr>
<tr><td><code>trace.zipkin.collector</code></td><td>string</td><td><code></code></td><td>if set, traces go to the given Zipkin instance (example: '127.0.0.1:9411'); ignored if trace.lightstep.token is set</td></tr>
<tr><td><code>version</code></td><td>custom validation</td><td><code>19.1-9</code></td><td>set the active cluster version in the format '<major>.<minor>'</td></tr>
<tr><td><code>version</code></td><td>custom validation</td><td><code>19.1-10</code></td><td>set the active cluster version in the format '<major>.<minor>'</td></tr>
</tbody>
</table>
43 changes: 32 additions & 11 deletions pkg/ccl/backupccl/backup.go
@@ -117,6 +117,23 @@ func readBackupDescriptor(
if err := protoutil.Unmarshal(descBytes, &backupDesc); err != nil {
return BackupDescriptor{}, err
}
for _, d := range backupDesc.Descriptors {
// Calls to GetTable are generally frowned upon.
// This specific call exists to provide backwards compatibility with
// backups created prior to version 19.1. Starting in v19.1 the
// ModificationTime is always written in backups for all versions
// of table descriptors. In earlier CockroachDB versions, only table
// descriptors past version 1 contained a non-empty ModificationTime.
// Later versions of CockroachDB use the MVCC timestamp to fill in
// the ModificationTime for table descriptors. When performing a restore
// we no longer have access to that MVCC timestamp but we can set it
// to a value we know will be safe.
if t := d.GetTable(); t == nil {
continue
} else if t.Version == 1 && t.ModificationTime.IsEmpty() {
t.ModificationTime = hlc.Timestamp{WallTime: 1}
}
}
return backupDesc, err
}

@@ -178,7 +195,7 @@ func getRelevantDescChanges(
// obviously interesting to our backup.
for _, i := range descs {
interestingIDs[i.GetID()] = struct{}{}
if t := i.GetTable(); t != nil {
if t := i.Table(hlc.Timestamp{}); t != nil {
for j := t.ReplacementOf.ID; j != sqlbase.InvalidID; j = priorIDs[j] {
interestingIDs[j] = struct{}{}
}
@@ -200,7 +217,7 @@ func getRelevantDescChanges(
return nil, err
}
for _, i := range starting {
if table := i.GetTable(); table != nil {
if table := i.Table(hlc.Timestamp{}); table != nil {
// We need to add to interestingIDs so that if we later see a delete for
// this ID we still know it is interesting to us, even though we will not
// have a parentID at that point (since the delete is a nil desc).
@@ -231,7 +248,7 @@ func getRelevantDescChanges(
if _, ok := interestingIDs[change.ID]; ok {
interestingChanges = append(interestingChanges, change)
} else if change.Desc != nil {
if table := change.Desc.GetTable(); table != nil {
if table := change.Desc.Table(hlc.Timestamp{}); table != nil {
if _, ok := interestingParents[table.ParentID]; ok {
interestingIDs[table.ID] = struct{}{}
interestingChanges = append(interestingChanges, change)
@@ -279,7 +296,8 @@ func getAllDescChanges(
return nil, err
}
r.Desc = &desc
if t := desc.GetTable(); t != nil && t.ReplacementOf.ID != sqlbase.InvalidID {
t := desc.Table(rev.Timestamp)
if t != nil && t.ReplacementOf.ID != sqlbase.InvalidID {
priorIDs[t.ID] = t.ReplacementOf.ID
}
}
@@ -303,6 +321,9 @@ func allSQLDescriptors(ctx context.Context, txn *client.Txn) ([]sqlbase.Descript
return nil, errors.NewAssertionErrorWithWrappedErrf(err,
"%s: unable to unmarshal SQL descriptor", row.Key)
}
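// The call below is made for its side effect: passing the row's MVCC timestamp
// lets Table bootstrap ModificationTime on descriptors written before 19.1
// (see the commit message); the returned descriptor is deliberately ignored.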
if row.Value != nil {
sqlDescs[i].Table(row.Value.Timestamp)
}
}
return sqlDescs, nil
}
@@ -379,7 +400,7 @@ func spansForAllTableIndexes(
// in them that we didn't already get above e.g. indexes or tables that are
// not in latest because they were dropped during the time window in question.
for _, rev := range revs {
if tbl := rev.Desc.GetTable(); tbl != nil {
if tbl := rev.Desc.Table(hlc.Timestamp{}); tbl != nil {
for _, idx := range tbl.AllNonDropIndexes() {
key := tableAndIndex{tableID: tbl.ID, indexID: idx.ID}
if !added[key] {
@@ -1016,7 +1037,7 @@ func backupPlanHook(
return err
}
}
if tableDesc := desc.GetTable(); tableDesc != nil {
if tableDesc := desc.Table(hlc.Timestamp{}); tableDesc != nil {
if err := p.CheckPrivilege(ctx, tableDesc, privilege.SELECT); err != nil {
return err
}
@@ -1083,7 +1104,7 @@ func backupPlanHook(
tablesInPrev := make(map[sqlbase.ID]struct{})
dbsInPrev := make(map[sqlbase.ID]struct{})
for _, d := range prevBackups[len(prevBackups)-1].Descriptors {
if t := d.GetTable(); t != nil {
if t := d.Table(hlc.Timestamp{}); t != nil {
tablesInPrev[t.ID] = struct{}{}
}
}
@@ -1092,7 +1113,7 @@
}

for _, d := range targetDescs {
if t := d.GetTable(); t != nil {
if t := d.Table(hlc.Timestamp{}); t != nil {
// If we're trying to use a previous backup for this table, ideally it
// actually contains this table.
if _, ok := tablesInPrev[t.ID]; ok {
@@ -1504,7 +1525,7 @@ func maybeDowngradeTableDescsInBackupDescriptor(
// Copy Descriptors so we can return a shallow copy without mutating the slice.
copy(backupDescCopy.Descriptors, backupDesc.Descriptors)
for i := range backupDesc.Descriptors {
if tableDesc := backupDesc.Descriptors[i].GetTable(); tableDesc != nil {
if tableDesc := backupDesc.Descriptors[i].Table(hlc.Timestamp{}); tableDesc != nil {
downgraded, newDesc, err := tableDesc.MaybeDowngradeForeignKeyRepresentation(ctx, settings)
if err != nil {
return nil, err
@@ -1534,7 +1555,7 @@ func maybeUpgradeTableDescsInBackupDescriptors(
// descriptors so that they can be looked up.
for _, backupDesc := range backupDescs {
for _, desc := range backupDesc.Descriptors {
if table := desc.GetTable(); table != nil {
if table := desc.Table(hlc.Timestamp{}); table != nil {
protoGetter.Protos[string(sqlbase.MakeDescMetadataKey(table.ID))] =
sqlbase.WrapDescriptor(protoutil.Clone(table).(*sqlbase.TableDescriptor))
}
@@ -1544,7 +1565,7 @@
for i := range backupDescs {
backupDesc := &backupDescs[i]
for j := range backupDesc.Descriptors {
if table := backupDesc.Descriptors[j].GetTable(); table != nil {
if table := backupDesc.Descriptors[j].Table(hlc.Timestamp{}); table != nil {
if _, err := table.MaybeUpgradeForeignKeyRepresentation(ctx, protoGetter, skipFKsWithNoMatchingTable); err != nil {
return err
}
8 changes: 4 additions & 4 deletions pkg/ccl/backupccl/restore.go
@@ -184,7 +184,7 @@ func loadSQLDescsFromBackupsAtTime(

allDescs := make([]sqlbase.Descriptor, 0, len(byID))
for _, desc := range byID {
if t := desc.GetTable(); t != nil {
if t := desc.Table(hlc.Timestamp{}); t != nil {
// A table revision may have been captured before it was in a DB that is
// backed up -- if the DB is missing, filter the table.
if byID[t.ParentID] == nil {
@@ -212,7 +212,7 @@ func selectTargets(

seenTable := false
for _, desc := range matched.descs {
if desc.GetTable() != nil {
if desc.Table(hlc.Timestamp{}) != nil {
seenTable = true
break
}
@@ -1534,7 +1534,7 @@ func doRestorePlan(
for _, desc := range sqlDescs {
if dbDesc := desc.GetDatabase(); dbDesc != nil {
databasesByID[dbDesc.ID] = dbDesc
} else if tableDesc := desc.GetTable(); tableDesc != nil {
} else if tableDesc := desc.Table(hlc.Timestamp{}); tableDesc != nil {
tablesByID[tableDesc.ID] = tableDesc
}
}
@@ -1686,7 +1686,7 @@ func createImportingTables(
var tables []*sqlbase.TableDescriptor
var oldTableIDs []sqlbase.ID
for _, desc := range sqlDescs {
if tableDesc := desc.GetTable(); tableDesc != nil {
if tableDesc := desc.Table(hlc.Timestamp{}); tableDesc != nil {
tables = append(tables, tableDesc)
oldTableIDs = append(oldTableIDs, tableDesc.ID)
}
86 changes: 86 additions & 0 deletions pkg/ccl/backupccl/restore_old_versions_test.go
@@ -0,0 +1,86 @@
// Copyright 2019 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt

package backupccl_test

import (
"io/ioutil"
"os"
"path/filepath"
"testing"

"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/stretchr/testify/require"
)

// TestRestoreOldVersions ensures that we can successfully restore tables
// and databases exported by old versions of CockroachDB.
//
// The files being restored live in testdata and are all made from the same
// input SQL which lives in <testdataBase>/create.sql.
//
// The SSTs were created via the following commands:
//
// VERSION=...
// roachprod wipe local
// roachprod stage local release ${VERSION}
// roachprod start local
// # If the version is v1.0.7 then you need to enable enterprise with the
// # enterprise.enabled cluster setting.
// roachprod sql local:1 -- -e "$(cat pkg/ccl/backupccl/testdata/restore_old_versions/create.sql)"
// # Create an S3 bucket to store the backup.
// roachprod sql local:1 -- -e "BACKUP DATABASE test TO 's3://<bucket-name>/${VERSION}?AWS_ACCESS_KEY_ID=<...>&AWS_SECRET_ACCESS_KEY=<...>'"
// # Then download the backup from s3 and plop the files into the appropriate
// # testdata directory.
//
func TestRestoreOldVersions(t *testing.T) {
defer leaktest.AfterTest(t)()
const (
testdataBase = "testdata/restore_old_versions"
exportDirs = testdataBase + "/exports"
)
dirs, err := ioutil.ReadDir(exportDirs)
require.NoError(t, err)
for _, dir := range dirs {
require.True(t, dir.IsDir())
exportDir, err := filepath.Abs(filepath.Join(exportDirs, dir.Name()))
require.NoError(t, err)
t.Run(dir.Name(), restoreOldVersionTest(exportDir))
}
}

func restoreOldVersionTest(exportDir string) func(t *testing.T) {
return func(t *testing.T) {
params := base.TestServerArgs{}
const numAccounts = 1000
_, _, sqlDB, dir, cleanup := backupRestoreTestSetupWithParams(t, singleNode, numAccounts,
initNone, base.TestClusterArgs{ServerArgs: params})
defer cleanup()
err := os.Symlink(exportDir, filepath.Join(dir, "foo"))
require.NoError(t, err)
sqlDB.Exec(t, `CREATE DATABASE test`)
var unused string
var importedRows int
sqlDB.QueryRow(t, `RESTORE test.* FROM $1`, localFoo).Scan(
&unused, &unused, &unused, &importedRows, &unused, &unused, &unused,
)
const totalRows = 12
if importedRows != totalRows {
t.Fatalf("expected %d rows, got %d", totalRows, importedRows)
}
results := [][]string{
{"1", "1", "1"},
{"2", "2", "2"},
{"3", "3", "3"},
}
sqlDB.CheckQueryResults(t, `SELECT * FROM test.t1 ORDER BY k`, results)
sqlDB.CheckQueryResults(t, `SELECT * FROM test.t2 ORDER BY k`, results)
sqlDB.CheckQueryResults(t, `SELECT * FROM test.t4 ORDER BY k`, results)
}
}
3 changes: 2 additions & 1 deletion pkg/ccl/backupccl/show.go
@@ -19,6 +19,7 @@ import (
"github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util/encoding"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/cockroach/pkg/util/tracing"
)
@@ -145,7 +146,7 @@ func backupShowerDefault(ctx context.Context, showSchemas bool) backupShower {
var rows []tree.Datums
var row tree.Datums
for _, descriptor := range desc.Descriptors {
if table := descriptor.GetTable(); table != nil {
if table := descriptor.Table(hlc.Timestamp{}); table != nil {
dbName := descs[table.ParentID]
row = tree.Datums{
tree.NewDString(dbName),
5 changes: 3 additions & 2 deletions pkg/ccl/backupccl/targets.go
@@ -14,6 +14,7 @@ import (
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sessiondata"
"github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/pkg/errors"
)

@@ -121,7 +122,7 @@ func newDescriptorResolver(descs []sqlbase.Descriptor) (*descriptorResolver, err
}
// Now on to the tables.
for _, desc := range descs {
if tbDesc := desc.GetTable(); tbDesc != nil {
if tbDesc := desc.Table(hlc.Timestamp{}); tbDesc != nil {
if tbDesc.Dropped() {
continue
}
@@ -214,7 +215,7 @@ func descriptorsMatchingTargets(
desc := descI.(sqlbase.Descriptor)

// If the parent database is not requested already, request it now
parentID := desc.GetTable().GetParentID()
parentID := desc.Table(hlc.Timestamp{}).GetParentID()
if _, ok := alreadyRequestedDBs[parentID]; !ok {
parentDesc := resolver.descByID[parentID]
ret.descs = append(ret.descs, parentDesc)
Expand Down
5 changes: 5 additions & 0 deletions pkg/ccl/backupccl/targets_test.go
@@ -20,6 +20,7 @@ import (
"github.com/cockroachdb/cockroach/pkg/sql/sessiondata"
"github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
)

@@ -34,6 +35,10 @@ func TestDescriptorsMatchingTargets(t *testing.T) {
*sqlbase.WrapDescriptor(&sqlbase.DatabaseDescriptor{ID: 3, Name: "data"}),
*sqlbase.WrapDescriptor(&sqlbase.DatabaseDescriptor{ID: 5, Name: "empty"}),
}
// Set the timestamp on the table descriptors.
for _, d := range descriptors {
d.Table(hlc.Timestamp{WallTime: 1})
}

tests := []struct {
sessionDatabase string
35 changes: 35 additions & 0 deletions pkg/ccl/backupccl/testdata/restore_old_versions/create.sql
@@ -0,0 +1,35 @@
-- The SQL below creates the data that is then exported with BACKUP for use in
-- the TestRestoreOldVersions test. Each of t1, t2, and t4 should contain the
-- same rows when ordered by k.

CREATE DATABASE test;

SET database = test;

-- t1 gets some modifications.

CREATE TABLE t1 (k INT8 PRIMARY KEY, v1 INT8);

ALTER TABLE t1 ADD COLUMN v2 INT8;

CREATE INDEX t1_v2 ON t1 (v2);

-- t2 is an unmodified table.

CREATE TABLE t2 (k INT8 PRIMARY KEY, v1 INT8, v2 INT8);

-- t3 and t4 are interleaved.

CREATE TABLE t3 (k INT8 PRIMARY KEY);

CREATE TABLE t4 (k INT8 PRIMARY KEY, v1 INT8, v2 INT8, CONSTRAINT fk_t3 FOREIGN KEY (k) REFERENCES t3)
INTERLEAVE IN PARENT t3 (k);

INSERT INTO t1 VALUES (1, 1, 1), (2, 2, 2), (3, 3, 3);

INSERT INTO t2 VALUES (1, 1, 1), (2, 2, 2), (3, 3, 3);

INSERT INTO t3 VALUES (1), (2), (3);

INSERT INTO t4 VALUES (1, 1, 1), (2, 2, 2), (3, 3, 3);

28 binary files not shown.
2 changes: 1 addition & 1 deletion pkg/ccl/changefeedccl/changefeed_stmt.go
@@ -200,7 +200,7 @@ func changefeedPlanHook(
}
targets := make(jobspb.ChangefeedTargets, len(targetDescs))
for _, desc := range targetDescs {
if tableDesc := desc.GetTable(); tableDesc != nil {
if tableDesc := desc.Table(hlc.Timestamp{}); tableDesc != nil {
targets[tableDesc.ID] = jobspb.ChangefeedTarget{
StatementTimeName: tableDesc.Name,
}
5 changes: 3 additions & 2 deletions pkg/ccl/changefeedccl/table_history.go
@@ -260,7 +260,8 @@ func fetchTableDescriptorVersions(
} else if !ok {
return nil
}
remaining, _, _, err := sqlbase.DecodeTableIDIndexID(it.UnsafeKey().Key)
k := it.UnsafeKey()
remaining, _, _, err := sqlbase.DecodeTableIDIndexID(k.Key)
if err != nil {
return err
}
@@ -282,7 +283,7 @@
if err := value.GetProto(&desc); err != nil {
return err
}
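// The key's MVCC timestamp is threaded through so that older descriptors get
// their ModificationTime bootstrapped from it.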
if tableDesc := desc.GetTable(); tableDesc != nil {
if tableDesc := desc.Table(k.Timestamp); tableDesc != nil {
tableDescs = append(tableDescs, tableDesc)
}
}
(remaining changed files not shown)
