restore: add restore auto inc id for incremental restore #29021

Merged 17 commits on Oct 26, 2021
25 changes: 23 additions & 2 deletions br/pkg/restore/client.go
@@ -406,11 +406,12 @@ func (rc *Client) createTable(
dom *domain.Domain,
table *metautil.Table,
newTS uint64,
ddlTables map[UniqueTableName]bool,
Contributor:

Can the value be struct{} instead of bool?

Contributor Author:

If we change it to struct{}, then the next statement in the switch...case would need to change as well: https://github.com/pingcap/tidb/pull/29021/files#diff-6aa3f356317f5e47c13ad7dd73465a1cf4a550052819ffe40448e545bf857bf5R171
I think bool is better.
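
A minimal standalone sketch of the trade-off (hypothetical names, not code from this PR): with a bool value, a missing key reads as the zero value false, so the map lookup can sit directly in a switch case; with struct{}, presence has to be checked with the comma-ok form first.

package main

import "fmt"

// UniqueTableName mirrors the key type used in this PR (copied here only for illustration).
type UniqueTableName struct {
	DB    string
	Table string
}

func main() {
	// With a bool value, a missing key yields the zero value false,
	// so the lookup can be used directly as a switch-case condition.
	ddlTables := map[UniqueTableName]bool{
		{DB: "test", Table: "t"}: true,
	}
	name := UniqueTableName{DB: "test", Table: "missing"}
	switch {
	case ddlTables[name]:
		fmt.Println("table has a create/truncate DDL")
	default:
		fmt.Println("no DDL recorded") // missing key => false => this branch runs
	}

	// With struct{} as the value, presence needs a comma-ok lookup,
	// which cannot be written directly inside the case expression above.
	ddlSet := map[UniqueTableName]struct{}{
		{DB: "test", Table: "t"}: {},
	}
	if _, ok := ddlSet[name]; ok {
		fmt.Println("table has a create/truncate DDL")
	}
}

The struct{} variant would save a byte per entry but gives up the direct case expression, which matches the author's reason for keeping bool.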

) (CreatedTable, error) {
if rc.IsSkipCreateSQL() {
log.Info("skip create table and alter autoIncID", zap.Stringer("table", table.Info.Name))
} else {
err := db.CreateTable(ctx, table)
err := db.CreateTable(ctx, table, ddlTables)
if err != nil {
return CreatedTable{}, errors.Trace(err)
}
@@ -449,6 +450,7 @@ func (rc *Client) GoCreateTables(
// Could we have a smaller size of tables?
log.Info("start create tables")

ddlTables := rc.DDLJobsMap()
if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil {
span1 := span.Tracer().StartSpan("Client.GoCreateTables", opentracing.ChildOf(span.Context()))
defer span1.Finish()
@@ -462,7 +464,7 @@
return c.Err()
default:
}
rt, err := rc.createTable(c, db, dom, t, newTS)
rt, err := rc.createTable(c, db, dom, t, newTS, ddlTables)
if err != nil {
log.Error("create table failed",
zap.Error(err),
@@ -1062,6 +1064,25 @@ func (rc *Client) IsSkipCreateSQL() bool {
return rc.noSchema
}

// DDLJobsMap returns a map[UniqueTableName]bool of < db table, hasCreate/hasTruncate DDL >.
// If we execute some DDLs before creating a table,
// there are two situations that require rebasing the auto increment/random id:
// 1. truncate table: truncate will generate a new id cache.
// 2. create table / create and rename table: the first create table will lock down the id cache,
//    and because we cannot create an onExistReplace table,
//    the final create DDL with the correct auto increment/random id won't be executed.
func (rc *Client) DDLJobsMap() map[UniqueTableName]bool {
m := make(map[UniqueTableName]bool)
for _, job := range rc.ddlJobs {
if job.Type == model.ActionTruncateTable ||
job.Type == model.ActionCreateTable ||
job.Type == model.ActionRenameTable {
m[UniqueTableName{job.SchemaName, job.BinlogInfo.TableInfo.Name.String()}] = true
}
}
return m
}

// PreCheckTableTiFlashReplica checks whether TiFlash replica is less than TiFlash node.
func (rc *Client) PreCheckTableTiFlashReplica(
ctx context.Context,
58 changes: 46 additions & 12 deletions br/pkg/restore/db.go
@@ -22,6 +22,11 @@ type DB struct {
se glue.Session
}

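// UniqueTableName identifies a table by database name plus table name.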
type UniqueTableName struct {
DB string
Table string
}

// NewDB returns a new DB.
func NewDB(g glue.Glue, store kv.Storage) (*DB, error) {
se, err := g.CreateSession(store)
@@ -97,7 +102,7 @@ func (db *DB) CreateDatabase(ctx context.Context, schema *model.DBInfo) error {
}

// CreateTable executes a CREATE TABLE SQL.
func (db *DB) CreateTable(ctx context.Context, table *metautil.Table) error {
func (db *DB) CreateTable(ctx context.Context, table *metautil.Table, ddlTables map[UniqueTableName]bool) error {
err := db.se.CreateTable(ctx, table.DB.Name, table.Info)
if err != nil {
log.Error("create table failed",
@@ -107,7 +112,11 @@ func (db *DB) CreateTable(ctx context.Context, table *metautil.Table) error {
return errors.Trace(err)
}

if table.Info.IsSequence() {
var restoreMetaSQL string
switch {
case table.Info.IsView():
return nil
case table.Info.IsSequence():
setValFormat := fmt.Sprintf("do setval(%s.%s, %%d);",
utils.EncloseName(table.DB.Name.O),
utils.EncloseName(table.Info.Name.O))
@@ -148,8 +157,38 @@ func (db *DB) CreateTable(ctx context.Context, table *metautil.Table) error {
return errors.Trace(err)
}
}
restoreMetaSQL := fmt.Sprintf(setValFormat, table.Info.AutoIncID)
if err = db.se.Execute(ctx, restoreMetaSQL); err != nil {
restoreMetaSQL = fmt.Sprintf(setValFormat, table.Info.AutoIncID)
err = db.se.Execute(ctx, restoreMetaSQL)
if err != nil {
log.Error("restore meta sql failed",
zap.String("query", restoreMetaSQL),
zap.Stringer("db", table.DB.Name),
zap.Stringer("table", table.Info.Name),
zap.Error(err))
return errors.Trace(err)
}
// Only tables that appear in the incremental restore DDL jobs need to be altered after creation.
case ddlTables[UniqueTableName{table.DB.Name.String(), table.Info.Name.String()}]:
if utils.NeedAutoID(table.Info) {
restoreMetaSQL = fmt.Sprintf(
"alter table %s.%s auto_increment = %d;",
utils.EncloseName(table.DB.Name.O),
utils.EncloseName(table.Info.Name.O),
table.Info.AutoIncID)
} else if table.Info.PKIsHandle && table.Info.ContainsAutoRandomBits() {
restoreMetaSQL = fmt.Sprintf(
"alter table %s.%s auto_random_base = %d",
utils.EncloseName(table.DB.Name.O),
utils.EncloseName(table.Info.Name.O),
table.Info.AutoRandID)
} else {
log.Info("table exists in incremental ddl jobs, but don't need to be altered",
zap.Stringer("db", table.DB.Name),
zap.Stringer("table", table.Info.Name))
return nil
}
err = db.se.Execute(ctx, restoreMetaSQL)
if err != nil {
log.Error("restore meta sql failed",
zap.String("query", restoreMetaSQL),
zap.Stringer("db", table.DB.Name),
@@ -196,20 +235,15 @@ func FilterDDLJobs(allDDLJobs []*model.Job, tables []*metautil.Table) (ddlJobs [
}
}

type namePair struct {
db string
table string
}

for _, table := range tables {
tableIDs := make(map[int64]bool)
tableIDs[table.Info.ID] = true
tableNames := make(map[namePair]bool)
name := namePair{table.DB.Name.String(), table.Info.Name.String()}
tableNames := make(map[UniqueTableName]bool)
name := UniqueTableName{table.DB.Name.String(), table.Info.Name.String()}
tableNames[name] = true
for _, job := range allDDLJobs {
if job.BinlogInfo.TableInfo != nil {
name := namePair{job.SchemaName, job.BinlogInfo.TableInfo.Name.String()}
name = UniqueTableName{job.SchemaName, job.BinlogInfo.TableInfo.Name.String()}
if tableIDs[job.TableID] || tableNames[name] {
ddlJobs = append(ddlJobs, job)
tableIDs[job.TableID] = true
24 changes: 22 additions & 2 deletions br/pkg/restore/db_test.go
@@ -98,13 +98,33 @@ func (s *testRestoreSchemaSuite) TestRestoreAutoIncID(c *C) {
table.DB.Collate = "utf8mb4_bin"
err = db.CreateDatabase(context.Background(), table.DB)
c.Assert(err, IsNil, Commentf("Error create empty charset db: %s %s", err, s.mock.DSN))
err = db.CreateTable(context.Background(), &table)
uniqueMap := make(map[restore.UniqueTableName]bool)
err = db.CreateTable(context.Background(), &table, uniqueMap)
c.Assert(err, IsNil, Commentf("Error create table: %s %s", err, s.mock.DSN))

tk.MustExec("use test")
// Check if AutoIncID is altered successfully
autoIncID, err = strconv.ParseUint(tk.MustQuery("admin show `\"t\"` next_row_id").Rows()[0][3].(string), 10, 64)
c.Assert(err, IsNil, Commentf("Error query auto inc id: %s", err))
// Check if AutoIncID is altered successfully.
c.Assert(autoIncID, Equals, uint64(globalAutoID+100))

// Try again; this fails because the table already exists.
table.Info.AutoIncID = globalAutoID + 200
err = db.CreateTable(context.Background(), &table, uniqueMap)
// Check if AutoIncID is not altered.
autoIncID, err = strconv.ParseUint(tk.MustQuery("admin show `\"t\"` next_row_id").Rows()[0][3].(string), 10, 64)
c.Assert(err, IsNil, Commentf("Error query auto inc id: %s", err))
c.Assert(autoIncID, Equals, uint64(globalAutoID+100))

// Try again; this succeeds because the table is in the unique map, so the ALTER path is used.
table.Info.AutoIncID = globalAutoID + 300
uniqueMap[restore.UniqueTableName{"test", "\"t\""}] = true
err = db.CreateTable(context.Background(), &table, uniqueMap)
// Check if AutoIncID is altered to globalAutoID + 300.
autoIncID, err = strconv.ParseUint(tk.MustQuery("admin show `\"t\"` next_row_id").Rows()[0][3].(string), 10, 64)
c.Assert(err, IsNil, Commentf("Error query auto inc id: %s", err))
c.Assert(autoIncID, Equals, uint64(globalAutoID+300))

}

func (s *testRestoreSchemaSuite) TestFilterDDLJobs(c *C) {
6 changes: 3 additions & 3 deletions br/tests/br_300_small_tables/run.sh
@@ -43,7 +43,7 @@ unset BR_LOG_TO_TERM
rm -f $BACKUPMETAV2_LOG
export GO_FAILPOINTS="github.com/pingcap/tidb/br/pkg/task/progress-call-back=return(\"$PROGRESS_FILE\")"
run_br backup db --db "$DB" --log-file $BACKUPMETAV2_LOG -s "local://$TEST_DIR/${DB}v2" --pd $PD_ADDR --use-backupmeta-v2
backupv2_size=`grep "backup data size" "${BACKUPMETAV2_LOG}" | grep -oP '\[\K[^\]]+' | grep "backup data size" | awk -F '=' '{print $2}' | grep -oP '\d*\.\d+'`
backupv2_size=`grep "backup-data-size" "${BACKUPMETAV2_LOG}" | grep -oP '\[\K[^\]]+' | grep "backup-data-size" | awk -F '=' '{print $2}' | grep -oP '\d*\.\d+'`
echo "backup meta v2 backup size is ${backupv2_size}"
export GO_FAILPOINTS=""

@@ -61,7 +61,7 @@ rm -rf $PROGRESS_FILE
echo "backup meta v1 start..."
rm -f $BACKUPMETAV1_LOG
run_br backup db --db "$DB" --log-file $BACKUPMETAV1_LOG -s "local://$TEST_DIR/$DB" --pd $PD_ADDR
backupv1_size=`grep "backup data size" "${BACKUPMETAV1_LOG}" | grep -oP '\[\K[^\]]+' | grep "backup data size" | awk -F '=' '{print $2}' | grep -oP '\d*\.\d+'`
backupv1_size=`grep "backup-data-size" "${BACKUPMETAV1_LOG}" | grep -oP '\[\K[^\]]+' | grep "backup-data-size" | awk -F '=' '{print $2}' | grep -oP '\d*\.\d+'`
echo "backup meta v1 backup size is ${backupv1_size}"


@@ -83,7 +83,7 @@ done
rm -rf $RESTORE_LOG
echo "restore 1/300 of the table start..."
run_br restore table --db $DB --table "sbtest100" --log-file $RESTORE_LOG -s "local://$TEST_DIR/$DB" --pd $PD_ADDR --no-schema
restore_size=`grep "restore data size" "${RESTORE_LOG}" | grep -oP '\[\K[^\]]+' | grep "restore data size" | awk -F '=' '{print $2}' | grep -oP '\d*\.\d+'`
restore_size=`grep "restore-data-size" "${RESTORE_LOG}" | grep -oP '\[\K[^\]]+' | grep "restore-data-size" | awk -F '=' '{print $2}' | grep -oP '\d*\.\d+'`
echo "restore data size is ${restore_size}"

diff=$(calc "$backupv2_size-$restore_size*$TABLES_COUNT")
10 changes: 8 additions & 2 deletions br/tests/br_incremental_ddl/run.sh
@@ -42,14 +42,19 @@ run_sql "CREATE DATABASE ${DB};"
run_sql "CREATE TABLE ${DB}.${TABLE}1 (c2 CHAR(255));"
run_sql "RENAME TABLE ${DB}.${TABLE}1 to ${DB}.${TABLE};"
run_sql "TRUNCATE TABLE ${DB}.${TABLE};"

# create a new table to verify that the alter succeeds after a rename DDL has been executed.
run_sql "CREATE TABLE IF NOT EXISTS ${DB}.${TABLE}_rename (c CHAR(255));"
run_sql "RENAME TABLE ${DB}.${TABLE}_rename to ${DB}.${TABLE}_rename2;"
# insert records
for i in $(seq $ROW_COUNT); do
run_sql "INSERT INTO ${DB}.${TABLE}(c2) VALUES ('$i');"
run_sql "INSERT INTO ${DB}.${TABLE}_rename2(c) VALUES ('$i');"
done
# incremental backup
echo "incremental backup start..."
last_backup_ts=$(run_br validate decode --field="end-version" -s "local://$TEST_DIR/$DB/full" | grep -oE "^[0-9]+")
run_br --pd $PD_ADDR backup table -s "local://$TEST_DIR/$DB/inc" --db $DB -t $TABLE --lastbackupts $last_backup_ts
run_br --pd $PD_ADDR backup db -s "local://$TEST_DIR/$DB/inc" --db $DB --lastbackupts $last_backup_ts

run_sql "DROP DATABASE $DB;"
# full restore
@@ -63,13 +68,14 @@ if [ "${row_count_full}" != "${ROW_COUNT}" ];then
fi
# incremental restore
echo "incremental restore start..."
run_br restore table --db $DB --table $TABLE -s "local://$TEST_DIR/$DB/inc" --pd $PD_ADDR
run_br restore db --db $DB -s "local://$TEST_DIR/$DB/inc" --pd $PD_ADDR
row_count_inc=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}')
# check full restore
if [ "${row_count_inc}" != "${ROW_COUNT}" ];then
echo "TEST: [$TEST_NAME] incremental restore fail on database $DB"
exit 1
fi
run_sql "INSERT INTO ${DB}.${TABLE}(c2) VALUES ('1');"
run_sql "INSERT INTO ${DB}.${TABLE}_rename2(c) VALUES ('1');"

run_sql "DROP DATABASE $DB;"