From 8f7a8cff42aa39ef0202ab94f1dd9e185be2d0d9 Mon Sep 17 00:00:00 2001 From: WizardXiao Date: Thu, 23 Sep 2021 17:37:41 +0800 Subject: [PATCH 01/14] commit-message: update the schema tracker core code about #1895 --- pkg/schema/tracker.go | 197 +++++++++++++++++++++++++++++++++++++++++- syncer/dml.go | 69 +++++++++------ syncer/syncer.go | 13 ++- syncer/syncer_test.go | 4 +- 4 files changed, 250 insertions(+), 33 deletions(-) diff --git a/pkg/schema/tracker.go b/pkg/schema/tracker.go index 63fa12b019..6617dd56c9 100644 --- a/pkg/schema/tracker.go +++ b/pkg/schema/tracker.go @@ -32,6 +32,7 @@ import ( "github.com/pingcap/tidb/session" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/store/mockstore" + "github.com/pingcap/tidb/types" "go.uber.org/zap" "github.com/pingcap/dm/pkg/conn" @@ -55,9 +56,19 @@ var ( // Tracker is used to track schema locally. type Tracker struct { - store kv.Storage - dom *domain.Domain - se session.Session + store kv.Storage + dom *domain.Domain + se session.Session + toIndexes map[string]map[string]*ToIndexes +} + +// ToIndexes is downstream pk/uk info. +type ToIndexes struct { + schemaName string + tableName string + pks []string // include multiple primary key + uks []string // uk/uks + // uksIsNull []bool // uk/uks is null? } // NewTracker creates a new tracker. `sessionCfg` will be set as tracker's session variables if specified, or retrieve @@ -321,3 +332,183 @@ func (tr *Tracker) CreateTableIfNotExists(db, table string, ti *model.TableInfo) func (tr *Tracker) GetSystemVar(name string) (string, bool) { return tr.se.GetSessionVars().GetSystemVar(name) } + +// GetToIndexInfo gets downstream PK Index. +// note. this function will init toIndexes. 
+func (tr *Tracker) GetToIndexInfo(db, table string, originTi *model.TableInfo, tctx *tcontext.Context, task string, tidbConn *conn.BaseConn) (*model.IndexInfo, error) { + if tr.toIndexes == nil { + tr.toIndexes = make(map[string]map[string]*ToIndexes) + } + if dbindexes := tr.toIndexes[db]; dbindexes == nil { + dbindexes = make(map[string]*ToIndexes) + tr.toIndexes[db] = dbindexes + } + index := tr.toIndexes[db][table] + if index == nil { + log.L().Info(fmt.Sprintf("DownStream schema tracker init: %s.%s", db, table)) + index = &ToIndexes{ + schemaName: db, + tableName: table, + pks: make([]string, 0), + uks: make([]string, 0), + // uksIsNull: make([]bool, 0), + } + // tctx := tcontext.NewContext(ctx, log.With(zap.String("component", "schema-tracker"), zap.String("task", task))) + rows, err := tidbConn.QuerySQL(tctx, fmt.Sprintf("SHOW INDEX FROM %s FROM %s", table, db)) + if err != nil { + return nil, err + } + + cols, err := rows.Columns() + if err != nil { + return nil, err + } + // the column of show statement is too many, so make dynamic values for scan + values := make([][]byte, len(cols)) + scans := make([]interface{}, len(cols)) + for i := range values { + scans[i] = &values[i] + } + + for rows.Next() { + if err3 := rows.Scan(scans...); err3 != nil { + return nil, err3 + } + + // Key_name -- 2, Column_name -- 4, Null -- 9 + nonUnique := string(values[1]) // 0 is UK + keyName := string(values[2]) // pk is PRIMARY + columName := string(values[4]) + // isNull := string(values[9]) // Null is YES + + if strings.EqualFold(keyName, "PRIMARY") { + // handle multiple pk + index.pks = append(index.pks, columName) + log.L().Info(fmt.Sprintf("DownStream schema tracker %s.%s Find PK %s", db, table, columName)) + } else if strings.EqualFold(nonUnique, "0") { + index.uks = append(index.uks, columName) + log.L().Info(fmt.Sprintf("DownStream schema tracker %s.%s Find UK %s ", db, table, columName)) + } + } + // nolint:sqlclosecheck + if err := rows.Close(); err != nil { + 
return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + tr.toIndexes[db][table] = index + } + + // construct model.IndexInfo, PK > not null UK + if len(index.pks) != 0 { + // handle multiple pk + columns := make([]*model.IndexColumn, 0, len(index.pks)) + for _, pk := range index.pks { + if orginColumn := model.FindColumnInfo(originTi.Columns, pk); orginColumn != nil { + column := &model.IndexColumn{ + Name: model.NewCIStr(pk), + Offset: orginColumn.Offset, + Length: types.UnspecifiedLength, + } + columns = append(columns, column) + } + } + if len(columns) != 0 { + return &model.IndexInfo{ + Table: model.NewCIStr(table), + Unique: true, + Primary: true, + State: model.StatePublic, + Tp: model.IndexTypeBtree, + Columns: columns, + }, nil + } + } + // else if len(index.uks) != 0 { + // for i := 0; i < len(index.uks); i++ { + // if !index.uksIsNull[i] { + // if originColumn := model.FindColumnInfo(originTi.Columns, index.uks[i]); originColumn != nil { + // return &model.IndexInfo{ + // Table: model.NewCIStr(table), + // Unique: true, + // Primary: false, + // State: model.StatePublic, + // Tp: model.IndexTypeBtree, + // Columns: []*model.IndexColumn{{ + // Name: model.NewCIStr(index.uks[i]), + // Offset: originColumn.Offset, + // Length: types.UnspecifiedLength, + // }}, + // }, nil + // } + // } + // } + // } + + return nil, nil +} + +// GetAvailableUKToIndexInfo gets available downstream UK whose data is not null +// note. this function will not init toIndexes. 
+func (tr *Tracker) GetAvailableUKToIndexInfo(db, table string, originTi *model.TableInfo, data []interface{}) *model.IndexInfo { + if tr.toIndexes == nil || tr.toIndexes[db] == nil || tr.toIndexes[db][table] == nil { + return nil + } + index := tr.toIndexes[db][table] + for i := 0; i < len(index.uks); i++ { + if originColumn := model.FindColumnInfo(originTi.Columns, index.uks[i]); originColumn != nil { + if data[originColumn.Offset] != nil { + return &model.IndexInfo{ + Table: model.NewCIStr(table), + Unique: true, + Primary: false, + State: model.StatePublic, + Tp: model.IndexTypeBtree, + Columns: []*model.IndexColumn{{ + Name: model.NewCIStr(index.uks[i]), + Offset: originColumn.Offset, + Length: types.UnspecifiedLength, + }}, + } + } + } + } + return nil +} + +// SetToIndexNotAvailable set toIndex available is false +// func (tr *Tracker) SetToIndexNotAvailable(db, table string) { + +// if tr.toIndexes == nil || tr.toIndexes[db] == nil || tr.toIndexes[db][table] == nil || !tr.toIndexes[db][table].isAlive { +// return +// } else { +// tr.toIndexes[db][table].isAlive = false +// } +// } + +// TrackToIndex remove schema or table in toIndex. 
+func (tr *Tracker) TrackToIndex(targetTables []*filter.Table) { + if tr.toIndexes == nil || targetTables == nil { + return + } + + for i := 0; i < len(targetTables); i++ { + db := targetTables[i].Schema + table := targetTables[i].Name + if tr.toIndexes[db] == nil { + return + } + if table == "" { + delete(tr.toIndexes, db) + log.L().Info(fmt.Sprintf("Remove downStream schema tracker %s ", db)) + } else { + if tr.toIndexes[db][table] == nil { + return + } + delete(tr.toIndexes[db], table) + log.L().Info(fmt.Sprintf("Remove downStream schema tracker %s.%s ", db, table)) + } + } + +} diff --git a/syncer/dml.go b/syncer/dml.go index 106404c02b..2320571cfe 100644 --- a/syncer/dml.go +++ b/syncer/dml.go @@ -26,6 +26,7 @@ import ( "github.com/pingcap/tidb/expression" "go.uber.org/zap" + tcontext "github.com/pingcap/dm/pkg/context" "github.com/pingcap/dm/pkg/log" "github.com/pingcap/dm/pkg/terror" ) @@ -100,23 +101,30 @@ RowLoop: } func (s *Syncer) genUpdateSQLs( + tctx *tcontext.Context, param *genDMLParam, oldValueFilters []expression.Expression, newValueFilters []expression.Expression, ) ([]string, [][]string, [][]interface{}, error) { var ( - qualifiedName = dbutil.TableName(param.schema, param.table) - data = param.data - originalData = param.originalData - columns = param.columns - ti = param.originalTableInfo - defaultIndexColumns = findFitIndex(ti) - replaceSQL string // `REPLACE INTO` SQL - sqls = make([]string, 0, len(data)/2) - keys = make([][]string, 0, len(data)/2) - values = make([][]interface{}, 0, len(data)/2) + qualifiedName = dbutil.TableName(param.schema, param.table) + data = param.data + originalData = param.originalData + columns = param.columns + ti = param.originalTableInfo + // defaultIndexColumns = findFitIndex(ti) + replaceSQL string // `REPLACE INTO` SQL + sqls = make([]string, 0, len(data)/2) + keys = make([][]string, 0, len(data)/2) + values = make([][]interface{}, 0, len(data)/2) ) + // if downstream pk exits, then use downstream pk + 
defaultIndexColumns, err := s.schemaTracker.GetToIndexInfo(param.schema, param.table, ti, tctx, s.cfg.Name, s.ddlDBConn.BaseConn) + if err != nil { + return nil, nil, nil, err + } + if param.safeMode { replaceSQL = genInsertReplace("REPLACE INTO", qualifiedName, columns) } @@ -167,7 +175,8 @@ RowLoop: } if defaultIndexColumns == nil { - defaultIndexColumns = getAvailableIndexColumn(ti, oriOldValues) + // defaultIndexColumns = getAvailableIndexColumn(ti, oriOldValues) + defaultIndexColumns = s.schemaTracker.GetAvailableUKToIndexInfo(param.schema, param.table, ti, oriOldValues) } ks := genMultipleKeys(ti, oriOldValues, qualifiedName) @@ -218,17 +227,23 @@ RowLoop: return sqls, keys, values, nil } -func (s *Syncer) genDeleteSQLs(param *genDMLParam, filterExprs []expression.Expression) ([]string, [][]string, [][]interface{}, error) { +func (s *Syncer) genDeleteSQLs(tctx *tcontext.Context, param *genDMLParam, filterExprs []expression.Expression) ([]string, [][]string, [][]interface{}, error) { var ( - qualifiedName = dbutil.TableName(param.schema, param.table) - dataSeq = param.originalData - ti = param.originalTableInfo - defaultIndexColumns = findFitIndex(ti) - sqls = make([]string, 0, len(dataSeq)) - keys = make([][]string, 0, len(dataSeq)) - values = make([][]interface{}, 0, len(dataSeq)) + qualifiedName = dbutil.TableName(param.schema, param.table) + dataSeq = param.originalData + ti = param.originalTableInfo + // defaultIndexColumns = findFitIndex(ti) + sqls = make([]string, 0, len(dataSeq)) + keys = make([][]string, 0, len(dataSeq)) + values = make([][]interface{}, 0, len(dataSeq)) ) + // if downstream pk exits, then use downstream pk + defaultIndexColumns, err := s.schemaTracker.GetToIndexInfo(param.schema, param.table, ti, tctx, s.cfg.Name, s.ddlDBConn.BaseConn) + if err != nil { + return nil, nil, nil, err + } + RowLoop: for _, data := range dataSeq { if len(data) != len(ti.Columns) { @@ -249,8 +264,10 @@ RowLoop: } if defaultIndexColumns == nil { - 
defaultIndexColumns = getAvailableIndexColumn(ti, value) + // defaultIndexColumns = getAvailableIndexColumn(ti, value) + defaultIndexColumns = s.schemaTracker.GetAvailableUKToIndexInfo(param.schema, param.table, ti, value) } + ks := genMultipleKeys(ti, value, qualifiedName) sql, value := genDeleteSQL(qualifiedName, value, ti.Columns, defaultIndexColumns) @@ -498,13 +515,13 @@ func findFitIndex(ti *model.TableInfo) *model.IndexInfo { return getSpecifiedIndexColumn(ti, fn) } -func getAvailableIndexColumn(ti *model.TableInfo, data []interface{}) *model.IndexInfo { - fn := func(i int) bool { - return data[i] == nil - } +// func getAvailableIndexColumn(ti *model.TableInfo, data []interface{}) *model.IndexInfo { +// fn := func(i int) bool { +// return data[i] == nil +// } - return getSpecifiedIndexColumn(ti, fn) -} +// return getSpecifiedIndexColumn(ti, fn) +// } func getSpecifiedIndexColumn(ti *model.TableInfo, fn func(i int) bool) *model.IndexInfo { for _, indexCols := range ti.Indices { diff --git a/syncer/syncer.go b/syncer/syncer.go index 5797ca6eaf..ff7b7bcddb 100644 --- a/syncer/syncer.go +++ b/syncer/syncer.go @@ -2223,7 +2223,7 @@ func (s *Syncer) handleRowsEvent(ev *replication.RowsEvent, ec eventContext) err } param.safeMode = ec.safeMode - sqls, keys, args, err = s.genUpdateSQLs(param, oldExprFilter, newExprFilter) + sqls, keys, args, err = s.genUpdateSQLs(ec.tctx, param, oldExprFilter, newExprFilter) if err != nil { return terror.Annotatef(err, "gen update sqls failed, originSchema: %s, originTable: %s, schema: %s, table: %s", originSchema, originTable, schemaName, tableName) } @@ -2236,7 +2236,7 @@ func (s *Syncer) handleRowsEvent(ev *replication.RowsEvent, ec eventContext) err return err2 } - sqls, keys, args, err = s.genDeleteSQLs(param, exprFilter) + sqls, keys, args, err = s.genDeleteSQLs(ec.tctx, param, exprFilter) if err != nil { return terror.Annotatef(err, "gen delete sqls failed, originSchema: %s, originTable: %s, schema: %s, table: %s", 
originSchema, originTable, schemaName, tableName) } @@ -2755,6 +2755,7 @@ func (s *Syncer) trackDDL(usedSchema string, sql string, tableNames [][]*filter. shouldTableExistNum int // tableNames[:shouldTableExistNum] should exist shouldRefTableExistNum int // tableNames[1:shouldTableExistNum] should exist, since first one is "caller table" tryFetchDownstreamTable bool // to make sure if not exists will execute correctly + shouldTrackToIndex bool // track toIndex ) switch node := stmt.(type) { @@ -2765,6 +2766,7 @@ func (s *Syncer) trackDDL(usedSchema string, sql string, tableNames [][]*filter. shouldSchemaExist = true case *ast.DropDatabaseStmt: shouldExecDDLOnSchemaTracker = true + shouldTrackToIndex = true if s.cfg.ShardMode == "" { if err := s.checkpoint.DeleteSchemaPoint(ec.tctx, srcTable.Schema); err != nil { return err @@ -2781,6 +2783,7 @@ func (s *Syncer) trackDDL(usedSchema string, sql string, tableNames [][]*filter. tryFetchDownstreamTable = true case *ast.DropTableStmt: shouldExecDDLOnSchemaTracker = true + shouldTrackToIndex = true if err := s.checkpoint.DeleteTablePoint(ec.tctx, srcTable.Schema, srcTable.Name); err != nil { return err } @@ -2788,8 +2791,10 @@ func (s *Syncer) trackDDL(usedSchema string, sql string, tableNames [][]*filter. shouldExecDDLOnSchemaTracker = true shouldSchemaExist = true shouldTableExistNum = 1 + shouldTrackToIndex = true case *ast.AlterTableStmt: shouldSchemaExist = true + shouldTrackToIndex = true // for DDL that adds FK, since TiDB doesn't fully support it yet, we simply ignore execution of this DDL. switch { case len(node.Specs) == 1 && node.Specs[0].Constraint != nil && node.Specs[0].Constraint.Tp == ast.ConstraintForeignKey: @@ -2808,6 +2813,10 @@ func (s *Syncer) trackDDL(usedSchema string, sql string, tableNames [][]*filter. 
ec.tctx.L().DPanic("unhandled DDL type cannot be tracked", zap.Stringer("type", reflect.TypeOf(stmt))) } + if shouldTrackToIndex { + s.schemaTracker.TrackToIndex(targetTables) + } + if shouldSchemaExist { if err := s.schemaTracker.CreateSchemaIfNotExists(srcTable.Schema); err != nil { return terror.ErrSchemaTrackerCannotCreateSchema.Delegate(err, srcTable.Schema) diff --git a/syncer/syncer_test.go b/syncer/syncer_test.go index ea9aaf700c..7afb716e01 100644 --- a/syncer/syncer_test.go +++ b/syncer/syncer_test.go @@ -939,12 +939,12 @@ func (s *testSyncerSuite) TestGeneratedColumn(c *C) { c.Assert(args[0], DeepEquals, testCase.args[idx]) case replication.UPDATE_ROWS_EVENTv0, replication.UPDATE_ROWS_EVENTv1, replication.UPDATE_ROWS_EVENTv2: // test with sql_mode = false only - sqls, _, args, err = syncer.genUpdateSQLs(param, nil, nil) + sqls, _, args, err = syncer.genUpdateSQLs(tcontext.Background(), param, nil, nil) c.Assert(err, IsNil) c.Assert(sqls[0], Equals, testCase.expected[idx]) c.Assert(args[0], DeepEquals, testCase.args[idx]) case replication.DELETE_ROWS_EVENTv0, replication.DELETE_ROWS_EVENTv1, replication.DELETE_ROWS_EVENTv2: - sqls, _, args, err = syncer.genDeleteSQLs(param, nil) + sqls, _, args, err = syncer.genDeleteSQLs(tcontext.Background(), param, nil) c.Assert(err, IsNil) c.Assert(sqls[0], Equals, testCase.expected[idx]) c.Assert(args[0], DeepEquals, testCase.args[idx]) From 8a0ad438162f9c30e66eca5de453cfc35031475c Mon Sep 17 00:00:00 2001 From: WizardXiao Date: Thu, 23 Sep 2021 18:55:49 +0800 Subject: [PATCH 02/14] save --- pkg/schema/tracker.go | 1 - 1 file changed, 1 deletion(-) diff --git a/pkg/schema/tracker.go b/pkg/schema/tracker.go index 6617dd56c9..8baf4221ba 100644 --- a/pkg/schema/tracker.go +++ b/pkg/schema/tracker.go @@ -510,5 +510,4 @@ func (tr *Tracker) TrackToIndex(targetTables []*filter.Table) { log.L().Info(fmt.Sprintf("Remove downStream schema tracker %s.%s ", db, table)) } } - } From c28676c7fc6e90d9c55623313e16c80abaf6636b Mon 
Sep 17 00:00:00 2001 From: WizardXiao Date: Sun, 26 Sep 2021 16:47:28 +0800 Subject: [PATCH 03/14] commit-message: change track downstream tables by use create table stmt --- pkg/schema/tracker.go | 310 +++++++++++++++++++++++------------------- syncer/dml.go | 10 +- syncer/syncer.go | 2 +- 3 files changed, 174 insertions(+), 148 deletions(-) diff --git a/pkg/schema/tracker.go b/pkg/schema/tracker.go index d3c66b9e5e..8b458aa5ed 100644 --- a/pkg/schema/tracker.go +++ b/pkg/schema/tracker.go @@ -19,6 +19,7 @@ import ( "strings" "github.com/pingcap/errors" + "github.com/pingcap/parser" "github.com/pingcap/parser/ast" "github.com/pingcap/parser/model" "github.com/pingcap/parser/terror" @@ -31,12 +32,12 @@ import ( "github.com/pingcap/tidb/session" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/store/mockstore" - "github.com/pingcap/tidb/types" "go.uber.org/zap" "github.com/pingcap/dm/pkg/conn" tcontext "github.com/pingcap/dm/pkg/context" "github.com/pingcap/dm/pkg/log" + "github.com/pingcap/dm/pkg/utils" ) const ( @@ -55,20 +56,19 @@ var ( // Tracker is used to track schema locally. type Tracker struct { - store kv.Storage - dom *domain.Domain - se session.Session - toIndexes map[string]map[string]*ToIndexes + store kv.Storage + dom *domain.Domain + se session.Session + downstreamTrack map[string]*ast.CreateTableStmt // downstream tracker tableid -> createTableStmt } // ToIndexes is downstream pk/uk info. -type ToIndexes struct { - schemaName string - tableName string - pks []string // include multiple primary key - uks []string // uk/uks - // uksIsNull []bool // uk/uks is null? -} +// type ToIndexes struct { +// tableID string +// pks []string // include multiple primary key +// uks []string // uk/uks +// // uksIsNull []bool // uk/uks is null? +// } // NewTracker creates a new tracker. `sessionCfg` will be set as tracker's session variables if specified, or retrieve // some variable from downstream TiDB using `tidbConn`. 
@@ -341,181 +341,207 @@ func (tr *Tracker) GetSystemVar(name string) (string, bool) { return tr.se.GetSessionVars().GetSystemVar(name) } -// GetToIndexInfo gets downstream PK Index. -// note. this function will init toIndexes. -func (tr *Tracker) GetToIndexInfo(db, table string, originTi *model.TableInfo, tctx *tcontext.Context, task string, tidbConn *conn.BaseConn) (*model.IndexInfo, error) { - if tr.toIndexes == nil { - tr.toIndexes = make(map[string]map[string]*ToIndexes) - } - if dbindexes := tr.toIndexes[db]; dbindexes == nil { - dbindexes = make(map[string]*ToIndexes) - tr.toIndexes[db] = dbindexes +// GetDownStreamIndexInfo gets downstream PK/UK(not null) Index. +// note. this function will init downstreamTrack's table info. +func (tr *Tracker) GetDownStreamIndexInfo(tableID string, originTi *model.TableInfo, tctx *tcontext.Context, task string, tidbConn *conn.BaseConn) (*model.IndexInfo, error) { + if tr.downstreamTrack == nil { + tr.downstreamTrack = make(map[string]*ast.CreateTableStmt) } - index := tr.toIndexes[db][table] - if index == nil { - log.L().Info(fmt.Sprintf("DownStream schema tracker init: %s.%s", db, table)) - index = &ToIndexes{ - schemaName: db, - tableName: table, - pks: make([]string, 0), - uks: make([]string, 0), - // uksIsNull: make([]bool, 0), - } - // tctx := tcontext.NewContext(ctx, log.With(zap.String("component", "schema-tracker"), zap.String("task", task))) - rows, err := tidbConn.QuerySQL(tctx, fmt.Sprintf("SHOW INDEX FROM %s FROM %s", table, db)) - if err != nil { - return nil, err - } + createTableStmt := tr.downstreamTrack[tableID] + if createTableStmt == nil { + + log.L().Info(fmt.Sprintf("DownStream schema tracker init: %s", tableID)) - cols, err := rows.Columns() + rows, err := tidbConn.QuerySQL(tctx, fmt.Sprintf("SHOW CREATE TABLE %s", tableID)) if err != nil { return nil, err } - // the column of show statement is too many, so make dynamic values for scan - values := make([][]byte, len(cols)) - scans := 
make([]interface{}, len(cols)) - for i := range values { - scans[i] = &values[i] - } + + var tableName string + var createStr string for rows.Next() { - if err3 := rows.Scan(scans...); err3 != nil { + if err3 := rows.Scan(&tableName, &createStr); err3 != nil { return nil, err3 } + // parse create table stmt. + parser := parser.New() - // Key_name -- 2, Column_name -- 4, Null -- 9 - nonUnique := string(values[1]) // 0 is UK - keyName := string(values[2]) // pk is PRIMARY - columName := string(values[4]) - // isNull := string(values[9]) // Null is YES - - if strings.EqualFold(keyName, "PRIMARY") { - // handle multiple pk - index.pks = append(index.pks, columName) - log.L().Info(fmt.Sprintf("DownStream schema tracker %s.%s Find PK %s", db, table, columName)) - } else if strings.EqualFold(nonUnique, "0") { - index.uks = append(index.uks, columName) - log.L().Info(fmt.Sprintf("DownStream schema tracker %s.%s Find UK %s ", db, table, columName)) + stmtNode, err := parser.ParseOneStmt(createStr, "", "") + if err != nil { + return nil, err } + createTableStmt = stmtNode.(*ast.CreateTableStmt) } - // nolint:sqlclosecheck + if err := rows.Close(); err != nil { return nil, err } if err := rows.Err(); err != nil { return nil, err } - tr.toIndexes[db][table] = index + tr.downstreamTrack[tableID] = createTableStmt } - // construct model.IndexInfo, PK > not null UK - if len(index.pks) != 0 { - // handle multiple pk - columns := make([]*model.IndexColumn, 0, len(index.pks)) - for _, pk := range index.pks { - if orginColumn := model.FindColumnInfo(originTi.Columns, pk); orginColumn != nil { - column := &model.IndexColumn{ - Name: model.NewCIStr(pk), - Offset: orginColumn.Offset, - Length: types.UnspecifiedLength, - } - columns = append(columns, column) + // get PK/UK from Constraints. + var index *model.IndexInfo + for _, constraint := range createTableStmt.Constraints { + var keys []*ast.IndexPartSpecification + + switch constraint.Tp { + case ast.ConstraintPrimaryKey: // pk. 
+ keys = constraint.Keys + case ast.ConstraintUniq, ast.ConstraintUniqKey, ast.ConstraintUniqIndex: // unique,unique key,unique index. + + if index == nil { + keys = constraint.Keys + } else { + // if index has been found, uk should be jump. + continue } + default: + continue } - if len(columns) != 0 { - return &model.IndexInfo{ - Table: model.NewCIStr(table), - Unique: true, - Primary: true, - State: model.StatePublic, - Tp: model.IndexTypeBtree, - Columns: columns, - }, nil + + if keys != nil { + columns := make([]*model.IndexColumn, 0, len(keys)) + isAllNotNull := true // pk is true, uk should check. + for _, key := range keys { + + // UK should check not null. + if constraint.Tp != ast.ConstraintPrimaryKey { + + for _, column := range createTableStmt.Cols { + if key.Column.Name.String() == column.Name.String() { + hasNotNull := false + for _, option := range column.Options { + if option.Tp == ast.ColumnOptionNotNull { + hasNotNull = true + break + } + } + if !hasNotNull { + isAllNotNull = false + } + break + } + } + } + + if !isAllNotNull { + break + } + + if orginColumn := model.FindColumnInfo(originTi.Columns, key.Column.Name.O); orginColumn != nil { + column := &model.IndexColumn{ + Name: key.Column.Name, + Offset: orginColumn.Offset, + Length: key.Length, + } + columns = append(columns, column) + } + } + + if !isAllNotNull { + continue + } + + if len(columns) != 0 { + if constraint.Tp == ast.ConstraintPrimaryKey { + index = &model.IndexInfo{ + Table: createTableStmt.Table.Name, + Unique: true, + Primary: true, + State: model.StatePublic, + Tp: model.IndexTypeBtree, + Columns: columns, + } + log.L().Debug(fmt.Sprintf("Find DownStream table %s pk %s", tableID, constraint.Name)) + return index, nil // pk > uk. + } + // uk should continiue to find pk. 
+ index = &model.IndexInfo{ + Table: createTableStmt.Table.Name, + Unique: true, + Primary: false, + State: model.StatePublic, + Tp: model.IndexTypeBtree, + Columns: columns, + } + log.L().Debug(fmt.Sprintf("Find DownStream table %s uk(not null) %s", tableID, constraint.Name)) + } } } - // else if len(index.uks) != 0 { - // for i := 0; i < len(index.uks); i++ { - // if !index.uksIsNull[i] { - // if originColumn := model.FindColumnInfo(originTi.Columns, index.uks[i]); originColumn != nil { - // return &model.IndexInfo{ - // Table: model.NewCIStr(table), - // Unique: true, - // Primary: false, - // State: model.StatePublic, - // Tp: model.IndexTypeBtree, - // Columns: []*model.IndexColumn{{ - // Name: model.NewCIStr(index.uks[i]), - // Offset: originColumn.Offset, - // Length: types.UnspecifiedLength, - // }}, - // }, nil - // } - // } - // } - // } - - return nil, nil + + if index == nil { + log.L().Debug(fmt.Sprintf("DownStream table %s has no pk/uk(not null)!", tableID)) + } + + return index, nil } -// GetAvailableUKToIndexInfo gets available downstream UK whose data is not null -// note. this function will not init toIndexes. -func (tr *Tracker) GetAvailableUKToIndexInfo(db, table string, originTi *model.TableInfo, data []interface{}) *model.IndexInfo { - if tr.toIndexes == nil || tr.toIndexes[db] == nil || tr.toIndexes[db][table] == nil { +// GetAvailableDownStreanUKIndexInfo gets available downstream UK whose data is not null. +// note. this function will not init downstreamTrack. 
+func (tr *Tracker) GetAvailableDownStreanUKIndexInfo(tableID string, originTi *model.TableInfo, data []interface{}) *model.IndexInfo { + if tr.downstreamTrack == nil || tr.downstreamTrack[tableID] == nil { return nil } - index := tr.toIndexes[db][table] - for i := 0; i < len(index.uks); i++ { - if originColumn := model.FindColumnInfo(originTi.Columns, index.uks[i]); originColumn != nil { - if data[originColumn.Offset] != nil { + createTableStmt := tr.downstreamTrack[tableID] + for _, constraint := range createTableStmt.Constraints { + + if constraint.Tp == ast.ConstraintUniq || constraint.Tp == ast.ConstraintUniqKey || constraint.Tp == ast.ConstraintUniqIndex { + columns := make([]*model.IndexColumn, 0, len(constraint.Keys)) + for _, key := range constraint.Keys { + if orginColumn := model.FindColumnInfo(originTi.Columns, key.Column.Name.O); orginColumn != nil { + // check data is null. + if columnData := data[orginColumn.Offset]; columnData != nil { + column := &model.IndexColumn{ + Name: key.Column.Name, + Offset: orginColumn.Offset, + Length: key.Length, + } + columns = append(columns, column) + } + } + } + if len(constraint.Keys) == len(columns) { + log.L().Debug(fmt.Sprintf("Find DownStream table %s uk(data not null) %s", tableID, constraint.Name)) return &model.IndexInfo{ - Table: model.NewCIStr(table), + Table: createTableStmt.Table.Name, Unique: true, Primary: false, State: model.StatePublic, Tp: model.IndexTypeBtree, - Columns: []*model.IndexColumn{{ - Name: model.NewCIStr(index.uks[i]), - Offset: originColumn.Offset, - Length: types.UnspecifiedLength, - }}, + Columns: columns, } } } } + log.L().Debug(fmt.Sprintf("DownStream table %s has no pk/uk(even data not null)!", tableID)) return nil } -// SetToIndexNotAvailable set toIndex available is false -// func (tr *Tracker) SetToIndexNotAvailable(db, table string) { - -// if tr.toIndexes == nil || tr.toIndexes[db] == nil || tr.toIndexes[db][table] == nil || !tr.toIndexes[db][table].isAlive { -// return -// } 
else { -// tr.toIndexes[db][table].isAlive = false -// } -// } - -// TrackToIndex remove schema or table in toIndex. -func (tr *Tracker) TrackToIndex(targetTables []*filter.Table) { - if tr.toIndexes == nil || targetTables == nil { +// ReTrackDownStreamIndex just remove schema or table in downstreamTrack. +func (tr *Tracker) ReTrackDownStreamIndex(targetTables []*filter.Table) { + if tr.downstreamTrack == nil || targetTables == nil { return } for i := 0; i < len(targetTables); i++ { - db := targetTables[i].Schema - table := targetTables[i].Name - if tr.toIndexes[db] == nil { - return - } - if table == "" { - delete(tr.toIndexes, db) - log.L().Info(fmt.Sprintf("Remove downStream schema tracker %s ", db)) - } else { - if tr.toIndexes[db][table] == nil { - return + tableID := utils.GenTableID(targetTables[i]) + if tr.downstreamTrack[tableID] == nil { + // handle just have schema + if targetTables[i].Schema != "" && targetTables[i].Name == "" { + for k := range tr.downstreamTrack { + if strings.HasPrefix(k, tableID+".") { + delete(tr.downstreamTrack, k) + } + } + log.L().Info(fmt.Sprintf("Remove downStream schema tracker %s ", targetTables[i].Schema)) } - delete(tr.toIndexes[db], table) - log.L().Info(fmt.Sprintf("Remove downStream schema tracker %s.%s ", db, table)) + } else { + delete(tr.downstreamTrack, tableID) + log.L().Info(fmt.Sprintf("Remove downStream schema tracker %s ", tableID)) } } } diff --git a/syncer/dml.go b/syncer/dml.go index 336cc180eb..96d5e5f269 100644 --- a/syncer/dml.go +++ b/syncer/dml.go @@ -111,7 +111,7 @@ func (s *Syncer) genUpdateSQLs( originalData = param.originalData columns = param.columns ti = param.originalTableInfo - // defaultIndexColumns = findFitIndex(ti) + // defaultIndexColumns = findFitIndex(ti)d replaceSQL string // `REPLACE INTO` SQL sqls = make([]string, 0, len(data)/2) keys = make([][]string, 0, len(data)/2) @@ -119,7 +119,7 @@ func (s *Syncer) genUpdateSQLs( ) // if downstream pk exits, then use downstream pk - 
defaultIndexColumns, err := s.schemaTracker.GetToIndexInfo(param.schema, param.table, ti, tctx, s.cfg.Name, s.ddlDBConn.BaseConn) + defaultIndexColumns, err := s.schemaTracker.GetDownStreamIndexInfo(tableID, ti, tctx, s.cfg.Name, s.ddlDBConn.BaseConn) if err != nil { return nil, nil, nil, err } @@ -175,7 +175,7 @@ RowLoop: if defaultIndexColumns == nil { // defaultIndexColumns = getAvailableIndexColumn(ti, oriOldValues) - defaultIndexColumns = s.schemaTracker.GetAvailableUKToIndexInfo(param.schema, param.table, ti, oriOldValues) + defaultIndexColumns = s.schemaTracker.GetAvailableDownStreanUKIndexInfo(tableID, ti, oriOldValues) } ks := genMultipleKeys(ti, oriOldValues, tableID) @@ -238,7 +238,7 @@ func (s *Syncer) genDeleteSQLs(tctx *tcontext.Context, param *genDMLParam, filte ) // if downstream pk exits, then use downstream pk - defaultIndexColumns, err := s.schemaTracker.GetToIndexInfo(param.schema, param.table, ti, tctx, s.cfg.Name, s.ddlDBConn.BaseConn) + defaultIndexColumns, err := s.schemaTracker.GetDownStreamIndexInfo(tableID, ti, tctx, s.cfg.Name, s.ddlDBConn.BaseConn) if err != nil { return nil, nil, nil, err } @@ -264,7 +264,7 @@ RowLoop: if defaultIndexColumns == nil { // defaultIndexColumns = getAvailableIndexColumn(ti, value) - defaultIndexColumns = s.schemaTracker.GetAvailableUKToIndexInfo(param.schema, param.table, ti, value) + defaultIndexColumns = s.schemaTracker.GetAvailableDownStreanUKIndexInfo(tableID, ti, value) } ks := genMultipleKeys(ti, value, tableID) diff --git a/syncer/syncer.go b/syncer/syncer.go index c0ed10f40d..3c19748992 100644 --- a/syncer/syncer.go +++ b/syncer/syncer.go @@ -2903,7 +2903,7 @@ func (s *Syncer) trackDDL(usedSchema string, sql string, tableNames [][]*filter. 
} if shouldTrackToIndex { - s.schemaTracker.TrackToIndex(targetTables) + s.schemaTracker.ReTrackDownStreamIndex(targetTables) } if shouldSchemaExist { From 246c8874710d05f4a630822d901ecd3092528de5 Mon Sep 17 00:00:00 2001 From: WizardXiao Date: Sun, 26 Sep 2021 18:21:48 +0800 Subject: [PATCH 04/14] commit-message: change track downstream tables by use create table stmt --- syncer/syncer.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/syncer/syncer.go b/syncer/syncer.go index 3c19748992..ed66ca2f20 100644 --- a/syncer/syncer.go +++ b/syncer/syncer.go @@ -2844,7 +2844,7 @@ func (s *Syncer) trackDDL(usedSchema string, sql string, tableNames [][]*filter. shouldTableExistNum int // tableNames[:shouldTableExistNum] should exist shouldRefTableExistNum int // tableNames[1:shouldTableExistNum] should exist, since first one is "caller table" tryFetchDownstreamTable bool // to make sure if not exists will execute correctly - shouldTrackToIndex bool // track toIndex + shouldReTrackDownstreamIndex bool // retrack downstreamIndex ) switch node := stmt.(type) { @@ -2855,7 +2855,7 @@ func (s *Syncer) trackDDL(usedSchema string, sql string, tableNames [][]*filter. shouldSchemaExist = true case *ast.DropDatabaseStmt: shouldExecDDLOnSchemaTracker = true - shouldTrackToIndex = true + shouldReTrackDownstreamIndex = true if s.cfg.ShardMode == "" { if err := s.checkpoint.DeleteSchemaPoint(ec.tctx, srcTable.Schema); err != nil { return err @@ -2872,7 +2872,7 @@ func (s *Syncer) trackDDL(usedSchema string, sql string, tableNames [][]*filter. tryFetchDownstreamTable = true case *ast.DropTableStmt: shouldExecDDLOnSchemaTracker = true - shouldTrackToIndex = true + shouldReTrackDownstreamIndex = true if err := s.checkpoint.DeleteTablePoint(ec.tctx, srcTable); err != nil { return err } @@ -2880,10 +2880,10 @@ func (s *Syncer) trackDDL(usedSchema string, sql string, tableNames [][]*filter. 
shouldExecDDLOnSchemaTracker = true shouldSchemaExist = true shouldTableExistNum = 1 - shouldTrackToIndex = true + shouldReTrackDownstreamIndex = true case *ast.AlterTableStmt: shouldSchemaExist = true - shouldTrackToIndex = true + shouldReTrackDownstreamIndex = true // for DDL that adds FK, since TiDB doesn't fully support it yet, we simply ignore execution of this DDL. switch { case len(node.Specs) == 1 && node.Specs[0].Constraint != nil && node.Specs[0].Constraint.Tp == ast.ConstraintForeignKey: @@ -2902,7 +2902,7 @@ func (s *Syncer) trackDDL(usedSchema string, sql string, tableNames [][]*filter. ec.tctx.L().DPanic("unhandled DDL type cannot be tracked", zap.Stringer("type", reflect.TypeOf(stmt))) } - if shouldTrackToIndex { + if shouldReTrackDownstreamIndex { s.schemaTracker.ReTrackDownStreamIndex(targetTables) } From 03762768d56a52cb26ef7c8d97438ef5187aeafc Mon Sep 17 00:00:00 2001 From: WizardXiao Date: Wed, 29 Sep 2021 12:40:38 +0800 Subject: [PATCH 05/14] commit-message: change track downstream scheam info by TableInfo --- pkg/schema/tracker.go | 393 +++++++++++++++++++++++------------------- syncer/dml.go | 38 ++-- 2 files changed, 236 insertions(+), 195 deletions(-) diff --git a/pkg/schema/tracker.go b/pkg/schema/tracker.go index 8b458aa5ed..e352b8d6c0 100644 --- a/pkg/schema/tracker.go +++ b/pkg/schema/tracker.go @@ -18,10 +18,12 @@ import ( "fmt" "strings" + dterror "github.com/pingcap/dm/pkg/terror" "github.com/pingcap/errors" "github.com/pingcap/parser" "github.com/pingcap/parser/ast" "github.com/pingcap/parser/model" + "github.com/pingcap/parser/mysql" "github.com/pingcap/parser/terror" "github.com/pingcap/tidb-tools/pkg/filter" tidbConfig "github.com/pingcap/tidb/config" @@ -32,6 +34,8 @@ import ( "github.com/pingcap/tidb/session" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/store/mockstore" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/mock" "go.uber.org/zap" "github.com/pingcap/dm/pkg/conn" @@ -56,19 
+60,24 @@ var ( // Tracker is used to track schema locally. type Tracker struct { - store kv.Storage - dom *domain.Domain - se session.Session - downstreamTrack map[string]*ast.CreateTableStmt // downstream tracker tableid -> createTableStmt + store kv.Storage + dom *domain.Domain + se session.Session + downstreamTracker *DownstreamTracker // downstream tracker tableid -> createTableStmt } // ToIndexes is downstream pk/uk info. -// type ToIndexes struct { -// tableID string -// pks []string // include multiple primary key -// uks []string // uk/uks -// // uksIsNull []bool // uk/uks is null? -// } +type DownstreamTracker struct { + stmtParser *parser.Parser // statement parser + tableInfos map[string]*downstreamTableInfo // downstream table infos +} + +// downstreamTableInfo contains tableinfo and index cache +type downstreamTableInfo struct { + tableInfo *model.TableInfo // tableInfo which comes from parse create statement syntaxtree + indexCache *model.IndexInfo // index cache include pk/uk(not null) + availableUKCache []*model.IndexInfo // index cache include uks(data not null) +} // NewTracker creates a new tracker. `sessionCfg` will be set as tracker's session variables if specified, or retrieve // some variable from downstream TiDB using `tidbConn`. @@ -166,10 +175,29 @@ func NewTracker(ctx context.Context, task string, sessionCfg map[string]string, return nil, err } + // init downstreamTracker + downstreamTracker, err := initDownStreamTracker(sessionCfg["sql_mode"]) + if err != nil { + return nil, err + } + return &Tracker{ - store: store, - dom: dom, - se: se, + store: store, + dom: dom, + se: se, + downstreamTracker: downstreamTracker, + }, nil +} + +// initDownStreamTracker init downstream tracker by sql_mode str which comes from "SHOW VARIABLES like %SQL_MODE". 
+func initDownStreamTracker(sqlmode string) (*DownstreamTracker, error) { + stmtParser, err := utils.GetParserFromSQLModeStr(sqlmode) + if err != nil { + return nil, err + } + return &DownstreamTracker{ + stmtParser: stmtParser, + tableInfos: make(map[string]*downstreamTableInfo), }, nil } @@ -343,205 +371,222 @@ func (tr *Tracker) GetSystemVar(name string) (string, bool) { // GetDownStreamIndexInfo gets downstream PK/UK(not null) Index. // note. this function will init downstreamTrack's table info. -func (tr *Tracker) GetDownStreamIndexInfo(tableID string, originTi *model.TableInfo, tctx *tcontext.Context, task string, tidbConn *conn.BaseConn) (*model.IndexInfo, error) { - if tr.downstreamTrack == nil { - tr.downstreamTrack = make(map[string]*ast.CreateTableStmt) - } - createTableStmt := tr.downstreamTrack[tableID] - if createTableStmt == nil { - - log.L().Info(fmt.Sprintf("DownStream schema tracker init: %s", tableID)) - - rows, err := tidbConn.QuerySQL(tctx, fmt.Sprintf("SHOW CREATE TABLE %s", tableID)) +func (tr *Tracker) GetDownStreamIndexInfo(tctx *tcontext.Context, tableID string, originTi *model.TableInfo, downstreamConn *conn.BaseConn) (*model.IndexInfo, error) { + dti, ok := tr.downstreamTracker.tableInfos[tableID] + if !ok { + log.L().Info("DownStream schema tracker init. ", zap.String("tableID", tableID)) + ti, err := tr.getTiByCreateStmt(tctx, tableID, downstreamConn) if err != nil { return nil, err } + dti = getDownStreamTi(ti, originTi) + tr.downstreamTracker.tableInfos[tableID] = dti + } + return dti.indexCache, nil +} - var tableName string - var createStr string +// GetAvailableDownStreanUKIndexInfo gets available downstream UK whose data is not null. +// note. this function will not init downstreamTrack. 
+func (tr *Tracker) GetAvailableDownStreanUKIndexInfo(tableID string, originTi *model.TableInfo, data []interface{}) *model.IndexInfo { + dti, ok := tr.downstreamTracker.tableInfos[tableID] + if !ok || dti.availableUKCache == nil || len(dti.availableUKCache) == 0 { + return nil + } + + // func for check data is not null + fn := func(i int) bool { + return data[i] != nil + } - for rows.Next() { - if err3 := rows.Scan(&tableName, &createStr); err3 != nil { - return nil, err3 + for i, uk := range dti.availableUKCache { + // check uk's column data is not null + if isSpecifiedIndexColumn(uk, fn) { + if i != 0 { + // exchange available uk to the first of the arry to reduce judgements for next row + temp := dti.availableUKCache[0] + dti.availableUKCache[0] = uk + dti.availableUKCache[i] = temp } - // parse create table stmt. - parser := parser.New() + return uk + } + } + return nil +} - stmtNode, err := parser.ParseOneStmt(createStr, "", "") - if err != nil { - return nil, err +// ReTrackDownStreamIndex just remove schema or table in downstreamTrack. 
+func (tr *Tracker) ReTrackDownStreamIndex(targetTables []*filter.Table) { + if targetTables == nil { + return + } + + for i := 0; i < len(targetTables); i++ { + tableID := utils.GenTableID(targetTables[i]) + if tr.downstreamTracker.tableInfos[tableID] == nil { + // handle just have schema + if targetTables[i].Schema != "" && targetTables[i].Name == "" { + for k := range tr.downstreamTracker.tableInfos { + if strings.HasPrefix(k, tableID+".") { + delete(tr.downstreamTracker.tableInfos, k) + } + } + log.L().Info("Remove downStream schema tracker.", zap.String("schema", targetTables[i].Schema)) } - createTableStmt = stmtNode.(*ast.CreateTableStmt) + } else { + delete(tr.downstreamTracker.tableInfos, tableID) + log.L().Info("Remove downStream schema tracker.", zap.String("tableID", tableID)) } + } +} - if err := rows.Close(); err != nil { - return nil, err +// getTiByCreateStmt get downstream tableInfo by "SHOW CREATE TABLE" stmt. +func (tr *Tracker) getTiByCreateStmt(tctx *tcontext.Context, tableID string, downstreamConn *conn.BaseConn) (*model.TableInfo, error) { + querySQL := fmt.Sprintf("SHOW CREATE TABLE %s", tableID) + rows, err := downstreamConn.QuerySQL(tctx, querySQL) + if err != nil { + return nil, dterror.DBErrorAdapt(err, dterror.ErrDBDriverError) + } + var tableName, createStr string + if rows.Next() { + if err = rows.Scan(&tableName, &createStr); err != nil { + return nil, dterror.DBErrorAdapt(rows.Err(), dterror.ErrDBDriverError) } - if err := rows.Err(); err != nil { - return nil, err + if err = rows.Close(); err != nil { + return nil, dterror.DBErrorAdapt(rows.Err(), dterror.ErrDBDriverError) + } + if err = rows.Err(); err != nil { + return nil, dterror.DBErrorAdapt(rows.Err(), dterror.ErrDBDriverError) } - tr.downstreamTrack[tableID] = createTableStmt } - // get PK/UK from Constraints. 
- var index *model.IndexInfo - for _, constraint := range createTableStmt.Constraints { - var keys []*ast.IndexPartSpecification - - switch constraint.Tp { - case ast.ConstraintPrimaryKey: // pk. - keys = constraint.Keys - case ast.ConstraintUniq, ast.ConstraintUniqKey, ast.ConstraintUniqIndex: // unique,unique key,unique index. - - if index == nil { - keys = constraint.Keys - } else { - // if index has been found, uk should be jump. - continue + log.L().Info("Show create table info", zap.String("tableID", tableID), zap.String("create string", createStr)) + // parse create table stmt. + stmtNode, err := tr.downstreamTracker.stmtParser.ParseOneStmt(createStr, "", "") + if err != nil { + // maybe sql_mode is not matching,Reacquire a parser + newParser, err := utils.GetParserForConn(tctx.Ctx, downstreamConn.DBConn) + if err != nil { + return nil, dterror.ErrParseSQL.Delegate(err, createStr) + } else { + stmtNode, err = newParser.ParseOneStmt(createStr, "", "") + if err != nil { + return nil, dterror.ErrParseSQL.Delegate(err, createStr) } - default: - continue + tr.downstreamTracker.stmtParser = newParser } + } - if keys != nil { - columns := make([]*model.IndexColumn, 0, len(keys)) - isAllNotNull := true // pk is true, uk should check. - for _, key := range keys { - - // UK should check not null. 
- if constraint.Tp != ast.ConstraintPrimaryKey { - - for _, column := range createTableStmt.Cols { - if key.Column.Name.String() == column.Name.String() { - hasNotNull := false - for _, option := range column.Options { - if option.Tp == ast.ColumnOptionNotNull { - hasNotNull = true - break - } - } - if !hasNotNull { - isAllNotNull = false - } - break - } - } - } - - if !isAllNotNull { - break - } + ti, err := ddl.MockTableInfo(mock.NewContext(), stmtNode.(*ast.CreateTableStmt), 111) + if err != nil { + return nil, dterror.ErrParseSQL.Delegate(err, createStr) + } + return ti, nil +} - if orginColumn := model.FindColumnInfo(originTi.Columns, key.Column.Name.O); orginColumn != nil { - column := &model.IndexColumn{ - Name: key.Column.Name, - Offset: orginColumn.Offset, - Length: key.Length, - } - columns = append(columns, column) - } - } +// getDownStreamTi constructs downstreamTable index cache by tableinfo +func getDownStreamTi(ti *model.TableInfo, originTi *model.TableInfo) *downstreamTableInfo { + var ( + indexCache *model.IndexInfo + availableUKCache []*model.IndexInfo = make([]*model.IndexInfo, 0, len(ti.Indices)) + hasPk bool = false + ) + + // func for check not null constraint + fn := func(i int) bool { + return mysql.HasNotNullFlag(ti.Columns[i].Flag) + } - if !isAllNotNull { - continue + for _, idx := range ti.Indices { + if idx.Primary { + indexCache = idx + hasPk = true + } else if idx.Unique { + // second check not null unique key + if isSpecifiedIndexColumn(idx, fn) { + indexCache = idx + } else { + availableUKCache = append(availableUKCache, idx) } + } + } - if len(columns) != 0 { - if constraint.Tp == ast.ConstraintPrimaryKey { - index = &model.IndexInfo{ - Table: createTableStmt.Table.Name, - Unique: true, - Primary: true, - State: model.StatePublic, - Tp: model.IndexTypeBtree, - Columns: columns, - } - log.L().Debug(fmt.Sprintf("Find DownStream table %s pk %s", tableID, constraint.Name)) - return index, nil // pk > uk. 
- } - // uk should continiue to find pk. - index = &model.IndexInfo{ - Table: createTableStmt.Table.Name, - Unique: true, - Primary: false, - State: model.StatePublic, - Tp: model.IndexTypeBtree, - Columns: columns, - } - log.L().Debug(fmt.Sprintf("Find DownStream table %s uk(not null) %s", tableID, constraint.Name)) - } + // handle pk exceptional case. + // e.g. "create table t(a int primary key, b int)". + if !hasPk { + exPk := handlePkExCase(ti) + if exPk != nil { + indexCache = exPk } } - if index == nil { - log.L().Debug(fmt.Sprintf("DownStream table %s has no pk/uk(not null)!", tableID)) + // redirect column offset as originTi + indexCache = redirectIndexKeys(indexCache, originTi) + for i, uk := range availableUKCache { + availableUKCache[i] = redirectIndexKeys(uk, originTi) } - return index, nil + return &downstreamTableInfo{ + tableInfo: ti, + indexCache: indexCache, + availableUKCache: availableUKCache, + } } -// GetAvailableDownStreanUKIndexInfo gets available downstream UK whose data is not null. -// note. this function will not init downstreamTrack. -func (tr *Tracker) GetAvailableDownStreanUKIndexInfo(tableID string, originTi *model.TableInfo, data []interface{}) *model.IndexInfo { - if tr.downstreamTrack == nil || tr.downstreamTrack[tableID] == nil { +// redirectIndexKeys redirect index's columns offset in origin tableinfo +func redirectIndexKeys(index *model.IndexInfo, originTi *model.TableInfo) *model.IndexInfo { + if index == nil || originTi == nil { return nil } - createTableStmt := tr.downstreamTrack[tableID] - for _, constraint := range createTableStmt.Constraints { - - if constraint.Tp == ast.ConstraintUniq || constraint.Tp == ast.ConstraintUniqKey || constraint.Tp == ast.ConstraintUniqIndex { - columns := make([]*model.IndexColumn, 0, len(constraint.Keys)) - for _, key := range constraint.Keys { - if orginColumn := model.FindColumnInfo(originTi.Columns, key.Column.Name.O); orginColumn != nil { - // check data is null. 
- if columnData := data[orginColumn.Offset]; columnData != nil { - column := &model.IndexColumn{ - Name: key.Column.Name, - Offset: orginColumn.Offset, - Length: key.Length, - } - columns = append(columns, column) - } - } - } - if len(constraint.Keys) == len(columns) { - log.L().Debug(fmt.Sprintf("Find DownStream table %s uk(data not null) %s", tableID, constraint.Name)) - return &model.IndexInfo{ - Table: createTableStmt.Table.Name, - Unique: true, - Primary: false, - State: model.StatePublic, - Tp: model.IndexTypeBtree, - Columns: columns, - } + + columns := make([]*model.IndexColumn, 0, len(index.Columns)) + for _, key := range index.Columns { + if originColumn := model.FindColumnInfo(originTi.Columns, key.Name.O); originColumn != nil { + column := &model.IndexColumn{ + Name: key.Name, + Offset: originColumn.Offset, + Length: key.Length, } + columns = append(columns, column) + } + } + if len(columns) == len(index.Columns) { + return &model.IndexInfo{ + Table: index.Table, + Unique: index.Unique, + Primary: index.Primary, + State: index.State, + Tp: index.Tp, + Columns: columns, } } - log.L().Debug(fmt.Sprintf("DownStream table %s has no pk/uk(even data not null)!", tableID)) return nil } -// ReTrackDownStreamIndex just remove schema or table in downstreamTrack. -func (tr *Tracker) ReTrackDownStreamIndex(targetTables []*filter.Table) { - if tr.downstreamTrack == nil || targetTables == nil { - return +// handlePkExCase is handle pk exceptional case. +// e.g. "create table t(a int primary key, b int)". 
+func handlePkExCase(ti *model.TableInfo) *model.IndexInfo { + if pk := ti.GetPkColInfo(); pk != nil { + return &model.IndexInfo{ + Table: ti.Name, + Unique: true, + Primary: true, + State: model.StatePublic, + Tp: model.IndexTypeBtree, + Columns: []*model.IndexColumn{{ + Name: pk.Name, + Offset: pk.Offset, + Length: types.UnspecifiedLength, + }}, + } } + return nil +} - for i := 0; i < len(targetTables); i++ { - tableID := utils.GenTableID(targetTables[i]) - if tr.downstreamTrack[tableID] == nil { - // handle just have schema - if targetTables[i].Schema != "" && targetTables[i].Name == "" { - for k := range tr.downstreamTrack { - if strings.HasPrefix(k, tableID+".") { - delete(tr.downstreamTrack, k) - } - } - log.L().Info(fmt.Sprintf("Remove downStream schema tracker %s ", targetTables[i].Schema)) - } - } else { - delete(tr.downstreamTrack, tableID) - log.L().Info(fmt.Sprintf("Remove downStream schema tracker %s ", tableID)) +// isSpecifiedIndexColumn checks all of index's columns are matching 'fn' +func isSpecifiedIndexColumn(index *model.IndexInfo, fn func(i int) bool) bool { + for _, col := range index.Columns { + if !fn(col.Offset) { + return false } } + return true } diff --git a/syncer/dml.go b/syncer/dml.go index 96d5e5f269..ecf44f2740 100644 --- a/syncer/dml.go +++ b/syncer/dml.go @@ -111,15 +111,14 @@ func (s *Syncer) genUpdateSQLs( originalData = param.originalData columns = param.columns ti = param.originalTableInfo - // defaultIndexColumns = findFitIndex(ti)d - replaceSQL string // `REPLACE INTO` SQL - sqls = make([]string, 0, len(data)/2) - keys = make([][]string, 0, len(data)/2) - values = make([][]interface{}, 0, len(data)/2) + replaceSQL string // `REPLACE INTO` SQL + sqls = make([]string, 0, len(data)/2) + keys = make([][]string, 0, len(data)/2) + values = make([][]interface{}, 0, len(data)/2) ) - // if downstream pk exits, then use downstream pk - defaultIndexColumns, err := s.schemaTracker.GetDownStreamIndexInfo(tableID, ti, tctx, s.cfg.Name, 
s.ddlDBConn.BaseConn) + // if downstream pk/uk(not null) exits, then use downstream pk/uk(not null) + defaultIndexColumns, err := s.schemaTracker.GetDownStreamIndexInfo(tctx, tableID, ti, s.ddlDBConn.BaseConn) if err != nil { return nil, nil, nil, err } @@ -174,7 +173,6 @@ RowLoop: } if defaultIndexColumns == nil { - // defaultIndexColumns = getAvailableIndexColumn(ti, oriOldValues) defaultIndexColumns = s.schemaTracker.GetAvailableDownStreanUKIndexInfo(tableID, ti, oriOldValues) } @@ -231,14 +229,13 @@ func (s *Syncer) genDeleteSQLs(tctx *tcontext.Context, param *genDMLParam, filte tableID = param.tableID dataSeq = param.originalData ti = param.originalTableInfo - // defaultIndexColumns = findFitIndex(ti) - sqls = make([]string, 0, len(dataSeq)) - keys = make([][]string, 0, len(dataSeq)) - values = make([][]interface{}, 0, len(dataSeq)) + sqls = make([]string, 0, len(dataSeq)) + keys = make([][]string, 0, len(dataSeq)) + values = make([][]interface{}, 0, len(dataSeq)) ) - // if downstream pk exits, then use downstream pk - defaultIndexColumns, err := s.schemaTracker.GetDownStreamIndexInfo(tableID, ti, tctx, s.cfg.Name, s.ddlDBConn.BaseConn) + // if downstream pk/uk(not null) exits, then use downstream pk/uk(not null) + defaultIndexColumns, err := s.schemaTracker.GetDownStreamIndexInfo(tctx, tableID, ti, s.ddlDBConn.BaseConn) if err != nil { return nil, nil, nil, err } @@ -263,7 +260,6 @@ RowLoop: } if defaultIndexColumns == nil { - // defaultIndexColumns = getAvailableIndexColumn(ti, value) defaultIndexColumns = s.schemaTracker.GetAvailableDownStreanUKIndexInfo(tableID, ti, value) } ks := genMultipleKeys(ti, value, tableID) @@ -513,13 +509,13 @@ func findFitIndex(ti *model.TableInfo) *model.IndexInfo { return getSpecifiedIndexColumn(ti, fn) } -// func getAvailableIndexColumn(ti *model.TableInfo, data []interface{}) *model.IndexInfo { -// fn := func(i int) bool { -// return data[i] == nil -// } +func getAvailableIndexColumn(ti *model.TableInfo, data []interface{}) 
*model.IndexInfo { + fn := func(i int) bool { + return data[i] == nil + } -// return getSpecifiedIndexColumn(ti, fn) -// } + return getSpecifiedIndexColumn(ti, fn) +} func getSpecifiedIndexColumn(ti *model.TableInfo, fn func(i int) bool) *model.IndexInfo { for _, indexCols := range ti.Indices { From 823c0d12ec1ed465becbe84b020eec368edc451b Mon Sep 17 00:00:00 2001 From: WizardXiao Date: Wed, 29 Sep 2021 15:21:17 +0800 Subject: [PATCH 06/14] commit-message: use TableInfo and index cache to track downstream schema --- pkg/schema/tracker.go | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/pkg/schema/tracker.go b/pkg/schema/tracker.go index e352b8d6c0..1449b6f79e 100644 --- a/pkg/schema/tracker.go +++ b/pkg/schema/tracker.go @@ -66,7 +66,7 @@ type Tracker struct { downstreamTracker *DownstreamTracker // downstream tracker tableid -> createTableStmt } -// ToIndexes is downstream pk/uk info. +// DownstreamTracker tracks downstream schema. type DownstreamTracker struct { stmtParser *parser.Parser // statement parser tableInfos map[string]*downstreamTableInfo // downstream table infos @@ -466,13 +466,15 @@ func (tr *Tracker) getTiByCreateStmt(tctx *tcontext.Context, tableID string, dow newParser, err := utils.GetParserForConn(tctx.Ctx, downstreamConn.DBConn) if err != nil { return nil, dterror.ErrParseSQL.Delegate(err, createStr) - } else { - stmtNode, err = newParser.ParseOneStmt(createStr, "", "") - if err != nil { - return nil, dterror.ErrParseSQL.Delegate(err, createStr) - } - tr.downstreamTracker.stmtParser = newParser } + + stmtNode, err = newParser.ParseOneStmt(createStr, "", "") + if err != nil { + return nil, dterror.ErrParseSQL.Delegate(err, createStr) + } + + tr.downstreamTracker.stmtParser = newParser + } ti, err := ddl.MockTableInfo(mock.NewContext(), stmtNode.(*ast.CreateTableStmt), 111) From 47d10cb52547c113b161cf82e8d9cd0e8c909927 Mon Sep 17 00:00:00 2001 From: WizardXiao Date: Wed, 29 Sep 2021 15:21:17 +0800 Subject: 
[PATCH 07/14] commit-message: use TableInfo and index cache to track downstream schema --- pkg/schema/tracker.go | 250 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 247 insertions(+), 3 deletions(-) diff --git a/pkg/schema/tracker.go b/pkg/schema/tracker.go index ee96c3de71..4a2117c6be 100644 --- a/pkg/schema/tracker.go +++ b/pkg/schema/tracker.go @@ -18,7 +18,9 @@ import ( "fmt" "strings" + dterror "github.com/pingcap/dm/pkg/terror" "github.com/pingcap/errors" + "github.com/pingcap/parser" "github.com/pingcap/parser/ast" "github.com/pingcap/parser/model" "github.com/pingcap/parser/terror" @@ -28,14 +30,18 @@ import ( "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/mysql" "github.com/pingcap/tidb/session" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/store/mockstore" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/mock" "go.uber.org/zap" "github.com/pingcap/dm/pkg/conn" tcontext "github.com/pingcap/dm/pkg/context" "github.com/pingcap/dm/pkg/log" + "github.com/pingcap/dm/pkg/utils" ) const ( @@ -54,9 +60,23 @@ var ( // Tracker is used to track schema locally. type Tracker struct { - store kv.Storage - dom *domain.Domain - se session.Session + store kv.Storage + dom *domain.Domain + se session.Session + downstreamTracker *DownstreamTracker // downstream tracker tableid -> createTableStmt +} + +// DownstreamTracker tracks downstream schema. 
+type DownstreamTracker struct { + stmtParser *parser.Parser // statement parser + tableInfos map[string]*downstreamTableInfo // downstream table infos +} + +// downstreamTableInfo contains tableinfo and index cache +type downstreamTableInfo struct { + tableInfo *model.TableInfo // tableInfo which comes from parse create statement syntaxtree + indexCache *model.IndexInfo // index cache include pk/uk(not null) + availableUKCache []*model.IndexInfo // index cache include uks(data not null) } // NewTracker creates a new tracker. `sessionCfg` will be set as tracker's session variables if specified, or retrieve @@ -329,3 +349,227 @@ func (tr *Tracker) CreateTableIfNotExists(table *filter.Table, ti *model.TableIn func (tr *Tracker) GetSystemVar(name string) (string, bool) { return tr.se.GetSessionVars().GetSystemVar(name) } + +// GetDownStreamIndexInfo gets downstream PK/UK(not null) Index. +// note. this function will init downstreamTrack's table info. +func (tr *Tracker) GetDownStreamIndexInfo(tctx *tcontext.Context, tableID string, originTi *model.TableInfo, downstreamConn *conn.BaseConn) (*model.IndexInfo, error) { + dti, ok := tr.downstreamTracker.tableInfos[tableID] + if !ok { + log.L().Info("DownStream schema tracker init. ", zap.String("tableID", tableID)) + ti, err := tr.getTiByCreateStmt(tctx, tableID, downstreamConn) + if err != nil { + return nil, err + } + dti = getDownStreamTi(ti, originTi) + tr.downstreamTracker.tableInfos[tableID] = dti + } + return dti.indexCache, nil +} + +// GetAvailableDownStreanUKIndexInfo gets available downstream UK whose data is not null. +// note. this function will not init downstreamTrack. 
+func (tr *Tracker) GetAvailableDownStreanUKIndexInfo(tableID string, originTi *model.TableInfo, data []interface{}) *model.IndexInfo { + dti, ok := tr.downstreamTracker.tableInfos[tableID] + if !ok || dti.availableUKCache == nil || len(dti.availableUKCache) == 0 { + return nil + } + + // func for check data is not null + fn := func(i int) bool { + return data[i] != nil + } + + for i, uk := range dti.availableUKCache { + // check uk's column data is not null + if isSpecifiedIndexColumn(uk, fn) { + if i != 0 { + // exchange available uk to the first of the arry to reduce judgements for next row + temp := dti.availableUKCache[0] + dti.availableUKCache[0] = uk + dti.availableUKCache[i] = temp + } + return uk + } + } + return nil +} + +// ReTrackDownStreamIndex just remove schema or table in downstreamTrack. +func (tr *Tracker) ReTrackDownStreamIndex(targetTables []*filter.Table) { + if targetTables == nil { + return + } + + for i := 0; i < len(targetTables); i++ { + tableID := utils.GenTableID(targetTables[i]) + if tr.downstreamTracker.tableInfos[tableID] == nil { + // handle just have schema + if targetTables[i].Schema != "" && targetTables[i].Name == "" { + for k := range tr.downstreamTracker.tableInfos { + if strings.HasPrefix(k, tableID+".") { + delete(tr.downstreamTracker.tableInfos, k) + } + } + log.L().Info("Remove downStream schema tracker.", zap.String("schema", targetTables[i].Schema)) + } + } else { + delete(tr.downstreamTracker.tableInfos, tableID) + log.L().Info("Remove downStream schema tracker.", zap.String("tableID", tableID)) + } + } +} + +// getTiByCreateStmt get downstream tableInfo by "SHOW CREATE TABLE" stmt. 
+func (tr *Tracker) getTiByCreateStmt(tctx *tcontext.Context, tableID string, downstreamConn *conn.BaseConn) (*model.TableInfo, error) { + querySQL := fmt.Sprintf("SHOW CREATE TABLE %s", tableID) + rows, err := downstreamConn.QuerySQL(tctx, querySQL) + if err != nil { + return nil, dterror.DBErrorAdapt(err, dterror.ErrDBDriverError) + } + var tableName, createStr string + if rows.Next() { + if err = rows.Scan(&tableName, &createStr); err != nil { + return nil, dterror.DBErrorAdapt(rows.Err(), dterror.ErrDBDriverError) + } + if err = rows.Close(); err != nil { + return nil, dterror.DBErrorAdapt(rows.Err(), dterror.ErrDBDriverError) + } + if err = rows.Err(); err != nil { + return nil, dterror.DBErrorAdapt(rows.Err(), dterror.ErrDBDriverError) + } + } + + log.L().Info("Show create table info", zap.String("tableID", tableID), zap.String("create string", createStr)) + // parse create table stmt. + stmtNode, err := tr.downstreamTracker.stmtParser.ParseOneStmt(createStr, "", "") + if err != nil { + // maybe sql_mode is not matching,Reacquire a parser + newParser, err := utils.GetParserForConn(tctx.Ctx, downstreamConn.DBConn) + if err != nil { + return nil, dterror.ErrParseSQL.Delegate(err, createStr) + } + + stmtNode, err = newParser.ParseOneStmt(createStr, "", "") + if err != nil { + return nil, dterror.ErrParseSQL.Delegate(err, createStr) + } + + tr.downstreamTracker.stmtParser = newParser + + } + + ti, err := ddl.MockTableInfo(mock.NewContext(), stmtNode.(*ast.CreateTableStmt), 111) + if err != nil { + return nil, dterror.ErrParseSQL.Delegate(err, createStr) + } + return ti, nil +} + +// getDownStreamTi constructs downstreamTable index cache by tableinfo +func getDownStreamTi(ti *model.TableInfo, originTi *model.TableInfo) *downstreamTableInfo { + var ( + indexCache *model.IndexInfo + availableUKCache []*model.IndexInfo = make([]*model.IndexInfo, 0, len(ti.Indices)) + hasPk bool = false + ) + + // func for check not null constraint + fn := func(i int) bool { + return 
mysql.HasNotNullFlag(ti.Columns[i].Flag) + } + + for _, idx := range ti.Indices { + if idx.Primary { + indexCache = idx + hasPk = true + } else if idx.Unique { + // second check not null unique key + if isSpecifiedIndexColumn(idx, fn) { + indexCache = idx + } else { + availableUKCache = append(availableUKCache, idx) + } + } + } + + // handle pk exceptional case. + // e.g. "create table t(a int primary key, b int)". + if !hasPk { + exPk := handlePkExCase(ti) + if exPk != nil { + indexCache = exPk + } + } + + // redirect column offset as originTi + indexCache = redirectIndexKeys(indexCache, originTi) + for i, uk := range availableUKCache { + availableUKCache[i] = redirectIndexKeys(uk, originTi) + } + + return &downstreamTableInfo{ + tableInfo: ti, + indexCache: indexCache, + availableUKCache: availableUKCache, + } +} + +// redirectIndexKeys redirect index's columns offset in origin tableinfo +func redirectIndexKeys(index *model.IndexInfo, originTi *model.TableInfo) *model.IndexInfo { + if index == nil || originTi == nil { + return nil + } + + columns := make([]*model.IndexColumn, 0, len(index.Columns)) + for _, key := range index.Columns { + if originColumn := model.FindColumnInfo(originTi.Columns, key.Name.O); originColumn != nil { + column := &model.IndexColumn{ + Name: key.Name, + Offset: originColumn.Offset, + Length: key.Length, + } + columns = append(columns, column) + } + } + if len(columns) == len(index.Columns) { + return &model.IndexInfo{ + Table: index.Table, + Unique: index.Unique, + Primary: index.Primary, + State: index.State, + Tp: index.Tp, + Columns: columns, + } + } + return nil +} + +// handlePkExCase is handle pk exceptional case. +// e.g. "create table t(a int primary key, b int)". 
+func handlePkExCase(ti *model.TableInfo) *model.IndexInfo { + if pk := ti.GetPkColInfo(); pk != nil { + return &model.IndexInfo{ + Table: ti.Name, + Unique: true, + Primary: true, + State: model.StatePublic, + Tp: model.IndexTypeBtree, + Columns: []*model.IndexColumn{{ + Name: pk.Name, + Offset: pk.Offset, + Length: types.UnspecifiedLength, + }}, + } + } + return nil +} + +// isSpecifiedIndexColumn checks all of index's columns are matching 'fn' +func isSpecifiedIndexColumn(index *model.IndexInfo, fn func(i int) bool) bool { + for _, col := range index.Columns { + if !fn(col.Offset) { + return false + } + } + return true +} From 982e5c3d3c43d513b91bcafe823feab1899bf2c3 Mon Sep 17 00:00:00 2001 From: WizardXiao Date: Wed, 13 Oct 2021 14:11:12 +0800 Subject: [PATCH 08/14] commit-message: fix fmt and retest ut --- pkg/schema/tracker.go | 45 +++++++++++++++++--------------------- pkg/schema/tracker_test.go | 26 +++++++++++----------- syncer/dml.go | 8 ------- syncer/syncer_test.go | 2 -- 4 files changed, 33 insertions(+), 48 deletions(-) diff --git a/pkg/schema/tracker.go b/pkg/schema/tracker.go index 185774266f..95fd6ffb8c 100644 --- a/pkg/schema/tracker.go +++ b/pkg/schema/tracker.go @@ -19,11 +19,6 @@ import ( "strings" "sync" - "github.com/pingcap/dm/pkg/conn" - tcontext "github.com/pingcap/dm/pkg/context" - "github.com/pingcap/dm/pkg/log" - dterror "github.com/pingcap/dm/pkg/terror" - "github.com/pingcap/dm/pkg/utils" "github.com/pingcap/errors" "github.com/pingcap/parser" "github.com/pingcap/parser/ast" @@ -42,12 +37,18 @@ import ( "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/mock" "go.uber.org/zap" + + "github.com/pingcap/dm/pkg/conn" + tcontext "github.com/pingcap/dm/pkg/context" + "github.com/pingcap/dm/pkg/log" + dterror "github.com/pingcap/dm/pkg/terror" + "github.com/pingcap/dm/pkg/utils" ) const ( // TiDBClusteredIndex is the variable name for clustered index. 
TiDBClusteredIndex = "tidb_enable_clustered_index" - //downstream mock table id, consists of serial numbers of letters. + // downstream mock table id, consists of serial numbers of letters. mockTableID = 121402101900011104 ) @@ -74,7 +75,7 @@ type DownstreamTracker struct { tableInfos sync.Map // downstream table infos } -// downstreamTableInfo contains tableinfo and index cache +// downstreamTableInfo contains tableinfo and index cache. type downstreamTableInfo struct { tableInfo *model.TableInfo // tableInfo which comes from parse create statement syntaxtree indexCache *model.IndexInfo // index cache include pk/uk(not null) @@ -192,13 +193,13 @@ func NewTracker(ctx context.Context, task string, sessionCfg map[string]string, } // initDownStreamTracker init downstream tracker by sql_mode str which comes from "SHOW VARIABLES like %SQL_MODE". -func initDownStreamTracker(ctx context.Context, tidbConn *conn.BaseConn, sqlmode string) (*DownstreamTracker, error) { +func initDownStreamTracker(ctx context.Context, downStreamConn *conn.BaseConn, sqlmode string) (*DownstreamTracker, error) { var stmtParser *parser.Parser var err error if sqlmode != "" { stmtParser, err = utils.GetParserFromSQLModeStr(sqlmode) } else { - stmtParser, err = utils.GetParserForConn(ctx, tidbConn.DBConn) + stmtParser, err = utils.GetParserForConn(ctx, downStreamConn.DBConn) } if err != nil { return nil, err @@ -455,17 +456,12 @@ func (tr *Tracker) getTiByCreateStmt(tctx *tcontext.Context, tableID string, dow if err != nil { return nil, dterror.DBErrorAdapt(err, dterror.ErrDBDriverError) } + defer rows.Close() var tableName, createStr string if rows.Next() { if err = rows.Scan(&tableName, &createStr); err != nil { return nil, dterror.DBErrorAdapt(rows.Err(), dterror.ErrDBDriverError) } - if err = rows.Close(); err != nil { - return nil, dterror.DBErrorAdapt(rows.Err(), dterror.ErrDBDriverError) - } - if err = rows.Err(); err != nil { - return nil, dterror.DBErrorAdapt(rows.Err(), 
dterror.ErrDBDriverError) - } } log.L().Info("Show create table info", zap.String("tableID", tableID), zap.String("create string", createStr)) @@ -473,18 +469,17 @@ func (tr *Tracker) getTiByCreateStmt(tctx *tcontext.Context, tableID string, dow stmtNode, err := tr.downstreamTracker.stmtParser.ParseOneStmt(createStr, "", "") if err != nil { // maybe sql_mode is not matching,Reacquire a parser - newParser, err := utils.GetParserForConn(tctx.Ctx, downstreamConn.DBConn) - if err != nil { + newParser, err1 := utils.GetParserForConn(tctx.Ctx, downstreamConn.DBConn) + if err1 != nil { return nil, dterror.ErrParseSQL.Delegate(err, createStr) } - stmtNode, err = newParser.ParseOneStmt(createStr, "", "") - if err != nil { + stmtNode, err1 = newParser.ParseOneStmt(createStr, "", "") + if err1 != nil { return nil, dterror.ErrParseSQL.Delegate(err, createStr) } tr.downstreamTracker.stmtParser = newParser - } ti, err := ddl.MockTableInfo(mock.NewContext(), stmtNode.(*ast.CreateTableStmt), mockTableID) @@ -494,12 +489,12 @@ func (tr *Tracker) getTiByCreateStmt(tctx *tcontext.Context, tableID string, dow return ti, nil } -// getDownStreamTi constructs downstreamTable index cache by tableinfo +// getDownStreamTi constructs downstreamTable index cache by tableinfo. func getDownStreamTi(ti *model.TableInfo, originTi *model.TableInfo) *downstreamTableInfo { var ( indexCache *model.IndexInfo - availableUKCache []*model.IndexInfo = make([]*model.IndexInfo, 0, len(ti.Indices)) - hasPk bool = false + availableUKCache = make([]*model.IndexInfo, 0, len(ti.Indices)) + hasPk = false ) // func for check not null constraint @@ -543,7 +538,7 @@ func getDownStreamTi(ti *model.TableInfo, originTi *model.TableInfo) *downstream } } -// redirectIndexKeys redirect index's columns offset in origin tableinfo +// redirectIndexKeys redirect index's columns offset in origin tableinfo. 
func redirectIndexKeys(index *model.IndexInfo, originTi *model.TableInfo) *model.IndexInfo { if index == nil || originTi == nil { return nil @@ -593,7 +588,7 @@ func handlePkExCase(ti *model.TableInfo) *model.IndexInfo { return nil } -// isSpecifiedIndexColumn checks all of index's columns are matching 'fn' +// isSpecifiedIndexColumn checks all of index's columns are matching 'fn'. func isSpecifiedIndexColumn(index *model.IndexInfo, fn func(i int) bool) bool { for _, col := range index.Columns { if !fn(col.Offset) { diff --git a/pkg/schema/tracker_test.go b/pkg/schema/tracker_test.go index 35d6c2ece7..57028f5ffe 100644 --- a/pkg/schema/tracker_test.go +++ b/pkg/schema/tracker_test.go @@ -30,11 +30,12 @@ import ( "github.com/pingcap/parser/model" "github.com/pingcap/tidb-tools/pkg/filter" "github.com/pingcap/tidb/ddl" + timock "github.com/pingcap/tidb/util/mock" "go.uber.org/zap/zapcore" - "github.com/pingcap/dm/pkg/conn" tcontext "github.com/pingcap/dm/pkg/context" - timock "github.com/pingcap/tidb/util/mock" + + "github.com/pingcap/dm/pkg/conn" ) func Test(t *testing.T) { @@ -100,7 +101,7 @@ func (s *trackerSuite) TestTiDBAndSessionCfg(c *C) { // empty or default config in downstream mock.ExpectQuery("SHOW VARIABLES LIKE 'sql_mode'").WillReturnRows( sqlmock.NewRows([]string{"Variable_name", "Value"}). - AddRow("sql_mode", "")) + AddRow("sql_mode", defaultTestSessionCfg["sql_mode"])) tracker, err := NewTracker(context.Background(), "test-tracker", nil, baseConn) c.Assert(err, IsNil) c.Assert(mock.ExpectationsWereMet(), IsNil) @@ -518,12 +519,11 @@ func (s *trackerSuite) TestNotSupportedVariable(c *C) { mock.ExpectQuery("SHOW VARIABLES LIKE 'sql_mode'").WillReturnRows( sqlmock.NewRows([]string{"Variable_name", "Value"}). 
- AddRow("sql_mode", "")) + AddRow("sql_mode", defaultTestSessionCfg["sql_mode"])) oldSessionVar := map[string]string{ "tidb_enable_change_column_type": "ON", } - _, err = NewTracker(context.Background(), "test-tracker", oldSessionVar, baseConn) c.Assert(err, IsNil) } @@ -577,7 +577,7 @@ func (s *trackerSuite) TestGetDownStreamIndexInfo(c *C) { tableID := "`test`.`test`" - //downstream has no pk/uk + // downstream has no pk/uk mock.ExpectQuery("SHOW CREATE TABLE " + tableID).WillReturnRows( sqlmock.NewRows([]string{"Table", "Create Table"}). AddRow("test", "create table t(a int, b int, c varchar(10))")) @@ -588,7 +588,7 @@ func (s *trackerSuite) TestGetDownStreamIndexInfo(c *C) { c.Assert(indexinfo, IsNil) tracker.downstreamTracker.tableInfos.Delete(tableID) - //downstream has pk(not constraints like "create table t(a int primary key,b int not null)" + // downstream has pk(not constraints like "create table t(a int primary key,b int not null)" mock.ExpectQuery("SHOW CREATE TABLE " + tableID).WillReturnRows( sqlmock.NewRows([]string{"Table", "Create Table"}). AddRow("test", "create table t(a int, b int, c varchar(10), PRIMARY KEY (c))")) @@ -599,7 +599,7 @@ func (s *trackerSuite) TestGetDownStreamIndexInfo(c *C) { c.Assert(indexinfo, NotNil) tracker.downstreamTracker.tableInfos.Delete(tableID) - //downstream has composite pks + // downstream has composite pks mock.ExpectQuery("SHOW CREATE TABLE " + tableID).WillReturnRows( sqlmock.NewRows([]string{"Table", "Create Table"}). AddRow("test", "create table t(a int, b int, c varchar(10), PRIMARY KEY (a,b))")) @@ -610,7 +610,7 @@ func (s *trackerSuite) TestGetDownStreamIndexInfo(c *C) { c.Assert(len(indexinfo.Columns) == 2, IsTrue) tracker.downstreamTracker.tableInfos.Delete(tableID) - //downstream has uk(not null) + // downstream has uk(not null) mock.ExpectQuery("SHOW CREATE TABLE " + tableID).WillReturnRows( sqlmock.NewRows([]string{"Table", "Create Table"}). 
AddRow("test", "create table t(a int unique not null, b int, c varchar(10))")) @@ -621,7 +621,7 @@ func (s *trackerSuite) TestGetDownStreamIndexInfo(c *C) { c.Assert(indexinfo.Columns, NotNil) tracker.downstreamTracker.tableInfos.Delete(tableID) - //downstream has uk(without not null) + // downstream has uk(without not null) mock.ExpectQuery("SHOW CREATE TABLE " + tableID).WillReturnRows( sqlmock.NewRows([]string{"Table", "Create Table"}). AddRow("test", "create table t(a int unique, b int, c varchar(10))")) @@ -633,7 +633,7 @@ func (s *trackerSuite) TestGetDownStreamIndexInfo(c *C) { c.Assert(dti.(*downstreamTableInfo).availableUKCache, NotNil) tracker.downstreamTracker.tableInfos.Delete(tableID) - //downstream has uks + // downstream has uks mock.ExpectQuery("SHOW CREATE TABLE " + tableID).WillReturnRows( sqlmock.NewRows([]string{"Table", "Create Table"}). AddRow("test", "create table t(a int unique, b int unique, c varchar(10) unique not null)")) @@ -645,7 +645,7 @@ func (s *trackerSuite) TestGetDownStreamIndexInfo(c *C) { c.Assert(len(dti.(*downstreamTableInfo).availableUKCache) == 2, IsTrue) tracker.downstreamTracker.tableInfos.Delete(tableID) - //downstream has pk and uk, pk has priority + // downstream has pk and uk, pk has priority mock.ExpectQuery("SHOW CREATE TABLE " + tableID).WillReturnRows( sqlmock.NewRows([]string{"Table", "Create Table"}). AddRow("test", "create table t(a int unique not null , b int, c varchar(10), PRIMARY KEY (c))")) @@ -655,7 +655,7 @@ func (s *trackerSuite) TestGetDownStreamIndexInfo(c *C) { tracker.downstreamTracker.tableInfos.Delete(tableID) } -// TestChangeDownstreamSqlMode is check sql mode change +// TestChangeDownstreamSqlMode is check sql mode change. 
func (s *trackerSuite) TestChangeDownstreamSqlMode(c *C) { log.SetLevel(zapcore.ErrorLevel) diff --git a/syncer/dml.go b/syncer/dml.go index 630984f5e5..331be28e97 100644 --- a/syncer/dml.go +++ b/syncer/dml.go @@ -509,14 +509,6 @@ func findFitIndex(ti *model.TableInfo) *model.IndexInfo { return getSpecifiedIndexColumn(ti, fn) } -func getAvailableIndexColumn(ti *model.TableInfo, data []interface{}) *model.IndexInfo { - fn := func(i int) bool { - return data[i] == nil - } - - return getSpecifiedIndexColumn(ti, fn) -} - func getSpecifiedIndexColumn(ti *model.TableInfo, fn func(i int) bool) *model.IndexInfo { for _, indexCols := range ti.Indices { if !indexCols.Unique { diff --git a/syncer/syncer_test.go b/syncer/syncer_test.go index 3a3bb351d5..7dc20f83de 100644 --- a/syncer/syncer_test.go +++ b/syncer/syncer_test.go @@ -95,8 +95,6 @@ type testSyncerSuite struct { db *sql.DB cfg *config.SubTaskConfig eventsGenerator *event.Generator - syncer *replication.BinlogSyncer - streamer *replication.BinlogStreamer } type MockStreamer struct { From c77d91e4c0d39d8034e75718f7a61b99ce1a6886 Mon Sep 17 00:00:00 2001 From: WizardXiao Date: Tue, 19 Oct 2021 18:07:53 +0800 Subject: [PATCH 09/14] commit-message: update set downstream tracker sql mode by default value --- _utils/terror_gen/errors_release.txt | 3 + errors.toml | 18 ++++ pkg/schema/tracker.go | 150 ++++++++++++++------------- pkg/schema/tracker_test.go | 150 ++++++++++----------------- pkg/terror/error_list.go | 11 +- syncer/dml.go | 8 +- syncer/syncer.go | 19 ++-- 7 files changed, 182 insertions(+), 177 deletions(-) diff --git a/_utils/terror_gen/errors_release.txt b/_utils/terror_gen/errors_release.txt index c7f9f09cc2..3744e033a9 100644 --- a/_utils/terror_gen/errors_release.txt +++ b/_utils/terror_gen/errors_release.txt @@ -491,6 +491,9 @@ ErrSchemaTrackerInvalidCreateTableStmt,[code=44009:class=schema-tracker:scope=in ErrSchemaTrackerRestoreStmtFail,[code=44010:class=schema-tracker:scope=internal:level=medium], 
"Message: fail to restore the statement" ErrSchemaTrackerCannotDropTable,[code=44011:class=schema-tracker:scope=internal:level=high], "Message: failed to drop table for %v in schema tracker" ErrSchemaTrackerInit,[code=44012:class=schema-tracker:scope=internal:level=high], "Message: failed to create schema tracker" +ErrSchemaTrackerCannotSetDownstreamSQLMode,[code=44013:class=schema-tracker:scope=internal:level=high], "Message: failed to set default downstream sql_mode %v in schema tracker" +ErrSchemaTrackerCannotInitDownstreamParser,[code=44014:class=schema-tracker:scope=internal:level=high], "Message: failed to init downstream parser by sql_mode %v in schema tracker" +ErrSchemaTrackerCannotMockDownstreamTable,[code=44015:class=schema-tracker:scope=internal:level=high], "Message: failed to mock downstream table by create table statement %v in schema tracker" ErrSchedulerNotStarted,[code=46001:class=scheduler:scope=internal:level=high], "Message: the scheduler has not started" ErrSchedulerStarted,[code=46002:class=scheduler:scope=internal:level=medium], "Message: the scheduler has already started" ErrSchedulerWorkerExist,[code=46003:class=scheduler:scope=internal:level=medium], "Message: dm-worker with name %s already exists" diff --git a/errors.toml b/errors.toml index e4b2b88362..31ac130a6b 100644 --- a/errors.toml +++ b/errors.toml @@ -2956,6 +2956,24 @@ description = "" workaround = "" tags = ["internal", "high"] +[error.DM-schema-tracker-44013] +message = "failed to set default downstream sql_mode %v in schema tracker" +description = "" +workaround = "" +tags = ["internal", "high"] + +[error.DM-schema-tracker-44014] +message = "failed to init downstream parser by sql_mode %v in schema tracker" +description = "" +workaround = "" +tags = ["internal", "high"] + +[error.DM-schema-tracker-44015] +message = "failed to mock downstream table by create table statement %v in schema tracker" +description = "" +workaround = "" +tags = ["internal", "high"] + 
[error.DM-scheduler-46001] message = "the scheduler has not started" description = "" diff --git a/pkg/schema/tracker.go b/pkg/schema/tracker.go index 95fd6ffb8c..75d5f6382e 100644 --- a/pkg/schema/tracker.go +++ b/pkg/schema/tracker.go @@ -49,7 +49,8 @@ const ( // TiDBClusteredIndex is the variable name for clustered index. TiDBClusteredIndex = "tidb_enable_clustered_index" // downstream mock table id, consists of serial numbers of letters. - mockTableID = 121402101900011104 + mockTableID = 121402101900011104 + defaultSQLMode = "ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION" ) var ( @@ -63,16 +64,18 @@ var ( // Tracker is used to track schema locally. type Tracker struct { - store kv.Storage - dom *domain.Domain - se session.Session - downstreamTracker *DownstreamTracker // downstream tracker tableid -> createTableStmt + store kv.Storage + dom *domain.Domain + se session.Session + dsTracker *downstreamTracker // downstream tracker tableid -> createTableStmt } -// DownstreamTracker tracks downstream schema. -type DownstreamTracker struct { - stmtParser *parser.Parser // statement parser - tableInfos sync.Map // downstream table infos +// downstreamTracker tracks downstream schema. +type downstreamTracker struct { + trackMutex sync.Mutex // downstream track mutex + downstreamConn *conn.BaseConn // downstream connection + stmtParser *parser.Parser // statement parser + tableInfos sync.Map // downstream table infos } // downstreamTableInfo contains tableinfo and index cache. @@ -83,9 +86,9 @@ type downstreamTableInfo struct { } // NewTracker creates a new tracker. `sessionCfg` will be set as tracker's session variables if specified, or retrieve -// some variable from downstream TiDB using `tidbConn`. +// some variable from downstream using `downstreamConn`. // NOTE **sessionCfg is a reference to caller**. 
-func NewTracker(ctx context.Context, task string, sessionCfg map[string]string, tidbConn *conn.BaseConn) (*Tracker, error) { +func NewTracker(ctx context.Context, task string, sessionCfg map[string]string, downstreamConn *conn.BaseConn) (*Tracker, error) { // NOTE: tidb uses a **global** config so can't isolate tracker's config from each other. If that isolation is needed, // we might SetGlobalConfig before every call to tracker, or use some patch like https://github.com/bouk/monkey tidbConfig.UpdateGlobal(func(conf *tidbConfig.Config) { @@ -104,7 +107,7 @@ func NewTracker(ctx context.Context, task string, sessionCfg map[string]string, for _, k := range downstreamVars { if _, ok := sessionCfg[k]; !ok { var ignoredColumn interface{} - rows, err2 := tidbConn.QuerySQL(tctx, fmt.Sprintf("SHOW VARIABLES LIKE '%s'", k)) + rows, err2 := downstreamConn.QuerySQL(tctx, fmt.Sprintf("SHOW VARIABLES LIKE '%s'", k)) if err2 != nil { return nil, err2 } @@ -179,33 +182,15 @@ func NewTracker(ctx context.Context, task string, sessionCfg map[string]string, } // init downstreamTracker - downstreamTracker, err := initDownStreamTracker(ctx, tidbConn, sessionCfg["sql_mode"]) - if err != nil { - return nil, err + dsTracker := &downstreamTracker{ + downstreamConn: downstreamConn, } return &Tracker{ - store: store, - dom: dom, - se: se, - downstreamTracker: downstreamTracker, - }, nil -} - -// initDownStreamTracker init downstream tracker by sql_mode str which comes from "SHOW VARIABLES like %SQL_MODE". 
-func initDownStreamTracker(ctx context.Context, downStreamConn *conn.BaseConn, sqlmode string) (*DownstreamTracker, error) { - var stmtParser *parser.Parser - var err error - if sqlmode != "" { - stmtParser, err = utils.GetParserFromSQLModeStr(sqlmode) - } else { - stmtParser, err = utils.GetParserForConn(ctx, downStreamConn.DBConn) - } - if err != nil { - return nil, err - } - return &DownstreamTracker{ - stmtParser: stmtParser, + store: store, + dom: dom, + se: se, + dsTracker: dsTracker, }, nil } @@ -379,29 +364,39 @@ func (tr *Tracker) GetSystemVar(name string) (string, bool) { // GetDownStreamIndexInfo gets downstream PK/UK(not null) Index. // note. this function will init downstreamTrack's table info. -func (tr *Tracker) GetDownStreamIndexInfo(tctx *tcontext.Context, tableID string, originTi *model.TableInfo, downstreamConn *conn.BaseConn) (*model.IndexInfo, error) { - dti, ok := tr.downstreamTracker.tableInfos.Load(tableID) +func (tr *Tracker) GetDownStreamIndexInfo(tctx *tcontext.Context, tableID string, originTi *model.TableInfo) (*model.IndexInfo, error) { + dti, ok := tr.dsTracker.tableInfos.Load(tableID) if !ok { + tr.dsTracker.trackMutex.Lock() + defer tr.dsTracker.trackMutex.Unlock() + // index info maybe has been inited by other routine + dti, ok = tr.dsTracker.tableInfos.Load(tableID) + if ok { + return dti.(*downstreamTableInfo).indexCache, nil + } + log.L().Info("DownStream schema tracker init. ", zap.String("tableID", tableID)) - ti, err := tr.getTiByCreateStmt(tctx, tableID, downstreamConn) + ti, err := tr.getTIByCreateStmt(tctx, tableID, originTi.Name.O) if err != nil { return nil, err } + dti = getDownStreamTi(ti, originTi) - tr.downstreamTracker.tableInfos.Store(tableID, dti) + tr.dsTracker.tableInfos.Store(tableID, dti) } return dti.(*downstreamTableInfo).indexCache, nil } -// GetAvailableDownStreanUKIndexInfo gets available downstream UK whose data is not null. 
+// GetAvailableDownStreamUKIndexInfo gets available downstream UK whose data is not null. // note. this function will not init downstreamTrack. -func (tr *Tracker) GetAvailableDownStreanUKIndexInfo(tableID string, originTi *model.TableInfo, data []interface{}) *model.IndexInfo { - dtii, ok := tr.downstreamTracker.tableInfos.Load(tableID) - dti := dtii.(*downstreamTableInfo) - if !ok || dti.availableUKCache == nil || len(dti.availableUKCache) == 0 { +func (tr *Tracker) GetAvailableDownStreamUKIndexInfo(tableID string, originTi *model.TableInfo, data []interface{}) *model.IndexInfo { + dtii, ok := tr.dsTracker.tableInfos.Load(tableID) + + if !ok || dtii.(*downstreamTableInfo).availableUKCache == nil || len(dtii.(*downstreamTableInfo).availableUKCache) == 0 { return nil } + dti := dtii.(*downstreamTableInfo) // func for check data is not null fn := func(i int) bool { return data[i] != nil @@ -411,7 +406,7 @@ func (tr *Tracker) GetAvailableDownStreanUKIndexInfo(tableID string, originTi *m // check uk's column data is not null if isSpecifiedIndexColumn(uk, fn) { if i != 0 { - // exchange available uk to the first of the arry to reduce judgements for next row + // exchange available uk to the first of the array to reduce judgements for next row temp := dti.availableUKCache[0] dti.availableUKCache[0] = uk dti.availableUKCache[i] = temp @@ -424,37 +419,43 @@ func (tr *Tracker) GetAvailableDownStreanUKIndexInfo(tableID string, originTi *m // ReTrackDownStreamIndex just remove schema or table in downstreamTrack. 
func (tr *Tracker) ReTrackDownStreamIndex(targetTables []*filter.Table) { - if targetTables == nil { + if len(targetTables) == 0 { return } for i := 0; i < len(targetTables); i++ { tableID := utils.GenTableID(targetTables[i]) - _, ok := tr.downstreamTracker.tableInfos.Load(tableID) + _, ok := tr.dsTracker.tableInfos.LoadAndDelete(tableID) if !ok { // handle just have schema if targetTables[i].Schema != "" && targetTables[i].Name == "" { - tr.downstreamTracker.tableInfos.Range(func(k, v interface{}) bool { + tr.dsTracker.tableInfos.Range(func(k, v interface{}) bool { if strings.HasPrefix(k.(string), tableID+".") { - tr.downstreamTracker.tableInfos.Delete(k) + tr.dsTracker.tableInfos.Delete(k) } return true }) - log.L().Info("Remove downStream schema tracker.", zap.String("schema", targetTables[i].Schema)) + log.L().Info("Remove downstream schema tracker", zap.String("schema", targetTables[i].Schema)) } } else { - tr.downstreamTracker.tableInfos.Delete(tableID) - log.L().Info("Remove downStream schema tracker.", zap.String("tableID", tableID)) + log.L().Info("Remove downstream schema tracker", zap.String("tableID", tableID)) } } } -// getTiByCreateStmt get downstream tableInfo by "SHOW CREATE TABLE" stmt. -func (tr *Tracker) getTiByCreateStmt(tctx *tcontext.Context, tableID string, downstreamConn *conn.BaseConn) (*model.TableInfo, error) { +// getTIByCreateStmt get downstream tableInfo by "SHOW CREATE TABLE" stmt. 
+func (tr *Tracker) getTIByCreateStmt(tctx *tcontext.Context, tableID string, originTableName string) (*model.TableInfo, error) { + if tr.dsTracker.stmtParser == nil { + err := tr.initDownStreamSQLModeAndParser(tctx) + if err != nil { + return nil, err + } + } + querySQL := fmt.Sprintf("SHOW CREATE TABLE %s", tableID) - rows, err := downstreamConn.QuerySQL(tctx, querySQL) + rows, err := tr.dsTracker.downstreamConn.QuerySQL(tctx, querySQL) if err != nil { - return nil, dterror.DBErrorAdapt(err, dterror.ErrDBDriverError) + return nil, dterror.ErrSchemaTrackerCannotFetchDownstreamTable.Delegate(err, tableID, originTableName) } defer rows.Close() var tableName, createStr string @@ -466,30 +467,35 @@ func (tr *Tracker) getTiByCreateStmt(tctx *tcontext.Context, tableID string, dow log.L().Info("Show create table info", zap.String("tableID", tableID), zap.String("create string", createStr)) // parse create table stmt. - stmtNode, err := tr.downstreamTracker.stmtParser.ParseOneStmt(createStr, "", "") + stmtNode, err := tr.dsTracker.stmtParser.ParseOneStmt(createStr, "", "") if err != nil { - // maybe sql_mode is not matching,Reacquire a parser - newParser, err1 := utils.GetParserForConn(tctx.Ctx, downstreamConn.DBConn) - if err1 != nil { - return nil, dterror.ErrParseSQL.Delegate(err, createStr) - } - - stmtNode, err1 = newParser.ParseOneStmt(createStr, "", "") - if err1 != nil { - return nil, dterror.ErrParseSQL.Delegate(err, createStr) - } - - tr.downstreamTracker.stmtParser = newParser + return nil, dterror.ErrSchemaTrackerInvalidCreateTableStmt.Delegate(err, createStr) } ti, err := ddl.MockTableInfo(mock.NewContext(), stmtNode.(*ast.CreateTableStmt), mockTableID) if err != nil { - return nil, dterror.ErrParseSQL.Delegate(err, createStr) + return nil, dterror.ErrSchemaTrackerCannotMockDownstreamTable.Delegate(err, createStr) } return ti, nil } -// getDownStreamTi constructs downstreamTable index cache by tableinfo. 
+// initDownStreamTrackerParser init downstream tracker parser by default sql_mode. +func (tr *Tracker) initDownStreamSQLModeAndParser(tctx *tcontext.Context) error { + setSQLMode := fmt.Sprintf("SET SESSION SQL_MODE = '%s'", defaultSQLMode) + _, err := tr.dsTracker.downstreamConn.DBConn.ExecContext(tctx.Ctx, setSQLMode) + if err != nil { + return dterror.ErrSchemaTrackerCannotSetDownstreamSQLMode.Delegate(err, defaultSQLMode) + } + + stmtParser, err := utils.GetParserFromSQLModeStr(defaultSQLMode) + if err != nil { + return dterror.ErrSchemaTrackerCannotInitDownstreamParser.Delegate(err, defaultSQLMode) + } + tr.dsTracker.stmtParser = stmtParser + return nil +} + +// getDownStreamTi constructs downstreamTable index cache by tableinfo. func getDownStreamTi(ti *model.TableInfo, originTi *model.TableInfo) *downstreamTableInfo { var ( indexCache *model.IndexInfo diff --git a/pkg/schema/tracker_test.go b/pkg/schema/tracker_test.go index 57028f5ffe..ba03d3d4c6 100644 --- a/pkg/schema/tracker_test.go +++ b/pkg/schema/tracker_test.go @@ -33,6 +33,8 @@ import ( timock "github.com/pingcap/tidb/util/mock" "go.uber.org/zap/zapcore" + dlog "github.com/pingcap/dm/pkg/log" + tcontext "github.com/pingcap/dm/pkg/context" "github.com/pingcap/dm/pkg/conn" @@ -528,30 +530,25 @@ func (s *trackerSuite) TestNotSupportedVariable(c *C) { c.Assert(err, IsNil) } -func (s *trackerSuite) TestInitDownStreamTracker(c *C) { +func (s *trackerSuite) TestInitDownStreamSQLModeAndParser(c *C) { log.SetLevel(zapcore.ErrorLevel) - var downstreamTracker *DownstreamTracker - var err error - - // sql_mode has been defined - downstreamTracker, err = initDownStreamTracker(context.Background(), nil, defaultTestSessionCfg["sql_mode"]) - c.Assert(err, IsNil) - c.Assert(downstreamTracker.stmtParser, NotNil) - - // sql_mode has not been defined + // tracker and sqlmock db, mock, err := sqlmock.New() c.Assert(err, IsNil) defer db.Close() con, err := db.Conn(context.Background()) c.Assert(err, IsNil) baseConn := 
conn.NewBaseConn(con, nil) - mock.ExpectQuery("SHOW VARIABLES LIKE 'sql_mode'").WillReturnRows( - sqlmock.NewRows([]string{"Variable_name", "Value"}). - AddRow("sql_mode", defaultTestSessionCfg["sql_mode"])) - downstreamTracker, err = initDownStreamTracker(context.Background(), baseConn, "") + tracker, err := NewTracker(context.Background(), "test-tracker", defaultTestSessionCfg, baseConn) c.Assert(err, IsNil) - c.Assert(downstreamTracker.stmtParser, NotNil) + + mock.ExpectExec(fmt.Sprintf("SET SESSION SQL_MODE = '%s'", defaultSQLMode)).WillReturnResult(sqlmock.NewResult(0, 0)) + tctx := tcontext.NewContext(context.Background(), dlog.L()) + + err = tracker.initDownStreamSQLModeAndParser(tctx) + c.Assert(err, IsNil) + c.Assert(tracker.dsTracker.stmtParser, NotNil) } func (s *trackerSuite) TestGetDownStreamIndexInfo(c *C) { @@ -574,6 +571,7 @@ func (s *trackerSuite) TestGetDownStreamIndexInfo(c *C) { baseConn := conn.NewBaseConn(con, nil) tracker, err := NewTracker(context.Background(), "test-tracker", defaultTestSessionCfg, baseConn) c.Assert(err, IsNil) + mock.ExpectExec(fmt.Sprintf("SET SESSION SQL_MODE = '%s'", defaultSQLMode)).WillReturnResult(sqlmock.NewResult(0, 0)) tableID := "`test`.`test`" @@ -581,116 +579,78 @@ func (s *trackerSuite) TestGetDownStreamIndexInfo(c *C) { mock.ExpectQuery("SHOW CREATE TABLE " + tableID).WillReturnRows( sqlmock.NewRows([]string{"Table", "Create Table"}). 
AddRow("test", "create table t(a int, b int, c varchar(10))")) - indexinfo, err := tracker.GetDownStreamIndexInfo(tcontext.Background(), tableID, oriTi, baseConn) + indexinfo, err := tracker.GetDownStreamIndexInfo(tcontext.Background(), tableID, oriTi) c.Assert(err, IsNil) - _, ok := tracker.downstreamTracker.tableInfos.Load(tableID) + _, ok := tracker.dsTracker.tableInfos.Load(tableID) c.Assert(ok, IsTrue) c.Assert(indexinfo, IsNil) - tracker.downstreamTracker.tableInfos.Delete(tableID) + tracker.dsTracker.tableInfos.Delete(tableID) // downstream has pk(not constraints like "create table t(a int primary key,b int not null)" mock.ExpectQuery("SHOW CREATE TABLE " + tableID).WillReturnRows( sqlmock.NewRows([]string{"Table", "Create Table"}). AddRow("test", "create table t(a int, b int, c varchar(10), PRIMARY KEY (c))")) - indexinfo, err = tracker.GetDownStreamIndexInfo(tcontext.Background(), tableID, oriTi, baseConn) + indexinfo, err = tracker.GetDownStreamIndexInfo(tcontext.Background(), tableID, oriTi) c.Assert(err, IsNil) - _, ok = tracker.downstreamTracker.tableInfos.Load(tableID) + _, ok = tracker.dsTracker.tableInfos.Load(tableID) c.Assert(ok, IsTrue) c.Assert(indexinfo, NotNil) - tracker.downstreamTracker.tableInfos.Delete(tableID) + tracker.dsTracker.tableInfos.Delete(tableID) // downstream has composite pks mock.ExpectQuery("SHOW CREATE TABLE " + tableID).WillReturnRows( sqlmock.NewRows([]string{"Table", "Create Table"}). 
AddRow("test", "create table t(a int, b int, c varchar(10), PRIMARY KEY (a,b))")) - indexinfo, err = tracker.GetDownStreamIndexInfo(tcontext.Background(), tableID, oriTi, baseConn) + indexinfo, err = tracker.GetDownStreamIndexInfo(tcontext.Background(), tableID, oriTi) c.Assert(err, IsNil) - _, ok = tracker.downstreamTracker.tableInfos.Load(tableID) + _, ok = tracker.dsTracker.tableInfos.Load(tableID) c.Assert(ok, IsTrue) c.Assert(len(indexinfo.Columns) == 2, IsTrue) - tracker.downstreamTracker.tableInfos.Delete(tableID) + tracker.dsTracker.tableInfos.Delete(tableID) // downstream has uk(not null) mock.ExpectQuery("SHOW CREATE TABLE " + tableID).WillReturnRows( sqlmock.NewRows([]string{"Table", "Create Table"}). AddRow("test", "create table t(a int unique not null, b int, c varchar(10))")) - indexinfo, err = tracker.GetDownStreamIndexInfo(tcontext.Background(), tableID, oriTi, baseConn) + indexinfo, err = tracker.GetDownStreamIndexInfo(tcontext.Background(), tableID, oriTi) c.Assert(err, IsNil) - _, ok = tracker.downstreamTracker.tableInfos.Load(tableID) + _, ok = tracker.dsTracker.tableInfos.Load(tableID) c.Assert(ok, IsTrue) c.Assert(indexinfo.Columns, NotNil) - tracker.downstreamTracker.tableInfos.Delete(tableID) + tracker.dsTracker.tableInfos.Delete(tableID) // downstream has uk(without not null) mock.ExpectQuery("SHOW CREATE TABLE " + tableID).WillReturnRows( sqlmock.NewRows([]string{"Table", "Create Table"}). 
AddRow("test", "create table t(a int unique, b int, c varchar(10))")) - indexinfo, err = tracker.GetDownStreamIndexInfo(tcontext.Background(), tableID, oriTi, baseConn) + indexinfo, err = tracker.GetDownStreamIndexInfo(tcontext.Background(), tableID, oriTi) c.Assert(err, IsNil) - dti, ok := tracker.downstreamTracker.tableInfos.Load(tableID) + dti, ok := tracker.dsTracker.tableInfos.Load(tableID) c.Assert(ok, IsTrue) c.Assert(indexinfo, IsNil) c.Assert(dti.(*downstreamTableInfo).availableUKCache, NotNil) - tracker.downstreamTracker.tableInfos.Delete(tableID) + tracker.dsTracker.tableInfos.Delete(tableID) // downstream has uks mock.ExpectQuery("SHOW CREATE TABLE " + tableID).WillReturnRows( sqlmock.NewRows([]string{"Table", "Create Table"}). AddRow("test", "create table t(a int unique, b int unique, c varchar(10) unique not null)")) - indexinfo, err = tracker.GetDownStreamIndexInfo(tcontext.Background(), tableID, oriTi, baseConn) + indexinfo, err = tracker.GetDownStreamIndexInfo(tcontext.Background(), tableID, oriTi) c.Assert(err, IsNil) - dti, ok = tracker.downstreamTracker.tableInfos.Load(tableID) + dti, ok = tracker.dsTracker.tableInfos.Load(tableID) c.Assert(ok, IsTrue) c.Assert(indexinfo, NotNil) c.Assert(len(dti.(*downstreamTableInfo).availableUKCache) == 2, IsTrue) - tracker.downstreamTracker.tableInfos.Delete(tableID) + tracker.dsTracker.tableInfos.Delete(tableID) // downstream has pk and uk, pk has priority mock.ExpectQuery("SHOW CREATE TABLE " + tableID).WillReturnRows( sqlmock.NewRows([]string{"Table", "Create Table"}). 
AddRow("test", "create table t(a int unique not null , b int, c varchar(10), PRIMARY KEY (c))")) - indexinfo, err = tracker.GetDownStreamIndexInfo(tcontext.Background(), tableID, oriTi, baseConn) + indexinfo, err = tracker.GetDownStreamIndexInfo(tcontext.Background(), tableID, oriTi) c.Assert(err, IsNil) c.Assert(indexinfo.Primary, IsTrue) - tracker.downstreamTracker.tableInfos.Delete(tableID) -} - -// TestChangeDownstreamSqlMode is check sql mode change. -func (s *trackerSuite) TestChangeDownstreamSqlMode(c *C) { - log.SetLevel(zapcore.ErrorLevel) - - // origin table info - p := parser.New() - se := timock.NewContext() - node, err := p.ParseOneStmt("create table t(a int, b int, c varchar(10))", "utf8mb4", "utf8mb4_bin") - c.Assert(err, IsNil) - oriTi, err := ddl.MockTableInfo(se, node.(*ast.CreateTableStmt), 1) - c.Assert(err, IsNil) - - // tracker and sqlmock - db, mock, err := sqlmock.New() - c.Assert(err, IsNil) - defer db.Close() - con, err := db.Conn(context.Background()) - c.Assert(err, IsNil) - baseConn := conn.NewBaseConn(con, nil) - tmpSessionCfg := map[string]string{"sql_mode": "NO_BACKSLASH_ESCAPES"} - tracker, err := NewTracker(context.Background(), "test-tracker", tmpSessionCfg, baseConn) - c.Assert(err, IsNil) - - tableID := "`test`.`test`" - - // create sql need NO_BACKSLASH_ESCAPES - mock.ExpectQuery("SHOW CREATE TABLE " + tableID).WillReturnRows( - sqlmock.NewRows([]string{"Table", "Create Table"}). - AddRow("test", "create table t(a int PRIMARY KEY, b int, c varchar(10) default '\\'')")) - // mock show sqlmode - mock.ExpectQuery("SHOW VARIABLES LIKE 'sql_mode'").WillReturnRows( - sqlmock.NewRows([]string{"Variable_name", "Value"}). 
- AddRow("sql_mode", defaultTestSessionCfg["sql_mode"])) - indexinfo, err := tracker.GetDownStreamIndexInfo(tcontext.Background(), tableID, oriTi, baseConn) - c.Assert(err, IsNil) - c.Assert(indexinfo, NotNil) + tracker.dsTracker.tableInfos.Delete(tableID) } func (s *trackerSuite) TestGetAvailableDownStreanUKIndexInfo(c *C) { @@ -713,6 +673,7 @@ func (s *trackerSuite) TestGetAvailableDownStreanUKIndexInfo(c *C) { baseConn := conn.NewBaseConn(con, nil) tracker, err := NewTracker(context.Background(), "test-tracker", defaultTestSessionCfg, baseConn) c.Assert(err, IsNil) + mock.ExpectExec(fmt.Sprintf("SET SESSION SQL_MODE = '%s'", defaultSQLMode)).WillReturnResult(sqlmock.NewResult(0, 0)) tableID := "`test`.`test`" @@ -720,61 +681,61 @@ func (s *trackerSuite) TestGetAvailableDownStreanUKIndexInfo(c *C) { mock.ExpectQuery("SHOW CREATE TABLE " + tableID).WillReturnRows( sqlmock.NewRows([]string{"Table", "Create Table"}). AddRow("test", "create table t(a int, b int, c varchar(10))")) - indexinfo, err := tracker.GetDownStreamIndexInfo(tcontext.Background(), tableID, oriTi, baseConn) + indexinfo, err := tracker.GetDownStreamIndexInfo(tcontext.Background(), tableID, oriTi) c.Assert(err, IsNil) c.Assert(indexinfo, IsNil) data := []interface{}{1, 2, 3} - indexinfo = tracker.GetAvailableDownStreanUKIndexInfo(tableID, oriTi, data) + indexinfo = tracker.GetAvailableDownStreamUKIndexInfo(tableID, oriTi, data) c.Assert(indexinfo, IsNil) - tracker.downstreamTracker.tableInfos.Delete(tableID) + tracker.dsTracker.tableInfos.Delete(tableID) // downstream has uk but data is null mock.ExpectQuery("SHOW CREATE TABLE " + tableID).WillReturnRows( sqlmock.NewRows([]string{"Table", "Create Table"}). 
AddRow("test", "create table t(a int unique, b int, c varchar(10))")) - indexinfo, err = tracker.GetDownStreamIndexInfo(tcontext.Background(), tableID, oriTi, baseConn) + indexinfo, err = tracker.GetDownStreamIndexInfo(tcontext.Background(), tableID, oriTi) c.Assert(err, IsNil) c.Assert(indexinfo, IsNil) data = []interface{}{nil, 2, 3} - indexinfo = tracker.GetAvailableDownStreanUKIndexInfo(tableID, oriTi, data) + indexinfo = tracker.GetAvailableDownStreamUKIndexInfo(tableID, oriTi, data) c.Assert(indexinfo, IsNil) - tracker.downstreamTracker.tableInfos.Delete(tableID) + tracker.dsTracker.tableInfos.Delete(tableID) // downstream has uk and data is not null mock.ExpectQuery("SHOW CREATE TABLE " + tableID).WillReturnRows( sqlmock.NewRows([]string{"Table", "Create Table"}). AddRow("test", "create table t(a int unique, b int, c varchar(10))")) - indexinfo, err = tracker.GetDownStreamIndexInfo(tcontext.Background(), tableID, oriTi, baseConn) + indexinfo, err = tracker.GetDownStreamIndexInfo(tcontext.Background(), tableID, oriTi) c.Assert(err, IsNil) c.Assert(indexinfo, IsNil) data = []interface{}{1, 2, 3} - indexinfo = tracker.GetAvailableDownStreanUKIndexInfo(tableID, oriTi, data) + indexinfo = tracker.GetAvailableDownStreamUKIndexInfo(tableID, oriTi, data) c.Assert(indexinfo, NotNil) - tracker.downstreamTracker.tableInfos.Delete(tableID) + tracker.dsTracker.tableInfos.Delete(tableID) // downstream has union uk but data has null mock.ExpectQuery("SHOW CREATE TABLE " + tableID).WillReturnRows( sqlmock.NewRows([]string{"Table", "Create Table"}). 
AddRow("test", "create table t(a int, b int, c varchar(10), unique key(a, b))")) - indexinfo, err = tracker.GetDownStreamIndexInfo(tcontext.Background(), tableID, oriTi, baseConn) + indexinfo, err = tracker.GetDownStreamIndexInfo(tcontext.Background(), tableID, oriTi) c.Assert(err, IsNil) c.Assert(indexinfo, IsNil) data = []interface{}{1, nil, 3} - indexinfo = tracker.GetAvailableDownStreanUKIndexInfo(tableID, oriTi, data) + indexinfo = tracker.GetAvailableDownStreamUKIndexInfo(tableID, oriTi, data) c.Assert(indexinfo, IsNil) - tracker.downstreamTracker.tableInfos.Delete(tableID) + tracker.dsTracker.tableInfos.Delete(tableID) // downstream has union uk but data has null mock.ExpectQuery("SHOW CREATE TABLE " + tableID).WillReturnRows( sqlmock.NewRows([]string{"Table", "Create Table"}). AddRow("test", "create table t(a int, b int, c varchar(10), unique key(a, b))")) - indexinfo, err = tracker.GetDownStreamIndexInfo(tcontext.Background(), tableID, oriTi, baseConn) + indexinfo, err = tracker.GetDownStreamIndexInfo(tcontext.Background(), tableID, oriTi) c.Assert(err, IsNil) c.Assert(indexinfo, IsNil) data = []interface{}{1, 2, 3} - indexinfo = tracker.GetAvailableDownStreanUKIndexInfo(tableID, oriTi, data) + indexinfo = tracker.GetAvailableDownStreamUKIndexInfo(tableID, oriTi, data) c.Assert(indexinfo, NotNil) - tracker.downstreamTracker.tableInfos.Delete(tableID) + tracker.dsTracker.tableInfos.Delete(tableID) } func (s *trackerSuite) TestReTrackDownStreamIndex(c *C) { @@ -797,34 +758,35 @@ func (s *trackerSuite) TestReTrackDownStreamIndex(c *C) { baseConn := conn.NewBaseConn(con, nil) tracker, err := NewTracker(context.Background(), "test-tracker", defaultTestSessionCfg, baseConn) c.Assert(err, IsNil) + mock.ExpectExec(fmt.Sprintf("SET SESSION SQL_MODE = '%s'", defaultSQLMode)).WillReturnResult(sqlmock.NewResult(0, 0)) tableID := "`test`.`test`" mock.ExpectQuery("SHOW CREATE TABLE " + tableID).WillReturnRows( sqlmock.NewRows([]string{"Table", "Create Table"}). 
AddRow("test", "create table t(a int, b int, c varchar(10), PRIMARY KEY (a,b))")) - _, err = tracker.GetDownStreamIndexInfo(tcontext.Background(), tableID, oriTi, baseConn) + _, err = tracker.GetDownStreamIndexInfo(tcontext.Background(), tableID, oriTi) c.Assert(err, IsNil) - _, ok := tracker.downstreamTracker.tableInfos.Load(tableID) + _, ok := tracker.dsTracker.tableInfos.Load(tableID) c.Assert(ok, IsTrue) // just table targetTables := []*filter.Table{{Schema: "test", Name: "a"}, {Schema: "test", Name: "test"}} tracker.ReTrackDownStreamIndex(targetTables) - _, ok = tracker.downstreamTracker.tableInfos.Load(tableID) + _, ok = tracker.dsTracker.tableInfos.Load(tableID) c.Assert(ok, IsFalse) mock.ExpectQuery("SHOW CREATE TABLE " + tableID).WillReturnRows( sqlmock.NewRows([]string{"Table", "Create Table"}). AddRow("test", "create table t(a int, b int, c varchar(10), PRIMARY KEY (a,b))")) - _, err = tracker.GetDownStreamIndexInfo(tcontext.Background(), tableID, oriTi, baseConn) + _, err = tracker.GetDownStreamIndexInfo(tcontext.Background(), tableID, oriTi) c.Assert(err, IsNil) - _, ok = tracker.downstreamTracker.tableInfos.Load(tableID) + _, ok = tracker.dsTracker.tableInfos.Load(tableID) c.Assert(ok, IsTrue) // just schema targetTables = []*filter.Table{{Schema: "test", Name: "a"}, {Schema: "test", Name: ""}} tracker.ReTrackDownStreamIndex(targetTables) - _, ok = tracker.downstreamTracker.tableInfos.Load(tableID) + _, ok = tracker.dsTracker.tableInfos.Load(tableID) c.Assert(ok, IsFalse) } diff --git a/pkg/terror/error_list.go b/pkg/terror/error_list.go index 1eb74ca913..4d29241b60 100644 --- a/pkg/terror/error_list.go +++ b/pkg/terror/error_list.go @@ -604,6 +604,9 @@ const ( codeSchemaTrackerRestoreStmtFail codeSchemaTrackerCannotDropTable codeSchemaTrackerInit + codeSchemaTrackerCannotSetDownstreamSQLMode + codeSchemaTrackerCannotInitDownstreamParser + codeSchemaTrackerCannotMockDownstreamTable ) // HA scheduler. 
@@ -1235,7 +1238,13 @@ var ( "fail to restore the statement", "") ErrSchemaTrackerCannotDropTable = New(codeSchemaTrackerCannotDropTable, ClassSchemaTracker, ScopeInternal, LevelHigh, "failed to drop table for %v in schema tracker", "") - ErrSchemaTrackerInit = New(codeSchemaTrackerInit, ClassSchemaTracker, ScopeInternal, LevelHigh, "failed to create schema tracker", "") + ErrSchemaTrackerInit = New(codeSchemaTrackerInit, ClassSchemaTracker, ScopeInternal, LevelHigh, "failed to create schema tracker", "") + ErrSchemaTrackerCannotSetDownstreamSQLMode = New(codeSchemaTrackerCannotSetDownstreamSQLMode, ClassSchemaTracker, ScopeInternal, LevelHigh, + "failed to set default downstream sql_mode %v in schema tracker", "") + ErrSchemaTrackerCannotInitDownstreamParser = New(codeSchemaTrackerCannotInitDownstreamParser, ClassSchemaTracker, ScopeInternal, LevelHigh, + "failed to init downstream parser by sql_mode %v in schema tracker", "") + ErrSchemaTrackerCannotMockDownstreamTable = New(codeSchemaTrackerCannotMockDownstreamTable, ClassSchemaTracker, ScopeInternal, LevelHigh, + "failed to mock downstream table by create table statement %v in schema tracker", "") // HA scheduler. 
ErrSchedulerNotStarted = New(codeSchedulerNotStarted, ClassScheduler, ScopeInternal, LevelHigh, "the scheduler has not started", "") diff --git a/syncer/dml.go b/syncer/dml.go index 331be28e97..4394165434 100644 --- a/syncer/dml.go +++ b/syncer/dml.go @@ -118,7 +118,7 @@ func (s *Syncer) genUpdateSQLs( ) // if downstream pk/uk(not null) exits, then use downstream pk/uk(not null) - defaultIndexColumns, err := s.schemaTracker.GetDownStreamIndexInfo(tctx, tableID, ti, s.ddlDBConn.BaseConn) + defaultIndexColumns, err := s.schemaTracker.GetDownStreamIndexInfo(tctx, tableID, ti) if err != nil { return nil, nil, nil, err } @@ -173,7 +173,7 @@ RowLoop: } if defaultIndexColumns == nil { - defaultIndexColumns = s.schemaTracker.GetAvailableDownStreanUKIndexInfo(tableID, ti, oriOldValues) + defaultIndexColumns = s.schemaTracker.GetAvailableDownStreamUKIndexInfo(tableID, ti, oriOldValues) } ks := genMultipleKeys(ti, oriOldValues, tableID) @@ -235,7 +235,7 @@ func (s *Syncer) genDeleteSQLs(tctx *tcontext.Context, param *genDMLParam, filte ) // if downstream pk/uk(not null) exits, then use downstream pk/uk(not null) - defaultIndexColumns, err := s.schemaTracker.GetDownStreamIndexInfo(tctx, tableID, ti, s.ddlDBConn.BaseConn) + defaultIndexColumns, err := s.schemaTracker.GetDownStreamIndexInfo(tctx, tableID, ti) if err != nil { return nil, nil, nil, err } @@ -260,7 +260,7 @@ RowLoop: } if defaultIndexColumns == nil { - defaultIndexColumns = s.schemaTracker.GetAvailableDownStreanUKIndexInfo(tableID, ti, value) + defaultIndexColumns = s.schemaTracker.GetAvailableDownStreamUKIndexInfo(tableID, ti, value) } ks := genMultipleKeys(ti, value, tableID) diff --git a/syncer/syncer.go b/syncer/syncer.go index 7c2b966afb..744f8b6c41 100644 --- a/syncer/syncer.go +++ b/syncer/syncer.go @@ -139,10 +139,11 @@ type Syncer struct { fromDB *dbconn.UpStreamConn - toDB *conn.BaseDB - toDBConns []*dbconn.DBConn - ddlDB *conn.BaseDB - ddlDBConn *dbconn.DBConn + toDB *conn.BaseDB + toDBConns 
[]*dbconn.DBConn + ddlDB *conn.BaseDB + ddlDBConn *dbconn.DBConn + downstreamTrackConn *dbconn.DBConn jobs []chan *job jobsClosed atomic.Bool @@ -317,7 +318,7 @@ func (s *Syncer) Init(ctx context.Context) (err error) { } rollbackHolder.Add(fr.FuncRollback{Name: "close-DBs", Fn: s.closeDBs}) - s.schemaTracker, err = schema.NewTracker(ctx, s.cfg.Name, s.cfg.To.Session, s.ddlDBConn.BaseConn) + s.schemaTracker, err = schema.NewTracker(ctx, s.cfg.Name, s.cfg.To.Session, s.downstreamTrackConn.BaseConn) if err != nil { return terror.ErrSchemaTrackerInit.Delegate(err) } @@ -576,6 +577,11 @@ func (s *Syncer) resetDBs(tctx *tcontext.Context) error { return terror.WithScope(err, terror.ScopeDownstream) } + err = s.downstreamTrackConn.ResetConn(tctx) + if err != nil { + return terror.WithScope(err, terror.ScopeDownstream) + } + err = s.checkpoint.ResetConn(tctx) if err != nil { return terror.WithScope(err, terror.ScopeDownstream) @@ -3127,13 +3133,14 @@ func (s *Syncer) createDBs(ctx context.Context) error { dbCfg.RawDBCfg = config.DefaultRawDBConfig().SetReadTimeout(maxDDLConnectionTimeout) var ddlDBConns []*dbconn.DBConn - s.ddlDB, ddlDBConns, err = dbconn.CreateConns(s.tctx, s.cfg, dbCfg, 1) + s.ddlDB, ddlDBConns, err = dbconn.CreateConns(s.tctx, s.cfg, dbCfg, 2) if err != nil { dbconn.CloseUpstreamConn(s.tctx, s.fromDB) dbconn.CloseBaseDB(s.tctx, s.toDB) return err } s.ddlDBConn = ddlDBConns[0] + s.downstreamTrackConn = ddlDBConns[1] printServerVersion(s.tctx, s.fromDB.BaseDB, "upstream") printServerVersion(s.tctx, s.toDB, "downstream") From 08eca2a61aab11e611dec023072e6268082edcb8 Mon Sep 17 00:00:00 2001 From: WizardXiao Date: Thu, 21 Oct 2021 14:42:18 +0800 Subject: [PATCH 10/14] commit-message: fix fail or panic in unit-test --- pkg/schema/tracker.go | 57 ++++----- pkg/schema/tracker_test.go | 56 ++++---- syncer/syncer_test.go | 253 +++---------------------------------- 3 files changed, 70 insertions(+), 296 deletions(-) diff --git a/pkg/schema/tracker.go 
b/pkg/schema/tracker.go index 2bc6b3372c..88adfe256a 100644 --- a/pkg/schema/tracker.go +++ b/pkg/schema/tracker.go @@ -17,7 +17,6 @@ import ( "context" "fmt" "strings" - "sync" "github.com/pingcap/errors" "github.com/pingcap/parser" @@ -50,7 +49,7 @@ const ( TiDBClusteredIndex = "tidb_enable_clustered_index" // downstream mock table id, consists of serial numbers of letters. mockTableID = 121402101900011104 - defaultSQLMode = "ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION" + DefaultSQLMode = "ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION" ) var ( @@ -72,10 +71,9 @@ type Tracker struct { // downstreamTracker tracks downstream schema. type downstreamTracker struct { - trackMutex sync.Mutex // downstream track mutex - downstreamConn *conn.BaseConn // downstream connection - stmtParser *parser.Parser // statement parser - tableInfos sync.Map // downstream table infos + downstreamConn *conn.BaseConn // downstream connection + stmtParser *parser.Parser // statement parser + tableInfos map[string]*downstreamTableInfo // downstream table infos } // downstreamTableInfo contains tableinfo and index cache. @@ -184,6 +182,7 @@ func NewTracker(ctx context.Context, task string, sessionCfg map[string]string, // init downstreamTracker dsTracker := &downstreamTracker{ downstreamConn: downstreamConn, + tableInfos: make(map[string]*downstreamTableInfo), } return &Tracker{ @@ -365,38 +364,29 @@ func (tr *Tracker) GetSystemVar(name string) (string, bool) { // GetDownStreamIndexInfo gets downstream PK/UK(not null) Index. // note. this function will init downstreamTrack's table info. 
func (tr *Tracker) GetDownStreamIndexInfo(tctx *tcontext.Context, tableID string, originTi *model.TableInfo) (*model.IndexInfo, error) { - dti, ok := tr.dsTracker.tableInfos.Load(tableID) + dti, ok := tr.dsTracker.tableInfos[tableID] if !ok { - tr.dsTracker.trackMutex.Lock() - defer tr.dsTracker.trackMutex.Unlock() - // index info maybe has been inited by other routine - dti, ok = tr.dsTracker.tableInfos.Load(tableID) - if ok { - return dti.(*downstreamTableInfo).indexCache, nil - } - - log.L().Info("DownStream schema tracker init. ", zap.String("tableID", tableID)) + log.L().Info("Downstream schema tracker init. ", zap.String("tableID", tableID)) ti, err := tr.getTIByCreateStmt(tctx, tableID, originTi.Name.O) if err != nil { + log.L().Error("Init dowstream schema info error. ", zap.String("tableID", tableID), zap.Error(err)) return nil, err } dti = getDownStreamTi(ti, originTi) - tr.dsTracker.tableInfos.Store(tableID, dti) + tr.dsTracker.tableInfos[tableID] = dti } - return dti.(*downstreamTableInfo).indexCache, nil + return dti.indexCache, nil } // GetAvailableDownStreamUKIndexInfo gets available downstream UK whose data is not null. // note. this function will not init downstreamTrack. 
func (tr *Tracker) GetAvailableDownStreamUKIndexInfo(tableID string, originTi *model.TableInfo, data []interface{}) *model.IndexInfo { - dtii, ok := tr.dsTracker.tableInfos.Load(tableID) + dti, ok := tr.dsTracker.tableInfos[tableID] - if !ok || dtii.(*downstreamTableInfo).availableUKCache == nil || len(dtii.(*downstreamTableInfo).availableUKCache) == 0 { + if !ok || len(dti.availableUKCache) == 0 { return nil } - - dti := dtii.(*downstreamTableInfo) // func for check data is not null fn := func(i int) bool { return data[i] != nil @@ -425,19 +415,19 @@ func (tr *Tracker) ReTrackDownStreamIndex(targetTables []*filter.Table) { for i := 0; i < len(targetTables); i++ { tableID := utils.GenTableID(targetTables[i]) - _, ok := tr.dsTracker.tableInfos.LoadAndDelete(tableID) + _, ok := tr.dsTracker.tableInfos[tableID] if !ok { // handle just have schema if targetTables[i].Schema != "" && targetTables[i].Name == "" { - tr.dsTracker.tableInfos.Range(func(k, v interface{}) bool { - if strings.HasPrefix(k.(string), tableID+".") { - tr.dsTracker.tableInfos.Delete(k) + for k := range tr.dsTracker.tableInfos { + if strings.HasPrefix(k, tableID+".") { + delete(tr.dsTracker.tableInfos, k) + log.L().Info("Remove downstream schema tracker", zap.String("tableID", tableID)) } - return true - }) - log.L().Info("Remove downstream schema tracker", zap.String("schema", targetTables[i].Schema)) + } } } else { + delete(tr.dsTracker.tableInfos, tableID) log.L().Info("Remove downstream schema tracker", zap.String("tableID", tableID)) } } @@ -481,15 +471,14 @@ func (tr *Tracker) getTIByCreateStmt(tctx *tcontext.Context, tableID string, ori // initDownStreamTrackerParser init downstream tracker parser by default sql_mode. 
func (tr *Tracker) initDownStreamSQLModeAndParser(tctx *tcontext.Context) error { - setSQLMode := fmt.Sprintf("SET SESSION SQL_MODE = '%s'", defaultSQLMode) + setSQLMode := fmt.Sprintf("SET SESSION SQL_MODE = '%s'", DefaultSQLMode) _, err := tr.dsTracker.downstreamConn.DBConn.ExecContext(tctx.Ctx, setSQLMode) if err != nil { - return dterror.ErrSchemaTrackerCannotSetDownstreamSQLMode.Delegate(err, defaultSQLMode) + return dterror.ErrSchemaTrackerCannotSetDownstreamSQLMode.Delegate(err, DefaultSQLMode) } - - stmtParser, err := utils.GetParserFromSQLModeStr(defaultSQLMode) + stmtParser, err := utils.GetParserFromSQLModeStr(DefaultSQLMode) if err != nil { - return dterror.ErrSchemaTrackerCannotInitDownstreamParser.Delegate(err, defaultSQLMode) + return dterror.ErrSchemaTrackerCannotInitDownstreamParser.Delegate(err, DefaultSQLMode) } tr.dsTracker.stmtParser = stmtParser return nil diff --git a/pkg/schema/tracker_test.go b/pkg/schema/tracker_test.go index ba03d3d4c6..bea32a0456 100644 --- a/pkg/schema/tracker_test.go +++ b/pkg/schema/tracker_test.go @@ -543,7 +543,7 @@ func (s *trackerSuite) TestInitDownStreamSQLModeAndParser(c *C) { tracker, err := NewTracker(context.Background(), "test-tracker", defaultTestSessionCfg, baseConn) c.Assert(err, IsNil) - mock.ExpectExec(fmt.Sprintf("SET SESSION SQL_MODE = '%s'", defaultSQLMode)).WillReturnResult(sqlmock.NewResult(0, 0)) + mock.ExpectExec(fmt.Sprintf("SET SESSION SQL_MODE = '%s'", DefaultSQLMode)).WillReturnResult(sqlmock.NewResult(0, 0)) tctx := tcontext.NewContext(context.Background(), dlog.L()) err = tracker.initDownStreamSQLModeAndParser(tctx) @@ -571,7 +571,7 @@ func (s *trackerSuite) TestGetDownStreamIndexInfo(c *C) { baseConn := conn.NewBaseConn(con, nil) tracker, err := NewTracker(context.Background(), "test-tracker", defaultTestSessionCfg, baseConn) c.Assert(err, IsNil) - mock.ExpectExec(fmt.Sprintf("SET SESSION SQL_MODE = '%s'", defaultSQLMode)).WillReturnResult(sqlmock.NewResult(0, 0)) + 
mock.ExpectExec(fmt.Sprintf("SET SESSION SQL_MODE = '%s'", DefaultSQLMode)).WillReturnResult(sqlmock.NewResult(0, 0)) tableID := "`test`.`test`" @@ -581,10 +581,10 @@ func (s *trackerSuite) TestGetDownStreamIndexInfo(c *C) { AddRow("test", "create table t(a int, b int, c varchar(10))")) indexinfo, err := tracker.GetDownStreamIndexInfo(tcontext.Background(), tableID, oriTi) c.Assert(err, IsNil) - _, ok := tracker.dsTracker.tableInfos.Load(tableID) + _, ok := tracker.dsTracker.tableInfos[tableID] c.Assert(ok, IsTrue) c.Assert(indexinfo, IsNil) - tracker.dsTracker.tableInfos.Delete(tableID) + delete(tracker.dsTracker.tableInfos, tableID) // downstream has pk(not constraints like "create table t(a int primary key,b int not null)" mock.ExpectQuery("SHOW CREATE TABLE " + tableID).WillReturnRows( @@ -592,10 +592,10 @@ func (s *trackerSuite) TestGetDownStreamIndexInfo(c *C) { AddRow("test", "create table t(a int, b int, c varchar(10), PRIMARY KEY (c))")) indexinfo, err = tracker.GetDownStreamIndexInfo(tcontext.Background(), tableID, oriTi) c.Assert(err, IsNil) - _, ok = tracker.dsTracker.tableInfos.Load(tableID) + _, ok = tracker.dsTracker.tableInfos[tableID] c.Assert(ok, IsTrue) c.Assert(indexinfo, NotNil) - tracker.dsTracker.tableInfos.Delete(tableID) + delete(tracker.dsTracker.tableInfos, tableID) // downstream has composite pks mock.ExpectQuery("SHOW CREATE TABLE " + tableID).WillReturnRows( @@ -603,10 +603,10 @@ func (s *trackerSuite) TestGetDownStreamIndexInfo(c *C) { AddRow("test", "create table t(a int, b int, c varchar(10), PRIMARY KEY (a,b))")) indexinfo, err = tracker.GetDownStreamIndexInfo(tcontext.Background(), tableID, oriTi) c.Assert(err, IsNil) - _, ok = tracker.dsTracker.tableInfos.Load(tableID) + _, ok = tracker.dsTracker.tableInfos[tableID] c.Assert(ok, IsTrue) c.Assert(len(indexinfo.Columns) == 2, IsTrue) - tracker.dsTracker.tableInfos.Delete(tableID) + delete(tracker.dsTracker.tableInfos, tableID) // downstream has uk(not null) mock.ExpectQuery("SHOW 
CREATE TABLE " + tableID).WillReturnRows( @@ -614,10 +614,10 @@ func (s *trackerSuite) TestGetDownStreamIndexInfo(c *C) { AddRow("test", "create table t(a int unique not null, b int, c varchar(10))")) indexinfo, err = tracker.GetDownStreamIndexInfo(tcontext.Background(), tableID, oriTi) c.Assert(err, IsNil) - _, ok = tracker.dsTracker.tableInfos.Load(tableID) + _, ok = tracker.dsTracker.tableInfos[tableID] c.Assert(ok, IsTrue) c.Assert(indexinfo.Columns, NotNil) - tracker.dsTracker.tableInfos.Delete(tableID) + delete(tracker.dsTracker.tableInfos, tableID) // downstream has uk(without not null) mock.ExpectQuery("SHOW CREATE TABLE " + tableID).WillReturnRows( @@ -625,11 +625,11 @@ func (s *trackerSuite) TestGetDownStreamIndexInfo(c *C) { AddRow("test", "create table t(a int unique, b int, c varchar(10))")) indexinfo, err = tracker.GetDownStreamIndexInfo(tcontext.Background(), tableID, oriTi) c.Assert(err, IsNil) - dti, ok := tracker.dsTracker.tableInfos.Load(tableID) + dti, ok := tracker.dsTracker.tableInfos[tableID] c.Assert(ok, IsTrue) c.Assert(indexinfo, IsNil) - c.Assert(dti.(*downstreamTableInfo).availableUKCache, NotNil) - tracker.dsTracker.tableInfos.Delete(tableID) + c.Assert(dti.availableUKCache, NotNil) + delete(tracker.dsTracker.tableInfos, tableID) // downstream has uks mock.ExpectQuery("SHOW CREATE TABLE " + tableID).WillReturnRows( @@ -637,11 +637,11 @@ func (s *trackerSuite) TestGetDownStreamIndexInfo(c *C) { AddRow("test", "create table t(a int unique, b int unique, c varchar(10) unique not null)")) indexinfo, err = tracker.GetDownStreamIndexInfo(tcontext.Background(), tableID, oriTi) c.Assert(err, IsNil) - dti, ok = tracker.dsTracker.tableInfos.Load(tableID) + dti, ok = tracker.dsTracker.tableInfos[tableID] c.Assert(ok, IsTrue) c.Assert(indexinfo, NotNil) - c.Assert(len(dti.(*downstreamTableInfo).availableUKCache) == 2, IsTrue) - tracker.dsTracker.tableInfos.Delete(tableID) + c.Assert(len(dti.availableUKCache) == 2, IsTrue) + 
delete(tracker.dsTracker.tableInfos, tableID) // downstream has pk and uk, pk has priority mock.ExpectQuery("SHOW CREATE TABLE " + tableID).WillReturnRows( @@ -650,7 +650,7 @@ func (s *trackerSuite) TestGetDownStreamIndexInfo(c *C) { indexinfo, err = tracker.GetDownStreamIndexInfo(tcontext.Background(), tableID, oriTi) c.Assert(err, IsNil) c.Assert(indexinfo.Primary, IsTrue) - tracker.dsTracker.tableInfos.Delete(tableID) + delete(tracker.dsTracker.tableInfos, tableID) } func (s *trackerSuite) TestGetAvailableDownStreanUKIndexInfo(c *C) { @@ -673,7 +673,7 @@ func (s *trackerSuite) TestGetAvailableDownStreanUKIndexInfo(c *C) { baseConn := conn.NewBaseConn(con, nil) tracker, err := NewTracker(context.Background(), "test-tracker", defaultTestSessionCfg, baseConn) c.Assert(err, IsNil) - mock.ExpectExec(fmt.Sprintf("SET SESSION SQL_MODE = '%s'", defaultSQLMode)).WillReturnResult(sqlmock.NewResult(0, 0)) + mock.ExpectExec(fmt.Sprintf("SET SESSION SQL_MODE = '%s'", DefaultSQLMode)).WillReturnResult(sqlmock.NewResult(0, 0)) tableID := "`test`.`test`" @@ -687,7 +687,7 @@ func (s *trackerSuite) TestGetAvailableDownStreanUKIndexInfo(c *C) { data := []interface{}{1, 2, 3} indexinfo = tracker.GetAvailableDownStreamUKIndexInfo(tableID, oriTi, data) c.Assert(indexinfo, IsNil) - tracker.dsTracker.tableInfos.Delete(tableID) + delete(tracker.dsTracker.tableInfos, tableID) // downstream has uk but data is null mock.ExpectQuery("SHOW CREATE TABLE " + tableID).WillReturnRows( @@ -699,7 +699,7 @@ func (s *trackerSuite) TestGetAvailableDownStreanUKIndexInfo(c *C) { data = []interface{}{nil, 2, 3} indexinfo = tracker.GetAvailableDownStreamUKIndexInfo(tableID, oriTi, data) c.Assert(indexinfo, IsNil) - tracker.dsTracker.tableInfos.Delete(tableID) + delete(tracker.dsTracker.tableInfos, tableID) // downstream has uk and data is not null mock.ExpectQuery("SHOW CREATE TABLE " + tableID).WillReturnRows( @@ -711,7 +711,7 @@ func (s *trackerSuite) TestGetAvailableDownStreanUKIndexInfo(c *C) { data 
= []interface{}{1, 2, 3} indexinfo = tracker.GetAvailableDownStreamUKIndexInfo(tableID, oriTi, data) c.Assert(indexinfo, NotNil) - tracker.dsTracker.tableInfos.Delete(tableID) + delete(tracker.dsTracker.tableInfos, tableID) // downstream has union uk but data has null mock.ExpectQuery("SHOW CREATE TABLE " + tableID).WillReturnRows( @@ -723,7 +723,7 @@ func (s *trackerSuite) TestGetAvailableDownStreanUKIndexInfo(c *C) { data = []interface{}{1, nil, 3} indexinfo = tracker.GetAvailableDownStreamUKIndexInfo(tableID, oriTi, data) c.Assert(indexinfo, IsNil) - tracker.dsTracker.tableInfos.Delete(tableID) + delete(tracker.dsTracker.tableInfos, tableID) // downstream has union uk but data has null mock.ExpectQuery("SHOW CREATE TABLE " + tableID).WillReturnRows( @@ -735,7 +735,7 @@ func (s *trackerSuite) TestGetAvailableDownStreanUKIndexInfo(c *C) { data = []interface{}{1, 2, 3} indexinfo = tracker.GetAvailableDownStreamUKIndexInfo(tableID, oriTi, data) c.Assert(indexinfo, NotNil) - tracker.dsTracker.tableInfos.Delete(tableID) + delete(tracker.dsTracker.tableInfos, tableID) } func (s *trackerSuite) TestReTrackDownStreamIndex(c *C) { @@ -758,7 +758,7 @@ func (s *trackerSuite) TestReTrackDownStreamIndex(c *C) { baseConn := conn.NewBaseConn(con, nil) tracker, err := NewTracker(context.Background(), "test-tracker", defaultTestSessionCfg, baseConn) c.Assert(err, IsNil) - mock.ExpectExec(fmt.Sprintf("SET SESSION SQL_MODE = '%s'", defaultSQLMode)).WillReturnResult(sqlmock.NewResult(0, 0)) + mock.ExpectExec(fmt.Sprintf("SET SESSION SQL_MODE = '%s'", DefaultSQLMode)).WillReturnResult(sqlmock.NewResult(0, 0)) tableID := "`test`.`test`" @@ -767,13 +767,13 @@ func (s *trackerSuite) TestReTrackDownStreamIndex(c *C) { AddRow("test", "create table t(a int, b int, c varchar(10), PRIMARY KEY (a,b))")) _, err = tracker.GetDownStreamIndexInfo(tcontext.Background(), tableID, oriTi) c.Assert(err, IsNil) - _, ok := tracker.dsTracker.tableInfos.Load(tableID) + _, ok := 
tracker.dsTracker.tableInfos[tableID] c.Assert(ok, IsTrue) // just table targetTables := []*filter.Table{{Schema: "test", Name: "a"}, {Schema: "test", Name: "test"}} tracker.ReTrackDownStreamIndex(targetTables) - _, ok = tracker.dsTracker.tableInfos.Load(tableID) + _, ok = tracker.dsTracker.tableInfos[tableID] c.Assert(ok, IsFalse) mock.ExpectQuery("SHOW CREATE TABLE " + tableID).WillReturnRows( @@ -781,12 +781,12 @@ func (s *trackerSuite) TestReTrackDownStreamIndex(c *C) { AddRow("test", "create table t(a int, b int, c varchar(10), PRIMARY KEY (a,b))")) _, err = tracker.GetDownStreamIndexInfo(tcontext.Background(), tableID, oriTi) c.Assert(err, IsNil) - _, ok = tracker.dsTracker.tableInfos.Load(tableID) + _, ok = tracker.dsTracker.tableInfos[tableID] c.Assert(ok, IsTrue) // just schema targetTables = []*filter.Table{{Schema: "test", Name: "a"}, {Schema: "test", Name: ""}} tracker.ReTrackDownStreamIndex(targetTables) - _, ok = tracker.dsTracker.tableInfos.Load(tableID) + _, ok = tracker.dsTracker.tableInfos[tableID] c.Assert(ok, IsFalse) } diff --git a/syncer/syncer_test.go b/syncer/syncer_test.go index 6af3367448..8c7473c2bf 100644 --- a/syncer/syncer_test.go +++ b/syncer/syncer_test.go @@ -27,7 +27,6 @@ import ( sqlmock "github.com/DATA-DOG/go-sqlmock" "github.com/pingcap/failpoint" - "github.com/pingcap/parser/model" "github.com/pingcap/tidb/infoschema" "github.com/pingcap/dm/dm/config" @@ -43,7 +42,6 @@ import ( "github.com/pingcap/dm/pkg/retry" "github.com/pingcap/dm/pkg/schema" streamer2 "github.com/pingcap/dm/pkg/streamer" - "github.com/pingcap/dm/pkg/utils" "github.com/pingcap/dm/syncer/dbconn" "github.com/go-mysql-org/go-mysql/mysql" @@ -91,7 +89,6 @@ const ( ) type testSyncerSuite struct { - db *sql.DB cfg *config.SubTaskConfig eventsGenerator *event.Generator } @@ -711,235 +708,6 @@ func (s *testSyncerSuite) TestColumnMapping(c *C) { } } -func (s *testSyncerSuite) TestGeneratedColumn(c *C) { - // TODO Currently mock eventGenerator don't support generate 
json,varchar field event, so use real mysql binlog event here - _, err := s.db.Exec("SET GLOBAL binlog_format = 'ROW';") - c.Assert(err, IsNil) - - pos, gset, err := utils.GetMasterStatus(context.Background(), s.db, "mysql") - c.Assert(err, IsNil) - - //nolint:errcheck - defer s.db.Exec("drop database if exists gctest_1") - - s.cfg.BAList = &filter.Rules{ - DoDBs: []string{"~^gctest_.*"}, - } - - createSQLs := []string{ - "create database if not exists gctest_1 DEFAULT CHARSET=utf8mb4", - "create table if not exists gctest_1.t_1(id int, age int, cfg varchar(40), cfg_json json as (cfg) virtual)", - "create table if not exists gctest_1.t_2(id int primary key, age int, cfg varchar(40), cfg_json json as (cfg) virtual)", - "create table if not exists gctest_1.t_3(id int, cfg varchar(40), gen_id int as (cfg->\"$.id\"), unique key gen_id_unique(`gen_id`))", - } - - // if table has json typed generated column but doesn't have primary key or unique key, - // update/delete operation will not be replicated successfully because json field can't - // compared with raw value in where condition. In unit test we only check generated SQL - // and don't check the data replication to downstream. 
- testCases := []struct { - sqls []string - expected []string - args [][]interface{} - }{ - { - []string{ - "insert into gctest_1.t_1(id, age, cfg) values (1, 18, '{}')", - "insert into gctest_1.t_1(id, age, cfg) values (2, 19, '{\"key\": \"value\"}')", - "insert into gctest_1.t_1(id, age, cfg) values (3, 17, NULL)", - "insert into gctest_1.t_2(id, age, cfg) values (1, 18, '{}')", - "insert into gctest_1.t_2(id, age, cfg) values (2, 19, '{\"key\": \"value\", \"int\": 123}')", - "insert into gctest_1.t_2(id, age, cfg) values (3, 17, NULL)", - "insert into gctest_1.t_3(id, cfg) values (1, '{\"id\": 1}')", - "insert into gctest_1.t_3(id, cfg) values (2, '{\"id\": 2}')", - "insert into gctest_1.t_3(id, cfg) values (3, '{\"id\": 3}')", - }, - []string{ - "INSERT INTO `gctest_1`.`t_1` (`id`,`age`,`cfg`) VALUES (?,?,?)", - "INSERT INTO `gctest_1`.`t_1` (`id`,`age`,`cfg`) VALUES (?,?,?)", - "INSERT INTO `gctest_1`.`t_1` (`id`,`age`,`cfg`) VALUES (?,?,?)", - "INSERT INTO `gctest_1`.`t_2` (`id`,`age`,`cfg`) VALUES (?,?,?)", - "INSERT INTO `gctest_1`.`t_2` (`id`,`age`,`cfg`) VALUES (?,?,?)", - "INSERT INTO `gctest_1`.`t_2` (`id`,`age`,`cfg`) VALUES (?,?,?)", - "INSERT INTO `gctest_1`.`t_3` (`id`,`cfg`) VALUES (?,?)", - "INSERT INTO `gctest_1`.`t_3` (`id`,`cfg`) VALUES (?,?)", - "INSERT INTO `gctest_1`.`t_3` (`id`,`cfg`) VALUES (?,?)", - }, - [][]interface{}{ - {int32(1), int32(18), "{}"}, - {int32(2), int32(19), "{\"key\": \"value\"}"}, - {int32(3), int32(17), nil}, - {int32(1), int32(18), "{}"}, - {int32(2), int32(19), "{\"key\": \"value\", \"int\": 123}"}, - {int32(3), int32(17), nil}, - {int32(1), "{\"id\": 1}"}, - {int32(2), "{\"id\": 2}"}, - {int32(3), "{\"id\": 3}"}, - }, - }, - { - []string{ - "update gctest_1.t_1 set cfg = '{\"a\": 12}', age = 21 where id = 1", - "update gctest_1.t_1 set cfg = '{}' where id = 2 and age = 19", - "update gctest_1.t_1 set age = 20 where cfg is NULL", - "update gctest_1.t_2 set cfg = '{\"a\": 12}', age = 21 where id = 1", - "update 
gctest_1.t_2 set cfg = '{}' where id = 2 and age = 19", - "update gctest_1.t_2 set age = 20 where cfg is NULL", - "update gctest_1.t_3 set cfg = '{\"id\": 11}' where id = 1", - "update gctest_1.t_3 set cfg = '{\"id\": 12, \"old_id\": 2}' where gen_id = 2", - }, - []string{ - "UPDATE `gctest_1`.`t_1` SET `id` = ?, `age` = ?, `cfg` = ? WHERE `id` = ? AND `age` = ? AND `cfg` = ? AND `cfg_json` = ? LIMIT 1", - "UPDATE `gctest_1`.`t_1` SET `id` = ?, `age` = ?, `cfg` = ? WHERE `id` = ? AND `age` = ? AND `cfg` = ? AND `cfg_json` = ? LIMIT 1", - "UPDATE `gctest_1`.`t_1` SET `id` = ?, `age` = ?, `cfg` = ? WHERE `id` = ? AND `age` = ? AND `cfg` IS ? AND `cfg_json` IS ? LIMIT 1", - "UPDATE `gctest_1`.`t_2` SET `id` = ?, `age` = ?, `cfg` = ? WHERE `id` = ? LIMIT 1", - "UPDATE `gctest_1`.`t_2` SET `id` = ?, `age` = ?, `cfg` = ? WHERE `id` = ? LIMIT 1", - "UPDATE `gctest_1`.`t_2` SET `id` = ?, `age` = ?, `cfg` = ? WHERE `id` = ? LIMIT 1", - "UPDATE `gctest_1`.`t_3` SET `id` = ?, `cfg` = ? WHERE `gen_id` = ? LIMIT 1", - "UPDATE `gctest_1`.`t_3` SET `id` = ?, `cfg` = ? WHERE `gen_id` = ? 
LIMIT 1", - }, - [][]interface{}{ - {int32(1), int32(21), "{\"a\": 12}", int32(1), int32(18), "{}", []uint8("{}")}, - {int32(2), int32(19), "{}", int32(2), int32(19), "{\"key\": \"value\"}", []uint8("{\"key\":\"value\"}")}, - {int32(3), int32(20), nil, int32(3), int32(17), nil, nil}, - {int32(1), int32(21), "{\"a\": 12}", int32(1)}, - {int32(2), int32(19), "{}", int32(2)}, - {int32(3), int32(20), nil, int32(3)}, - {int32(1), "{\"id\": 11}", int32(1)}, - {int32(2), "{\"id\": 12, \"old_id\": 2}", int32(2)}, - }, - }, - { - []string{ - "delete from gctest_1.t_1 where id = 1", - "delete from gctest_1.t_1 where id = 2 and age = 19", - "delete from gctest_1.t_1 where cfg is NULL", - "delete from gctest_1.t_2 where id = 1", - "delete from gctest_1.t_2 where id = 2 and age = 19", - "delete from gctest_1.t_2 where cfg is NULL", - "delete from gctest_1.t_3 where id = 1", - "delete from gctest_1.t_3 where gen_id = 12", - }, - []string{ - "DELETE FROM `gctest_1`.`t_1` WHERE `id` = ? AND `age` = ? AND `cfg` = ? AND `cfg_json` = ? LIMIT 1", - "DELETE FROM `gctest_1`.`t_1` WHERE `id` = ? AND `age` = ? AND `cfg` = ? AND `cfg_json` = ? LIMIT 1", - "DELETE FROM `gctest_1`.`t_1` WHERE `id` = ? AND `age` = ? AND `cfg` IS ? AND `cfg_json` IS ? LIMIT 1", - "DELETE FROM `gctest_1`.`t_2` WHERE `id` = ? LIMIT 1", - "DELETE FROM `gctest_1`.`t_2` WHERE `id` = ? LIMIT 1", - "DELETE FROM `gctest_1`.`t_2` WHERE `id` = ? LIMIT 1", - "DELETE FROM `gctest_1`.`t_3` WHERE `gen_id` = ? LIMIT 1", - "DELETE FROM `gctest_1`.`t_3` WHERE `gen_id` = ? 
LIMIT 1", - }, - [][]interface{}{ - {int32(1), int32(21), "{\"a\": 12}", []uint8("{\"a\":12}")}, - {int32(2), int32(19), "{}", []uint8("{}")}, - {int32(3), int32(20), nil, nil}, - {int32(1)}, - {int32(2)}, - {int32(3)}, - {int32(11)}, - {int32(12)}, - }, - }, - } - - dropSQLs := []string{ - "drop table gctest_1.t_1", - "drop table gctest_1.t_2", - "drop table gctest_1.t_3", - "drop database gctest_1", - } - - for _, sql := range createSQLs { - _, err = s.db.Exec(sql) - c.Assert(err, IsNil) - } - - syncer := NewSyncer(s.cfg, nil) - // use upstream dbConn as mock downstream - dbConn, err := s.db.Conn(context.Background()) - c.Assert(err, IsNil) - baseDB := conn.NewBaseDB(s.db, func() {}) - syncer.fromDB = &dbconn.UpStreamConn{BaseDB: baseDB} - syncer.ddlDB = baseDB - syncer.ddlDBConn = &dbconn.DBConn{Cfg: s.cfg, BaseConn: conn.NewBaseConn(dbConn, &retry.FiniteRetryStrategy{})} - syncer.toDBConns = []*dbconn.DBConn{{Cfg: s.cfg, BaseConn: conn.NewBaseConn(dbConn, &retry.FiniteRetryStrategy{})}} - c.Assert(syncer.setSyncCfg(), IsNil) - syncer.schemaTracker, err = schema.NewTracker(context.Background(), s.cfg.Name, defaultTestSessionCfg, syncer.ddlDBConn.BaseConn) - c.Assert(err, IsNil) - syncer.reset() - - syncer.streamerController = NewStreamerController(syncer.syncCfg, true, syncer.fromDB, syncer.binlogType, syncer.cfg.RelayDir, syncer.timezone) - err = syncer.streamerController.Start(tcontext.Background(), binlog.InitLocation(pos, gset)) - c.Assert(err, IsNil) - - for _, testCase := range testCases { - for _, sql := range testCase.sqls { - _, err = s.db.Exec(sql) - c.Assert(err, IsNil, Commentf("sql: %s", sql)) - } - idx := 0 - for { - if idx >= len(testCase.sqls) { - break - } - var e *replication.BinlogEvent - e, err = syncer.streamerController.GetEvent(tcontext.Background()) - c.Assert(err, IsNil) - switch ev := e.Event.(type) { - case *replication.RowsEvent: - table := &filter.Table{ - Schema: string(ev.Table.Schema), - Name: string(ev.Table.Table), - } - var ti 
*model.TableInfo - ti, err = syncer.getTableInfo(tcontext.Background(), table, table) - c.Assert(err, IsNil) - var ( - sqls []string - args [][]interface{} - ) - - prunedColumns, prunedRows, err2 := pruneGeneratedColumnDML(ti, ev.Rows) - c.Assert(err2, IsNil) - param := &genDMLParam{ - tableID: utils.GenTableID(table), - data: prunedRows, - originalData: ev.Rows, - columns: prunedColumns, - sourceTableInfo: ti, - } - switch e.Header.EventType { - case replication.WRITE_ROWS_EVENTv0, replication.WRITE_ROWS_EVENTv1, replication.WRITE_ROWS_EVENTv2: - sqls, _, args, err = syncer.genInsertSQLs(param, nil) - c.Assert(err, IsNil) - c.Assert(sqls[0], Equals, testCase.expected[idx]) - c.Assert(args[0], DeepEquals, testCase.args[idx]) - case replication.UPDATE_ROWS_EVENTv0, replication.UPDATE_ROWS_EVENTv1, replication.UPDATE_ROWS_EVENTv2: - // test with sql_mode = false only - sqls, _, args, err = syncer.genUpdateSQLs(tcontext.Background(), param, nil, nil) - c.Assert(err, IsNil) - c.Assert(sqls[0], Equals, testCase.expected[idx]) - c.Assert(args[0], DeepEquals, testCase.args[idx]) - case replication.DELETE_ROWS_EVENTv0, replication.DELETE_ROWS_EVENTv1, replication.DELETE_ROWS_EVENTv2: - sqls, _, args, err = syncer.genDeleteSQLs(tcontext.Background(), param, nil) - c.Assert(err, IsNil) - c.Assert(sqls[0], Equals, testCase.expected[idx]) - c.Assert(args[0], DeepEquals, testCase.args[idx]) - } - idx++ - default: - continue - } - } - } - - for _, sql := range dropSQLs { - _, err = s.db.Exec(sql) - c.Assert(err, IsNil) - } -} - func (s *testSyncerSuite) TestcheckpointID(c *C) { cfg, err := s.cfg.Clone() c.Assert(err, IsNil) @@ -1002,7 +770,17 @@ func (s *testSyncerSuite) TestRun(c *C) { {Cfg: s.cfg, BaseConn: conn.NewBaseConn(dbConn, &retry.FiniteRetryStrategy{})}, } syncer.ddlDBConn = &dbconn.DBConn{Cfg: s.cfg, BaseConn: conn.NewBaseConn(dbConn, &retry.FiniteRetryStrategy{})} - syncer.schemaTracker, err = schema.NewTracker(context.Background(), s.cfg.Name, 
defaultTestSessionCfg, syncer.ddlDBConn.BaseConn) + syncer.downstreamTrackConn = &dbconn.DBConn{Cfg: s.cfg, BaseConn: conn.NewBaseConn(dbConn, &retry.FiniteRetryStrategy{})} + syncer.schemaTracker, err = schema.NewTracker(context.Background(), s.cfg.Name, defaultTestSessionCfg, syncer.downstreamTrackConn.BaseConn) + mock.ExpectExec(fmt.Sprintf("SET SESSION SQL_MODE = '%s'", schema.DefaultSQLMode)).WillReturnResult(sqlmock.NewResult(0, 0)) + mock.ExpectQuery("SHOW CREATE TABLE " + "`test_1`.`t_1`").WillReturnRows( + sqlmock.NewRows([]string{"Table", "Create Table"}). + AddRow("t_1", "create table t_1(id int primary key, name varchar(24))")) + s.mockGetServerUnixTS(mock) + mock.ExpectQuery("SHOW CREATE TABLE " + "`test_1`.`t_2`").WillReturnRows( + sqlmock.NewRows([]string{"Table", "Create Table"}). + AddRow("t_2", "create table t_2(id int primary key, name varchar(24))")) + syncer.exprFilterGroup = NewExprFilterGroup(nil) c.Assert(err, IsNil) c.Assert(syncer.Type(), Equals, pb.UnitType_Sync) @@ -1014,7 +792,6 @@ func (s *testSyncerSuite) TestRun(c *C) { syncer.setupMockCheckpoint(c, checkPointDBConn, checkPointMock) syncer.reset() - s.mockGetServerUnixTS(mock) events1 := mockBinlogEvents{ mockBinlogEvent{typ: DBCreate, args: []interface{}{"test_1"}}, mockBinlogEvent{typ: TableCreate, args: []interface{}{"test_1", "create table test_1.t_1(id int primary key, name varchar(24))"}}, @@ -1220,6 +997,7 @@ func (s *testSyncerSuite) TestRun(c *C) { } func (s *testSyncerSuite) TestExitSafeModeByConfig(c *C) { + db, mock, err := sqlmock.New() c.Assert(err, IsNil) s.mockGetServerUnixTS(mock) @@ -1249,6 +1027,13 @@ func (s *testSyncerSuite) TestExitSafeModeByConfig(c *C) { {Cfg: s.cfg, BaseConn: conn.NewBaseConn(dbConn, &retry.FiniteRetryStrategy{})}, } syncer.ddlDBConn = &dbconn.DBConn{Cfg: s.cfg, BaseConn: conn.NewBaseConn(dbConn, &retry.FiniteRetryStrategy{})} + syncer.downstreamTrackConn = &dbconn.DBConn{Cfg: s.cfg, BaseConn: conn.NewBaseConn(dbConn, 
&retry.FiniteRetryStrategy{})} + mock.ExpectExec(fmt.Sprintf("SET SESSION SQL_MODE = '%s'", schema.DefaultSQLMode)).WillReturnResult(sqlmock.NewResult(0, 0)) + + mock.ExpectQuery("SHOW CREATE TABLE " + "`test_1`.`t_1`").WillReturnRows( + sqlmock.NewRows([]string{"Table", "Create Table"}). + AddRow("t_1", "create table t_1(id int primary key, name varchar(24))")) + syncer.schemaTracker, err = schema.NewTracker(context.Background(), s.cfg.Name, defaultTestSessionCfg, syncer.ddlDBConn.BaseConn) syncer.exprFilterGroup = NewExprFilterGroup(nil) c.Assert(err, IsNil) From 816abf284259e50d01db9fdf5d0879cd0d01042f Mon Sep 17 00:00:00 2001 From: WizardXiao Date: Thu, 21 Oct 2021 14:49:59 +0800 Subject: [PATCH 11/14] commit-message: update fmt and comment for const --- pkg/schema/tracker.go | 3 ++- syncer/syncer_test.go | 1 - 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/schema/tracker.go b/pkg/schema/tracker.go index 88adfe256a..657d666a97 100644 --- a/pkg/schema/tracker.go +++ b/pkg/schema/tracker.go @@ -48,7 +48,8 @@ const ( // TiDBClusteredIndex is the variable name for clustered index. TiDBClusteredIndex = "tidb_enable_clustered_index" // downstream mock table id, consists of serial numbers of letters. - mockTableID = 121402101900011104 + mockTableID = 121402101900011104 + // DefaultSQLMode is downstream schema track and paser SQLMode. 
DefaultSQLMode = "ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION" ) diff --git a/syncer/syncer_test.go b/syncer/syncer_test.go index 8c7473c2bf..8644fc5bc8 100644 --- a/syncer/syncer_test.go +++ b/syncer/syncer_test.go @@ -997,7 +997,6 @@ func (s *testSyncerSuite) TestRun(c *C) { } func (s *testSyncerSuite) TestExitSafeModeByConfig(c *C) { - db, mock, err := sqlmock.New() c.Assert(err, IsNil) s.mockGetServerUnixTS(mock) From e47faa279bdea24e41a6bf68ba4068979f680311 Mon Sep 17 00:00:00 2001 From: WizardXiao Date: Thu, 21 Oct 2021 16:39:50 +0800 Subject: [PATCH 12/14] commit-message: update downstream conn to dbconn --- pkg/schema/tracker.go | 24 +++++++------- pkg/schema/tracker_test.go | 57 +++++++++++++++++++------------- syncer/expr_filter_group_test.go | 11 ++++-- syncer/syncer.go | 4 +-- syncer/syncer_test.go | 15 +++++---- 5 files changed, 65 insertions(+), 46 deletions(-) diff --git a/pkg/schema/tracker.go b/pkg/schema/tracker.go index 657d666a97..b4330bb49d 100644 --- a/pkg/schema/tracker.go +++ b/pkg/schema/tracker.go @@ -37,11 +37,11 @@ import ( "github.com/pingcap/tidb/util/mock" "go.uber.org/zap" - "github.com/pingcap/dm/pkg/conn" tcontext "github.com/pingcap/dm/pkg/context" "github.com/pingcap/dm/pkg/log" dterror "github.com/pingcap/dm/pkg/terror" "github.com/pingcap/dm/pkg/utils" + "github.com/pingcap/dm/syncer/dbconn" ) const ( @@ -72,7 +72,7 @@ type Tracker struct { // downstreamTracker tracks downstream schema. type downstreamTracker struct { - downstreamConn *conn.BaseConn // downstream connection + downstreamConn *dbconn.DBConn // downstream connection stmtParser *parser.Parser // statement parser tableInfos map[string]*downstreamTableInfo // downstream table infos } @@ -87,7 +87,7 @@ type downstreamTableInfo struct { // NewTracker creates a new tracker. 
`sessionCfg` will be set as tracker's session variables if specified, or retrieve // some variable from downstream using `downstreamConn`. // NOTE **sessionCfg is a reference to caller**. -func NewTracker(ctx context.Context, task string, sessionCfg map[string]string, downstreamConn *conn.BaseConn) (*Tracker, error) { +func NewTracker(ctx context.Context, task string, sessionCfg map[string]string, downstreamConn *dbconn.DBConn) (*Tracker, error) { // NOTE: tidb uses a **global** config so can't isolate tracker's config from each other. If that isolation is needed, // we might SetGlobalConfig before every call to tracker, or use some patch like https://github.com/bouk/monkey tidbConfig.UpdateGlobal(func(conf *tidbConfig.Config) { @@ -368,7 +368,7 @@ func (tr *Tracker) GetDownStreamIndexInfo(tctx *tcontext.Context, tableID string dti, ok := tr.dsTracker.tableInfos[tableID] if !ok { log.L().Info("Downstream schema tracker init. ", zap.String("tableID", tableID)) - ti, err := tr.getTIByCreateStmt(tctx, tableID, originTi.Name.O) + ti, err := tr.getTableInfoByCreateStmt(tctx, tableID, originTi.Name.O) if err != nil { log.L().Error("Init dowstream schema info error. ", zap.String("tableID", tableID), zap.Error(err)) return nil, err @@ -408,18 +408,18 @@ func (tr *Tracker) GetAvailableDownStreamUKIndexInfo(tableID string, originTi *m return nil } -// ReTrackDownStreamIndex just remove schema or table in downstreamTrack. -func (tr *Tracker) ReTrackDownStreamIndex(targetTables []*filter.Table) { +// RemoveDownstreamSchema just remove schema or table in downstreamTrack. 
+func (tr *Tracker) RemoveDownstreamSchema(targetTables []*filter.Table) { if len(targetTables) == 0 { return } - for i := 0; i < len(targetTables); i++ { - tableID := utils.GenTableID(targetTables[i]) + for _, targetTable := range targetTables { + tableID := utils.GenTableID(targetTable) _, ok := tr.dsTracker.tableInfos[tableID] if !ok { // handle just have schema - if targetTables[i].Schema != "" && targetTables[i].Name == "" { + if targetTable.Schema != "" && targetTable.Name == "" { for k := range tr.dsTracker.tableInfos { if strings.HasPrefix(k, tableID+".") { delete(tr.dsTracker.tableInfos, k) @@ -434,8 +434,8 @@ func (tr *Tracker) ReTrackDownStreamIndex(targetTables []*filter.Table) { } } -// getTIByCreateStmt get downstream tableInfo by "SHOW CREATE TABLE" stmt. -func (tr *Tracker) getTIByCreateStmt(tctx *tcontext.Context, tableID string, originTableName string) (*model.TableInfo, error) { +// getTableInfoByCreateStmt get downstream tableInfo by "SHOW CREATE TABLE" stmt. +func (tr *Tracker) getTableInfoByCreateStmt(tctx *tcontext.Context, tableID string, originTableName string) (*model.TableInfo, error) { if tr.dsTracker.stmtParser == nil { err := tr.initDownStreamSQLModeAndParser(tctx) if err != nil { @@ -473,7 +473,7 @@ func (tr *Tracker) getTIByCreateStmt(tctx *tcontext.Context, tableID string, ori // initDownStreamTrackerParser init downstream tracker parser by default sql_mode. 
func (tr *Tracker) initDownStreamSQLModeAndParser(tctx *tcontext.Context) error { setSQLMode := fmt.Sprintf("SET SESSION SQL_MODE = '%s'", DefaultSQLMode) - _, err := tr.dsTracker.downstreamConn.DBConn.ExecContext(tctx.Ctx, setSQLMode) + _, err := tr.dsTracker.downstreamConn.BaseConn.DBConn.ExecContext(tctx.Ctx, setSQLMode) if err != nil { return dterror.ErrSchemaTrackerCannotSetDownstreamSQLMode.Delegate(err, DefaultSQLMode) } diff --git a/pkg/schema/tracker_test.go b/pkg/schema/tracker_test.go index bea32a0456..faa6d82dc0 100644 --- a/pkg/schema/tracker_test.go +++ b/pkg/schema/tracker_test.go @@ -33,7 +33,9 @@ import ( timock "github.com/pingcap/tidb/util/mock" "go.uber.org/zap/zapcore" + "github.com/pingcap/dm/dm/config" dlog "github.com/pingcap/dm/pkg/log" + "github.com/pingcap/dm/syncer/dbconn" tcontext "github.com/pingcap/dm/pkg/context" @@ -49,12 +51,14 @@ var _ = Suite(&trackerSuite{}) var defaultTestSessionCfg = map[string]string{"sql_mode": "ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION"} type trackerSuite struct { - baseConn *conn.BaseConn + dbConn *dbconn.DBConn db *sql.DB backupKeys []string + cfg *config.SubTaskConfig } func (s *trackerSuite) SetUpSuite(c *C) { + s.cfg = &config.SubTaskConfig{} s.backupKeys = downstreamVars downstreamVars = []string{"sql_mode"} db, _, err := sqlmock.New() @@ -62,7 +66,7 @@ func (s *trackerSuite) SetUpSuite(c *C) { c.Assert(err, IsNil) con, err := db.Conn(context.Background()) c.Assert(err, IsNil) - s.baseConn = conn.NewBaseConn(con, nil) + s.dbConn = &dbconn.DBConn{Cfg: s.cfg, BaseConn: conn.NewBaseConn(con, nil)} } func (s *trackerSuite) TearDownSuite(c *C) { @@ -83,28 +87,29 @@ func (s *trackerSuite) TestTiDBAndSessionCfg(c *C) { con, err := db.Conn(context.Background()) c.Assert(err, IsNil) baseConn := conn.NewBaseConn(con, nil) - + dbConn := &dbconn.DBConn{Cfg: s.cfg, BaseConn: baseConn} // user give correct session config 
- _, err = NewTracker(context.Background(), "test-tracker", defaultTestSessionCfg, baseConn) + + _, err = NewTracker(context.Background(), "test-tracker", defaultTestSessionCfg, dbConn) c.Assert(err, IsNil) // user give wrong session session, will return error sessionCfg := map[string]string{"sql_mode": "HaHa"} - _, err = NewTracker(context.Background(), "test-tracker", sessionCfg, baseConn) + _, err = NewTracker(context.Background(), "test-tracker", sessionCfg, dbConn) c.Assert(err, NotNil) // discover session config failed, will return error mock.ExpectQuery("SHOW VARIABLES LIKE 'sql_mode'").WillReturnRows( sqlmock.NewRows([]string{"Variable_name", "Value"}). AddRow("sql_mode", "HaHa")) - _, err = NewTracker(context.Background(), "test-tracker", nil, baseConn) + _, err = NewTracker(context.Background(), "test-tracker", nil, dbConn) c.Assert(err, NotNil) // empty or default config in downstream mock.ExpectQuery("SHOW VARIABLES LIKE 'sql_mode'").WillReturnRows( sqlmock.NewRows([]string{"Variable_name", "Value"}). AddRow("sql_mode", defaultTestSessionCfg["sql_mode"])) - tracker, err := NewTracker(context.Background(), "test-tracker", nil, baseConn) + tracker, err := NewTracker(context.Background(), "test-tracker", nil, dbConn) c.Assert(err, IsNil) c.Assert(mock.ExpectationsWereMet(), IsNil) err = tracker.Exec(context.Background(), "", "create database testdb;") @@ -114,7 +119,7 @@ func (s *trackerSuite) TestTiDBAndSessionCfg(c *C) { mock.ExpectQuery("SHOW VARIABLES LIKE 'sql_mode'").WillReturnRows( sqlmock.NewRows([]string{"Variable_name", "Value"}). 
AddRow("sql_mode", "ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_DATE,NO_ZERO_IN_DATE")) - tracker, err = NewTracker(context.Background(), "test-tracker", nil, baseConn) + tracker, err = NewTracker(context.Background(), "test-tracker", nil, dbConn) c.Assert(err, IsNil) c.Assert(mock.ExpectationsWereMet(), IsNil) c.Assert(tracker.se.GetSessionVars().SQLMode.HasOnlyFullGroupBy(), IsTrue) @@ -131,7 +136,7 @@ func (s *trackerSuite) TestTiDBAndSessionCfg(c *C) { // user set session config, get tracker config from downstream // no `STRICT_TRANS_TABLES`, no error now sessionCfg = map[string]string{"sql_mode": "NO_ZERO_DATE,NO_ZERO_IN_DATE,ANSI_QUOTES"} - tracker, err = NewTracker(context.Background(), "test-tracker", sessionCfg, baseConn) + tracker, err = NewTracker(context.Background(), "test-tracker", sessionCfg, dbConn) c.Assert(err, IsNil) c.Assert(mock.ExpectationsWereMet(), IsNil) @@ -163,7 +168,7 @@ func (s *trackerSuite) TestTiDBAndSessionCfg(c *C) { "sql_mode": "NO_ZERO_DATE,NO_ZERO_IN_DATE,ANSI_QUOTES", "tidb_enable_clustered_index": "ON", } - tracker, err = NewTracker(context.Background(), "test-tracker", sessionCfg, baseConn) + tracker, err = NewTracker(context.Background(), "test-tracker", sessionCfg, dbConn) c.Assert(err, IsNil) c.Assert(mock.ExpectationsWereMet(), IsNil) @@ -183,7 +188,7 @@ func (s *trackerSuite) TestDDL(c *C) { Name: "foo", } - tracker, err := NewTracker(context.Background(), "test-tracker", defaultTestSessionCfg, s.baseConn) + tracker, err := NewTracker(context.Background(), "test-tracker", defaultTestSessionCfg, s.dbConn) c.Assert(err, IsNil) // Table shouldn't exist before initialization. 
@@ -249,7 +254,7 @@ func (s *trackerSuite) TestDDL(c *C) { func (s *trackerSuite) TestGetSingleColumnIndices(c *C) { log.SetLevel(zapcore.ErrorLevel) - tracker, err := NewTracker(context.Background(), "test-tracker", defaultTestSessionCfg, s.baseConn) + tracker, err := NewTracker(context.Background(), "test-tracker", defaultTestSessionCfg, s.dbConn) c.Assert(err, IsNil) ctx := context.Background() @@ -288,7 +293,7 @@ func (s *trackerSuite) TestGetSingleColumnIndices(c *C) { func (s *trackerSuite) TestCreateSchemaIfNotExists(c *C) { log.SetLevel(zapcore.ErrorLevel) - tracker, err := NewTracker(context.Background(), "test-tracker", defaultTestSessionCfg, s.baseConn) + tracker, err := NewTracker(context.Background(), "test-tracker", defaultTestSessionCfg, s.dbConn) c.Assert(err, IsNil) // We cannot create a table without a database. @@ -316,7 +321,7 @@ func (s *trackerSuite) TestCreateSchemaIfNotExists(c *C) { func (s *trackerSuite) TestMultiDrop(c *C) { log.SetLevel(zapcore.ErrorLevel) - tracker, err := NewTracker(context.Background(), "test-tracker", defaultTestSessionCfg, s.baseConn) + tracker, err := NewTracker(context.Background(), "test-tracker", defaultTestSessionCfg, s.dbConn) c.Assert(err, IsNil) ctx := context.Background() @@ -364,7 +369,7 @@ func (s *trackerSuite) TestCreateTableIfNotExists(c *C) { Name: "foo", } - tracker, err := NewTracker(context.Background(), "test-tracker", defaultTestSessionCfg, s.baseConn) + tracker, err := NewTracker(context.Background(), "test-tracker", defaultTestSessionCfg, s.dbConn) c.Assert(err, IsNil) // Create some sort of complicated table. @@ -444,7 +449,7 @@ func (s *trackerSuite) TestAllSchemas(c *C) { log.SetLevel(zapcore.ErrorLevel) ctx := context.Background() - tracker, err := NewTracker(context.Background(), "test-tracker", defaultTestSessionCfg, s.baseConn) + tracker, err := NewTracker(context.Background(), "test-tracker", defaultTestSessionCfg, s.dbConn) c.Assert(err, IsNil) // nothing should exist... 
@@ -518,6 +523,7 @@ func (s *trackerSuite) TestNotSupportedVariable(c *C) { con, err := db.Conn(context.Background()) c.Assert(err, IsNil) baseConn := conn.NewBaseConn(con, nil) + dbConn := &dbconn.DBConn{Cfg: s.cfg, BaseConn: baseConn} mock.ExpectQuery("SHOW VARIABLES LIKE 'sql_mode'").WillReturnRows( sqlmock.NewRows([]string{"Variable_name", "Value"}). @@ -526,7 +532,7 @@ func (s *trackerSuite) TestNotSupportedVariable(c *C) { oldSessionVar := map[string]string{ "tidb_enable_change_column_type": "ON", } - _, err = NewTracker(context.Background(), "test-tracker", oldSessionVar, baseConn) + _, err = NewTracker(context.Background(), "test-tracker", oldSessionVar, dbConn) c.Assert(err, IsNil) } @@ -540,7 +546,9 @@ func (s *trackerSuite) TestInitDownStreamSQLModeAndParser(c *C) { con, err := db.Conn(context.Background()) c.Assert(err, IsNil) baseConn := conn.NewBaseConn(con, nil) - tracker, err := NewTracker(context.Background(), "test-tracker", defaultTestSessionCfg, baseConn) + dbConn := &dbconn.DBConn{Cfg: s.cfg, BaseConn: baseConn} + + tracker, err := NewTracker(context.Background(), "test-tracker", defaultTestSessionCfg, dbConn) c.Assert(err, IsNil) mock.ExpectExec(fmt.Sprintf("SET SESSION SQL_MODE = '%s'", DefaultSQLMode)).WillReturnResult(sqlmock.NewResult(0, 0)) @@ -569,7 +577,8 @@ func (s *trackerSuite) TestGetDownStreamIndexInfo(c *C) { con, err := db.Conn(context.Background()) c.Assert(err, IsNil) baseConn := conn.NewBaseConn(con, nil) - tracker, err := NewTracker(context.Background(), "test-tracker", defaultTestSessionCfg, baseConn) + dbConn := &dbconn.DBConn{Cfg: s.cfg, BaseConn: baseConn} + tracker, err := NewTracker(context.Background(), "test-tracker", defaultTestSessionCfg, dbConn) c.Assert(err, IsNil) mock.ExpectExec(fmt.Sprintf("SET SESSION SQL_MODE = '%s'", DefaultSQLMode)).WillReturnResult(sqlmock.NewResult(0, 0)) @@ -671,7 +680,8 @@ func (s *trackerSuite) TestGetAvailableDownStreanUKIndexInfo(c *C) { con, err := db.Conn(context.Background()) 
c.Assert(err, IsNil) baseConn := conn.NewBaseConn(con, nil) - tracker, err := NewTracker(context.Background(), "test-tracker", defaultTestSessionCfg, baseConn) + dbConn := &dbconn.DBConn{Cfg: s.cfg, BaseConn: baseConn} + tracker, err := NewTracker(context.Background(), "test-tracker", defaultTestSessionCfg, dbConn) c.Assert(err, IsNil) mock.ExpectExec(fmt.Sprintf("SET SESSION SQL_MODE = '%s'", DefaultSQLMode)).WillReturnResult(sqlmock.NewResult(0, 0)) @@ -756,7 +766,8 @@ func (s *trackerSuite) TestReTrackDownStreamIndex(c *C) { con, err := db.Conn(context.Background()) c.Assert(err, IsNil) baseConn := conn.NewBaseConn(con, nil) - tracker, err := NewTracker(context.Background(), "test-tracker", defaultTestSessionCfg, baseConn) + dbConn := &dbconn.DBConn{Cfg: s.cfg, BaseConn: baseConn} + tracker, err := NewTracker(context.Background(), "test-tracker", defaultTestSessionCfg, dbConn) c.Assert(err, IsNil) mock.ExpectExec(fmt.Sprintf("SET SESSION SQL_MODE = '%s'", DefaultSQLMode)).WillReturnResult(sqlmock.NewResult(0, 0)) @@ -772,7 +783,7 @@ func (s *trackerSuite) TestReTrackDownStreamIndex(c *C) { // just table targetTables := []*filter.Table{{Schema: "test", Name: "a"}, {Schema: "test", Name: "test"}} - tracker.ReTrackDownStreamIndex(targetTables) + tracker.RemoveDownstreamSchema(targetTables) _, ok = tracker.dsTracker.tableInfos[tableID] c.Assert(ok, IsFalse) @@ -786,7 +797,7 @@ func (s *trackerSuite) TestReTrackDownStreamIndex(c *C) { // just schema targetTables = []*filter.Table{{Schema: "test", Name: "a"}, {Schema: "test", Name: ""}} - tracker.ReTrackDownStreamIndex(targetTables) + tracker.RemoveDownstreamSchema(targetTables) _, ok = tracker.dsTracker.tableInfos[tableID] c.Assert(ok, IsFalse) } diff --git a/syncer/expr_filter_group_test.go b/syncer/expr_filter_group_test.go index 8ca2f1198d..6476856f04 100644 --- a/syncer/expr_filter_group_test.go +++ b/syncer/expr_filter_group_test.go @@ -22,6 +22,7 @@ import ( "github.com/pingcap/dm/dm/config" 
"github.com/pingcap/dm/pkg/log" "github.com/pingcap/dm/pkg/schema" + "github.com/pingcap/dm/syncer/dbconn" ) func (s *testFilterSuite) TestSkipDMLByExpression(c *C) { @@ -91,8 +92,9 @@ create table t ( ) c.Assert(log.InitLogger(&log.Config{Level: "debug"}), IsNil) + dbConn := &dbconn.DBConn{Cfg: &config.SubTaskConfig{}, BaseConn: s.baseConn} for _, ca := range cases { - schemaTracker, err := schema.NewTracker(ctx, "unit-test", defaultTestSessionCfg, s.baseConn) + schemaTracker, err := schema.NewTracker(ctx, "unit-test", defaultTestSessionCfg, dbConn) c.Assert(err, IsNil) c.Assert(schemaTracker.CreateSchemaIfNotExists(dbName), IsNil) c.Assert(schemaTracker.Exec(ctx, dbName, ca.tableStr), IsNil) @@ -348,9 +350,10 @@ create table t ( ) c.Assert(log.InitLogger(&log.Config{Level: "debug"}), IsNil) + dbConn := &dbconn.DBConn{Cfg: &config.SubTaskConfig{}, BaseConn: s.baseConn} for _, ca := range cases { c.Log(ca.tableStr) - schemaTracker, err := schema.NewTracker(ctx, "unit-test", defaultTestSessionCfg, s.baseConn) + schemaTracker, err := schema.NewTracker(ctx, "unit-test", defaultTestSessionCfg, dbConn) c.Assert(err, IsNil) c.Assert(schemaTracker.CreateSchemaIfNotExists(dbName), IsNil) c.Assert(schemaTracker.Exec(ctx, dbName, ca.tableStr), IsNil) @@ -398,7 +401,9 @@ create table t ( );` exprStr = "d > 1" ) - schemaTracker, err := schema.NewTracker(ctx, "unit-test", defaultTestSessionCfg, s.baseConn) + + dbConn := &dbconn.DBConn{Cfg: &config.SubTaskConfig{}, BaseConn: s.baseConn} + schemaTracker, err := schema.NewTracker(ctx, "unit-test", defaultTestSessionCfg, dbConn) c.Assert(err, IsNil) c.Assert(schemaTracker.CreateSchemaIfNotExists(dbName), IsNil) c.Assert(schemaTracker.Exec(ctx, dbName, tableStr), IsNil) diff --git a/syncer/syncer.go b/syncer/syncer.go index 640de74a87..b87152e53f 100644 --- a/syncer/syncer.go +++ b/syncer/syncer.go @@ -311,7 +311,7 @@ func (s *Syncer) Init(ctx context.Context) (err error) { } rollbackHolder.Add(fr.FuncRollback{Name: "close-DBs", Fn: 
s.closeDBs}) - s.schemaTracker, err = schema.NewTracker(ctx, s.cfg.Name, s.cfg.To.Session, s.downstreamTrackConn.BaseConn) + s.schemaTracker, err = schema.NewTracker(ctx, s.cfg.Name, s.cfg.To.Session, s.downstreamTrackConn) if err != nil { return terror.ErrSchemaTrackerInit.Delegate(err) } @@ -2730,7 +2730,7 @@ func (s *Syncer) trackDDL(usedSchema string, trackInfo *ddlInfo, ec *eventContex } if shouldReTrackDownstreamIndex { - s.schemaTracker.ReTrackDownStreamIndex(targetTables) + s.schemaTracker.RemoveDownstreamSchema(targetTables) } if shouldSchemaExist { diff --git a/syncer/syncer_test.go b/syncer/syncer_test.go index 8644fc5bc8..6b4c3140b8 100644 --- a/syncer/syncer_test.go +++ b/syncer/syncer_test.go @@ -771,7 +771,7 @@ func (s *testSyncerSuite) TestRun(c *C) { } syncer.ddlDBConn = &dbconn.DBConn{Cfg: s.cfg, BaseConn: conn.NewBaseConn(dbConn, &retry.FiniteRetryStrategy{})} syncer.downstreamTrackConn = &dbconn.DBConn{Cfg: s.cfg, BaseConn: conn.NewBaseConn(dbConn, &retry.FiniteRetryStrategy{})} - syncer.schemaTracker, err = schema.NewTracker(context.Background(), s.cfg.Name, defaultTestSessionCfg, syncer.downstreamTrackConn.BaseConn) + syncer.schemaTracker, err = schema.NewTracker(context.Background(), s.cfg.Name, defaultTestSessionCfg, syncer.downstreamTrackConn) mock.ExpectExec(fmt.Sprintf("SET SESSION SQL_MODE = '%s'", schema.DefaultSQLMode)).WillReturnResult(sqlmock.NewResult(0, 0)) mock.ExpectQuery("SHOW CREATE TABLE " + "`test_1`.`t_1`").WillReturnRows( sqlmock.NewRows([]string{"Table", "Create Table"}). @@ -1033,7 +1033,7 @@ func (s *testSyncerSuite) TestExitSafeModeByConfig(c *C) { sqlmock.NewRows([]string{"Table", "Create Table"}). 
AddRow("t_1", "create table t_1(id int primary key, name varchar(24))")) - syncer.schemaTracker, err = schema.NewTracker(context.Background(), s.cfg.Name, defaultTestSessionCfg, syncer.ddlDBConn.BaseConn) + syncer.schemaTracker, err = schema.NewTracker(context.Background(), s.cfg.Name, defaultTestSessionCfg, syncer.ddlDBConn) syncer.exprFilterGroup = NewExprFilterGroup(nil) c.Assert(err, IsNil) c.Assert(syncer.Type(), Equals, pb.UnitType_Sync) @@ -1221,7 +1221,7 @@ func (s *testSyncerSuite) TestTrackDDL(c *C) { } syncer.ddlDBConn = &dbconn.DBConn{Cfg: s.cfg, BaseConn: conn.NewBaseConn(dbConn, &retry.FiniteRetryStrategy{})} syncer.checkpoint.(*RemoteCheckPoint).dbConn = &dbconn.DBConn{Cfg: s.cfg, BaseConn: conn.NewBaseConn(checkPointDBConn, &retry.FiniteRetryStrategy{})} - syncer.schemaTracker, err = schema.NewTracker(context.Background(), s.cfg.Name, defaultTestSessionCfg, syncer.ddlDBConn.BaseConn) + syncer.schemaTracker, err = schema.NewTracker(context.Background(), s.cfg.Name, defaultTestSessionCfg, syncer.ddlDBConn) syncer.exprFilterGroup = NewExprFilterGroup(nil) c.Assert(syncer.genRouter(), IsNil) c.Assert(err, IsNil) @@ -1480,7 +1480,8 @@ func (s *testSyncerSuite) TestTrackDownstreamTableWontOverwrite(c *C) { c.Assert(err, IsNil) baseConn := conn.NewBaseConn(dbConn, &retry.FiniteRetryStrategy{}) syncer.ddlDBConn = &dbconn.DBConn{Cfg: s.cfg, BaseConn: baseConn} - syncer.schemaTracker, err = schema.NewTracker(ctx, s.cfg.Name, defaultTestSessionCfg, baseConn) + syncer.downstreamTrackConn = &dbconn.DBConn{Cfg: s.cfg, BaseConn: conn.NewBaseConn(dbConn, &retry.FiniteRetryStrategy{})} + syncer.schemaTracker, err = schema.NewTracker(ctx, s.cfg.Name, defaultTestSessionCfg, syncer.downstreamTrackConn) c.Assert(err, IsNil) upTable := &filter.Table{ @@ -1522,7 +1523,8 @@ func (s *testSyncerSuite) TestDownstreamTableHasAutoRandom(c *C) { c.Assert(err, IsNil) baseConn := conn.NewBaseConn(dbConn, &retry.FiniteRetryStrategy{}) syncer.ddlDBConn = &dbconn.DBConn{Cfg: s.cfg, 
BaseConn: baseConn} - syncer.schemaTracker, err = schema.NewTracker(ctx, s.cfg.Name, defaultTestSessionCfg, baseConn) + syncer.downstreamTrackConn = &dbconn.DBConn{Cfg: s.cfg, BaseConn: conn.NewBaseConn(dbConn, &retry.FiniteRetryStrategy{})} + syncer.schemaTracker, err = schema.NewTracker(ctx, s.cfg.Name, defaultTestSessionCfg, syncer.downstreamTrackConn) c.Assert(err, IsNil) schemaName := "test" @@ -1562,7 +1564,8 @@ func (s *testSyncerSuite) TestDownstreamTableHasAutoRandom(c *C) { "tidb_skip_utf8_check": "0", schema.TiDBClusteredIndex: "ON", } - syncer.schemaTracker, err = schema.NewTracker(ctx, s.cfg.Name, sessionCfg, baseConn) + syncer.downstreamTrackConn = &dbconn.DBConn{Cfg: s.cfg, BaseConn: conn.NewBaseConn(dbConn, &retry.FiniteRetryStrategy{})} + syncer.schemaTracker, err = schema.NewTracker(ctx, s.cfg.Name, sessionCfg, syncer.downstreamTrackConn) c.Assert(err, IsNil) v, ok := syncer.schemaTracker.GetSystemVar(schema.TiDBClusteredIndex) c.Assert(v, Equals, "ON") From aeac93fe2c15b0c56f2fe4b15d98632923393428 Mon Sep 17 00:00:00 2001 From: WizardXiao Date: Tue, 26 Oct 2021 17:16:55 +0800 Subject: [PATCH 13/14] commit-message: add sql check by failpoint --- pkg/schema/tracker.go | 18 +++++++---------- pkg/schema/tracker_test.go | 31 +++++++++++++++++++++--------- syncer/dml.go | 14 ++++++++++++-- syncer/syncer_test.go | 10 +++++++--- tests/downstream_diff_index/run.sh | 19 ++++++++++++++---- tests/others_integration_1.txt | 2 ++ 6 files changed, 65 insertions(+), 29 deletions(-) diff --git a/pkg/schema/tracker.go b/pkg/schema/tracker.go index b4330bb49d..71a5d032ae 100644 --- a/pkg/schema/tracker.go +++ b/pkg/schema/tracker.go @@ -49,8 +49,6 @@ const ( TiDBClusteredIndex = "tidb_enable_clustered_index" // downstream mock table id, consists of serial numbers of letters. mockTableID = 121402101900011104 - // DefaultSQLMode is downstream schema track and paser SQLMode. 
- DefaultSQLMode = "ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION" ) var ( @@ -382,7 +380,7 @@ func (tr *Tracker) GetDownStreamIndexInfo(tctx *tcontext.Context, tableID string // GetAvailableDownStreamUKIndexInfo gets available downstream UK whose data is not null. // note. this function will not init downstreamTrack. -func (tr *Tracker) GetAvailableDownStreamUKIndexInfo(tableID string, originTi *model.TableInfo, data []interface{}) *model.IndexInfo { +func (tr *Tracker) GetAvailableDownStreamUKIndexInfo(tableID string, data []interface{}) *model.IndexInfo { dti, ok := tr.dsTracker.tableInfos[tableID] if !ok || len(dti.availableUKCache) == 0 { @@ -398,9 +396,7 @@ func (tr *Tracker) GetAvailableDownStreamUKIndexInfo(tableID string, originTi *m if isSpecifiedIndexColumn(uk, fn) { if i != 0 { // exchange available uk to the first of the array to reduce judgements for next row - temp := dti.availableUKCache[0] - dti.availableUKCache[0] = uk - dti.availableUKCache[i] = temp + dti.availableUKCache[0], dti.availableUKCache[i] = dti.availableUKCache[i], dti.availableUKCache[0] } return uk } @@ -472,14 +468,14 @@ func (tr *Tracker) getTableInfoByCreateStmt(tctx *tcontext.Context, tableID stri // initDownStreamTrackerParser init downstream tracker parser by default sql_mode. 
func (tr *Tracker) initDownStreamSQLModeAndParser(tctx *tcontext.Context) error { - setSQLMode := fmt.Sprintf("SET SESSION SQL_MODE = '%s'", DefaultSQLMode) - _, err := tr.dsTracker.downstreamConn.BaseConn.DBConn.ExecContext(tctx.Ctx, setSQLMode) + setSQLMode := fmt.Sprintf("SET SESSION SQL_MODE = '%s'", mysql.DefaultSQLMode) + _, err := tr.dsTracker.downstreamConn.ExecuteSQL(tctx, []string{setSQLMode}) if err != nil { - return dterror.ErrSchemaTrackerCannotSetDownstreamSQLMode.Delegate(err, DefaultSQLMode) + return dterror.ErrSchemaTrackerCannotSetDownstreamSQLMode.Delegate(err, mysql.DefaultSQLMode) } - stmtParser, err := utils.GetParserFromSQLModeStr(DefaultSQLMode) + stmtParser, err := utils.GetParserFromSQLModeStr(mysql.DefaultSQLMode) if err != nil { - return dterror.ErrSchemaTrackerCannotInitDownstreamParser.Delegate(err, DefaultSQLMode) + return dterror.ErrSchemaTrackerCannotInitDownstreamParser.Delegate(err, mysql.DefaultSQLMode) } tr.dsTracker.stmtParser = stmtParser return nil diff --git a/pkg/schema/tracker_test.go b/pkg/schema/tracker_test.go index faa6d82dc0..175a5e3f93 100644 --- a/pkg/schema/tracker_test.go +++ b/pkg/schema/tracker_test.go @@ -28,6 +28,7 @@ import ( "github.com/pingcap/parser" "github.com/pingcap/parser/ast" "github.com/pingcap/parser/model" + "github.com/pingcap/parser/mysql" "github.com/pingcap/tidb-tools/pkg/filter" "github.com/pingcap/tidb/ddl" timock "github.com/pingcap/tidb/util/mock" @@ -551,7 +552,10 @@ func (s *trackerSuite) TestInitDownStreamSQLModeAndParser(c *C) { tracker, err := NewTracker(context.Background(), "test-tracker", defaultTestSessionCfg, dbConn) c.Assert(err, IsNil) - mock.ExpectExec(fmt.Sprintf("SET SESSION SQL_MODE = '%s'", DefaultSQLMode)).WillReturnResult(sqlmock.NewResult(0, 0)) + mock.ExpectBegin() + mock.ExpectExec(fmt.Sprintf("SET SESSION SQL_MODE = '%s'", mysql.DefaultSQLMode)).WillReturnResult(sqlmock.NewResult(0, 0)) + mock.ExpectCommit() + tctx := tcontext.NewContext(context.Background(), 
dlog.L()) err = tracker.initDownStreamSQLModeAndParser(tctx) @@ -580,7 +584,10 @@ func (s *trackerSuite) TestGetDownStreamIndexInfo(c *C) { dbConn := &dbconn.DBConn{Cfg: s.cfg, BaseConn: baseConn} tracker, err := NewTracker(context.Background(), "test-tracker", defaultTestSessionCfg, dbConn) c.Assert(err, IsNil) - mock.ExpectExec(fmt.Sprintf("SET SESSION SQL_MODE = '%s'", DefaultSQLMode)).WillReturnResult(sqlmock.NewResult(0, 0)) + + mock.ExpectBegin() + mock.ExpectExec(fmt.Sprintf("SET SESSION SQL_MODE = '%s'", mysql.DefaultSQLMode)).WillReturnResult(sqlmock.NewResult(0, 0)) + mock.ExpectCommit() tableID := "`test`.`test`" @@ -683,7 +690,10 @@ func (s *trackerSuite) TestGetAvailableDownStreanUKIndexInfo(c *C) { dbConn := &dbconn.DBConn{Cfg: s.cfg, BaseConn: baseConn} tracker, err := NewTracker(context.Background(), "test-tracker", defaultTestSessionCfg, dbConn) c.Assert(err, IsNil) - mock.ExpectExec(fmt.Sprintf("SET SESSION SQL_MODE = '%s'", DefaultSQLMode)).WillReturnResult(sqlmock.NewResult(0, 0)) + + mock.ExpectBegin() + mock.ExpectExec(fmt.Sprintf("SET SESSION SQL_MODE = '%s'", mysql.DefaultSQLMode)).WillReturnResult(sqlmock.NewResult(0, 0)) + mock.ExpectCommit() tableID := "`test`.`test`" @@ -695,7 +705,7 @@ func (s *trackerSuite) TestGetAvailableDownStreanUKIndexInfo(c *C) { c.Assert(err, IsNil) c.Assert(indexinfo, IsNil) data := []interface{}{1, 2, 3} - indexinfo = tracker.GetAvailableDownStreamUKIndexInfo(tableID, oriTi, data) + indexinfo = tracker.GetAvailableDownStreamUKIndexInfo(tableID, data) c.Assert(indexinfo, IsNil) delete(tracker.dsTracker.tableInfos, tableID) @@ -707,7 +717,7 @@ func (s *trackerSuite) TestGetAvailableDownStreanUKIndexInfo(c *C) { c.Assert(err, IsNil) c.Assert(indexinfo, IsNil) data = []interface{}{nil, 2, 3} - indexinfo = tracker.GetAvailableDownStreamUKIndexInfo(tableID, oriTi, data) + indexinfo = tracker.GetAvailableDownStreamUKIndexInfo(tableID, data) c.Assert(indexinfo, IsNil) delete(tracker.dsTracker.tableInfos, tableID) @@ 
-719,7 +729,7 @@ func (s *trackerSuite) TestGetAvailableDownStreanUKIndexInfo(c *C) { c.Assert(err, IsNil) c.Assert(indexinfo, IsNil) data = []interface{}{1, 2, 3} - indexinfo = tracker.GetAvailableDownStreamUKIndexInfo(tableID, oriTi, data) + indexinfo = tracker.GetAvailableDownStreamUKIndexInfo(tableID, data) c.Assert(indexinfo, NotNil) delete(tracker.dsTracker.tableInfos, tableID) @@ -731,7 +741,7 @@ func (s *trackerSuite) TestGetAvailableDownStreanUKIndexInfo(c *C) { c.Assert(err, IsNil) c.Assert(indexinfo, IsNil) data = []interface{}{1, nil, 3} - indexinfo = tracker.GetAvailableDownStreamUKIndexInfo(tableID, oriTi, data) + indexinfo = tracker.GetAvailableDownStreamUKIndexInfo(tableID, data) c.Assert(indexinfo, IsNil) delete(tracker.dsTracker.tableInfos, tableID) @@ -743,7 +753,7 @@ func (s *trackerSuite) TestGetAvailableDownStreanUKIndexInfo(c *C) { c.Assert(err, IsNil) c.Assert(indexinfo, IsNil) data = []interface{}{1, 2, 3} - indexinfo = tracker.GetAvailableDownStreamUKIndexInfo(tableID, oriTi, data) + indexinfo = tracker.GetAvailableDownStreamUKIndexInfo(tableID, data) c.Assert(indexinfo, NotNil) delete(tracker.dsTracker.tableInfos, tableID) } @@ -769,7 +779,10 @@ func (s *trackerSuite) TestReTrackDownStreamIndex(c *C) { dbConn := &dbconn.DBConn{Cfg: s.cfg, BaseConn: baseConn} tracker, err := NewTracker(context.Background(), "test-tracker", defaultTestSessionCfg, dbConn) c.Assert(err, IsNil) - mock.ExpectExec(fmt.Sprintf("SET SESSION SQL_MODE = '%s'", DefaultSQLMode)).WillReturnResult(sqlmock.NewResult(0, 0)) + + mock.ExpectBegin() + mock.ExpectExec(fmt.Sprintf("SET SESSION SQL_MODE = '%s'", mysql.DefaultSQLMode)).WillReturnResult(sqlmock.NewResult(0, 0)) + mock.ExpectCommit() tableID := "`test`.`test`" diff --git a/syncer/dml.go b/syncer/dml.go index 4394165434..e4acd70527 100644 --- a/syncer/dml.go +++ b/syncer/dml.go @@ -19,6 +19,7 @@ import ( "strconv" "strings" + "github.com/pingcap/failpoint" "github.com/pingcap/parser/model" 
"github.com/pingcap/parser/mysql" "github.com/pingcap/parser/types" @@ -173,7 +174,7 @@ RowLoop: } if defaultIndexColumns == nil { - defaultIndexColumns = s.schemaTracker.GetAvailableDownStreamUKIndexInfo(tableID, ti, oriOldValues) + defaultIndexColumns = s.schemaTracker.GetAvailableDownStreamUKIndexInfo(tableID, oriOldValues) } ks := genMultipleKeys(ti, oriOldValues, tableID) @@ -182,6 +183,9 @@ RowLoop: if param.safeMode { // generate delete sql from old data sql, value := genDeleteSQL(tableID, oriOldValues, ti.Columns, defaultIndexColumns) + failpoint.Inject("UpdateSqlCheck", func() { + log.L().Info("UpdateSqlCheck", zap.String("SQL", sql)) + }) sqls = append(sqls, sql) values = append(values, value) keys = append(keys, ks) @@ -216,6 +220,9 @@ RowLoop: value = append(value, whereValues...) sql := genUpdateSQL(tableID, updateColumns, whereColumns, whereValues) + failpoint.Inject("UpdateSqlCheck", func() { + log.L().Info("UpdateSqlCheck", zap.String("SQL", sql)) + }) sqls = append(sqls, sql) values = append(values, value) keys = append(keys, ks) @@ -260,11 +267,14 @@ RowLoop: } if defaultIndexColumns == nil { - defaultIndexColumns = s.schemaTracker.GetAvailableDownStreamUKIndexInfo(tableID, ti, value) + defaultIndexColumns = s.schemaTracker.GetAvailableDownStreamUKIndexInfo(tableID, value) } ks := genMultipleKeys(ti, value, tableID) sql, value := genDeleteSQL(tableID, value, ti.Columns, defaultIndexColumns) + failpoint.Inject("DeleteSqlCheck", func() { + log.L().Info("DeleteSqlCheck", zap.String("SQL", sql)) + }) sqls = append(sqls, sql) values = append(values, value) keys = append(keys, ks) diff --git a/syncer/syncer_test.go b/syncer/syncer_test.go index 6b4c3140b8..97dabd30cd 100644 --- a/syncer/syncer_test.go +++ b/syncer/syncer_test.go @@ -49,6 +49,7 @@ import ( _ "github.com/go-sql-driver/mysql" . 
"github.com/pingcap/check" "github.com/pingcap/parser" + pmysql "github.com/pingcap/parser/mysql" bf "github.com/pingcap/tidb-tools/pkg/binlog-filter" cm "github.com/pingcap/tidb-tools/pkg/column-mapping" "github.com/pingcap/tidb-tools/pkg/filter" @@ -772,7 +773,9 @@ func (s *testSyncerSuite) TestRun(c *C) { syncer.ddlDBConn = &dbconn.DBConn{Cfg: s.cfg, BaseConn: conn.NewBaseConn(dbConn, &retry.FiniteRetryStrategy{})} syncer.downstreamTrackConn = &dbconn.DBConn{Cfg: s.cfg, BaseConn: conn.NewBaseConn(dbConn, &retry.FiniteRetryStrategy{})} syncer.schemaTracker, err = schema.NewTracker(context.Background(), s.cfg.Name, defaultTestSessionCfg, syncer.downstreamTrackConn) - mock.ExpectExec(fmt.Sprintf("SET SESSION SQL_MODE = '%s'", schema.DefaultSQLMode)).WillReturnResult(sqlmock.NewResult(0, 0)) + mock.ExpectBegin() + mock.ExpectExec(fmt.Sprintf("SET SESSION SQL_MODE = '%s'", pmysql.DefaultSQLMode)).WillReturnResult(sqlmock.NewResult(0, 0)) + mock.ExpectCommit() mock.ExpectQuery("SHOW CREATE TABLE " + "`test_1`.`t_1`").WillReturnRows( sqlmock.NewRows([]string{"Table", "Create Table"}). AddRow("t_1", "create table t_1(id int primary key, name varchar(24))")) @@ -1027,7 +1030,9 @@ func (s *testSyncerSuite) TestExitSafeModeByConfig(c *C) { } syncer.ddlDBConn = &dbconn.DBConn{Cfg: s.cfg, BaseConn: conn.NewBaseConn(dbConn, &retry.FiniteRetryStrategy{})} syncer.downstreamTrackConn = &dbconn.DBConn{Cfg: s.cfg, BaseConn: conn.NewBaseConn(dbConn, &retry.FiniteRetryStrategy{})} - mock.ExpectExec(fmt.Sprintf("SET SESSION SQL_MODE = '%s'", schema.DefaultSQLMode)).WillReturnResult(sqlmock.NewResult(0, 0)) + mock.ExpectBegin() + mock.ExpectExec(fmt.Sprintf("SET SESSION SQL_MODE = '%s'", pmysql.DefaultSQLMode)).WillReturnResult(sqlmock.NewResult(0, 0)) + mock.ExpectCommit() mock.ExpectQuery("SHOW CREATE TABLE " + "`test_1`.`t_1`").WillReturnRows( sqlmock.NewRows([]string{"Table", "Create Table"}). 
@@ -1564,7 +1569,6 @@ func (s *testSyncerSuite) TestDownstreamTableHasAutoRandom(c *C) { "tidb_skip_utf8_check": "0", schema.TiDBClusteredIndex: "ON", } - syncer.downstreamTrackConn = &dbconn.DBConn{Cfg: s.cfg, BaseConn: conn.NewBaseConn(dbConn, &retry.FiniteRetryStrategy{})} syncer.schemaTracker, err = schema.NewTracker(ctx, s.cfg.Name, sessionCfg, syncer.downstreamTrackConn) c.Assert(err, IsNil) v, ok := syncer.schemaTracker.GetSystemVar(schema.TiDBClusteredIndex) diff --git a/tests/downstream_diff_index/run.sh b/tests/downstream_diff_index/run.sh index bd9cfa6e4f..d0df3e0ebe 100755 --- a/tests/downstream_diff_index/run.sh +++ b/tests/downstream_diff_index/run.sh @@ -22,14 +22,20 @@ function run() { # start DM worker and master run_dm_master $WORK_DIR/master $MASTER_PORT $cur/conf/dm-master.toml check_rpc_alive $cur/../bin/check_master_online 127.0.0.1:$MASTER_PORT + + # worker will inject delete/update sql check (names must match failpoint.Inject in syncer/dml.go exactly — failpoint paths are case-sensitive) + inject_points=( + "github.com/pingcap/dm/syncer/DeleteSqlCheck=return()" + "github.com/pingcap/dm/syncer/UpdateSqlCheck=return()" + ) + export GO_FAILPOINTS="$(join_string \; ${inject_points[@]})" run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $cur/conf/dm-worker1.toml check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER1_PORT - run_dm_worker $WORK_DIR/worker2 $WORKER2_PORT $cur/conf/dm-worker2.toml - check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER2_PORT - - # operate mysql config to worker cp $cur/conf/source1.yaml $WORK_DIR/source1.yaml dmctl_operate_source create $WORK_DIR/source1.yaml $SOURCE_ID1 + + run_dm_worker $WORK_DIR/worker2 $WORKER2_PORT $cur/conf/dm-worker2.toml + check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER2_PORT cp $cur/conf/source2.yaml $WORK_DIR/source2.yaml dmctl_operate_source create $WORK_DIR/source2.yaml $SOURCE_ID2 @@ -45,8 +51,10 @@ function run() { run_sql_file $cur/data/db1.increment.sql $MYSQL_HOST1 $MYSQL_PORT1 $MYSQL_PASSWORD1 # check update data run_sql_tidb_with_retry
"select count(1) from ${db}.${tb} where c1=1 and c3='111';" "count(1): 1" + check_log_contain_with_retry '[UpdateSqlCheck] [SQL="DELETE FROM `downstream_diff_index`.`t` WHERE `c2` = ? LIMIT 1"]' $WORK_DIR/worker1/log/dm-worker.log # check delete data run_sql_tidb_with_retry "select count(1) from ${db}.${tb} where c1=2;" "count(1): 1" + check_log_contain_with_retry '[DeleteSqlCheck] [SQL="DELETE FROM `downstream_diff_index`.`t` WHERE `c2` = ? LIMIT 1"]' $WORK_DIR/worker1/log/dm-worker.log # alter schema to test pk run_sql "alter table ${db}.${tb} add primary key(c3);" $TIDB_PORT $TIDB_PASSWORD @@ -57,8 +65,10 @@ function run() { run_sql_file $cur/data/db2.increment.sql $MYSQL_HOST2 $MYSQL_PORT2 $MYSQL_PASSWORD2 # check update data run_sql_tidb_with_retry "select count(1) from ${db}.${tb} where c1=3 and c3='333';" "count(1): 1" + check_log_contain_with_retry '[UpdateSqlCheck] [SQL="DELETE FROM `downstream_diff_index`.`t` WHERE `c3` = ? LIMIT 1"]' $WORK_DIR/worker2/log/dm-worker.log # check delete data run_sql_tidb_with_retry "select count(1) from ${db}.${tb} where c1=1;" "count(1): 1" + check_log_contain_with_retry '[DeleteSqlCheck] [SQL="DELETE FROM `downstream_diff_index`.`t` WHERE `c3` = ? LIMIT 1"]' $WORK_DIR/worker2/log/dm-worker.log } cleanup_data downstream_diff_index @@ -66,5 +76,6 @@ cleanup_data downstream_diff_index cleanup_process $* run $* cleanup_process $* +export GO_FAILPOINTS='' echo "[$(date)] <<<<<< test case $TEST_NAME success! 
>>>>>>" diff --git a/tests/others_integration_1.txt b/tests/others_integration_1.txt index d5361c58d4..374b8bdeff 100644 --- a/tests/others_integration_1.txt +++ b/tests/others_integration_1.txt @@ -8,3 +8,5 @@ only_dml adjust_gtid checkpoint_transaction lightning_mode +downstream_diff_index + From 822e6c7781ca79c14f520719e3d08b281d8ae224 Mon Sep 17 00:00:00 2001 From: WizardXiao Date: Wed, 27 Oct 2021 16:57:29 +0800 Subject: [PATCH 14/14] commit-message: add UT --- _utils/terror_gen/errors_release.txt | 1 + errors.toml | 6 ++ pkg/schema/tracker.go | 24 +++--- pkg/schema/tracker_test.go | 111 +++++++++++++++++++++++++++ pkg/terror/error_list.go | 3 + 5 files changed, 132 insertions(+), 13 deletions(-) diff --git a/_utils/terror_gen/errors_release.txt b/_utils/terror_gen/errors_release.txt index 4aa57e94d4..e2fae9a557 100644 --- a/_utils/terror_gen/errors_release.txt +++ b/_utils/terror_gen/errors_release.txt @@ -495,6 +495,7 @@ ErrSchemaTrackerInit,[code=44012:class=schema-tracker:scope=internal:level=high] ErrSchemaTrackerCannotSetDownstreamSQLMode,[code=44013:class=schema-tracker:scope=internal:level=high], "Message: failed to set default downstream sql_mode %v in schema tracker" ErrSchemaTrackerCannotInitDownstreamParser,[code=44014:class=schema-tracker:scope=internal:level=high], "Message: failed to init downstream parser by sql_mode %v in schema tracker" ErrSchemaTrackerCannotMockDownstreamTable,[code=44015:class=schema-tracker:scope=internal:level=high], "Message: failed to mock downstream table by create table statement %v in schema tracker" +ErrSchemaTrackerCannotFetchDownstreamCreateTableStmt,[code=44016:class=schema-tracker:scope=internal:level=high], "Message: failed to fetch downstream table %v by show create table statement in schema tracker" ErrSchedulerNotStarted,[code=46001:class=scheduler:scope=internal:level=high], "Message: the scheduler has not started" ErrSchedulerStarted,[code=46002:class=scheduler:scope=internal:level=medium], "Message: the 
scheduler has already started" ErrSchedulerWorkerExist,[code=46003:class=scheduler:scope=internal:level=medium], "Message: dm-worker with name %s already exists" diff --git a/errors.toml b/errors.toml index b02d7ddb5b..8b35b06632 100644 --- a/errors.toml +++ b/errors.toml @@ -2980,6 +2980,12 @@ description = "" workaround = "" tags = ["internal", "high"] +[error.DM-schema-tracker-44016] +message = "failed to fetch downstream table %v by show create table statement in schema tracker" +description = "" +workaround = "" +tags = ["internal", "high"] + [error.DM-scheduler-46001] message = "the scheduler has not started" description = "" diff --git a/pkg/schema/tracker.go b/pkg/schema/tracker.go index 0628c85c40..1e77c6c00c 100644 --- a/pkg/schema/tracker.go +++ b/pkg/schema/tracker.go @@ -366,7 +366,7 @@ func (tr *Tracker) GetDownStreamIndexInfo(tctx *tcontext.Context, tableID string dti, ok := tr.dsTracker.tableInfos[tableID] if !ok { log.L().Info("Downstream schema tracker init. ", zap.String("tableID", tableID)) - ti, err := tr.getTableInfoByCreateStmt(tctx, tableID, originTi.Name.O) + ti, err := tr.getTableInfoByCreateStmt(tctx, tableID) if err != nil { log.L().Error("Init dowstream schema info error. ", zap.String("tableID", tableID), zap.Error(err)) return nil, err @@ -431,7 +431,7 @@ func (tr *Tracker) RemoveDownstreamSchema(targetTables []*filter.Table) { } // getTableInfoByCreateStmt get downstream tableInfo by "SHOW CREATE TABLE" stmt. 
-func (tr *Tracker) getTableInfoByCreateStmt(tctx *tcontext.Context, tableID string, originTableName string) (*model.TableInfo, error) { +func (tr *Tracker) getTableInfoByCreateStmt(tctx *tcontext.Context, tableID string) (*model.TableInfo, error) { if tr.dsTracker.stmtParser == nil { err := tr.initDownStreamSQLModeAndParser(tctx) if err != nil { @@ -442,7 +442,7 @@ func (tr *Tracker) getTableInfoByCreateStmt(tctx *tcontext.Context, tableID stri querySQL := fmt.Sprintf("SHOW CREATE TABLE %s", tableID) rows, err := tr.dsTracker.downstreamConn.QuerySQL(tctx, querySQL) if err != nil { - return nil, dterror.ErrSchemaTrackerCannotFetchDownstreamTable.Delegate(err, tableID, originTableName) + return nil, dterror.ErrSchemaTrackerCannotFetchDownstreamCreateTableStmt.Delegate(err, tableID) } defer rows.Close() var tableName, createStr string @@ -495,15 +495,19 @@ func getDownStreamTi(ti *model.TableInfo, originTi *model.TableInfo) *downstream } for _, idx := range ti.Indices { + indexRedict := redirectIndexKeys(idx, originTi) + if indexRedict == nil { + continue + } if idx.Primary { - indexCache = idx + indexCache = indexRedict hasPk = true } else if idx.Unique { // second check not null unique key if !hasPk && isSpecifiedIndexColumn(idx, fn) { - indexCache = idx + indexCache = indexRedict } else { - availableUKCache = append(availableUKCache, idx) + availableUKCache = append(availableUKCache, indexRedict) } } } @@ -511,18 +515,12 @@ func getDownStreamTi(ti *model.TableInfo, originTi *model.TableInfo) *downstream // handle pk exceptional case. // e.g. "create table t(a int primary key, b int)". 
if !hasPk { - exPk := handlePkExCase(ti) + exPk := redirectIndexKeys(handlePkExCase(ti), originTi) if exPk != nil { indexCache = exPk } } - // redirect column offset as originTi - indexCache = redirectIndexKeys(indexCache, originTi) - for i, uk := range availableUKCache { - availableUKCache[i] = redirectIndexKeys(uk, originTi) - } - return &downstreamTableInfo{ tableInfo: ti, indexCache: indexCache, diff --git a/pkg/schema/tracker_test.go b/pkg/schema/tracker_test.go index 9bc0aece71..88a920e183 100644 --- a/pkg/schema/tracker_test.go +++ b/pkg/schema/tracker_test.go @@ -613,6 +613,16 @@ func (s *trackerSuite) TestGetDownStreamIndexInfo(c *C) { c.Assert(indexinfo, NotNil) delete(tracker.dsTracker.tableInfos, tableID) + mock.ExpectQuery("SHOW CREATE TABLE " + tableID).WillReturnRows( + sqlmock.NewRows([]string{"Table", "Create Table"}). + AddRow("test", "create table t(a int primary key, b int, c varchar(10))")) + indexinfo, err = tracker.GetDownStreamIndexInfo(tcontext.Background(), tableID, oriTi) + c.Assert(err, IsNil) + _, ok = tracker.dsTracker.tableInfos[tableID] + c.Assert(ok, IsTrue) + c.Assert(indexinfo, NotNil) + delete(tracker.dsTracker.tableInfos, tableID) + // downstream has composite pks mock.ExpectQuery("SHOW CREATE TABLE " + tableID).WillReturnRows( sqlmock.NewRows([]string{"Table", "Create Table"}). @@ -667,6 +677,64 @@ func (s *trackerSuite) TestGetDownStreamIndexInfo(c *C) { c.Assert(err, IsNil) c.Assert(indexinfo.Primary, IsTrue) delete(tracker.dsTracker.tableInfos, tableID) + + // downstream has more columns than upstream, and that column in used in PK + mock.ExpectQuery("SHOW CREATE TABLE " + tableID).WillReturnRows( + sqlmock.NewRows([]string{"Table", "Create Table"}). 
+ AddRow("test", "create table t(a int , d int PRIMARY KEY, c varchar(10), b int unique not null)")) + indexinfo, err = tracker.GetDownStreamIndexInfo(tcontext.Background(), tableID, oriTi) + c.Assert(err, IsNil) + c.Assert(indexinfo, NotNil) + c.Assert(indexinfo.Primary, IsFalse) + delete(tracker.dsTracker.tableInfos, tableID) + + mock.ExpectQuery("SHOW CREATE TABLE " + tableID).WillReturnRows( + sqlmock.NewRows([]string{"Table", "Create Table"}). + AddRow("test", "create table t(a int , d int PRIMARY KEY, c varchar(10), b int unique)")) + indexinfo, err = tracker.GetDownStreamIndexInfo(tcontext.Background(), tableID, oriTi) + c.Assert(err, IsNil) + c.Assert(indexinfo, IsNil) + dti, ok = tracker.dsTracker.tableInfos[tableID] + c.Assert(ok, IsTrue) + c.Assert(len(dti.availableUKCache) == 1, IsTrue) + delete(tracker.dsTracker.tableInfos, tableID) + + mock.ExpectQuery("SHOW CREATE TABLE " + tableID).WillReturnRows( + sqlmock.NewRows([]string{"Table", "Create Table"}). + AddRow("test", "create table t(a int , d int PRIMARY KEY, c varchar(10), b int)")) + indexinfo, err = tracker.GetDownStreamIndexInfo(tcontext.Background(), tableID, oriTi) + c.Assert(err, IsNil) + c.Assert(indexinfo, IsNil) + delete(tracker.dsTracker.tableInfos, tableID) + + // downstream has more columns than upstream, and that column in used in UK(not null) + mock.ExpectQuery("SHOW CREATE TABLE " + tableID).WillReturnRows( + sqlmock.NewRows([]string{"Table", "Create Table"}). + AddRow("test", "create table t(a int , d int unique not null, c varchar(10), b int unique not null)")) + indexinfo, err = tracker.GetDownStreamIndexInfo(tcontext.Background(), tableID, oriTi) + c.Assert(err, IsNil) + c.Assert(indexinfo, NotNil) + c.Assert(indexinfo.Columns[0].Name.L == "b", IsTrue) + delete(tracker.dsTracker.tableInfos, tableID) + + mock.ExpectQuery("SHOW CREATE TABLE " + tableID).WillReturnRows( + sqlmock.NewRows([]string{"Table", "Create Table"}). 
+ AddRow("test", "create table t(a int , d int unique not null, c varchar(10), b int unique)")) + indexinfo, err = tracker.GetDownStreamIndexInfo(tcontext.Background(), tableID, oriTi) + c.Assert(err, IsNil) + c.Assert(indexinfo, IsNil) + dti, ok = tracker.dsTracker.tableInfos[tableID] + c.Assert(ok, IsTrue) + c.Assert(len(dti.availableUKCache) == 1, IsTrue) + delete(tracker.dsTracker.tableInfos, tableID) + + mock.ExpectQuery("SHOW CREATE TABLE " + tableID).WillReturnRows( + sqlmock.NewRows([]string{"Table", "Create Table"}). + AddRow("test", "create table t(a int , d int unique not null, c varchar(10), b int)")) + indexinfo, err = tracker.GetDownStreamIndexInfo(tcontext.Background(), tableID, oriTi) + c.Assert(err, IsNil) + c.Assert(indexinfo, IsNil) + delete(tracker.dsTracker.tableInfos, tableID) } func (s *trackerSuite) TestGetAvailableDownStreanUKIndexInfo(c *C) { @@ -745,6 +813,17 @@ func (s *trackerSuite) TestGetAvailableDownStreanUKIndexInfo(c *C) { c.Assert(indexinfo, IsNil) delete(tracker.dsTracker.tableInfos, tableID) + mock.ExpectQuery("SHOW CREATE TABLE " + tableID).WillReturnRows( + sqlmock.NewRows([]string{"Table", "Create Table"}). + AddRow("test", "create table t(a int, b int, c varchar(10), unique key(a, b))")) + indexinfo, err = tracker.GetDownStreamIndexInfo(tcontext.Background(), tableID, oriTi) + c.Assert(err, IsNil) + c.Assert(indexinfo, IsNil) + data = []interface{}{1, nil, nil} + indexinfo = tracker.GetAvailableDownStreamUKIndexInfo(tableID, data) + c.Assert(indexinfo, IsNil) + delete(tracker.dsTracker.tableInfos, tableID) + // downstream has union uk but data has null mock.ExpectQuery("SHOW CREATE TABLE " + tableID).WillReturnRows( sqlmock.NewRows([]string{"Table", "Create Table"}). 
@@ -808,9 +887,41 @@ func (s *trackerSuite) TestReTrackDownStreamIndex(c *C) { _, ok = tracker.dsTracker.tableInfos[tableID] c.Assert(ok, IsTrue) + tracker.RemoveDownstreamSchema(targetTables) + _, ok = tracker.dsTracker.tableInfos[tableID] + c.Assert(ok, IsFalse) + + mock.ExpectQuery("SHOW CREATE TABLE " + tableID).WillReturnRows( + sqlmock.NewRows([]string{"Table", "Create Table"}). + AddRow("test", "create table t(a int primary key, b int, c varchar(10))")) + _, err = tracker.GetDownStreamIndexInfo(tcontext.Background(), tableID, oriTi) + c.Assert(err, IsNil) + _, ok = tracker.dsTracker.tableInfos[tableID] + c.Assert(ok, IsTrue) + // just schema targetTables = []*filter.Table{{Schema: "test", Name: "a"}, {Schema: "test", Name: ""}} tracker.RemoveDownstreamSchema(targetTables) _, ok = tracker.dsTracker.tableInfos[tableID] c.Assert(ok, IsFalse) + + mock.ExpectQuery("SHOW CREATE TABLE " + tableID).WillReturnRows( + sqlmock.NewRows([]string{"Table", "Create Table"}). + AddRow("test", "create table t(a int, b int, c varchar(10), PRIMARY KEY (a,b))")) + _, err = tracker.GetDownStreamIndexInfo(tcontext.Background(), tableID, oriTi) + c.Assert(err, IsNil) + _, ok = tracker.dsTracker.tableInfos[tableID] + c.Assert(ok, IsTrue) + + tracker.RemoveDownstreamSchema(targetTables) + _, ok = tracker.dsTracker.tableInfos[tableID] + c.Assert(ok, IsFalse) + + mock.ExpectQuery("SHOW CREATE TABLE " + tableID).WillReturnRows( + sqlmock.NewRows([]string{"Table", "Create Table"}). 
+ AddRow("test", "create table t(a int primary key, b int, c varchar(10))")) + _, err = tracker.GetDownStreamIndexInfo(tcontext.Background(), tableID, oriTi) + c.Assert(err, IsNil) + _, ok = tracker.dsTracker.tableInfos[tableID] + c.Assert(ok, IsTrue) } diff --git a/pkg/terror/error_list.go b/pkg/terror/error_list.go index dd56d129a8..22aa41abd0 100644 --- a/pkg/terror/error_list.go +++ b/pkg/terror/error_list.go @@ -608,6 +608,7 @@ const ( codeSchemaTrackerCannotSetDownstreamSQLMode codeSchemaTrackerCannotInitDownstreamParser codeSchemaTrackerCannotMockDownstreamTable + codeSchemaTrackerCannotFetchDownstreamCreateTableStmt ) // HA scheduler. @@ -1247,6 +1248,8 @@ var ( "failed to init downstream parser by sql_mode %v in schema tracker", "") ErrSchemaTrackerCannotMockDownstreamTable = New(codeSchemaTrackerCannotMockDownstreamTable, ClassSchemaTracker, ScopeInternal, LevelHigh, "failed to mock downstream table by create table statement %v in schema tracker", "") + ErrSchemaTrackerCannotFetchDownstreamCreateTableStmt = New(codeSchemaTrackerCannotFetchDownstreamCreateTableStmt, ClassSchemaTracker, ScopeInternal, LevelHigh, + "failed to fetch downstream table %v by show create table statement in schema tracker", "") // HA scheduler. ErrSchedulerNotStarted = New(codeSchedulerNotStarted, ClassScheduler, ScopeInternal, LevelHigh, "the scheduler has not started", "")