From ba3ca9f7c88bad13d9819837dc3193898bd1e1b2 Mon Sep 17 00:00:00 2001 From: gauss Date: Sat, 21 Mar 2020 00:10:14 +0800 Subject: [PATCH 01/36] Support the operation of adding multi-columns --- ddl/column.go | 125 ++++++++++++++++- ddl/ddl_api.go | 284 ++++++++++++++++++++++++++++++++++++++- ddl/ddl_worker.go | 2 + ddl/util/event.go | 16 ++- executor/ddl_test.go | 25 ++++ statistics/handle/ddl.go | 11 +- 6 files changed, 447 insertions(+), 16 deletions(-) diff --git a/ddl/column.go b/ddl/column.go index 2533bf2f79861..61c9e2d905c14 100644 --- a/ddl/column.go +++ b/ddl/column.go @@ -222,7 +222,7 @@ func onAddColumn(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, err error) // Finish this job. job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tblInfo) - asyncNotifyEvent(d, &util.Event{Tp: model.ActionAddColumn, TableInfo: tblInfo, ColumnInfo: columnInfo}) + asyncNotifyEvent(d, &util.Event{Tp: model.ActionAddColumn, TableInfo: tblInfo, ColumnInfos: []*model.ColumnInfo{columnInfo}}) default: err = ErrInvalidDDLState.GenWithStackByArgs("column", columnInfo.State) } @@ -230,6 +230,129 @@ func onAddColumn(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, err error) return ver, errors.Trace(err) } +func checkAddAndCreateColumnInfos(t *meta.Meta, job *model.Job) (*model.TableInfo, []*model.ColumnInfo, []int, error) { + schemaID := job.SchemaID + tblInfo, err := getTableInfoAndCancelFaultJob(t, job, schemaID) + if err != nil { + return nil, nil, nil, errors.Trace(err) + } + columns := []*model.ColumnInfo{} + positions := []*ast.ColumnPosition{} + offsets := []int{} + err = job.DecodeArgs(&columns, &positions, &offsets) + for i := range positions { + if positions[i] == nil { + positions[i] = &ast.ColumnPosition{} + } + } + if err != nil { + job.State = model.JobStateCancelled + return nil, nil, nil, errors.Trace(err) + } + + var columnInfos []*model.ColumnInfo + + for i, col := range columns { + columnInfo := model.FindColumnInfo(tblInfo.Columns, col.Name.L) + if columnInfo != nil { + columnInfos = append(columnInfos, columnInfo) + if columnInfo.State == model.StatePublic { + // We already have a column with the same column name. + job.State = model.JobStateCancelled + return nil, nil, nil, infoschema.ErrColumnExists.GenWithStackByArgs(col.Name) + } + } else { + columnInfo, offset, err := createColumnInfo(tblInfo, col, positions[i]) + if err != nil { + job.State = model.JobStateCancelled + return nil, nil, nil, errors.Trace(err) + } + logutil.BgLogger().Info("[ddl] run add columns job", zap.String("job", job.String()), zap.Reflect("columnInfo", *columnInfo), zap.Int("offset", offsets[i])) + + // Set offset arg to job. + if offset != 0 { + offsets[i] = offset + } + if err = checkAddColumnTooManyColumns(len(tblInfo.Columns)); err != nil { + job.State = model.JobStateCancelled + return nil, nil, nil, errors.Trace(err) + } + columnInfos = append(columnInfos, columnInfo) + } + } + job.Args = []interface{}{columnInfos, positions, offsets} + return tblInfo, columnInfos, offsets, nil +} + +func setColumnsState(columnInfos []*model.ColumnInfo, state model.SchemaState) { + for i := range columnInfos { + columnInfos[i].State = state + } +} + +func onAddColumns(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, err error) { + // Handle the rolling back job. 
+ if job.IsRollingback() { + ver, err = onDropColumn(t, job) + if err != nil { + return ver, errors.Trace(err) + } + return ver, nil + } + + failpoint.Inject("errorBeforeDecodeArgs", func(val failpoint.Value) { + if val.(bool) { + failpoint.Return(ver, errors.New("occur an error before decode args")) + } + }) + + tblInfo, columnInfos, offsets, err := checkAddAndCreateColumnInfos(t, job) + if err != nil { + return ver, errors.Trace(err) + } + + originalState := columnInfos[0].State + switch columnInfos[0].State { + case model.StateNone: + // none -> delete only + job.SchemaState = model.StateDeleteOnly + setColumnsState(columnInfos, model.StateDeleteOnly) + ver, err = updateVersionAndTableInfoWithCheck(t, job, tblInfo, originalState != columnInfos[0].State) + case model.StateDeleteOnly: + // delete only -> write only + job.SchemaState = model.StateWriteOnly + setColumnsState(columnInfos, model.StateWriteOnly) + ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != columnInfos[0].State) + case model.StateWriteOnly: + // write only -> reorganization + job.SchemaState = model.StateWriteReorganization + setColumnsState(columnInfos, model.StateWriteReorganization) + ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != columnInfos[0].State) + case model.StateWriteReorganization: + // reorganization -> public + // Adjust table column offsets. + oldCols := tblInfo.Columns + for i := range offsets { + tmpCols := oldCols[:len(oldCols)-len(offsets)+i+1] + tblInfo.Columns = tmpCols + adjustColumnInfoInAddColumn(tblInfo, offsets[i]) + oldCols = append(tblInfo.Columns, oldCols[len(oldCols)-len(offsets)+i+1:]...) + } + setColumnsState(columnInfos, model.StatePublic) + ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != columnInfos[0].State) + if err != nil { + return ver, errors.Trace(err) + } + // Finish this job. + job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tblInfo) + asyncNotifyEvent(d, &util.Event{Tp: model.ActionAddColumns, TableInfo: tblInfo, ColumnInfos: columnInfos}) + default: + err = ErrInvalidDDLState.GenWithStackByArgs("column", columnInfos[0].State) + } + + return ver, errors.Trace(err) +} + func onDropColumn(t *meta.Meta, job *model.Job) (ver int64, _ error) { tblInfo, colInfo, err := checkDropColumn(t, job) if err != nil { diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go index 6a4f075486d02..821d7f81c494f 100644 --- a/ddl/ddl_api.go +++ b/ddl/ddl_api.go @@ -1963,11 +1963,6 @@ func resolveAlterTableSpec(ctx sessionctx.Context, specs []*ast.AlterTableSpec) validSpecs = append(validSpecs, spec) } - if len(validSpecs) > 1 { - // Now we only allow one schema changing at the same time. - return nil, errRunMultiSchemaChanges - } - // Verify whether the algorithm is supported. 
for _, spec := range validSpecs { resolvedAlgorithm, err := ResolveAlterAlgorithm(spec, algorithm) @@ -1987,6 +1982,17 @@ func resolveAlterTableSpec(ctx sessionctx.Context, specs []*ast.AlterTableSpec) return validSpecs, nil } +func isSameTypeMultiSpecs(specs []*ast.AlterTableSpec) bool { + specType := specs[0].Tp + var count int = 0 + for _, spec := range specs { + if spec.Tp == specType { + count++ + } + } + return len(specs) == count +} + func (d *ddl) AlterTable(ctx sessionctx.Context, ident ast.Ident, specs []*ast.AlterTableSpec) (err error) { validSpecs, err := resolveAlterTableSpec(ctx, specs) if err != nil { @@ -1998,14 +2004,32 @@ func (d *ddl) AlterTable(ctx sessionctx.Context, ident ast.Ident, specs []*ast.A return ErrWrongObject.GenWithStackByArgs(ident.Schema, ident.Name, "BASE TABLE") } + if len(validSpecs) > 1 { + if isSameTypeMultiSpecs(validSpecs) { + switch validSpecs[0].Tp { + case ast.AlterTableAddColumns: + err = d.AddSpecsColumn(ctx, ident, validSpecs) + default: + return errRunMultiSchemaChanges + } + if err != nil { + return errors.Trace(err) + } + return nil + } + // Now we only allow one schema changing at the same time(except adding multi-columns). + return errRunMultiSchemaChanges + } + for _, spec := range validSpecs { var handledCharsetOrCollate bool switch spec.Tp { case ast.AlterTableAddColumns: if len(spec.NewColumns) != 1 { - return errRunMultiSchemaChanges + err = d.AddSpecColumns(ctx, ident, spec) + } else { + err = d.AddColumn(ctx, ident, spec) } - err = d.AddColumn(ctx, ident, spec) case ast.AlterTableAddPartitions: err = d.AddTablePartitions(ctx, ident, spec) case ast.AlterTableCoalescePartitions: @@ -2296,6 +2320,252 @@ func (d *ddl) AddColumn(ctx sessionctx.Context, ti ast.Ident, spec *ast.AlterTab return errors.Trace(err) } +// AddSpecsColumn will add multi new columns to the table, e.g. alter table t add column b int, add column c int +func (d *ddl) AddSpecsColumn(ctx sessionctx.Context, ti ast.Ident, specs []*ast.AlterTableSpec) error { + schema, t, err := d.getSchemaAndTableByIdent(ctx, ti) + if err != nil { + return errors.Trace(err) + } + var columns []*table.Column + var positions []*ast.ColumnPosition + var offsets []int + + // Check all the columns at once + if err = checkAddColumnTooManyColumns(len(t.Cols()) + len(specs)); err != nil { + return errors.Trace(err) + } + set := make(map[string]bool) + for _, spec := range specs { + specNewColumn := spec.NewColumns[0] + if set[specNewColumn.Name.Name.O] != true { + set[specNewColumn.Name.Name.O] = true + } else { + return errors.Trace(infoschema.ErrColumnExists.GenWithStackByArgs(specNewColumn.Name.Name.O)) + } + } + + // Check the columns one by one + for _, spec := range specs { + if len(spec.NewColumns) != 1 { + return errRunMultiSchemaChanges + } + specNewColumn := spec.NewColumns[0] + err := checkUnsupportedColumnConstraint(specNewColumn, ti) + if err != nil { + return errors.Trace(err) + } + + colName := specNewColumn.Name.Name.O + if err = checkColumnAttributes(colName, specNewColumn.Tp); err != nil { + return errors.Trace(err) + } + if len(colName) > mysql.MaxColumnNameLength { + return ErrTooLongIdent.GenWithStackByArgs(colName) + } + + // If new column is a generated column, do validation. + // NOTE: we do check whether the column refers other generated + // columns occurring later in a table, but we don't handle the col offset. 
+ for _, option := range specNewColumn.Options { + if option.Tp == ast.ColumnOptionGenerated { + if err := checkIllegalFn4GeneratedColumn(specNewColumn.Name.Name.L, option.Expr); err != nil { + return errors.Trace(err) + } + + if option.Stored { + return errUnsupportedOnGeneratedColumn.GenWithStackByArgs("Adding generated stored column through ALTER TABLE") + } + + _, dependColNames := findDependedColumnNames(specNewColumn) + if err = checkAutoIncrementRef(specNewColumn.Name.Name.L, dependColNames, t.Meta()); err != nil { + return errors.Trace(err) + } + duplicateColNames := make(map[string]struct{}, len(dependColNames)) + for k := range dependColNames { + duplicateColNames[k] = struct{}{} + } + cols := t.Cols() + + if err = checkDependedColExist(dependColNames, cols); err != nil { + return errors.Trace(err) + } + + if err = verifyColumnGenerationSingle(duplicateColNames, cols, spec.Position); err != nil { + return errors.Trace(err) + } + } + } + + // Check whether added column has existed. + col := table.FindCol(t.Cols(), colName) + if col != nil { + err = infoschema.ErrColumnExists.GenWithStackByArgs(colName) + if spec.IfNotExists { + ctx.GetSessionVars().StmtCtx.AppendNote(err) + return nil + } + return err + } + + // Ignore table constraints now, maybe return error later. + // We use length(t.Cols()) as the default offset firstly, we will change the + // column's offset later. + col, _, err = buildColumnAndConstraint(ctx, len(t.Cols()), specNewColumn, nil, t.Meta().Charset, t.Meta().Collate, schema.Charset, schema.Collate) + if err != nil { + return errors.Trace(err) + } + + col.OriginDefaultValue, err = generateOriginDefaultValue(col.ToInfo()) + if err != nil { + return errors.Trace(err) + } + + columns = append(columns, col) + positions = append(positions, spec.Position) + offsets = append(offsets, 0) + } + + job := &model.Job{ + SchemaID: schema.ID, + TableID: t.Meta().ID, + SchemaName: schema.Name.L, + Type: model.ActionAddColumns, + BinlogInfo: &model.HistoryInfo{}, + Args: []interface{}{columns, positions, offsets}, + } + + err = d.doDDLJob(ctx, job) + // column exists, but if_not_exists flags is true, so we ignore this error. + // if infoschema.ErrColumnExists.Equal(err) && specs[0].IfNotExists { + // ctx.GetSessionVars().StmtCtx.AppendNote(err) + // return nil + // } + err = d.callHookOnChanged(err) + return errors.Trace(err) +} + +// AddSpecColumns will add columns to the table, e.g. 
alter table t add column(b int, c int) +func (d *ddl) AddSpecColumns(ctx sessionctx.Context, ti ast.Ident, spec *ast.AlterTableSpec) error { + schema, t, err := d.getSchemaAndTableByIdent(ctx, ti) + if err != nil { + return errors.Trace(err) + } + + offset := 0 + var columns []*table.Column + var positions []*ast.ColumnPosition + var offsets []int + + // Check all the columns at once + if err = checkAddColumnTooManyColumns(len(t.Cols()) + len(spec.NewColumns)); err != nil { + return errors.Trace(err) + } + set := make(map[string]bool) + for _, specNewColumn := range spec.NewColumns { + if set[specNewColumn.Name.Name.O] != true { + set[specNewColumn.Name.Name.O] = true + } else { + return errors.Trace(infoschema.ErrColumnExists.GenWithStackByArgs(specNewColumn.Name.Name.O)) + } + } + + // Check the columns one by one + for _, specNewColumn := range spec.NewColumns { + err := checkUnsupportedColumnConstraint(specNewColumn, ti) + if err != nil { + return errors.Trace(err) + } + + colName := specNewColumn.Name.Name.O + if err = checkColumnAttributes(colName, specNewColumn.Tp); err != nil { + return errors.Trace(err) + } + if len(colName) > mysql.MaxColumnNameLength { + return ErrTooLongIdent.GenWithStackByArgs(colName) + } + + // If new column is a generated column, do validation. + // NOTE: we do check whether the column refers other generated + // columns occurring later in a table, but we don't handle the col offset. + for _, option := range specNewColumn.Options { + if option.Tp == ast.ColumnOptionGenerated { + if err := checkIllegalFn4GeneratedColumn(specNewColumn.Name.Name.L, option.Expr); err != nil { + return errors.Trace(err) + } + + if option.Stored { + return errUnsupportedOnGeneratedColumn.GenWithStackByArgs("Adding generated stored column through ALTER TABLE") + } + + _, dependColNames := findDependedColumnNames(specNewColumn) + if err = checkAutoIncrementRef(specNewColumn.Name.Name.L, dependColNames, t.Meta()); err != nil { + return errors.Trace(err) + } + duplicateColNames := make(map[string]struct{}, len(dependColNames)) + for k := range dependColNames { + duplicateColNames[k] = struct{}{} + } + cols := t.Cols() + + if err = checkDependedColExist(dependColNames, cols); err != nil { + return errors.Trace(err) + } + + if err = verifyColumnGenerationSingle(duplicateColNames, cols, spec.Position); err != nil { + return errors.Trace(err) + } + } + } + + // Check whether added column has existed. + col := table.FindCol(t.Cols(), colName) + if col != nil { + err = infoschema.ErrColumnExists.GenWithStackByArgs(colName) + if spec.IfNotExists { + ctx.GetSessionVars().StmtCtx.AppendNote(err) + continue + } + return err + } + + // Ignore table constraints now, maybe return error later. + // We use length(t.Cols()) as the default offset firstly, we will change the + // column's offset later. 
+ col, _, err = buildColumnAndConstraint(ctx, len(t.Cols()), specNewColumn, nil, t.Meta().Charset, t.Meta().Collate, schema.Charset, schema.Collate) + if err != nil { + return errors.Trace(err) + } + + col.OriginDefaultValue, err = generateOriginDefaultValue(col.ToInfo()) + if err != nil { + return errors.Trace(err) + } + + columns = append(columns, col) + positions = append(positions, spec.Position) + offsets = append(offsets, offset) + offset++ + } + + job := &model.Job{ + SchemaID: schema.ID, + TableID: t.Meta().ID, + SchemaName: schema.Name.L, + Type: model.ActionAddColumns, + BinlogInfo: &model.HistoryInfo{}, + Args: []interface{}{columns, positions, offsets}, + } + + err = d.doDDLJob(ctx, job) + // column exists, but if_not_exists flags is true, so we ignore this error. + if infoschema.ErrColumnExists.Equal(err) && spec.IfNotExists { + ctx.GetSessionVars().StmtCtx.AppendNote(err) + return nil + } + err = d.callHookOnChanged(err) + return errors.Trace(err) +} + // AddTablePartitions will add a new partition to the table. func (d *ddl) AddTablePartitions(ctx sessionctx.Context, ident ast.Ident, spec *ast.AlterTableSpec) error { is := d.infoHandle.Get() diff --git a/ddl/ddl_worker.go b/ddl/ddl_worker.go index 83af5c404f70a..b20efd8e9c46f 100644 --- a/ddl/ddl_worker.go +++ b/ddl/ddl_worker.go @@ -583,6 +583,8 @@ func (w *worker) runDDLJob(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, ver, err = onTruncateTablePartition(d, t, job) case model.ActionAddColumn: ver, err = onAddColumn(d, t, job) + case model.ActionAddColumns: + ver, err = onAddColumns(d, t, job) case model.ActionDropColumn: ver, err = onDropColumn(t, job) case model.ActionModifyColumn: diff --git a/ddl/util/event.go b/ddl/util/event.go index 925d0ee9fc6e7..2966931c1eef2 100644 --- a/ddl/util/event.go +++ b/ddl/util/event.go @@ -21,11 +21,11 @@ import ( // Event is an event that a ddl operation happened. type Event struct { - Tp model.ActionType - TableInfo *model.TableInfo - PartInfo *model.PartitionInfo - ColumnInfo *model.ColumnInfo - IndexInfo *model.IndexInfo + Tp model.ActionType + TableInfo *model.TableInfo + PartInfo *model.PartitionInfo + ColumnInfos []*model.ColumnInfo + IndexInfo *model.IndexInfo } // String implements fmt.Stringer interface. 
@@ -41,8 +41,10 @@ func (e *Event) String() string { } ret += fmt.Sprintf(", Partition IDs: %v", ids) } - if e.ColumnInfo != nil { - ret += fmt.Sprintf(", Column ID: %d, Column Name %s", e.ColumnInfo.ID, e.ColumnInfo.Name) + for _, columnInfo := range e.ColumnInfos { + if columnInfo != nil { + ret += fmt.Sprintf(", Column ID: %d, Column Name %s", columnInfo.ID, columnInfo.Name) + } } if e.IndexInfo != nil { ret += fmt.Sprintf(", Index ID: %d, Index Name %s", e.IndexInfo.ID, e.IndexInfo.Name) diff --git a/executor/ddl_test.go b/executor/ddl_test.go index 7d0245621160e..c04435a4925ce 100644 --- a/executor/ddl_test.go +++ b/executor/ddl_test.go @@ -401,6 +401,31 @@ func (s *testSuite6) TestAlterTableAddColumn(c *C) { tk.MustExec("drop view alter_view") } +func (s *testSuite6) TestAlterTableAddColumns(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("create table if not exists alter_test (c1 int)") + tk.MustExec("insert into alter_test values(1)") + tk.MustExec("alter table alter_test add column c2 timestamp default current_timestamp, add column c8 varchar(50) default 'CURRENT_TIMESTAMP'") + tk.MustExec("alter table alter_test add column (c7 timestamp default current_timestamp, c3 varchar(50) default 'CURRENT_TIMESTAMP')") + time.Sleep(1 * time.Millisecond) + now := time.Now().Add(-time.Duration(1 * time.Millisecond)).Format(types.TimeFormat) + r, err := tk.Exec("select c2 from alter_test") + c.Assert(err, IsNil) + req := r.NewChunk() + err = r.Next(context.Background(), req) + c.Assert(err, IsNil) + row := req.GetRow(0) + c.Assert(row.Len(), Equals, 1) + c.Assert(now, GreaterEqual, row.GetTime(0).String()) + r.Close() + tk.MustQuery("select c3 from alter_test").Check(testkit.Rows("CURRENT_TIMESTAMP")) + tk.MustExec("create or replace view alter_view as select c1,c2 from alter_test") + _, err = tk.Exec("alter table alter_view add column (c4 varchar(50), c5 varchar(50))") + c.Assert(err.Error(), Equals, ddl.ErrWrongObject.GenWithStackByArgs("test", "alter_view", "BASE TABLE").Error()) + tk.MustExec("drop view alter_view") +} + func (s *testSuite6) TestAddNotNullColumnNoDefault(c *C) { tk := testkit.NewTestKit(c, s.store) tk.MustExec("use test") diff --git a/statistics/handle/ddl.go b/statistics/handle/ddl.go index a974b857bb0e0..86459a25a13de 100644 --- a/statistics/handle/ddl.go +++ b/statistics/handle/ddl.go @@ -39,10 +39,19 @@ func (h *Handle) HandleDDLEvent(t *util.Event) error { case model.ActionAddColumn: ids := getPhysicalIDs(t.TableInfo) for _, id := range ids { - if err := h.insertColStats2KV(id, t.ColumnInfo); err != nil { + if err := h.insertColStats2KV(id, t.ColumnInfos[0]); err != nil { return err } } + case model.ActionAddColumns: + ids := getPhysicalIDs(t.TableInfo) + for _, id := range ids { + for _, columnInfo := range t.ColumnInfos { + if err := h.insertColStats2KV(id, columnInfo); err != nil { + return err + } + } + } case model.ActionAddTablePartition, model.ActionTruncateTablePartition: for _, def := range t.PartInfo.Definitions { if err := h.insertTableStats2KV(t.TableInfo, def.ID); err != nil { From 154668ad1a36212691678787fd2aa4440798d8f6 Mon Sep 17 00:00:00 2001 From: gauss Date: Sat, 21 Mar 2020 09:29:44 +0800 Subject: [PATCH 02/36] clean code --- ddl/ddl_api.go | 273 ++++++++++++++----------------------------------- 1 file changed, 75 insertions(+), 198 deletions(-) diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go index 821d7f81c494f..e51564fba3544 100644 --- a/ddl/ddl_api.go +++ b/ddl/ddl_api.go @@ -2008,7 +2008,7 @@ func (d *ddl) 
AlterTable(ctx sessionctx.Context, ident ast.Ident, specs []*ast.A if isSameTypeMultiSpecs(validSpecs) { switch validSpecs[0].Tp { case ast.AlterTableAddColumns: - err = d.AddSpecsColumn(ctx, ident, validSpecs) + err = d.AddColumns(ctx, ident, validSpecs) default: return errRunMultiSchemaChanges } @@ -2026,7 +2026,7 @@ func (d *ddl) AlterTable(ctx sessionctx.Context, ident ast.Ident, specs []*ast.A switch spec.Tp { case ast.AlterTableAddColumns: if len(spec.NewColumns) != 1 { - err = d.AddSpecColumns(ctx, ident, spec) + err = d.AddColumns(ctx, ident, []*ast.AlterTableSpec{spec}) } else { err = d.AddColumn(ctx, ident, spec) } @@ -2320,8 +2320,8 @@ func (d *ddl) AddColumn(ctx sessionctx.Context, ti ast.Ident, spec *ast.AlterTab return errors.Trace(err) } -// AddSpecsColumn will add multi new columns to the table, e.g. alter table t add column b int, add column c int -func (d *ddl) AddSpecsColumn(ctx sessionctx.Context, ti ast.Ident, specs []*ast.AlterTableSpec) error { +// AddColumns will add multi new columns to the table. +func (d *ddl) AddColumns(ctx sessionctx.Context, ti ast.Ident, specs []*ast.AlterTableSpec) error { schema, t, err := d.getSchemaAndTableByIdent(ctx, ti) if err != nil { return errors.Trace(err) @@ -2331,220 +2331,99 @@ func (d *ddl) AddSpecsColumn(ctx sessionctx.Context, ti ast.Ident, specs []*ast. var offsets []int // Check all the columns at once - if err = checkAddColumnTooManyColumns(len(t.Cols()) + len(specs)); err != nil { - return errors.Trace(err) - } + newColumnsCount := 0 set := make(map[string]bool) for _, spec := range specs { - specNewColumn := spec.NewColumns[0] - if set[specNewColumn.Name.Name.O] != true { - set[specNewColumn.Name.Name.O] = true - } else { - return errors.Trace(infoschema.ErrColumnExists.GenWithStackByArgs(specNewColumn.Name.Name.O)) + for _, specNewColumn := range spec.NewColumns { + newColumnsCount++ + if set[specNewColumn.Name.Name.O] != true { + set[specNewColumn.Name.Name.O] = true + } else { + return errors.Trace(infoschema.ErrColumnExists.GenWithStackByArgs(specNewColumn.Name.Name.O)) + } } } + if err = checkAddColumnTooManyColumns(len(t.Cols()) + newColumnsCount); err != nil { + return errors.Trace(err) + } // Check the columns one by one for _, spec := range specs { - if len(spec.NewColumns) != 1 { - return errRunMultiSchemaChanges - } - specNewColumn := spec.NewColumns[0] - err := checkUnsupportedColumnConstraint(specNewColumn, ti) - if err != nil { - return errors.Trace(err) - } - - colName := specNewColumn.Name.Name.O - if err = checkColumnAttributes(colName, specNewColumn.Tp); err != nil { - return errors.Trace(err) - } - if len(colName) > mysql.MaxColumnNameLength { - return ErrTooLongIdent.GenWithStackByArgs(colName) - } - - // If new column is a generated column, do validation. - // NOTE: we do check whether the column refers other generated - // columns occurring later in a table, but we don't handle the col offset. 
- for _, option := range specNewColumn.Options { - if option.Tp == ast.ColumnOptionGenerated { - if err := checkIllegalFn4GeneratedColumn(specNewColumn.Name.Name.L, option.Expr); err != nil { - return errors.Trace(err) - } - - if option.Stored { - return errUnsupportedOnGeneratedColumn.GenWithStackByArgs("Adding generated stored column through ALTER TABLE") - } - - _, dependColNames := findDependedColumnNames(specNewColumn) - if err = checkAutoIncrementRef(specNewColumn.Name.Name.L, dependColNames, t.Meta()); err != nil { - return errors.Trace(err) - } - duplicateColNames := make(map[string]struct{}, len(dependColNames)) - for k := range dependColNames { - duplicateColNames[k] = struct{}{} - } - cols := t.Cols() - - if err = checkDependedColExist(dependColNames, cols); err != nil { - return errors.Trace(err) - } - - if err = verifyColumnGenerationSingle(duplicateColNames, cols, spec.Position); err != nil { - return errors.Trace(err) - } + for _, specNewColumn := range spec.NewColumns { + err := checkUnsupportedColumnConstraint(specNewColumn, ti) + if err != nil { + return errors.Trace(err) } - } - // Check whether added column has existed. - col := table.FindCol(t.Cols(), colName) - if col != nil { - err = infoschema.ErrColumnExists.GenWithStackByArgs(colName) - if spec.IfNotExists { - ctx.GetSessionVars().StmtCtx.AppendNote(err) - return nil + colName := specNewColumn.Name.Name.O + if err = checkColumnAttributes(colName, specNewColumn.Tp); err != nil { + return errors.Trace(err) + } + if len(colName) > mysql.MaxColumnNameLength { + return ErrTooLongIdent.GenWithStackByArgs(colName) } - return err - } - - // Ignore table constraints now, maybe return error later. - // We use length(t.Cols()) as the default offset firstly, we will change the - // column's offset later. - col, _, err = buildColumnAndConstraint(ctx, len(t.Cols()), specNewColumn, nil, t.Meta().Charset, t.Meta().Collate, schema.Charset, schema.Collate) - if err != nil { - return errors.Trace(err) - } - - col.OriginDefaultValue, err = generateOriginDefaultValue(col.ToInfo()) - if err != nil { - return errors.Trace(err) - } - - columns = append(columns, col) - positions = append(positions, spec.Position) - offsets = append(offsets, 0) - } - - job := &model.Job{ - SchemaID: schema.ID, - TableID: t.Meta().ID, - SchemaName: schema.Name.L, - Type: model.ActionAddColumns, - BinlogInfo: &model.HistoryInfo{}, - Args: []interface{}{columns, positions, offsets}, - } - - err = d.doDDLJob(ctx, job) - // column exists, but if_not_exists flags is true, so we ignore this error. - // if infoschema.ErrColumnExists.Equal(err) && specs[0].IfNotExists { - // ctx.GetSessionVars().StmtCtx.AppendNote(err) - // return nil - // } - err = d.callHookOnChanged(err) - return errors.Trace(err) -} - -// AddSpecColumns will add columns to the table, e.g. 
alter table t add column(b int, c int) -func (d *ddl) AddSpecColumns(ctx sessionctx.Context, ti ast.Ident, spec *ast.AlterTableSpec) error { - schema, t, err := d.getSchemaAndTableByIdent(ctx, ti) - if err != nil { - return errors.Trace(err) - } - - offset := 0 - var columns []*table.Column - var positions []*ast.ColumnPosition - var offsets []int - - // Check all the columns at once - if err = checkAddColumnTooManyColumns(len(t.Cols()) + len(spec.NewColumns)); err != nil { - return errors.Trace(err) - } - set := make(map[string]bool) - for _, specNewColumn := range spec.NewColumns { - if set[specNewColumn.Name.Name.O] != true { - set[specNewColumn.Name.Name.O] = true - } else { - return errors.Trace(infoschema.ErrColumnExists.GenWithStackByArgs(specNewColumn.Name.Name.O)) - } - } - - // Check the columns one by one - for _, specNewColumn := range spec.NewColumns { - err := checkUnsupportedColumnConstraint(specNewColumn, ti) - if err != nil { - return errors.Trace(err) - } - colName := specNewColumn.Name.Name.O - if err = checkColumnAttributes(colName, specNewColumn.Tp); err != nil { - return errors.Trace(err) - } - if len(colName) > mysql.MaxColumnNameLength { - return ErrTooLongIdent.GenWithStackByArgs(colName) - } + // If new column is a generated column, do validation. + // NOTE: we do check whether the column refers other generated + // columns occurring later in a table, but we don't handle the col offset. + for _, option := range specNewColumn.Options { + if option.Tp == ast.ColumnOptionGenerated { + if err := checkIllegalFn4GeneratedColumn(specNewColumn.Name.Name.L, option.Expr); err != nil { + return errors.Trace(err) + } - // If new column is a generated column, do validation. - // NOTE: we do check whether the column refers other generated - // columns occurring later in a table, but we don't handle the col offset. 
- for _, option := range specNewColumn.Options { - if option.Tp == ast.ColumnOptionGenerated { - if err := checkIllegalFn4GeneratedColumn(specNewColumn.Name.Name.L, option.Expr); err != nil { - return errors.Trace(err) - } + if option.Stored { + return errUnsupportedOnGeneratedColumn.GenWithStackByArgs("Adding generated stored column through ALTER TABLE") + } - if option.Stored { - return errUnsupportedOnGeneratedColumn.GenWithStackByArgs("Adding generated stored column through ALTER TABLE") - } + _, dependColNames := findDependedColumnNames(specNewColumn) + if err = checkAutoIncrementRef(specNewColumn.Name.Name.L, dependColNames, t.Meta()); err != nil { + return errors.Trace(err) + } + duplicateColNames := make(map[string]struct{}, len(dependColNames)) + for k := range dependColNames { + duplicateColNames[k] = struct{}{} + } + cols := t.Cols() - _, dependColNames := findDependedColumnNames(specNewColumn) - if err = checkAutoIncrementRef(specNewColumn.Name.Name.L, dependColNames, t.Meta()); err != nil { - return errors.Trace(err) - } - duplicateColNames := make(map[string]struct{}, len(dependColNames)) - for k := range dependColNames { - duplicateColNames[k] = struct{}{} - } - cols := t.Cols() + if err = checkDependedColExist(dependColNames, cols); err != nil { + return errors.Trace(err) + } - if err = checkDependedColExist(dependColNames, cols); err != nil { - return errors.Trace(err) + if err = verifyColumnGenerationSingle(duplicateColNames, cols, spec.Position); err != nil { + return errors.Trace(err) + } } + } - if err = verifyColumnGenerationSingle(duplicateColNames, cols, spec.Position); err != nil { - return errors.Trace(err) + // Check whether added column has existed. + col := table.FindCol(t.Cols(), colName) + if col != nil { + err = infoschema.ErrColumnExists.GenWithStackByArgs(colName) + if spec.IfNotExists { + ctx.GetSessionVars().StmtCtx.AppendNote(err) + return nil } + return err } - } - // Check whether added column has existed. - col := table.FindCol(t.Cols(), colName) - if col != nil { - err = infoschema.ErrColumnExists.GenWithStackByArgs(colName) - if spec.IfNotExists { - ctx.GetSessionVars().StmtCtx.AppendNote(err) - continue + // Ignore table constraints now, maybe return error later. + // We use length(t.Cols()) as the default offset firstly, we will change the + // column's offset later. + col, _, err = buildColumnAndConstraint(ctx, len(t.Cols()), specNewColumn, nil, t.Meta().Charset, t.Meta().Collate, schema.Charset, schema.Collate) + if err != nil { + return errors.Trace(err) } - return err - } - // Ignore table constraints now, maybe return error later. - // We use length(t.Cols()) as the default offset firstly, we will change the - // column's offset later. 
- col, _, err = buildColumnAndConstraint(ctx, len(t.Cols()), specNewColumn, nil, t.Meta().Charset, t.Meta().Collate, schema.Charset, schema.Collate) - if err != nil { - return errors.Trace(err) - } + col.OriginDefaultValue, err = generateOriginDefaultValue(col.ToInfo()) + if err != nil { + return errors.Trace(err) + } - col.OriginDefaultValue, err = generateOriginDefaultValue(col.ToInfo()) - if err != nil { - return errors.Trace(err) + columns = append(columns, col) + positions = append(positions, spec.Position) + offsets = append(offsets, 0) } - - columns = append(columns, col) - positions = append(positions, spec.Position) - offsets = append(offsets, offset) - offset++ } job := &model.Job{ @@ -2557,10 +2436,8 @@ func (d *ddl) AddSpecColumns(ctx sessionctx.Context, ti ast.Ident, spec *ast.Alt } err = d.doDDLJob(ctx, job) - // column exists, but if_not_exists flags is true, so we ignore this error. - if infoschema.ErrColumnExists.Equal(err) && spec.IfNotExists { - ctx.GetSessionVars().StmtCtx.AppendNote(err) - return nil + if err != nil { + return errors.Trace(err) } err = d.callHookOnChanged(err) return errors.Trace(err) From eba0feeaadd9f362cde051a1797a197117d34a19 Mon Sep 17 00:00:00 2001 From: gauss Date: Sat, 21 Mar 2020 19:14:56 +0800 Subject: [PATCH 03/36] update go.mod --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 4c0fa122de3be..13f4ce71c9af9 100644 --- a/go.mod +++ b/go.mod @@ -37,7 +37,7 @@ require ( github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989 github.com/pingcap/kvproto v0.0.0-20200228095611-2cf9a243b8d5 github.com/pingcap/log v0.0.0-20200117041106-d28c14d3b1cd - github.com/pingcap/parser v0.0.0-20200317021010-cd90cc2a7d87 + github.com/pingcap/parser v0.0.0-20200321072941-b03eb7174165 github.com/pingcap/pd/v4 v4.0.0-beta.1.0.20200305072537-61d9f9cc35d3 github.com/pingcap/sysutil v0.0.0-20200309085538-962fd285f3bb github.com/pingcap/tidb-tools v4.0.0-beta.1.0.20200306084441-875bd09aa3d5+incompatible diff --git a/go.sum b/go.sum index 79473a8cb3cd0..ad61d8b61c771 100644 --- a/go.sum +++ b/go.sum @@ -269,6 +269,8 @@ github.com/pingcap/log v0.0.0-20200117041106-d28c14d3b1cd h1:CV3VsP3Z02MVtdpTMfE github.com/pingcap/log v0.0.0-20200117041106-d28c14d3b1cd/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= github.com/pingcap/parser v0.0.0-20200317021010-cd90cc2a7d87 h1:533jEUp3mtfWjk0el+awLbyGVxiHcUIGWcR1Y7gB+fg= github.com/pingcap/parser v0.0.0-20200317021010-cd90cc2a7d87/go.mod h1:9v0Edh8IbgjGYW2ArJr19E+bvL8zKahsFp+ixWeId+4= +github.com/pingcap/parser v0.0.0-20200321072941-b03eb7174165 h1:8U66zIPckyZ7aTVXrFKlMczXBMvDjzmJRp+VBxICxsE= +github.com/pingcap/parser v0.0.0-20200321072941-b03eb7174165/go.mod h1:9v0Edh8IbgjGYW2ArJr19E+bvL8zKahsFp+ixWeId+4= github.com/pingcap/pd/v4 v4.0.0-beta.1.0.20200305072537-61d9f9cc35d3 h1:Yrp99FnjHAEuDrSBql2l0IqCtJX7KwJbTsD5hIArkvk= github.com/pingcap/pd/v4 v4.0.0-beta.1.0.20200305072537-61d9f9cc35d3/go.mod h1:25GfNw6+Jcr9kca5rtmTb4gKCJ4jOpow2zV2S9Dgafs= github.com/pingcap/sysutil v0.0.0-20200206130906-2bfa6dc40bcd/go.mod h1:EB/852NMQ+aRKioCpToQ94Wl7fktV+FNnxf3CX/TTXI= From 7708e6e5d00d7260ab7e4a1d14c6c747847da91d Mon Sep 17 00:00:00 2001 From: gauss Date: Sun, 22 Mar 2020 10:22:30 +0800 Subject: [PATCH 04/36] add drop columns --- ddl/column.go | 96 +++++++++++++++++++++++++++++++++++++++++- ddl/ddl_worker_test.go | 9 ++++ 2 files changed, 104 insertions(+), 1 deletion(-) diff --git a/ddl/column.go b/ddl/column.go index 61c9e2d905c14..e434734e4f069 100644 --- 
a/ddl/column.go +++ b/ddl/column.go @@ -293,7 +293,7 @@ func setColumnsState(columnInfos []*model.ColumnInfo, state model.SchemaState) { func onAddColumns(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, err error) { // Handle the rolling back job. if job.IsRollingback() { - ver, err = onDropColumn(t, job) + ver, err = onDropColumns(t, job) if err != nil { return ver, errors.Trace(err) } @@ -353,6 +353,100 @@ func onAddColumns(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, err error return ver, errors.Trace(err) } +func onDropColumns(t *meta.Meta, job *model.Job) (ver int64, _ error) { + tblInfo, colInfos, delCount, err := checkDropColumns(t, job) + if err != nil { + return ver, errors.Trace(err) + } + + originalState := colInfos[0].State + switch colInfos[0].State { + case model.StatePublic: + // public -> write only + job.SchemaState = model.StateWriteOnly + setColumnsState(colInfos, model.StateWriteOnly) + for _, colInfo := range colInfos { + // Set this column's offset to the last and reset all following columns' offsets. + adjustColumnInfoInDropColumn(tblInfo, colInfo.Offset) + // When the dropping column has not-null flag and it hasn't the default value, we can backfill the column value like "add column". + // NOTE: If the state of StateWriteOnly can be rollbacked, we'd better reconsider the original default value. + // And we need consider the column without not-null flag. + if colInfo.OriginDefaultValue == nil && mysql.HasNotNullFlag(colInfo.Flag) { + // If the column is timestamp default current_timestamp, and DDL owner is new version TiDB that set column.Version to 1, + // then old TiDB update record in the column write only stage will uses the wrong default value of the dropping column. + // Because new version of the column default value is UTC time, but old version TiDB will think the default value is the time in system timezone. + // But currently will be ok, because we can't cancel the drop column job when the job is running, + // so the column will be dropped succeed and client will never see the wrong default value of the dropped column. + // More info about this problem, see PR#9115. + colInfo.OriginDefaultValue, err = generateOriginDefaultValue(colInfo) + if err != nil { + return ver, errors.Trace(err) + } + } + } + ver, err = updateVersionAndTableInfoWithCheck(t, job, tblInfo, originalState != colInfos[0].State) + case model.StateWriteOnly: + // write only -> delete only + job.SchemaState = model.StateDeleteOnly + setColumnsState(colInfos, model.StateDeleteOnly) + ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != colInfos[0].State) + case model.StateDeleteOnly: + // delete only -> reorganization + job.SchemaState = model.StateDeleteReorganization + setColumnsState(colInfos, model.StateDeleteReorganization) + ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != colInfos[0].State) + case model.StateDeleteReorganization: + // reorganization -> absent + // All reorganization jobs are done, drop this column. + tblInfo.Columns = tblInfo.Columns[:len(tblInfo.Columns)-delCount] + setColumnsState(colInfos, model.StateNone) + ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != colInfos[0].State) + if err != nil { + return ver, errors.Trace(err) + } + + // Finish this job. 
+ if job.IsRollingback() { + job.FinishTableJob(model.JobStateRollbackDone, model.StateNone, ver, tblInfo) + } else { + job.FinishTableJob(model.JobStateDone, model.StateNone, ver, tblInfo) + } + default: + err = errInvalidDDLJob.GenWithStackByArgs("table", tblInfo.State) + } + return ver, errors.Trace(err) +} + +func checkDropColumns(t *meta.Meta, job *model.Job) (*model.TableInfo, []*model.ColumnInfo, int, error) { + schemaID := job.SchemaID + tblInfo, err := getTableInfoAndCancelFaultJob(t, job, schemaID) + if err != nil { + return nil, nil, 0, errors.Trace(err) + } + + var colNames []model.CIStr + err = job.DecodeArgs(&colNames) + if err != nil { + job.State = model.JobStateCancelled + return nil, nil, 0, errors.Trace(err) + } + + var colInfos []*model.ColumnInfo + for _, colName := range colNames { + colInfo := model.FindColumnInfo(tblInfo.Columns, colName.L) + if colInfo == nil || colInfo.Hidden { + job.State = model.JobStateCancelled + return nil, nil, 0, ErrCantDropFieldOrKey.GenWithStack("column %s doesn't exist", colName) + } + if err = isDroppableColumn(tblInfo, colName); err != nil { + job.State = model.JobStateCancelled + return nil, nil, 0, errors.Trace(err) + } + colInfos = append(colInfos, colInfo) + } + return tblInfo, colInfos, len(colNames), nil +} + func onDropColumn(t *meta.Meta, job *model.Job) (ver int64, _ error) { tblInfo, colInfo, err := checkDropColumn(t, job) if err != nil { diff --git a/ddl/ddl_worker_test.go b/ddl/ddl_worker_test.go index fe18c7b13d3ef..f16715877dda4 100644 --- a/ddl/ddl_worker_test.go +++ b/ddl/ddl_worker_test.go @@ -293,6 +293,9 @@ func (s *testDDLSuite) TestColumnError(c *C) { col.FieldType = *types.NewFieldType(mysql.TypeLong) pos := &ast.ColumnPosition{Tp: ast.ColumnPositionAfter, RelativeColumn: &ast.ColumnName{Name: model.NewCIStr("c5")}} + cols := &[]*model.ColumnInfo{col} + positions := &[]*ast.ColumnPosition{pos} + // for adding column doDDLJobErr(c, -1, tblInfo.ID, model.ActionAddColumn, []interface{}{col, pos, 0}, ctx, d) doDDLJobErr(c, dbInfo.ID, -1, model.ActionAddColumn, []interface{}{col, pos, 0}, ctx, d) @@ -304,6 +307,12 @@ func (s *testDDLSuite) TestColumnError(c *C) { doDDLJobErr(c, dbInfo.ID, -1, model.ActionDropColumn, []interface{}{col, pos, 0}, ctx, d) doDDLJobErr(c, dbInfo.ID, tblInfo.ID, model.ActionDropColumn, []interface{}{0}, ctx, d) doDDLJobErr(c, dbInfo.ID, tblInfo.ID, model.ActionDropColumn, []interface{}{model.NewCIStr("c5")}, ctx, d) + + // for adding columns + doDDLJobErr(c, -1, tblInfo.ID, model.ActionAddColumns, []interface{}{cols, positions, 0}, ctx, d) + doDDLJobErr(c, dbInfo.ID, -1, model.ActionAddColumns, []interface{}{cols, positions, 0}, ctx, d) + doDDLJobErr(c, dbInfo.ID, tblInfo.ID, model.ActionAddColumns, []interface{}{0}, ctx, d) + doDDLJobErr(c, dbInfo.ID, tblInfo.ID, model.ActionAddColumns, []interface{}{cols, positions, 0}, ctx, d) } func testCheckOwner(c *C, d *ddl, expectedVal bool) { From 15af5d4638f6c0b4bb6ac1216f0e66b47a0fab32 Mon Sep 17 00:00:00 2001 From: gauss Date: Sun, 22 Mar 2020 20:05:54 +0800 Subject: [PATCH 05/36] fix offsets for multiple after positions --- ddl/column.go | 27 +++++++++++++++++++-------- 1 file changed, 19 insertions(+), 8 deletions(-) diff --git a/ddl/column.go b/ddl/column.go index e434734e4f069..3c07869cc60e6 100644 --- a/ddl/column.go +++ b/ddl/column.go @@ -230,11 +230,11 @@ func onAddColumn(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, err error) return ver, errors.Trace(err) } -func checkAddAndCreateColumnInfos(t *meta.Meta, job *model.Job) 
(*model.TableInfo, []*model.ColumnInfo, []int, error) { +func checkAddAndCreateColumnInfos(t *meta.Meta, job *model.Job) (*model.TableInfo, []*model.ColumnInfo, []*ast.ColumnPosition, []int, error) { schemaID := job.SchemaID tblInfo, err := getTableInfoAndCancelFaultJob(t, job, schemaID) if err != nil { - return nil, nil, nil, errors.Trace(err) + return nil, nil, nil, nil, errors.Trace(err) } columns := []*model.ColumnInfo{} positions := []*ast.ColumnPosition{} @@ -247,7 +247,7 @@ func checkAddAndCreateColumnInfos(t *meta.Meta, job *model.Job) (*model.TableInf } if err != nil { job.State = model.JobStateCancelled - return nil, nil, nil, errors.Trace(err) + return nil, nil, nil, nil, errors.Trace(err) } var columnInfos []*model.ColumnInfo @@ -259,13 +259,13 @@ func checkAddAndCreateColumnInfos(t *meta.Meta, job *model.Job) (*model.TableInf if columnInfo.State == model.StatePublic { // We already have a column with the same column name. job.State = model.JobStateCancelled - return nil, nil, nil, infoschema.ErrColumnExists.GenWithStackByArgs(col.Name) + return nil, nil, nil, nil, infoschema.ErrColumnExists.GenWithStackByArgs(col.Name) } } else { columnInfo, offset, err := createColumnInfo(tblInfo, col, positions[i]) if err != nil { job.State = model.JobStateCancelled - return nil, nil, nil, errors.Trace(err) + return nil, nil, nil, nil, errors.Trace(err) } logutil.BgLogger().Info("[ddl] run add columns job", zap.String("job", job.String()), zap.Reflect("columnInfo", *columnInfo), zap.Int("offset", offsets[i])) @@ -275,13 +275,13 @@ func checkAddAndCreateColumnInfos(t *meta.Meta, job *model.Job) (*model.TableInf } if err = checkAddColumnTooManyColumns(len(tblInfo.Columns)); err != nil { job.State = model.JobStateCancelled - return nil, nil, nil, errors.Trace(err) + return nil, nil, nil, nil, errors.Trace(err) } columnInfos = append(columnInfos, columnInfo) } } job.Args = []interface{}{columnInfos, positions, offsets} - return tblInfo, columnInfos, offsets, nil + return tblInfo, columnInfos, positions, offsets, nil } func setColumnsState(columnInfos []*model.ColumnInfo, state model.SchemaState) { @@ -306,7 +306,7 @@ func onAddColumns(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, err error } }) - tblInfo, columnInfos, offsets, err := checkAddAndCreateColumnInfos(t, job) + tblInfo, columnInfos, positions, offsets, err := checkAddAndCreateColumnInfos(t, job) if err != nil { return ver, errors.Trace(err) } @@ -335,6 +335,17 @@ func onAddColumns(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, err error for i := range offsets { tmpCols := oldCols[:len(oldCols)-len(offsets)+i+1] tblInfo.Columns = tmpCols + // For multiple after positions. + // e.g. create table t(a int); + // alter table t add column b int after a, add column c int after a; + // alter table t add column a1 int after a, add column b1 int after b, add column c1 int after c; + if positions[i].Tp == ast.ColumnPositionAfter { + for j := 0; j < i; j++ { + if positions[j].Tp == ast.ColumnPositionAfter && offsets[j] < offsets[i] { + offsets[i]++ + } + } + } adjustColumnInfoInAddColumn(tblInfo, offsets[i]) oldCols = append(tblInfo.Columns, oldCols[len(oldCols)-len(offsets)+i+1:]...) 
} From c7a40419ecca629f0be6a32497ecbfbceacef38b Mon Sep 17 00:00:00 2001 From: gauss Date: Sun, 22 Mar 2020 23:13:52 +0800 Subject: [PATCH 06/36] add test --- ddl/column_test.go | 123 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 123 insertions(+) diff --git a/ddl/column_test.go b/ddl/column_test.go index 141befc85425c..5e34b3214686d 100644 --- a/ddl/column_test.go +++ b/ddl/column_test.go @@ -90,6 +90,37 @@ func testCreateColumn(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo return job } +func testCreateColumns(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, + colNames []string, positions []*ast.ColumnPosition, defaultValue interface{}) *model.Job { + + var colInfos []*model.ColumnInfo + offsets := make([]int, len(colNames)) + for _, colName := range colNames { + col := &model.ColumnInfo{ + Name: model.NewCIStr(colName), + Offset: len(tblInfo.Columns), + DefaultValue: defaultValue, + OriginDefaultValue: defaultValue, + } + col.ID = allocateColumnID(tblInfo) + col.FieldType = *types.NewFieldType(mysql.TypeLong) + colInfos = append(colInfos, col) + } + + job := &model.Job{ + SchemaID: dbInfo.ID, + TableID: tblInfo.ID, + Type: model.ActionAddColumns, + BinlogInfo: &model.HistoryInfo{}, + Args: []interface{}{colInfos, positions, offsets}, + } + err := d.doDDLJob(ctx, job) + c.Assert(err, IsNil) + v := getSchemaVer(c, ctx) + checkHistoryJobArgs(c, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo}) + return job +} + func buildDropColumnJob(dbInfo *model.DBInfo, tblInfo *model.TableInfo, colName string) *model.Job { return &model.Job{ SchemaID: dbInfo.ID, @@ -845,6 +876,98 @@ func (s *testColumnSuite) TestAddColumn(c *C) { d.Stop() } +func (s *testColumnSuite) TestAddColumns(c *C) { + d := newDDL( + context.Background(), + WithStore(s.store), + WithLease(testLease), + ) + tblInfo := testTableInfo(c, d, "t", 3) + ctx := testNewContext(d) + + err := ctx.NewTxn(context.Background()) + c.Assert(err, IsNil) + + testCreateTable(c, ctx, d, s.dbInfo, tblInfo) + t := testGetTable(c, d, s.dbInfo.ID, tblInfo.ID) + + oldRow := types.MakeDatums(int64(1), int64(2), int64(3)) + handle, err := t.AddRecord(ctx, oldRow) + c.Assert(err, IsNil) + + txn, err := ctx.Txn(true) + c.Assert(err, IsNil) + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) + + newColNames := []string{"c4,c5,c6"} + positions := make([]*ast.ColumnPosition, 3) + for i := range positions { + positions[i] = &ast.ColumnPosition{Tp: ast.ColumnPositionNone} + } + defaultColValue := int64(4) + + var mu sync.Mutex + var hookErr error + checkOK := false + + tc := &TestDDLCallback{} + tc.onJobUpdated = func(job *model.Job) { + mu.Lock() + defer mu.Unlock() + if checkOK { + return + } + + t, err1 := testGetTableWithError(d, s.dbInfo.ID, tblInfo.ID) + if err1 != nil { + hookErr = errors.Trace(err1) + return + } + for _, newColName := range newColNames { + newCol := table.FindCol(t.(*tables.TableCommon).Columns, newColName) + if newCol == nil { + return + } + + err1 = s.checkAddColumn(newCol.State, d, tblInfo, handle, newCol, oldRow, defaultColValue) + if err1 != nil { + hookErr = errors.Trace(err1) + return + } + + if newCol.State == model.StatePublic { + checkOK = true + } + } + } + + d.SetHook(tc) + + job := testCreateColumns(c, ctx, d, s.dbInfo, tblInfo, newColNames, positions, defaultColValue) + + testCheckJobDone(c, d, job, true) + mu.Lock() + hErr := hookErr + ok := checkOK + mu.Unlock() + c.Assert(errors.ErrorStack(hErr), Equals, "") + c.Assert(ok, IsTrue) + + err = 
ctx.NewTxn(context.Background()) + c.Assert(err, IsNil) + + job = testDropTable(c, ctx, d, s.dbInfo, tblInfo) + testCheckJobDone(c, d, job, false) + + txn, err = ctx.Txn(true) + c.Assert(err, IsNil) + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) + + d.Stop() +} + func (s *testColumnSuite) TestDropColumn(c *C) { d := newDDL( context.Background(), From e156feb4feee9bd5b5cbb4155136f242f496b9e9 Mon Sep 17 00:00:00 2001 From: gauss Date: Tue, 24 Mar 2020 10:50:23 +0800 Subject: [PATCH 07/36] fix tidy --- go.sum | 2 -- 1 file changed, 2 deletions(-) diff --git a/go.sum b/go.sum index ad61d8b61c771..80055ff096674 100644 --- a/go.sum +++ b/go.sum @@ -267,8 +267,6 @@ github.com/pingcap/kvproto v0.0.0-20200228095611-2cf9a243b8d5/go.mod h1:IOdRDPLy github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= github.com/pingcap/log v0.0.0-20200117041106-d28c14d3b1cd h1:CV3VsP3Z02MVtdpTMfEgRJ4T9NGgGTxdHpJerent7rM= github.com/pingcap/log v0.0.0-20200117041106-d28c14d3b1cd/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= -github.com/pingcap/parser v0.0.0-20200317021010-cd90cc2a7d87 h1:533jEUp3mtfWjk0el+awLbyGVxiHcUIGWcR1Y7gB+fg= -github.com/pingcap/parser v0.0.0-20200317021010-cd90cc2a7d87/go.mod h1:9v0Edh8IbgjGYW2ArJr19E+bvL8zKahsFp+ixWeId+4= github.com/pingcap/parser v0.0.0-20200321072941-b03eb7174165 h1:8U66zIPckyZ7aTVXrFKlMczXBMvDjzmJRp+VBxICxsE= github.com/pingcap/parser v0.0.0-20200321072941-b03eb7174165/go.mod h1:9v0Edh8IbgjGYW2ArJr19E+bvL8zKahsFp+ixWeId+4= github.com/pingcap/pd/v4 v4.0.0-beta.1.0.20200305072537-61d9f9cc35d3 h1:Yrp99FnjHAEuDrSBql2l0IqCtJX7KwJbTsD5hIArkvk= From b83a160484ff30b6d56fcc907a9425e38d6e7a13 Mon Sep 17 00:00:00 2001 From: gauss Date: Tue, 24 Mar 2020 10:56:42 +0800 Subject: [PATCH 08/36] fix check --- ddl/ddl_api.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go index 58473c98bda13..64fa7a76d6b2c 100644 --- a/ddl/ddl_api.go +++ b/ddl/ddl_api.go @@ -2339,7 +2339,7 @@ func (d *ddl) AddColumns(ctx sessionctx.Context, ti ast.Ident, specs []*ast.Alte for _, spec := range specs { for _, specNewColumn := range spec.NewColumns { newColumnsCount++ - if set[specNewColumn.Name.Name.O] != true { + if !set[specNewColumn.Name.Name.O] { set[specNewColumn.Name.Name.O] = true } else { return errors.Trace(infoschema.ErrColumnExists.GenWithStackByArgs(specNewColumn.Name.Name.O)) From 244e3086bbb53fb06b94400799f5d33e4816544a Mon Sep 17 00:00:00 2001 From: gauss Date: Tue, 24 Mar 2020 14:05:17 +0800 Subject: [PATCH 09/36] extract a function to handle the duplicated code --- ddl/ddl_api.go | 135 ++++++++++++++++--------------------------------- 1 file changed, 44 insertions(+), 91 deletions(-) diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go index dc3c053565ed3..287d265e0b7e6 100644 --- a/ddl/ddl_api.go +++ b/ddl/ddl_api.go @@ -2228,36 +2228,18 @@ func checkUnsupportedColumnConstraint(col *ast.ColumnDef, ti ast.Ident) error { return nil } -// AddColumn will add a new column to the table. 
-func (d *ddl) AddColumn(ctx sessionctx.Context, ti ast.Ident, spec *ast.AlterTableSpec) error { - specNewColumn := spec.NewColumns[0] - +func checkNewColumn(ctx sessionctx.Context, ti ast.Ident, schema *model.DBInfo, spec *ast.AlterTableSpec, t table.Table, specNewColumn *ast.ColumnDef) (*table.Column, error) { err := checkUnsupportedColumnConstraint(specNewColumn, ti) if err != nil { - return errors.Trace(err) + return nil, errors.Trace(err) } colName := specNewColumn.Name.Name.O if err = checkColumnAttributes(colName, specNewColumn.Tp); err != nil { - return errors.Trace(err) - } - - schema, t, err := d.getSchemaAndTableByIdent(ctx, ti) - if err != nil { - return errors.Trace(err) - } - if err = checkAddColumnTooManyColumns(len(t.Cols()) + 1); err != nil { - return errors.Trace(err) + return nil, errors.Trace(err) } - // Check whether added column has existed. - col := table.FindCol(t.Cols(), colName) - if col != nil { - err = infoschema.ErrColumnExists.GenWithStackByArgs(colName) - if spec.IfNotExists { - ctx.GetSessionVars().StmtCtx.AppendNote(err) - return nil - } - return err + if len(colName) > mysql.MaxColumnNameLength { + return nil, ErrTooLongIdent.GenWithStackByArgs(colName) } // If new column is a generated column, do validation. @@ -2266,16 +2248,16 @@ func (d *ddl) AddColumn(ctx sessionctx.Context, ti ast.Ident, spec *ast.AlterTab for _, option := range specNewColumn.Options { if option.Tp == ast.ColumnOptionGenerated { if err := checkIllegalFn4GeneratedColumn(specNewColumn.Name.Name.L, option.Expr); err != nil { - return errors.Trace(err) + return nil, errors.Trace(err) } if option.Stored { - return errUnsupportedOnGeneratedColumn.GenWithStackByArgs("Adding generated stored column through ALTER TABLE") + return nil, errUnsupportedOnGeneratedColumn.GenWithStackByArgs("Adding generated stored column through ALTER TABLE") } _, dependColNames := findDependedColumnNames(specNewColumn) if err = checkAutoIncrementRef(specNewColumn.Name.Name.L, dependColNames, t.Meta()); err != nil { - return errors.Trace(err) + return nil, errors.Trace(err) } duplicateColNames := make(map[string]struct{}, len(dependColNames)) for k := range dependColNames { @@ -2284,28 +2266,53 @@ func (d *ddl) AddColumn(ctx sessionctx.Context, ti ast.Ident, spec *ast.AlterTab cols := t.Cols() if err = checkDependedColExist(dependColNames, cols); err != nil { - return errors.Trace(err) + return nil, errors.Trace(err) } if err = verifyColumnGenerationSingle(duplicateColNames, cols, spec.Position); err != nil { - return errors.Trace(err) + return nil, errors.Trace(err) } } } - if len(colName) > mysql.MaxColumnNameLength { - return ErrTooLongIdent.GenWithStackByArgs(colName) - } - // Ignore table constraints now, maybe return error later. // We use length(t.Cols()) as the default offset firstly, we will change the // column's offset later. - col, _, err = buildColumnAndConstraint(ctx, len(t.Cols()), specNewColumn, nil, t.Meta().Charset, t.Meta().Collate, schema.Charset, schema.Collate) + col, _, err := buildColumnAndConstraint(ctx, len(t.Cols()), specNewColumn, nil, t.Meta().Charset, t.Meta().Collate, schema.Charset, schema.Collate) if err != nil { - return errors.Trace(err) + return nil, errors.Trace(err) } col.OriginDefaultValue, err = generateOriginDefaultValue(col.ToInfo()) + if err != nil { + return nil, errors.Trace(err) + } + return col, nil +} + +// AddColumn will add a new column to the table. 
+func (d *ddl) AddColumn(ctx sessionctx.Context, ti ast.Ident, spec *ast.AlterTableSpec) error { + specNewColumn := spec.NewColumns[0] + schema, t, err := d.getSchemaAndTableByIdent(ctx, ti) + if err != nil { + return errors.Trace(err) + } + if err = checkAddColumnTooManyColumns(len(t.Cols()) + 1); err != nil { + return errors.Trace(err) + } + colName := specNewColumn.Name.Name.O + // Check whether added column has existed. + col := table.FindCol(t.Cols(), colName) + if col != nil { + err = infoschema.ErrColumnExists.GenWithStackByArgs(colName) + if spec.IfNotExists { + ctx.GetSessionVars().StmtCtx.AppendNote(err) + return nil + } + return err + } + + col, err = checkNewColumn(ctx, ti, schema, spec, t, specNewColumn) if err != nil { return errors.Trace(err) } @@ -2339,7 +2346,7 @@ func (d *ddl) AddColumns(ctx sessionctx.Context, ti ast.Ident, specs []*ast.Alte var positions []*ast.ColumnPosition var offsets []int - // Check all the columns at once + // Check all the columns at once. newColumnsCount := 0 set := make(map[string]bool) for _, spec := range specs { @@ -2356,55 +2363,10 @@ func (d *ddl) AddColumns(ctx sessionctx.Context, ti ast.Ident, specs []*ast.Alte return errors.Trace(err) } - // Check the columns one by one + // Check the columns one by one. for _, spec := range specs { for _, specNewColumn := range spec.NewColumns { - err := checkUnsupportedColumnConstraint(specNewColumn, ti) - if err != nil { - return errors.Trace(err) - } - colName := specNewColumn.Name.Name.O - if err = checkColumnAttributes(colName, specNewColumn.Tp); err != nil { - return errors.Trace(err) - } - if len(colName) > mysql.MaxColumnNameLength { - return ErrTooLongIdent.GenWithStackByArgs(colName) - } - - // If new column is a generated column, do validation. - // NOTE: we do check whether the column refers other generated - // columns occurring later in a table, but we don't handle the col offset. - for _, option := range specNewColumn.Options { - if option.Tp == ast.ColumnOptionGenerated { - if err := checkIllegalFn4GeneratedColumn(specNewColumn.Name.Name.L, option.Expr); err != nil { - return errors.Trace(err) - } - - if option.Stored { - return errUnsupportedOnGeneratedColumn.GenWithStackByArgs("Adding generated stored column through ALTER TABLE") - } - - _, dependColNames := findDependedColumnNames(specNewColumn) - if err = checkAutoIncrementRef(specNewColumn.Name.Name.L, dependColNames, t.Meta()); err != nil { - return errors.Trace(err) - } - duplicateColNames := make(map[string]struct{}, len(dependColNames)) - for k := range dependColNames { - duplicateColNames[k] = struct{}{} - } - cols := t.Cols() - - if err = checkDependedColExist(dependColNames, cols); err != nil { - return errors.Trace(err) - } - - if err = verifyColumnGenerationSingle(duplicateColNames, cols, spec.Position); err != nil { - return errors.Trace(err) - } - } - } - // Check whether added column has existed. col := table.FindCol(t.Cols(), colName) if col != nil { @@ -2416,19 +2378,10 @@ func (d *ddl) AddColumns(ctx sessionctx.Context, ti ast.Ident, specs []*ast.Alte return err } - // Ignore table constraints now, maybe return error later. - // We use length(t.Cols()) as the default offset firstly, we will change the - // column's offset later. 
- col, _, err = buildColumnAndConstraint(ctx, len(t.Cols()), specNewColumn, nil, t.Meta().Charset, t.Meta().Collate, schema.Charset, schema.Collate) - if err != nil { - return errors.Trace(err) - } - - col.OriginDefaultValue, err = generateOriginDefaultValue(col.ToInfo()) + col, err = checkNewColumn(ctx, ti, schema, spec, t, specNewColumn) if err != nil { return errors.Trace(err) } - columns = append(columns, col) positions = append(positions, spec.Position) offsets = append(offsets, 0) From de6964c4d8eff3fb33a39b2292b036b4da6c9048 Mon Sep 17 00:00:00 2001 From: gauss Date: Tue, 24 Mar 2020 15:18:07 +0800 Subject: [PATCH 10/36] add test case --- ddl/db_integration_test.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/ddl/db_integration_test.go b/ddl/db_integration_test.go index f1adc44c75e49..f24c76ebd58e7 100644 --- a/ddl/db_integration_test.go +++ b/ddl/db_integration_test.go @@ -462,6 +462,17 @@ func (s *testIntegrationSuite5) TestErrnoErrorCode(c *C) { sql = "create table test_on_update_2(c int on update current_timestamp);" tk.MustGetErrCode(sql, errno.ErrInvalidOnUpdate) + // add columns + sql = "alter table test_error_code_succ add column c1 int, add column c1 int" + tk.MustGetErrCode(sql, errno.ErrDupFieldName) + sql = "alter table test_error_code_succ add column (aa int, aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa int)" + tk.MustGetErrCode(sql, errno.ErrTooLongIdent) + sql = "alter table test_error_code_succ add column `a ` int, add column `b ` int;" + tk.MustGetErrCode(sql, errno.ErrWrongColumnName) + tk.MustExec("create table test_on_update (c1 int, c2 int);") + sql = "alter table test_on_update add column cc int, add column c3 int on update current_timestamp;" + tk.MustGetErrCode(sql, errno.ErrInvalidOnUpdate) + // drop column sql = "alter table test_error_code_succ drop c_not_exist" tk.MustGetErrCode(sql, errno.ErrCantDropFieldOrKey) From 8521775f9b8b95286fb7f2471f680c390ed4d824 Mon Sep 17 00:00:00 2001 From: gauss Date: Tue, 24 Mar 2020 15:27:46 +0800 Subject: [PATCH 11/36] fix test --- ddl/db_integration_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ddl/db_integration_test.go b/ddl/db_integration_test.go index f24c76ebd58e7..a29bc726cd274 100644 --- a/ddl/db_integration_test.go +++ b/ddl/db_integration_test.go @@ -469,8 +469,8 @@ func (s *testIntegrationSuite5) TestErrnoErrorCode(c *C) { tk.MustGetErrCode(sql, errno.ErrTooLongIdent) sql = "alter table test_error_code_succ add column `a ` int, add column `b ` int;" tk.MustGetErrCode(sql, errno.ErrWrongColumnName) - tk.MustExec("create table test_on_update (c1 int, c2 int);") - sql = "alter table test_on_update add column cc int, add column c3 int on update current_timestamp;" + tk.MustExec("create table test_add_columns_on_update (c1 int, c2 int);") + sql = "alter table test_add_columns_on_update add column cc int, add column c3 int on update current_timestamp;" tk.MustGetErrCode(sql, errno.ErrInvalidOnUpdate) // drop column From 2426141a05b77d7ba29ec10b10962903cdb78ec4 Mon Sep 17 00:00:00 2001 From: gauss Date: Wed, 25 Mar 2020 14:24:35 +0800 Subject: [PATCH 12/36] add drop multi-columns --- ddl/column_test.go | 116 +++++++++++++++++++++++++++++++++++++++++-- ddl/ddl_api.go | 65 +++++++++++++++++++++--- ddl/ddl_worker.go | 2 + executor/ddl_test.go | 3 -- go.mod | 2 +- go.sum | 4 +- 6 files changed, 176 insertions(+), 16 deletions(-) diff --git a/ddl/column_test.go b/ddl/column_test.go index 43e002549e37f..ac1f6029e5741 100644 --- a/ddl/column_test.go +++ 
b/ddl/column_test.go @@ -90,9 +90,8 @@ func testCreateColumn(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo return job } -func testCreateColumns(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, - colNames []string, positions []*ast.ColumnPosition, defaultValue interface{}) *model.Job { - +func buildCreateColumnJobs(dbInfo *model.DBInfo, tblInfo *model.TableInfo, colNames []string, + positions []*ast.ColumnPosition, defaultValue interface{}) *model.Job { var colInfos []*model.ColumnInfo offsets := make([]int, len(colNames)) for _, colName := range colNames { @@ -114,6 +113,12 @@ func testCreateColumns(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInf BinlogInfo: &model.HistoryInfo{}, Args: []interface{}{colInfos, positions, offsets}, } + return job +} + +func testCreateColumns(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, + colNames []string, positions []*ast.ColumnPosition, defaultValue interface{}) *model.Job { + job := buildCreateColumnJobs(dbInfo, tblInfo, colNames, positions, defaultValue) err := d.doDDLJob(ctx, job) c.Assert(err, IsNil) v := getSchemaVer(c, ctx) @@ -144,6 +149,34 @@ func testDropColumn(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, return job } +func buildDropColumnJobs(dbInfo *model.DBInfo, tblInfo *model.TableInfo, colNames []string) *model.Job { + var columnNames []model.CIStr + for _, colName := range colNames { + columnNames = append(columnNames, model.NewCIStr(colName)) + } + job := &model.Job{ + SchemaID: dbInfo.ID, + TableID: tblInfo.ID, + Type: model.ActionDropColumns, + BinlogInfo: &model.HistoryInfo{}, + Args: []interface{}{columnNames}, + } + return job +} + +func testDropColumns(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, colNames []string, isError bool) *model.Job { + job := buildDropColumnJobs(dbInfo, tblInfo, colNames) + err := d.doDDLJob(ctx, job) + if isError { + c.Assert(err, NotNil) + return nil + } + c.Assert(errors.ErrorStack(err), Equals, "") + v := getSchemaVer(c, ctx) + checkHistoryJobArgs(c, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo}) + return job +} + func (s *testColumnSuite) TestColumn(c *C) { d := newDDL( context.Background(), @@ -1042,6 +1075,83 @@ func (s *testColumnSuite) TestDropColumn(c *C) { d.Stop() } +func (s *testColumnSuite) TestDropColumns(c *C) { + d := newDDL( + context.Background(), + WithStore(s.store), + WithLease(testLease), + ) + tblInfo := testTableInfo(c, d, "t2", 4) + ctx := testNewContext(d) + + err := ctx.NewTxn(context.Background()) + c.Assert(err, IsNil) + + testCreateTable(c, ctx, d, s.dbInfo, tblInfo) + t := testGetTable(c, d, s.dbInfo.ID, tblInfo.ID) + + colNames := []string{"c3", "c4"} + defaultColValue := int64(4) + row := types.MakeDatums(int64(1), int64(2), int64(3)) + _, err = t.AddRecord(ctx, append(row, types.NewDatum(defaultColValue))) + c.Assert(err, IsNil) + + txn, err := ctx.Txn(true) + c.Assert(err, IsNil) + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) + + checkOK := false + var hookErr error + var mu sync.Mutex + + tc := &TestDDLCallback{} + tc.onJobUpdated = func(job *model.Job) { + mu.Lock() + defer mu.Unlock() + if checkOK { + return + } + t, err1 := testGetTableWithError(d, s.dbInfo.ID, tblInfo.ID) + if err1 != nil { + hookErr = errors.Trace(err1) + return + } + for _, colName := range colNames { + + col := table.FindCol(t.(*tables.TableCommon).Columns, colName) + if col == nil { + checkOK = true + return + } + } + } + + 
d.SetHook(tc) + + job := testDropColumns(c, ctx, d, s.dbInfo, tblInfo, colNames, false) + testCheckJobDone(c, d, job, false) + mu.Lock() + hErr := hookErr + ok := checkOK + mu.Unlock() + c.Assert(hErr, IsNil) + c.Assert(ok, IsTrue) + + err = ctx.NewTxn(context.Background()) + c.Assert(err, IsNil) + + job = testDropTable(c, ctx, d, s.dbInfo, tblInfo) + testCheckJobDone(c, d, job, false) + + txn, err = ctx.Txn(true) + c.Assert(err, IsNil) + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) + + d.Stop() +} + func (s *testColumnSuite) TestModifyColumn(c *C) { d := newDDL( context.Background(), diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go index 287d265e0b7e6..be28bdd3ee8bd 100644 --- a/ddl/ddl_api.go +++ b/ddl/ddl_api.go @@ -2018,6 +2018,8 @@ func (d *ddl) AlterTable(ctx sessionctx.Context, ident ast.Ident, specs []*ast.A switch validSpecs[0].Tp { case ast.AlterTableAddColumns: err = d.AddColumns(ctx, ident, validSpecs) + case ast.AlterTableDropColumn: + err = d.DropColumns(ctx, ident, validSpecs) default: return errRunMultiSchemaChanges } @@ -2228,7 +2230,7 @@ func checkUnsupportedColumnConstraint(col *ast.ColumnDef, ti ast.Ident) error { return nil } -func checkNewColumn(ctx sessionctx.Context, ti ast.Ident, schema *model.DBInfo, spec *ast.AlterTableSpec, t table.Table, specNewColumn *ast.ColumnDef) (*table.Column, error) { +func checkAndCreateNewColumn(ctx sessionctx.Context, ti ast.Ident, schema *model.DBInfo, spec *ast.AlterTableSpec, t table.Table, specNewColumn *ast.ColumnDef) (*table.Column, error) { err := checkUnsupportedColumnConstraint(specNewColumn, ti) if err != nil { return nil, errors.Trace(err) @@ -2312,7 +2314,7 @@ func (d *ddl) AddColumn(ctx sessionctx.Context, ti ast.Ident, spec *ast.AlterTab return err } - col, err = checkNewColumn(ctx, ti, schema, spec, t, specNewColumn) + col, err = checkAndCreateNewColumn(ctx, ti, schema, spec, t, specNewColumn) if err != nil { return errors.Trace(err) } @@ -2348,12 +2350,12 @@ func (d *ddl) AddColumns(ctx sessionctx.Context, ti ast.Ident, specs []*ast.Alte // Check all the columns at once. newColumnsCount := 0 - set := make(map[string]bool) + addingColumnNames := make(map[string]bool) for _, spec := range specs { for _, specNewColumn := range spec.NewColumns { newColumnsCount++ - if !set[specNewColumn.Name.Name.O] { - set[specNewColumn.Name.Name.O] = true + if !addingColumnNames[specNewColumn.Name.Name.O] { + addingColumnNames[specNewColumn.Name.Name.O] = true } else { return errors.Trace(infoschema.ErrColumnExists.GenWithStackByArgs(specNewColumn.Name.Name.O)) } @@ -2373,12 +2375,12 @@ func (d *ddl) AddColumns(ctx sessionctx.Context, ti ast.Ident, specs []*ast.Alte err = infoschema.ErrColumnExists.GenWithStackByArgs(colName) if spec.IfNotExists { ctx.GetSessionVars().StmtCtx.AppendNote(err) - return nil + continue } return err } - col, err = checkNewColumn(ctx, ti, schema, spec, t, specNewColumn) + col, err = checkAndCreateNewColumn(ctx, ti, schema, spec, t, specNewColumn) if err != nil { return errors.Trace(err) } @@ -2633,6 +2635,55 @@ func (d *ddl) DropColumn(ctx sessionctx.Context, ti ast.Ident, spec *ast.AlterTa return errors.Trace(err) } +// DropColumns will drop multi-columns from the table, now we don't support drop the column with index covered. 
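// Each requested column must exist (unless IF EXISTS is given), must be droppable
// per isDroppableColumn, and must not be the PK handle column; otherwise the whole
// statement is rejected before any job is queued.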
+func (d *ddl) DropColumns(ctx sessionctx.Context, ti ast.Ident, specs []*ast.AlterTableSpec) error { + schema, t, err := d.getSchemaAndTableByIdent(ctx, ti) + if err != nil { + return errors.Trace(err) + } + tblInfo := t.Meta() + + var colNames []model.CIStr + for _, spec := range specs { + // Check whether dropped column has existed. + colName := spec.OldColumnName.Name + col := table.FindCol(t.VisibleCols(), colName.L) + if col == nil { + err = ErrCantDropFieldOrKey.GenWithStack("column %s doesn't exist", colName) + if spec.IfExists { + ctx.GetSessionVars().StmtCtx.AppendNote(err) + return nil + } + return err + } + + if err = isDroppableColumn(tblInfo, colName); err != nil { + return errors.Trace(err) + } + // We don't support dropping column with PK handle covered now. + if col.IsPKHandleColumn(tblInfo) { + return errUnsupportedPKHandle + } + colNames = append(colNames, colName) + } + + job := &model.Job{ + SchemaID: schema.ID, + TableID: t.Meta().ID, + SchemaName: schema.Name.L, + Type: model.ActionDropColumns, + BinlogInfo: &model.HistoryInfo{}, + Args: []interface{}{colNames}, + } + + err = d.doDDLJob(ctx, job) + if err != nil { + return nil + } + err = d.callHookOnChanged(err) + return errors.Trace(err) +} + // checkModifyCharsetAndCollation returns error when the charset or collation is not modifiable. // needRewriteCollationData is used when trying to modify the collation of a column, it is true when the column is with // index because index of a string column is collation-aware. diff --git a/ddl/ddl_worker.go b/ddl/ddl_worker.go index 68512aed0b9fd..55cefcaa7344a 100644 --- a/ddl/ddl_worker.go +++ b/ddl/ddl_worker.go @@ -593,6 +593,8 @@ func (w *worker) runDDLJob(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, ver, err = onAddColumns(d, t, job) case model.ActionDropColumn: ver, err = onDropColumn(t, job) + case model.ActionDropColumns: + ver, err = onDropColumns(t, job) case model.ActionModifyColumn: ver, err = w.onModifyColumn(t, job) case model.ActionSetDefaultValue: diff --git a/executor/ddl_test.go b/executor/ddl_test.go index 8d15fbfe988ad..f985ec2ac5d00 100644 --- a/executor/ddl_test.go +++ b/executor/ddl_test.go @@ -417,8 +417,6 @@ func (s *testSuite6) TestAlterTableAddColumns(c *C) { tk.MustExec("insert into alter_test values(1)") tk.MustExec("alter table alter_test add column c2 timestamp default current_timestamp, add column c8 varchar(50) default 'CURRENT_TIMESTAMP'") tk.MustExec("alter table alter_test add column (c7 timestamp default current_timestamp, c3 varchar(50) default 'CURRENT_TIMESTAMP')") - time.Sleep(1 * time.Millisecond) - now := time.Now().Add(-time.Duration(1 * time.Millisecond)).Format(types.TimeFormat) r, err := tk.Exec("select c2 from alter_test") c.Assert(err, IsNil) req := r.NewChunk() @@ -426,7 +424,6 @@ func (s *testSuite6) TestAlterTableAddColumns(c *C) { c.Assert(err, IsNil) row := req.GetRow(0) c.Assert(row.Len(), Equals, 1) - c.Assert(now, GreaterEqual, row.GetTime(0).String()) r.Close() tk.MustQuery("select c3 from alter_test").Check(testkit.Rows("CURRENT_TIMESTAMP")) tk.MustExec("create or replace view alter_view as select c1,c2 from alter_test") diff --git a/go.mod b/go.mod index 13f4ce71c9af9..ff9efee4373e9 100644 --- a/go.mod +++ b/go.mod @@ -37,7 +37,7 @@ require ( github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989 github.com/pingcap/kvproto v0.0.0-20200228095611-2cf9a243b8d5 github.com/pingcap/log v0.0.0-20200117041106-d28c14d3b1cd - github.com/pingcap/parser v0.0.0-20200321072941-b03eb7174165 + 
github.com/pingcap/parser v0.0.0-20200325032611-b7d1e7e1b93d github.com/pingcap/pd/v4 v4.0.0-beta.1.0.20200305072537-61d9f9cc35d3 github.com/pingcap/sysutil v0.0.0-20200309085538-962fd285f3bb github.com/pingcap/tidb-tools v4.0.0-beta.1.0.20200306084441-875bd09aa3d5+incompatible diff --git a/go.sum b/go.sum index 80055ff096674..bd9864f8a78e4 100644 --- a/go.sum +++ b/go.sum @@ -267,8 +267,8 @@ github.com/pingcap/kvproto v0.0.0-20200228095611-2cf9a243b8d5/go.mod h1:IOdRDPLy github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= github.com/pingcap/log v0.0.0-20200117041106-d28c14d3b1cd h1:CV3VsP3Z02MVtdpTMfEgRJ4T9NGgGTxdHpJerent7rM= github.com/pingcap/log v0.0.0-20200117041106-d28c14d3b1cd/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= -github.com/pingcap/parser v0.0.0-20200321072941-b03eb7174165 h1:8U66zIPckyZ7aTVXrFKlMczXBMvDjzmJRp+VBxICxsE= -github.com/pingcap/parser v0.0.0-20200321072941-b03eb7174165/go.mod h1:9v0Edh8IbgjGYW2ArJr19E+bvL8zKahsFp+ixWeId+4= +github.com/pingcap/parser v0.0.0-20200325032611-b7d1e7e1b93d h1:rTWwJg7aC5ct8QVTcFZoRLiJ2dtjFOUFRlIdK5481bk= +github.com/pingcap/parser v0.0.0-20200325032611-b7d1e7e1b93d/go.mod h1:9v0Edh8IbgjGYW2ArJr19E+bvL8zKahsFp+ixWeId+4= github.com/pingcap/pd/v4 v4.0.0-beta.1.0.20200305072537-61d9f9cc35d3 h1:Yrp99FnjHAEuDrSBql2l0IqCtJX7KwJbTsD5hIArkvk= github.com/pingcap/pd/v4 v4.0.0-beta.1.0.20200305072537-61d9f9cc35d3/go.mod h1:25GfNw6+Jcr9kca5rtmTb4gKCJ4jOpow2zV2S9Dgafs= github.com/pingcap/sysutil v0.0.0-20200206130906-2bfa6dc40bcd/go.mod h1:EB/852NMQ+aRKioCpToQ94Wl7fktV+FNnxf3CX/TTXI= From c3b1469e9e0788a0b4a576d32045dbc8b2765efb Mon Sep 17 00:00:00 2001 From: gauss Date: Wed, 25 Mar 2020 23:09:44 +0800 Subject: [PATCH 13/36] fix offset bug and drop columns bug --- ddl/column.go | 2 +- ddl/db_integration_test.go | 7 +++++++ ddl/ddl_api.go | 7 ++++++- ddl/ddl_test.go | 15 +++++++++++++++ 4 files changed, 29 insertions(+), 2 deletions(-) diff --git a/ddl/column.go b/ddl/column.go index 3c07869cc60e6..2c37391b1653e 100644 --- a/ddl/column.go +++ b/ddl/column.go @@ -341,7 +341,7 @@ func onAddColumns(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, err error // alter table t add column a1 int after a, add column b1 int after b, add column c1 int after c; if positions[i].Tp == ast.ColumnPositionAfter { for j := 0; j < i; j++ { - if positions[j].Tp == ast.ColumnPositionAfter && offsets[j] < offsets[i] { + if (positions[j].Tp == ast.ColumnPositionAfter && offsets[j] < offsets[i]) || positions[j].Tp == ast.ColumnPositionFirst { offsets[i]++ } } diff --git a/ddl/db_integration_test.go b/ddl/db_integration_test.go index a29bc726cd274..e5e53c632fe4a 100644 --- a/ddl/db_integration_test.go +++ b/ddl/db_integration_test.go @@ -479,6 +479,13 @@ func (s *testIntegrationSuite5) TestErrnoErrorCode(c *C) { tk.MustExec("create table test_drop_column (c1 int );") sql = "alter table test_drop_column drop column c1;" tk.MustGetErrCode(sql, errno.ErrCantRemoveAllFields) + // drop columns + sql = "alter table test_error_code_succ drop c_not_exist, drop cc_not_exist" + tk.MustGetErrCode(sql, errno.ErrCantDropFieldOrKey) + tk.MustExec("create table test_drop_columns (c1 int);") + tk.MustExec("alter table test_drop_columns add column c2 int first, add column c3 int after c1") + sql = "alter table test_drop_columns drop column c1, drop column c2, drop column c3;" + tk.MustGetErrCode(sql, errno.ErrCantRemoveAllFields) // add index sql = "alter table test_error_code_succ add index idx (c_not_exist)" 
tk.MustGetErrCode(sql, errno.ErrKeyColumnDoesNotExits) diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go index be28bdd3ee8bd..b51588723e8ae 100644 --- a/ddl/ddl_api.go +++ b/ddl/ddl_api.go @@ -2643,6 +2643,11 @@ func (d *ddl) DropColumns(ctx sessionctx.Context, ti ast.Ident, specs []*ast.Alt } tblInfo := t.Meta() + if len(tblInfo.Columns) == len(specs) { + return ErrCantRemoveAllFields.GenWithStack("can't drop all columns in table %s", + tblInfo.Name) + } + var colNames []model.CIStr for _, spec := range specs { // Check whether dropped column has existed. @@ -2652,7 +2657,7 @@ func (d *ddl) DropColumns(ctx sessionctx.Context, ti ast.Ident, specs []*ast.Alt err = ErrCantDropFieldOrKey.GenWithStack("column %s doesn't exist", colName) if spec.IfExists { ctx.GetSessionVars().StmtCtx.AppendNote(err) - return nil + continue } return err } diff --git a/ddl/ddl_test.go b/ddl/ddl_test.go index 64e67bcfb8065..1e519aa505e6e 100644 --- a/ddl/ddl_test.go +++ b/ddl/ddl_test.go @@ -239,6 +239,21 @@ func testAddColumn(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, t return job } +func testAddColumns(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, args []interface{}) *model.Job { + job := &model.Job{ + SchemaID: dbInfo.ID, + TableID: tblInfo.ID, + Type: model.ActionAddColumns, + Args: args, + BinlogInfo: &model.HistoryInfo{}, + } + err := d.doDDLJob(ctx, job) + c.Assert(err, IsNil) + v := getSchemaVer(c, ctx) + checkHistoryJobArgs(c, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo}) + return job +} + func buildDropIdxJob(dbInfo *model.DBInfo, tblInfo *model.TableInfo, indexName string) *model.Job { tp := model.ActionDropIndex if indexName == "primary" { From d5cfbbae7db952249fa11540e2b69cc0843e0d3d Mon Sep 17 00:00:00 2001 From: gauss Date: Wed, 25 Mar 2020 23:46:49 +0800 Subject: [PATCH 14/36] add rollingback test --- ddl/ddl_worker_test.go | 96 ++++++++++++++++++++++++++++++++---------- ddl/rollingback.go | 49 +++++++++++++++++++++ 2 files changed, 122 insertions(+), 23 deletions(-) diff --git a/ddl/ddl_worker_test.go b/ddl/ddl_worker_test.go index f16715877dda4..cf3897398955f 100644 --- a/ddl/ddl_worker_test.go +++ b/ddl/ddl_worker_test.go @@ -27,6 +27,7 @@ import ( "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/meta" "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/admin" "github.com/pingcap/tidb/util/mock" @@ -458,6 +459,11 @@ func buildCancelJobTests(firstID int64) []testCancelJob { {act: model.ActionAddPrimaryKey, jobIDs: []int64{firstID + 35}, cancelRetErrs: []error{admin.ErrCancelFinishedDDLJob.GenWithStackByArgs(firstID + 35)}, cancelState: model.StatePublic}, {act: model.ActionDropPrimaryKey, jobIDs: []int64{firstID + 36}, cancelRetErrs: noErrs, cancelState: model.StateWriteOnly}, {act: model.ActionDropPrimaryKey, jobIDs: []int64{firstID + 37}, cancelRetErrs: []error{admin.ErrCannotCancelDDLJob.GenWithStackByArgs(firstID + 37)}, cancelState: model.StateDeleteOnly}, + + {act: model.ActionAddColumns, jobIDs: []int64{firstID + 38}, cancelRetErrs: noErrs, cancelState: model.StateDeleteOnly}, + {act: model.ActionAddColumns, jobIDs: []int64{firstID + 39}, cancelRetErrs: noErrs, cancelState: model.StateWriteOnly}, + {act: model.ActionAddColumns, jobIDs: []int64{firstID + 40}, cancelRetErrs: noErrs, cancelState: model.StateWriteReorganization}, + {act: model.ActionAddColumns, jobIDs: []int64{firstID + 41}, cancelRetErrs: 
[]error{admin.ErrCancelFinishedDDLJob.GenWithStackByArgs(firstID + 41)}, cancelState: model.StatePublic}, } return tests @@ -483,28 +489,32 @@ func checkIdxExist(c *C, d *ddl, schemaID int64, tableID int64, idxName string, c.Assert(found, Equals, expectedExist) } -func (s *testDDLSuite) checkAddColumn(c *C, d *ddl, schemaID int64, tableID int64, colName string, success bool) { +func (s *testDDLSuite) checkAddColumns(c *C, d *ddl, schemaID int64, tableID int64, colNames []string, success bool) { changedTable := testGetTable(c, d, schemaID, tableID) var found bool - for _, colInfo := range changedTable.Meta().Columns { - if colInfo.Name.O == colName { - found = true - break + for _, colName := range colNames { + for _, colInfo := range changedTable.Meta().Columns { + if colInfo.Name.O == colName { + found = true + break + } } + c.Assert(found, Equals, success) } - c.Assert(found, Equals, success) } -func (s *testDDLSuite) checkCancelDropColumn(c *C, d *ddl, schemaID int64, tableID int64, colName string, success bool) { +func (s *testDDLSuite) checkCancelDropColumns(c *C, d *ddl, schemaID int64, tableID int64, colNames []string, success bool) { changedTable := testGetTable(c, d, schemaID, tableID) - notFound := true - for _, colInfo := range changedTable.Meta().Columns { - if colInfo.Name.O == colName { - notFound = false - break + for _, colName := range colNames { + notFound := true + for _, colInfo := range changedTable.Meta().Columns { + if colInfo.Name.O == colName { + notFound = false + break + } } + c.Assert(notFound, Equals, success) } - c.Assert(notFound, Equals, success) } func (s *testDDLSuite) TestCancelJob(c *C) { @@ -643,22 +653,22 @@ func (s *testDDLSuite) TestCancelJob(c *C) { addColumnArgs := []interface{}{col, &ast.ColumnPosition{Tp: ast.ColumnPositionNone}, 0} doDDLJobErrWithSchemaState(ctx, d, c, dbInfo.ID, tblInfo.ID, model.ActionAddColumn, addColumnArgs, &cancelState) c.Check(errors.ErrorStack(checkErr), Equals, "") - s.checkAddColumn(c, d, dbInfo.ID, tblInfo.ID, addingColName, false) + s.checkAddColumns(c, d, dbInfo.ID, tblInfo.ID, []string{addingColName}, false) updateTest(&tests[5]) doDDLJobErrWithSchemaState(ctx, d, c, dbInfo.ID, tblInfo.ID, model.ActionAddColumn, addColumnArgs, &cancelState) c.Check(errors.ErrorStack(checkErr), Equals, "") - s.checkAddColumn(c, d, dbInfo.ID, tblInfo.ID, addingColName, false) + s.checkAddColumns(c, d, dbInfo.ID, tblInfo.ID, []string{addingColName}, false) updateTest(&tests[6]) doDDLJobErrWithSchemaState(ctx, d, c, dbInfo.ID, tblInfo.ID, model.ActionAddColumn, addColumnArgs, &cancelState) c.Check(errors.ErrorStack(checkErr), Equals, "") - s.checkAddColumn(c, d, dbInfo.ID, tblInfo.ID, addingColName, false) + s.checkAddColumns(c, d, dbInfo.ID, tblInfo.ID, []string{addingColName}, false) updateTest(&tests[7]) testAddColumn(c, ctx, d, dbInfo, tblInfo, addColumnArgs) c.Check(errors.ErrorStack(checkErr), Equals, "") - s.checkAddColumn(c, d, dbInfo.ID, tblInfo.ID, addingColName, true) + s.checkAddColumns(c, d, dbInfo.ID, tblInfo.ID, []string{addingColName}, true) // for create table tblInfo1 := testTableInfo(c, d, "t1", 2) @@ -677,24 +687,24 @@ func (s *testDDLSuite) TestCancelJob(c *C) { // for drop column. 
updateTest(&tests[10]) dropColName := "c3" - s.checkCancelDropColumn(c, d, dbInfo.ID, tblInfo.ID, dropColName, false) + s.checkCancelDropColumns(c, d, dbInfo.ID, tblInfo.ID, []string{dropColName}, false) testDropColumn(c, ctx, d, dbInfo, tblInfo, dropColName, false) c.Check(errors.ErrorStack(checkErr), Equals, "") - s.checkCancelDropColumn(c, d, dbInfo.ID, tblInfo.ID, dropColName, true) + s.checkCancelDropColumns(c, d, dbInfo.ID, tblInfo.ID, []string{dropColName}, true) updateTest(&tests[11]) dropColName = "c4" - s.checkCancelDropColumn(c, d, dbInfo.ID, tblInfo.ID, dropColName, false) + s.checkCancelDropColumns(c, d, dbInfo.ID, tblInfo.ID, []string{dropColName}, false) testDropColumn(c, ctx, d, dbInfo, tblInfo, dropColName, false) c.Check(errors.ErrorStack(checkErr), Equals, "") - s.checkCancelDropColumn(c, d, dbInfo.ID, tblInfo.ID, dropColName, true) + s.checkCancelDropColumns(c, d, dbInfo.ID, tblInfo.ID, []string{dropColName}, true) updateTest(&tests[12]) dropColName = "c5" - s.checkCancelDropColumn(c, d, dbInfo.ID, tblInfo.ID, dropColName, false) + s.checkCancelDropColumns(c, d, dbInfo.ID, tblInfo.ID, []string{dropColName}, false) testDropColumn(c, ctx, d, dbInfo, tblInfo, dropColName, false) c.Check(errors.ErrorStack(checkErr), Equals, "") - s.checkCancelDropColumn(c, d, dbInfo.ID, tblInfo.ID, dropColName, true) + s.checkCancelDropColumns(c, d, dbInfo.ID, tblInfo.ID, []string{dropColName}, true) // cancel rebase auto id updateTest(&tests[13]) @@ -858,6 +868,46 @@ func (s *testDDLSuite) TestCancelJob(c *C) { testDropIndex(c, ctx, d, dbInfo, tblInfo, idxOrigName) c.Check(errors.ErrorStack(checkErr), Equals, "") s.checkDropIdx(c, d, dbInfo.ID, tblInfo.ID, idxOrigName, true) + + // for add columns + updateTest(&tests[34]) + addingColNames := []string{"colA", "colB"} + var cols []*table.Column + for _, addingColName := range addingColNames { + newColumnDef := &ast.ColumnDef{ + Name: &ast.ColumnName{Name: model.NewCIStr(addingColName)}, + Tp: &types.FieldType{Tp: mysql.TypeLonglong}, + Options: []*ast.ColumnOption{}, + } + col, _, err := buildColumnAndConstraint(ctx, 0, newColumnDef, nil, mysql.DefaultCharset, "", mysql.DefaultCharset, "") + c.Assert(err, IsNil) + cols = append(cols, col) + } + offsets := make([]int, len(cols)) + positions := make([]*ast.ColumnPosition, len(cols)) + for i := range positions { + positions[i] = &ast.ColumnPosition{Tp: ast.ColumnPositionNone} + } + + addColumnArgs = []interface{}{cols, positions, offsets} + doDDLJobErrWithSchemaState(ctx, d, c, dbInfo.ID, tblInfo.ID, model.ActionAddColumns, addColumnArgs, &cancelState) + c.Check(errors.ErrorStack(checkErr), Equals, "") + s.checkAddColumns(c, d, dbInfo.ID, tblInfo.ID, addingColNames, false) + + updateTest(&tests[35]) + doDDLJobErrWithSchemaState(ctx, d, c, dbInfo.ID, tblInfo.ID, model.ActionAddColumns, addColumnArgs, &cancelState) + c.Check(errors.ErrorStack(checkErr), Equals, "") + s.checkAddColumns(c, d, dbInfo.ID, tblInfo.ID, addingColNames, false) + + updateTest(&tests[36]) + doDDLJobErrWithSchemaState(ctx, d, c, dbInfo.ID, tblInfo.ID, model.ActionAddColumns, addColumnArgs, &cancelState) + c.Check(errors.ErrorStack(checkErr), Equals, "") + s.checkAddColumns(c, d, dbInfo.ID, tblInfo.ID, addingColNames, false) + + updateTest(&tests[37]) + testAddColumns(c, ctx, d, dbInfo, tblInfo, addColumnArgs) + c.Check(errors.ErrorStack(checkErr), Equals, "") + s.checkAddColumns(c, d, dbInfo.ID, tblInfo.ID, addingColNames, true) } func (s *testDDLSuite) TestIgnorableSpec(c *C) { diff --git a/ddl/rollingback.go 
b/ddl/rollingback.go index a977b9c043167..2ca087d12c534 100644 --- a/ddl/rollingback.go +++ b/ddl/rollingback.go @@ -126,6 +126,33 @@ func rollingbackAddColumn(t *meta.Meta, job *model.Job) (ver int64, err error) { return ver, errCancelledDDLJob } +func rollingbackAddColumns(t *meta.Meta, job *model.Job) (ver int64, err error) { + job.State = model.JobStateRollingback + tblInfo, columnInfos, _, _, err := checkAddAndCreateColumnInfos(t, job) + if err != nil { + return ver, errors.Trace(err) + } + + var colNames []model.CIStr + originalState := columnInfos[0].State + for i, columnInfo := range columnInfos { + if columnInfo == nil { + job.State = model.JobStateCancelled + return ver, errCancelledDDLJob + } + columnInfos[i].State = model.StateDeleteOnly + colNames = append(colNames, columnInfo.Name) + } + + job.SchemaState = model.StateDeleteOnly + job.Args = []interface{}{colNames} + ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != columnInfos[0].State) + if err != nil { + return ver, errors.Trace(err) + } + return ver, errCancelledDDLJob +} + func rollingbackDropColumn(t *meta.Meta, job *model.Job) (ver int64, err error) { tblInfo, colInfo, err := checkDropColumn(t, job) if err != nil { @@ -144,6 +171,24 @@ func rollingbackDropColumn(t *meta.Meta, job *model.Job) (ver int64, err error) return ver, nil } +func rollingbackDropColumns(t *meta.Meta, job *model.Job) (ver int64, err error) { + tblInfo, colInfos, _, err := checkDropColumns(t, job) + if err != nil { + return ver, errors.Trace(err) + } + + // StatePublic means when the job is not running yet. + if colInfos[0].State == model.StatePublic { + job.State = model.JobStateCancelled + job.FinishTableJob(model.JobStateRollbackDone, model.StatePublic, ver, tblInfo) + return ver, errCancelledDDLJob + } + // In the state of drop columns `write only -> delete only -> reorganization`, + // We can not rollback now, so just continue to drop columns. 
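	// Setting the state back to JobStateRunning lets the worker finish the drop
	// instead of treating the job as cancelled.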
+ job.State = model.JobStateRunning + return ver, nil +} + func rollingbackDropIndex(t *meta.Meta, job *model.Job) (ver int64, err error) { tblInfo, indexInfo, err := checkDropIndex(t, job) if err != nil { @@ -273,6 +318,8 @@ func convertJob2RollbackJob(w *worker, d *ddlCtx, t *meta.Meta, job *model.Job) switch job.Type { case model.ActionAddColumn: ver, err = rollingbackAddColumn(t, job) + case model.ActionAddColumns: + ver, err = rollingbackAddColumns(t, job) case model.ActionAddIndex: ver, err = rollingbackAddIndex(w, d, t, job, false) case model.ActionAddPrimaryKey: @@ -281,6 +328,8 @@ func convertJob2RollbackJob(w *worker, d *ddlCtx, t *meta.Meta, job *model.Job) ver, err = rollingbackAddTablePartition(t, job) case model.ActionDropColumn: ver, err = rollingbackDropColumn(t, job) + case model.ActionDropColumns: + ver, err = rollingbackDropColumns(t, job) case model.ActionDropIndex, model.ActionDropPrimaryKey: ver, err = rollingbackDropIndex(t, job) case model.ActionDropTable, model.ActionDropView, model.ActionDropSequence: From 3b0afad62f531ac21e22d29431f2dde3fee60166 Mon Sep 17 00:00:00 2001 From: gauss Date: Thu, 26 Mar 2020 11:35:06 +0800 Subject: [PATCH 15/36] add more test case --- ddl/db_test.go | 97 +++++++++++++++++++++++++++++++++++++++- ddl/ddl_api.go | 3 -- ddl/ddl_worker.go | 2 +- ddl/ddl_worker_test.go | 24 ++++++++-- util/admin/admin.go | 4 +- util/admin/admin_test.go | 1 + 6 files changed, 120 insertions(+), 11 deletions(-) diff --git a/ddl/db_test.go b/ddl/db_test.go index 6c70e1b400928..01122ad769724 100644 --- a/ddl/db_test.go +++ b/ddl/db_test.go @@ -1383,6 +1383,99 @@ func (s *testDBSuite3) TestCancelDropColumn(c *C) { s.mustExec(c, "alter table test_drop_column drop column c3") } +// TestCancelDropColumns tests cancel ddl job which type is drop multi-columns. 
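// Only the first case, which has not actually started (JobStateNone/StateNone),
// can be cancelled; once the columns have left the public state the cancel request
// fails with ErrCannotCancelDDLJob and the drop still completes.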
+func (s *testDBSuite3) TestCancelDropColumns(c *C) { + s.tk = testkit.NewTestKit(c, s.store) + s.tk.MustExec("use " + s.schemaName) + s.mustExec(c, "drop table if exists test_drop_column") + s.mustExec(c, "create table test_drop_column(c1 int, c2 int)") + defer s.mustExec(c, "drop table test_drop_column;") + testCases := []struct { + needAddColumn bool + jobState model.JobState + JobSchemaState model.SchemaState + cancelSucc bool + }{ + {true, model.JobStateNone, model.StateNone, true}, + {false, model.JobStateRunning, model.StateWriteOnly, false}, + {true, model.JobStateRunning, model.StateDeleteOnly, false}, + {true, model.JobStateRunning, model.StateDeleteReorganization, false}, + } + var checkErr error + hook := &ddl.TestDDLCallback{} + var jobID int64 + testCase := &testCases[0] + hook.OnJobRunBeforeExported = func(job *model.Job) { + if job.Type == model.ActionDropColumns && job.State == testCase.jobState && job.SchemaState == testCase.JobSchemaState { + jobIDs := []int64{job.ID} + jobID = job.ID + hookCtx := mock.NewContext() + hookCtx.Store = s.store + err := hookCtx.NewTxn(context.TODO()) + if err != nil { + checkErr = errors.Trace(err) + return + } + txn, err := hookCtx.Txn(true) + if err != nil { + checkErr = errors.Trace(err) + return + } + errs, err := admin.CancelJobs(txn, jobIDs) + if err != nil { + checkErr = errors.Trace(err) + return + } + if errs[0] != nil { + checkErr = errors.Trace(errs[0]) + return + } + checkErr = txn.Commit(context.Background()) + } + } + + originalHook := s.dom.DDL().GetHook() + s.dom.DDL().(ddl.DDLForTest).SetHook(hook) + var err1 error + for i := range testCases { + testCase = &testCases[i] + if testCase.needAddColumn { + s.mustExec(c, "alter table test_drop_column add column c3 int, add column c4 int") + } + _, err1 = s.tk.Exec("alter table test_drop_column drop column c3, drop column c4") + var col3 *table.Column + var col4 *table.Column + t := s.testGetTable(c, "test_drop_column") + for _, col := range t.Cols() { + if strings.EqualFold(col.Name.L, "c3") { + col3 = col + continue + } + if strings.EqualFold(col.Name.L, "c4") { + col4 = col + continue + } + } + if testCase.cancelSucc { + c.Assert(checkErr, IsNil) + c.Assert(col3, NotNil) + c.Assert(col4, NotNil) + c.Assert(col3.Name.L, Equals, "c3") + c.Assert(col4.Name.L, Equals, "c4") + c.Assert(err1.Error(), Equals, "[ddl:8214]Cancelled DDL job") + } else { + c.Assert(col3, IsNil) + c.Assert(col4, IsNil) + c.Assert(err1, IsNil) + c.Assert(checkErr, NotNil) + c.Assert(checkErr.Error(), Equals, admin.ErrCannotCancelDDLJob.GenWithStackByArgs(jobID).Error()) + } + } + s.dom.DDL().(ddl.DDLForTest).SetHook(originalHook) + s.mustExec(c, "alter table test_drop_column add column c3 int, add column c4 int") + s.mustExec(c, "alter table test_drop_column drop column c3, drop column c4") +} + func checkDelRangeDone(c *C, ctx sessionctx.Context, idx table.Index) { startTime := time.Now() f := func() map[int64]struct{} { @@ -1969,7 +2062,9 @@ func (s *testDBSuite4) TestCreateTableWithLike2(c *C) { hook := &ddl.TestDDLCallback{} var onceChecker sync.Map hook.OnJobRunBeforeExported = func(job *model.Job) { - if job.Type != model.ActionAddColumn && job.Type != model.ActionDropColumn && job.Type != model.ActionAddIndex && job.Type != model.ActionDropIndex { + if job.Type != model.ActionAddColumn && job.Type != model.ActionDropColumn && + job.Type != model.ActionAddColumns && job.Type != model.ActionDropColumns && + job.Type != model.ActionAddIndex && job.Type != model.ActionDropIndex { return } if job.TableID 
!= tbl1.Meta().ID { diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go index b51588723e8ae..e086c5c7d9100 100644 --- a/ddl/ddl_api.go +++ b/ddl/ddl_api.go @@ -2682,9 +2682,6 @@ func (d *ddl) DropColumns(ctx sessionctx.Context, ti ast.Ident, specs []*ast.Alt } err = d.doDDLJob(ctx, job) - if err != nil { - return nil - } err = d.callHookOnChanged(err) return errors.Trace(err) } diff --git a/ddl/ddl_worker.go b/ddl/ddl_worker.go index 55cefcaa7344a..5eae07dc7812b 100644 --- a/ddl/ddl_worker.go +++ b/ddl/ddl_worker.go @@ -516,7 +516,7 @@ func writeBinlog(binlogCli *pumpcli.PumpsClient, txn kv.Transaction, job *model. // When this column is in the "delete only" and "delete reorg" states, the binlog of "drop column" has not been written yet, // but the column has been removed from the binlog of the write operation. // So we add this binlog to enable downstream components to handle DML correctly in this schema state. - (job.Type == model.ActionDropColumn && job.SchemaState == model.StateDeleteOnly) { + ((job.Type == model.ActionDropColumn || job.Type == model.ActionDropColumns) && job.SchemaState == model.StateDeleteOnly) { binloginfo.SetDDLBinlog(binlogCli, txn, job.ID, int32(job.SchemaState), job.Query) } } diff --git a/ddl/ddl_worker_test.go b/ddl/ddl_worker_test.go index cf3897398955f..b65e1af6ca45f 100644 --- a/ddl/ddl_worker_test.go +++ b/ddl/ddl_worker_test.go @@ -314,6 +314,12 @@ func (s *testDDLSuite) TestColumnError(c *C) { doDDLJobErr(c, dbInfo.ID, -1, model.ActionAddColumns, []interface{}{cols, positions, 0}, ctx, d) doDDLJobErr(c, dbInfo.ID, tblInfo.ID, model.ActionAddColumns, []interface{}{0}, ctx, d) doDDLJobErr(c, dbInfo.ID, tblInfo.ID, model.ActionAddColumns, []interface{}{cols, positions, 0}, ctx, d) + + // for dropping columns + doDDLJobErr(c, -1, tblInfo.ID, model.ActionDropColumns, []interface{}{col, pos, 0}, ctx, d) + doDDLJobErr(c, dbInfo.ID, -1, model.ActionDropColumns, []interface{}{col, pos, 0}, ctx, d) + doDDLJobErr(c, dbInfo.ID, tblInfo.ID, model.ActionDropColumns, []interface{}{0}, ctx, d) + doDDLJobErr(c, dbInfo.ID, tblInfo.ID, model.ActionDropColumns, []interface{}{[]model.CIStr{model.NewCIStr("c5"), model.NewCIStr("c6")}}, ctx, d) } func testCheckOwner(c *C, d *ddl, expectedVal bool) { @@ -464,6 +470,8 @@ func buildCancelJobTests(firstID int64) []testCancelJob { {act: model.ActionAddColumns, jobIDs: []int64{firstID + 39}, cancelRetErrs: noErrs, cancelState: model.StateWriteOnly}, {act: model.ActionAddColumns, jobIDs: []int64{firstID + 40}, cancelRetErrs: noErrs, cancelState: model.StateWriteReorganization}, {act: model.ActionAddColumns, jobIDs: []int64{firstID + 41}, cancelRetErrs: []error{admin.ErrCancelFinishedDDLJob.GenWithStackByArgs(firstID + 41)}, cancelState: model.StatePublic}, + + {act: model.ActionDropColumns, jobIDs: []int64{firstID + 42}, cancelRetErrs: []error{admin.ErrCannotCancelDDLJob.GenWithStackByArgs(firstID + 42)}, cancelState: model.StateDeleteOnly}, } return tests @@ -532,8 +540,8 @@ func (s *testDDLSuite) TestCancelJob(c *C) { partitionTblInfo := testTableInfoWithPartition(c, d, "t_partition", 5) // Skip using sessPool. Make sure adding primary key can be successful. 
partitionTblInfo.Columns[0].Flag |= mysql.NotNullFlag - // create table t (c1 int, c2 int, c3 int, c4 int, c5 int); - tblInfo := testTableInfo(c, d, "t", 5) + // create table t (c1 int, c2 int, c3 int, c4 int, c5 int, c6 int, c7 int, c8 int); + tblInfo := testTableInfo(c, d, "t", 8) ctx := testNewContext(d) err := ctx.NewTxn(context.Background()) c.Assert(err, IsNil) @@ -543,9 +551,9 @@ func (s *testDDLSuite) TestCancelJob(c *C) { tblInfo.AutoIncID = tableAutoID tblInfo.ShardRowIDBits = shardRowIDBits job := testCreateTable(c, ctx, d, dbInfo, tblInfo) - // insert t values (1, 2, 3, 4, 5); + // insert t values (1, 2, 3, 4, 5, 6, 7, 8); originTable := testGetTable(c, d, dbInfo.ID, tblInfo.ID) - row := types.MakeDatums(1, 2, 3, 4, 5) + row := types.MakeDatums(1, 2, 3, 4, 5, 6, 7, 8) _, err = originTable.AddRecord(ctx, row) c.Assert(err, IsNil) txn, err := ctx.Txn(true) @@ -908,6 +916,14 @@ func (s *testDDLSuite) TestCancelJob(c *C) { testAddColumns(c, ctx, d, dbInfo, tblInfo, addColumnArgs) c.Check(errors.ErrorStack(checkErr), Equals, "") s.checkAddColumns(c, d, dbInfo.ID, tblInfo.ID, addingColNames, true) + + // for drop columns + updateTest(&tests[38]) + dropColNames := []string{"c3", "c4"} + s.checkCancelDropColumns(c, d, dbInfo.ID, tblInfo.ID, dropColNames, false) + testDropColumns(c, ctx, d, dbInfo, tblInfo, dropColNames, false) + c.Check(errors.ErrorStack(checkErr), Equals, "") + s.checkCancelDropColumns(c, d, dbInfo.ID, tblInfo.ID, dropColNames, true) } func (s *testDDLSuite) TestIgnorableSpec(c *C) { diff --git a/util/admin/admin.go b/util/admin/admin.go index 2bb1e7dd44261..025298f143829 100644 --- a/util/admin/admin.go +++ b/util/admin/admin.go @@ -34,7 +34,7 @@ import ( "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util" "github.com/pingcap/tidb/util/logutil" - "github.com/pingcap/tidb/util/rowDecoder" + decoder "github.com/pingcap/tidb/util/rowDecoder" "github.com/pingcap/tidb/util/sqlexec" "go.uber.org/zap" ) @@ -104,7 +104,7 @@ func IsJobRollbackable(job *model.Job) bool { job.SchemaState == model.StateDeleteOnly { return false } - case model.ActionDropColumn, model.ActionModifyColumn, + case model.ActionDropColumn, model.ActionDropColumns, model.ActionModifyColumn, model.ActionDropTablePartition, model.ActionAddTablePartition, model.ActionRebaseAutoID, model.ActionShardRowID, model.ActionTruncateTable, model.ActionAddForeignKey, diff --git a/util/admin/admin_test.go b/util/admin/admin_test.go index d8d18bbcb757b..2a78e0a013715 100644 --- a/util/admin/admin_test.go +++ b/util/admin/admin_test.go @@ -336,6 +336,7 @@ func (s *testSuite) TestIsJobRollbackable(c *C) { {model.ActionDropIndex, model.StateDeleteOnly, false}, {model.ActionDropSchema, model.StateDeleteOnly, false}, {model.ActionDropColumn, model.StateDeleteOnly, false}, + {model.ActionDropColumns, model.StateDeleteOnly, false}, } job := &model.Job{} for _, ca := range cases { From d6cb32887dac223b760df3102aedfb8a45330c6f Mon Sep 17 00:00:00 2001 From: gauss Date: Thu, 26 Mar 2020 13:32:06 +0800 Subject: [PATCH 16/36] polish test code --- ddl/db_integration_test.go | 2 ++ ddl/ddl_worker_test.go | 40 ++++++++++++++++++++++++++++---------- 2 files changed, 32 insertions(+), 10 deletions(-) diff --git a/ddl/db_integration_test.go b/ddl/db_integration_test.go index e5e53c632fe4a..c006227f9d341 100644 --- a/ddl/db_integration_test.go +++ b/ddl/db_integration_test.go @@ -486,6 +486,8 @@ func (s *testIntegrationSuite5) TestErrnoErrorCode(c *C) { tk.MustExec("alter table test_drop_columns add column c2 int first, add 
column c3 int after c1") sql = "alter table test_drop_columns drop column c1, drop column c2, drop column c3;" tk.MustGetErrCode(sql, errno.ErrCantRemoveAllFields) + sql = "alter table test_drop_columns drop column c1, add column c2 int;" + tk.MustGetErrCode(sql, errno.ErrUnsupportedDDLOperation) // add index sql = "alter table test_error_code_succ add index idx (c_not_exist)" tk.MustGetErrCode(sql, errno.ErrKeyColumnDoesNotExits) diff --git a/ddl/ddl_worker_test.go b/ddl/ddl_worker_test.go index b65e1af6ca45f..a05171ff01e23 100644 --- a/ddl/ddl_worker_test.go +++ b/ddl/ddl_worker_test.go @@ -472,6 +472,8 @@ func buildCancelJobTests(firstID int64) []testCancelJob { {act: model.ActionAddColumns, jobIDs: []int64{firstID + 41}, cancelRetErrs: []error{admin.ErrCancelFinishedDDLJob.GenWithStackByArgs(firstID + 41)}, cancelState: model.StatePublic}, {act: model.ActionDropColumns, jobIDs: []int64{firstID + 42}, cancelRetErrs: []error{admin.ErrCannotCancelDDLJob.GenWithStackByArgs(firstID + 42)}, cancelState: model.StateDeleteOnly}, + {act: model.ActionDropColumns, jobIDs: []int64{firstID + 43}, cancelRetErrs: []error{admin.ErrCannotCancelDDLJob.GenWithStackByArgs(firstID + 43)}, cancelState: model.StateWriteOnly}, + {act: model.ActionDropColumns, jobIDs: []int64{firstID + 44}, cancelRetErrs: []error{admin.ErrCannotCancelDDLJob.GenWithStackByArgs(firstID + 44)}, cancelState: model.StateWriteReorganization}, } return tests @@ -513,16 +515,20 @@ func (s *testDDLSuite) checkAddColumns(c *C, d *ddl, schemaID int64, tableID int func (s *testDDLSuite) checkCancelDropColumns(c *C, d *ddl, schemaID int64, tableID int64, colNames []string, success bool) { changedTable := testGetTable(c, d, schemaID, tableID) + notFound := checkColumnsNotFound(changedTable, colNames) + c.Assert(notFound, Equals, success) +} + +func checkColumnsNotFound(t table.Table, colNames []string) bool { + notFound := true for _, colName := range colNames { - notFound := true - for _, colInfo := range changedTable.Meta().Columns { + for _, colInfo := range t.Meta().Columns { if colInfo.Name.O == colName { notFound = false - break } } - c.Assert(notFound, Equals, success) } + return notFound } func (s *testDDLSuite) TestCancelJob(c *C) { @@ -540,8 +546,8 @@ func (s *testDDLSuite) TestCancelJob(c *C) { partitionTblInfo := testTableInfoWithPartition(c, d, "t_partition", 5) // Skip using sessPool. Make sure adding primary key can be successful. 
partitionTblInfo.Columns[0].Flag |= mysql.NotNullFlag - // create table t (c1 int, c2 int, c3 int, c4 int, c5 int, c6 int, c7 int, c8 int); - tblInfo := testTableInfo(c, d, "t", 8) + // create table t (c1 int, c2 int, c3 int, c4 int, c5 int); + tblInfo := testTableInfo(c, d, "t", 5) ctx := testNewContext(d) err := ctx.NewTxn(context.Background()) c.Assert(err, IsNil) @@ -551,9 +557,9 @@ func (s *testDDLSuite) TestCancelJob(c *C) { tblInfo.AutoIncID = tableAutoID tblInfo.ShardRowIDBits = shardRowIDBits job := testCreateTable(c, ctx, d, dbInfo, tblInfo) - // insert t values (1, 2, 3, 4, 5, 6, 7, 8); + // insert t values (1, 2, 3, 4, 5); originTable := testGetTable(c, d, dbInfo.ID, tblInfo.ID) - row := types.MakeDatums(1, 2, 3, 4, 5, 6, 7, 8) + row := types.MakeDatums(1, 2, 3, 4, 5) _, err = originTable.AddRecord(ctx, row) c.Assert(err, IsNil) txn, err := ctx.Txn(true) @@ -879,7 +885,7 @@ func (s *testDDLSuite) TestCancelJob(c *C) { // for add columns updateTest(&tests[34]) - addingColNames := []string{"colA", "colB"} + addingColNames := []string{"colA", "colB", "colC", "colD", "colE", "colF"} var cols []*table.Column for _, addingColName := range addingColNames { newColumnDef := &ast.ColumnDef{ @@ -919,7 +925,21 @@ func (s *testDDLSuite) TestCancelJob(c *C) { // for drop columns updateTest(&tests[38]) - dropColNames := []string{"c3", "c4"} + dropColNames := []string{"colA", "colB"} + s.checkCancelDropColumns(c, d, dbInfo.ID, tblInfo.ID, dropColNames, false) + testDropColumns(c, ctx, d, dbInfo, tblInfo, dropColNames, false) + c.Check(errors.ErrorStack(checkErr), Equals, "") + s.checkCancelDropColumns(c, d, dbInfo.ID, tblInfo.ID, dropColNames, true) + + updateTest(&tests[39]) + dropColNames = []string{"colC", "colD"} + s.checkCancelDropColumns(c, d, dbInfo.ID, tblInfo.ID, dropColNames, false) + testDropColumns(c, ctx, d, dbInfo, tblInfo, dropColNames, false) + c.Check(errors.ErrorStack(checkErr), Equals, "") + s.checkCancelDropColumns(c, d, dbInfo.ID, tblInfo.ID, dropColNames, true) + + updateTest(&tests[40]) + dropColNames = []string{"colE", "colF"} s.checkCancelDropColumns(c, d, dbInfo.ID, tblInfo.ID, dropColNames, false) testDropColumns(c, ctx, d, dbInfo, tblInfo, dropColNames, false) c.Check(errors.ErrorStack(checkErr), Equals, "") From 1e594591fe71d6100f7050559d595f1328c1976b Mon Sep 17 00:00:00 2001 From: gauss Date: Thu, 26 Mar 2020 14:19:42 +0800 Subject: [PATCH 17/36] refactor insertColStats2KV --- statistics/handle/ddl.go | 42 +++++++++++++++++------------------ statistics/handle/ddl_test.go | 11 +++++++++ 2 files changed, 32 insertions(+), 21 deletions(-) diff --git a/statistics/handle/ddl.go b/statistics/handle/ddl.go index 86459a25a13de..acc6f80685a34 100644 --- a/statistics/handle/ddl.go +++ b/statistics/handle/ddl.go @@ -39,17 +39,15 @@ func (h *Handle) HandleDDLEvent(t *util.Event) error { case model.ActionAddColumn: ids := getPhysicalIDs(t.TableInfo) for _, id := range ids { - if err := h.insertColStats2KV(id, t.ColumnInfos[0]); err != nil { + if err := h.insertColStats2KV(id, []*model.ColumnInfo{t.ColumnInfos[0]}); err != nil { return err } } case model.ActionAddColumns: ids := getPhysicalIDs(t.TableInfo) for _, id := range ids { - for _, columnInfo := range t.ColumnInfos { - if err := h.insertColStats2KV(id, columnInfo); err != nil { - return err - } + if err := h.insertColStats2KV(id, t.ColumnInfos); err != nil { + return err } } case model.ActionAddTablePartition, model.ActionTruncateTablePartition: @@ -110,7 +108,7 @@ func (h *Handle) insertTableStats2KV(info 
*model.TableInfo, physicalID int64) (e // insertColStats2KV insert a record to stats_histograms with distinct_count 1 and insert a bucket to stats_buckets with default value. // This operation also updates version. -func (h *Handle) insertColStats2KV(physicalID int64, colInfo *model.ColumnInfo) (err error) { +func (h *Handle) insertColStats2KV(physicalID int64, colInfos []*model.ColumnInfo) (err error) { h.mu.Lock() defer h.mu.Unlock() @@ -150,24 +148,26 @@ func (h *Handle) insertColStats2KV(physicalID int64, colInfo *model.ColumnInfo) return } count := req.GetRow(0).GetInt64(0) - value := types.NewDatum(colInfo.OriginDefaultValue) - value, err = value.ConvertTo(h.mu.ctx.GetSessionVars().StmtCtx, &colInfo.FieldType) - if err != nil { - return - } - sqls := make([]string, 0, 1) - if value.IsNull() { - // If the adding column has default value null, all the existing rows have null value on the newly added column. - sqls = append(sqls, fmt.Sprintf("insert into mysql.stats_histograms (version, table_id, is_index, hist_id, distinct_count, null_count) values (%d, %d, 0, %d, 0, %d)", startTS, physicalID, colInfo.ID, count)) - } else { - // If this stats exists, we insert histogram meta first, the distinct_count will always be one. - sqls = append(sqls, fmt.Sprintf("insert into mysql.stats_histograms (version, table_id, is_index, hist_id, distinct_count, tot_col_size) values (%d, %d, 0, %d, 1, %d)", startTS, physicalID, colInfo.ID, int64(len(value.GetBytes()))*count)) - value, err = value.ConvertTo(h.mu.ctx.GetSessionVars().StmtCtx, types.NewFieldType(mysql.TypeBlob)) + sqls := make([]string, 0, len(colInfos)) + for _, colInfo := range colInfos { + value := types.NewDatum(colInfo.OriginDefaultValue) + value, err = value.ConvertTo(h.mu.ctx.GetSessionVars().StmtCtx, &colInfo.FieldType) if err != nil { return } - // There must be only one bucket for this new column and the value is the default value. - sqls = append(sqls, fmt.Sprintf("insert into mysql.stats_buckets (table_id, is_index, hist_id, bucket_id, repeats, count, lower_bound, upper_bound) values (%d, 0, %d, 0, %d, %d, X'%X', X'%X')", physicalID, colInfo.ID, count, count, value.GetBytes(), value.GetBytes())) + if value.IsNull() { + // If the adding column has default value null, all the existing rows have null value on the newly added column. + sqls = append(sqls, fmt.Sprintf("insert into mysql.stats_histograms (version, table_id, is_index, hist_id, distinct_count, null_count) values (%d, %d, 0, %d, 0, %d)", startTS, physicalID, colInfo.ID, count)) + } else { + // If this stats exists, we insert histogram meta first, the distinct_count will always be one. + sqls = append(sqls, fmt.Sprintf("insert into mysql.stats_histograms (version, table_id, is_index, hist_id, distinct_count, tot_col_size) values (%d, %d, 0, %d, 1, %d)", startTS, physicalID, colInfo.ID, int64(len(value.GetBytes()))*count)) + value, err = value.ConvertTo(h.mu.ctx.GetSessionVars().StmtCtx, types.NewFieldType(mysql.TypeBlob)) + if err != nil { + return + } + // There must be only one bucket for this new column and the value is the default value. 
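			// Every existing row carries the same default value, so count and repeats
			// both equal the current row count and the bucket bounds collapse to that
			// single value.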
+ sqls = append(sqls, fmt.Sprintf("insert into mysql.stats_buckets (table_id, is_index, hist_id, bucket_id, repeats, count, lower_bound, upper_bound) values (%d, 0, %d, 0, %d, %d, X'%X', X'%X')", physicalID, colInfo.ID, count, count, value.GetBytes(), value.GetBytes())) + } } return execSQLs(context.Background(), exec, sqls) } diff --git a/statistics/handle/ddl_test.go b/statistics/handle/ddl_test.go index bc94b1d8bdf38..c385e850e3df7 100644 --- a/statistics/handle/ddl_test.go +++ b/statistics/handle/ddl_test.go @@ -162,6 +162,17 @@ func (s *testStatsSuite) TestDDLHistogram(c *C) { c.Assert(statsTbl.Pseudo, IsFalse) c.Check(statsTbl.Columns[tableInfo.Columns[5].ID].AvgColSize(statsTbl.Count, false), Equals, 3.0) + testKit.MustExec("alter table t add column c6 varchar(15) DEFAULT '123', add column c7 varchar(15) DEFAULT '123'") + err = h.HandleDDLEvent(<-h.DDLEventCh()) + c.Assert(err, IsNil) + is = do.InfoSchema() + c.Assert(h.Update(is), IsNil) + tbl, err = is.TableByName(model.NewCIStr("test"), model.NewCIStr("t")) + c.Assert(err, IsNil) + tableInfo = tbl.Meta() + statsTbl = do.StatsHandle().GetTableStats(tableInfo) + c.Assert(statsTbl.Pseudo, IsFalse) + testKit.MustExec("create index i on t(c2, c1)") testKit.MustExec("analyze table t") rs := testKit.MustQuery("select count(*) from mysql.stats_histograms where table_id = ? and hist_id = 1 and is_index =1", tableInfo.ID) From 5ee862b9ab52abceb8a50d6e17496f1c4fabd4bb Mon Sep 17 00:00:00 2001 From: gauss Date: Thu, 26 Mar 2020 18:23:57 +0800 Subject: [PATCH 18/36] remove useless if --- ddl/util/event.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/ddl/util/event.go b/ddl/util/event.go index 2966931c1eef2..4003ab010db98 100644 --- a/ddl/util/event.go +++ b/ddl/util/event.go @@ -42,9 +42,7 @@ func (e *Event) String() string { ret += fmt.Sprintf(", Partition IDs: %v", ids) } for _, columnInfo := range e.ColumnInfos { - if columnInfo != nil { - ret += fmt.Sprintf(", Column ID: %d, Column Name %s", columnInfo.ID, columnInfo.Name) - } + ret += fmt.Sprintf(", Column ID: %d, Column Name %s", columnInfo.ID, columnInfo.Name) } if e.IndexInfo != nil { ret += fmt.Sprintf(", Index ID: %d, Index Name %s", e.IndexInfo.ID, e.IndexInfo.Name) From c963f42e60cab1cf52bfe1c603347acde6ee3894 Mon Sep 17 00:00:00 2001 From: gauss Date: Thu, 26 Mar 2020 18:27:48 +0800 Subject: [PATCH 19/36] fix HandleDDLEvent bug --- statistics/handle/ddl.go | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/statistics/handle/ddl.go b/statistics/handle/ddl.go index acc6f80685a34..744291596015c 100644 --- a/statistics/handle/ddl.go +++ b/statistics/handle/ddl.go @@ -36,14 +36,7 @@ func (h *Handle) HandleDDLEvent(t *util.Event) error { return err } } - case model.ActionAddColumn: - ids := getPhysicalIDs(t.TableInfo) - for _, id := range ids { - if err := h.insertColStats2KV(id, []*model.ColumnInfo{t.ColumnInfos[0]}); err != nil { - return err - } - } - case model.ActionAddColumns: + case model.ActionAddColumn, model.ActionAddColumns: ids := getPhysicalIDs(t.TableInfo) for _, id := range ids { if err := h.insertColStats2KV(id, t.ColumnInfos); err != nil { From 86b8deaa25b3d3330e8a96dd779c3b7150f89d21 Mon Sep 17 00:00:00 2001 From: gauss Date: Thu, 26 Mar 2020 18:57:12 +0800 Subject: [PATCH 20/36] add comments and clean code --- ddl/column.go | 63 ++++++++++++++++++++++++--------------------------- 1 file changed, 30 insertions(+), 33 deletions(-) diff --git a/ddl/column.go b/ddl/column.go index 2c37391b1653e..29f2a8ae40d1e 100644 --- 
a/ddl/column.go +++ b/ddl/column.go @@ -240,6 +240,7 @@ func checkAddAndCreateColumnInfos(t *meta.Meta, job *model.Job) (*model.TableInf positions := []*ast.ColumnPosition{} offsets := []int{} err = job.DecodeArgs(&columns, &positions, &offsets) + // Should initialize positions[i] when it is nil, otherwise the createColumnInfo function will panic. for i := range positions { if positions[i] == nil { positions[i] = &ast.ColumnPosition{} @@ -335,10 +336,11 @@ func onAddColumns(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, err error for i := range offsets { tmpCols := oldCols[:len(oldCols)-len(offsets)+i+1] tblInfo.Columns = tmpCols - // For multiple after positions. + // For multiple columns with after position, should adjust offsets. // e.g. create table t(a int); // alter table t add column b int after a, add column c int after a; // alter table t add column a1 int after a, add column b1 int after b, add column c1 int after c; + // alter table t add column a1 int after a, add column b1 int first; if positions[i].Tp == ast.ColumnPositionAfter { for j := 0; j < i; j++ { if (positions[j].Tp == ast.ColumnPositionAfter && offsets[j] < offsets[i]) || positions[j].Tp == ast.ColumnPositionFirst { @@ -377,22 +379,9 @@ func onDropColumns(t *meta.Meta, job *model.Job) (ver int64, _ error) { job.SchemaState = model.StateWriteOnly setColumnsState(colInfos, model.StateWriteOnly) for _, colInfo := range colInfos { - // Set this column's offset to the last and reset all following columns' offsets. - adjustColumnInfoInDropColumn(tblInfo, colInfo.Offset) - // When the dropping column has not-null flag and it hasn't the default value, we can backfill the column value like "add column". - // NOTE: If the state of StateWriteOnly can be rollbacked, we'd better reconsider the original default value. - // And we need consider the column without not-null flag. - if colInfo.OriginDefaultValue == nil && mysql.HasNotNullFlag(colInfo.Flag) { - // If the column is timestamp default current_timestamp, and DDL owner is new version TiDB that set column.Version to 1, - // then old TiDB update record in the column write only stage will uses the wrong default value of the dropping column. - // Because new version of the column default value is UTC time, but old version TiDB will think the default value is the time in system timezone. - // But currently will be ok, because we can't cancel the drop column job when the job is running, - // so the column will be dropped succeed and client will never see the wrong default value of the dropped column. - // More info about this problem, see PR#9115. - colInfo.OriginDefaultValue, err = generateOriginDefaultValue(colInfo) - if err != nil { - return ver, errors.Trace(err) - } + err = checkDropColumnForStatePublic(tblInfo, colInfo) + if err != nil { + return ver, errors.Trace(err) } } ver, err = updateVersionAndTableInfoWithCheck(t, job, tblInfo, originalState != colInfos[0].State) @@ -458,6 +447,27 @@ func checkDropColumns(t *meta.Meta, job *model.Job) (*model.TableInfo, []*model. return tblInfo, colInfos, len(colNames), nil } +func checkDropColumnForStatePublic(tblInfo *model.TableInfo, colInfo *model.ColumnInfo) (err error) { + // Set this column's offset to the last and reset all following columns' offsets. + adjustColumnInfoInDropColumn(tblInfo, colInfo.Offset) + // When the dropping column has not-null flag and it hasn't the default value, we can backfill the column value like "add column". 
+ // NOTE: If the state of StateWriteOnly can be rollbacked, we'd better reconsider the original default value. + // And we need consider the column without not-null flag. + if colInfo.OriginDefaultValue == nil && mysql.HasNotNullFlag(colInfo.Flag) { + // If the column is timestamp default current_timestamp, and DDL owner is new version TiDB that set column.Version to 1, + // then old TiDB update record in the column write only stage will uses the wrong default value of the dropping column. + // Because new version of the column default value is UTC time, but old version TiDB will think the default value is the time in system timezone. + // But currently will be ok, because we can't cancel the drop column job when the job is running, + // so the column will be dropped succeed and client will never see the wrong default value of the dropped column. + // More info about this problem, see PR#9115. + colInfo.OriginDefaultValue, err = generateOriginDefaultValue(colInfo) + if err != nil { + return err + } + } + return nil +} + func onDropColumn(t *meta.Meta, job *model.Job) (ver int64, _ error) { tblInfo, colInfo, err := checkDropColumn(t, job) if err != nil { @@ -470,22 +480,9 @@ func onDropColumn(t *meta.Meta, job *model.Job) (ver int64, _ error) { // public -> write only job.SchemaState = model.StateWriteOnly colInfo.State = model.StateWriteOnly - // Set this column's offset to the last and reset all following columns' offsets. - adjustColumnInfoInDropColumn(tblInfo, colInfo.Offset) - // When the dropping column has not-null flag and it hasn't the default value, we can backfill the column value like "add column". - // NOTE: If the state of StateWriteOnly can be rollbacked, we'd better reconsider the original default value. - // And we need consider the column without not-null flag. - if colInfo.OriginDefaultValue == nil && mysql.HasNotNullFlag(colInfo.Flag) { - // If the column is timestamp default current_timestamp, and DDL owner is new version TiDB that set column.Version to 1, - // then old TiDB update record in the column write only stage will uses the wrong default value of the dropping column. - // Because new version of the column default value is UTC time, but old version TiDB will think the default value is the time in system timezone. - // But currently will be ok, because we can't cancel the drop column job when the job is running, - // so the column will be dropped succeed and client will never see the wrong default value of the dropped column. - // More info about this problem, see PR#9115. 
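// The comment block above explains why a NOT NULL column without a recorded
// default needs a generated "origin default" while it is being dropped. A
// rough standalone sketch of that decision (the column struct and the
// timestamp format are simplified stand-ins, not model.ColumnInfo or the
// patch's generateOriginDefaultValue, which covers many more types):

package main

import (
	"fmt"
	"time"
)

// column keeps only the fields the backfill decision needs.
type column struct {
	name          string
	notNull       bool
	originDefault interface{} // nil means no original default was recorded
	isTimestamp   bool
}

// backfillDefault generates an origin default for a NOT NULL column that has
// none, so writers that still see the dropping column can fill it; all other
// columns are left untouched.
func backfillDefault(c *column) {
	if c.originDefault != nil || !c.notNull {
		return
	}
	if c.isTimestamp {
		// Generated in UTC, matching the behaviour described above.
		c.originDefault = time.Now().UTC().Format("2006-01-02 15:04:05")
		return
	}
	c.originDefault = 0 // the type's zero value stands in for the real default
}

func main() {
	c := &column{name: "c1", notNull: true}
	backfillDefault(c)
	fmt.Println(c.name, "->", c.originDefault)
}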
- colInfo.OriginDefaultValue, err = generateOriginDefaultValue(colInfo) - if err != nil { - return ver, errors.Trace(err) - } + err = checkDropColumnForStatePublic(tblInfo, colInfo) + if err != nil { + return ver, errors.Trace(err) } ver, err = updateVersionAndTableInfoWithCheck(t, job, tblInfo, originalState != colInfo.State) case model.StateWriteOnly: From 538878ce87fdb7f79c4426043755c08ddb8da357 Mon Sep 17 00:00:00 2001 From: gauss Date: Thu, 26 Mar 2020 22:32:12 +0800 Subject: [PATCH 21/36] fix bug --- ddl/column.go | 8 ++--- ddl/db_integration_test.go | 2 ++ ddl/ddl_api.go | 69 ++++++++++++++++++++------------------ ddl/ddl_worker_test.go | 12 ++----- 4 files changed, 45 insertions(+), 46 deletions(-) diff --git a/ddl/column.go b/ddl/column.go index 29f2a8ae40d1e..6851a4b337c7d 100644 --- a/ddl/column.go +++ b/ddl/column.go @@ -240,16 +240,16 @@ func checkAddAndCreateColumnInfos(t *meta.Meta, job *model.Job) (*model.TableInf positions := []*ast.ColumnPosition{} offsets := []int{} err = job.DecodeArgs(&columns, &positions, &offsets) + if err != nil { + job.State = model.JobStateCancelled + return nil, nil, nil, nil, errors.Trace(err) + } // Should initialize positions[i] when it is nil, otherwise the createColumnInfo function will panic. for i := range positions { if positions[i] == nil { positions[i] = &ast.ColumnPosition{} } } - if err != nil { - job.State = model.JobStateCancelled - return nil, nil, nil, nil, errors.Trace(err) - } var columnInfos []*model.ColumnInfo diff --git a/ddl/db_integration_test.go b/ddl/db_integration_test.go index c006227f9d341..7a8e56fb0f619 100644 --- a/ddl/db_integration_test.go +++ b/ddl/db_integration_test.go @@ -488,6 +488,8 @@ func (s *testIntegrationSuite5) TestErrnoErrorCode(c *C) { tk.MustGetErrCode(sql, errno.ErrCantRemoveAllFields) sql = "alter table test_drop_columns drop column c1, add column c2 int;" tk.MustGetErrCode(sql, errno.ErrUnsupportedDDLOperation) + sql = "alter table test_drop_columns drop column c1, drop column c1;" + tk.MustGetErrCode(sql, errno.ErrDupFieldName) // add index sql = "alter table test_error_code_succ add index idx (c_not_exist)" tk.MustGetErrCode(sql, errno.ErrKeyColumnDoesNotExits) diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go index e086c5c7d9100..f98d394a0e0f6 100644 --- a/ddl/ddl_api.go +++ b/ddl/ddl_api.go @@ -2237,6 +2237,16 @@ func checkAndCreateNewColumn(ctx sessionctx.Context, ti ast.Ident, schema *model } colName := specNewColumn.Name.Name.O + // Check whether added column has existed. + col := table.FindCol(t.Cols(), colName) + if col != nil { + err = infoschema.ErrColumnExists.GenWithStackByArgs(colName) + if spec.IfNotExists { + ctx.GetSessionVars().StmtCtx.AppendNote(err) + return nil, nil + } + return nil, err + } if err = checkColumnAttributes(colName, specNewColumn.Tp); err != nil { return nil, errors.Trace(err) } @@ -2280,7 +2290,7 @@ func checkAndCreateNewColumn(ctx sessionctx.Context, ti ast.Ident, schema *model // Ignore table constraints now, maybe return error later. // We use length(t.Cols()) as the default offset firstly, we will change the // column's offset later. 
- col, _, err := buildColumnAndConstraint(ctx, len(t.Cols()), specNewColumn, nil, t.Meta().Charset, t.Meta().Collate, schema.Charset, schema.Collate) + col, _, err = buildColumnAndConstraint(ctx, len(t.Cols()), specNewColumn, nil, t.Meta().Charset, t.Meta().Collate, schema.Charset, schema.Collate) if err != nil { return nil, errors.Trace(err) } @@ -2302,22 +2312,14 @@ func (d *ddl) AddColumn(ctx sessionctx.Context, ti ast.Ident, spec *ast.AlterTab if err = checkAddColumnTooManyColumns(len(t.Cols()) + 1); err != nil { return errors.Trace(err) } - colName := specNewColumn.Name.Name.O - // Check whether added column has existed. - col := table.FindCol(t.Cols(), colName) - if col != nil { - err = infoschema.ErrColumnExists.GenWithStackByArgs(colName) - if spec.IfNotExists { - ctx.GetSessionVars().StmtCtx.AppendNote(err) - return nil - } - return err - } - - col, err = checkAndCreateNewColumn(ctx, ti, schema, spec, t, specNewColumn) + col, err := checkAndCreateNewColumn(ctx, ti, schema, spec, t, specNewColumn) if err != nil { return errors.Trace(err) } + // Added column has existed and if_not_exists flag is true. + if col == nil { + return nil + } job := &model.Job{ SchemaID: schema.ID, @@ -2344,9 +2346,6 @@ func (d *ddl) AddColumns(ctx sessionctx.Context, ti ast.Ident, specs []*ast.Alte if err != nil { return errors.Trace(err) } - var columns []*table.Column - var positions []*ast.ColumnPosition - var offsets []int // Check all the columns at once. newColumnsCount := 0 @@ -2354,8 +2353,8 @@ func (d *ddl) AddColumns(ctx sessionctx.Context, ti ast.Ident, specs []*ast.Alte for _, spec := range specs { for _, specNewColumn := range spec.NewColumns { newColumnsCount++ - if !addingColumnNames[specNewColumn.Name.Name.O] { - addingColumnNames[specNewColumn.Name.Name.O] = true + if !addingColumnNames[specNewColumn.Name.Name.L] { + addingColumnNames[specNewColumn.Name.Name.L] = true } else { return errors.Trace(infoschema.ErrColumnExists.GenWithStackByArgs(specNewColumn.Name.Name.O)) } @@ -2364,26 +2363,21 @@ func (d *ddl) AddColumns(ctx sessionctx.Context, ti ast.Ident, specs []*ast.Alte if err = checkAddColumnTooManyColumns(len(t.Cols()) + newColumnsCount); err != nil { return errors.Trace(err) } + columns := make([]*table.Column, 0) + positions := make([]*ast.ColumnPosition, 0) + offsets := make([]int, 0) // Check the columns one by one. for _, spec := range specs { for _, specNewColumn := range spec.NewColumns { - colName := specNewColumn.Name.Name.O - // Check whether added column has existed. - col := table.FindCol(t.Cols(), colName) - if col != nil { - err = infoschema.ErrColumnExists.GenWithStackByArgs(colName) - if spec.IfNotExists { - ctx.GetSessionVars().StmtCtx.AppendNote(err) - continue - } - return err - } - - col, err = checkAndCreateNewColumn(ctx, ti, schema, spec, t, specNewColumn) + col, err := checkAndCreateNewColumn(ctx, ti, schema, spec, t, specNewColumn) if err != nil { return errors.Trace(err) } + // Added column has existed and if_not_exists flag is true. 
+ if col == nil { + break + } columns = append(columns, col) positions = append(positions, spec.Position) offsets = append(offsets, 0) @@ -2643,6 +2637,14 @@ func (d *ddl) DropColumns(ctx sessionctx.Context, ti ast.Ident, specs []*ast.Alt } tblInfo := t.Meta() + dropingColumnNames := make(map[string]bool) + for _, spec := range specs { + if !dropingColumnNames[spec.OldColumnName.Name.L] { + dropingColumnNames[spec.OldColumnName.Name.L] = true + } else { + return errors.Trace(infoschema.ErrColumnExists.GenWithStackByArgs(spec.OldColumnName.Name.O)) + } + } if len(tblInfo.Columns) == len(specs) { return ErrCantRemoveAllFields.GenWithStack("can't drop all columns in table %s", tblInfo.Name) @@ -2682,6 +2684,9 @@ func (d *ddl) DropColumns(ctx sessionctx.Context, ti ast.Ident, specs []*ast.Alt } err = d.doDDLJob(ctx, job) + if err != nil { + return errors.Trace(err) + } err = d.callHookOnChanged(err) return errors.Trace(err) } diff --git a/ddl/ddl_worker_test.go b/ddl/ddl_worker_test.go index a05171ff01e23..9c9a13e2d7a5b 100644 --- a/ddl/ddl_worker_test.go +++ b/ddl/ddl_worker_test.go @@ -501,16 +501,8 @@ func checkIdxExist(c *C, d *ddl, schemaID int64, tableID int64, idxName string, func (s *testDDLSuite) checkAddColumns(c *C, d *ddl, schemaID int64, tableID int64, colNames []string, success bool) { changedTable := testGetTable(c, d, schemaID, tableID) - var found bool - for _, colName := range colNames { - for _, colInfo := range changedTable.Meta().Columns { - if colInfo.Name.O == colName { - found = true - break - } - } - c.Assert(found, Equals, success) - } + found := !checkColumnsNotFound(changedTable, colNames) + c.Assert(found, Equals, success) } func (s *testDDLSuite) checkCancelDropColumns(c *C, d *ddl, schemaID int64, tableID int64, colNames []string, success bool) { From c9b8dd02f287efd3d8a98915bee5e98691141f49 Mon Sep 17 00:00:00 2001 From: gauss Date: Thu, 26 Mar 2020 22:51:17 +0800 Subject: [PATCH 22/36] polish code --- ddl/ddl_api.go | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go index f98d394a0e0f6..e5383cad42265 100644 --- a/ddl/ddl_api.go +++ b/ddl/ddl_api.go @@ -1993,13 +1993,12 @@ func resolveAlterTableSpec(ctx sessionctx.Context, specs []*ast.AlterTableSpec) func isSameTypeMultiSpecs(specs []*ast.AlterTableSpec) bool { specType := specs[0].Tp - var count int = 0 for _, spec := range specs { - if spec.Tp == specType { - count++ + if spec.Tp != specType { + return false } } - return len(specs) == count + return true } func (d *ddl) AlterTable(ctx sessionctx.Context, ident ast.Ident, specs []*ast.AlterTableSpec) (err error) { @@ -2363,10 +2362,10 @@ func (d *ddl) AddColumns(ctx sessionctx.Context, ti ast.Ident, specs []*ast.Alte if err = checkAddColumnTooManyColumns(len(t.Cols()) + newColumnsCount); err != nil { return errors.Trace(err) } - columns := make([]*table.Column, 0) - positions := make([]*ast.ColumnPosition, 0) - offsets := make([]int, 0) - + columns := make([]*table.Column, newColumnsCount) + positions := make([]*ast.ColumnPosition, newColumnsCount) + offsets := make([]int, newColumnsCount) + index := 0 // Check the columns one by one. 
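// The duplicate-name checks above key their maps on the lower-cased name
// (Name.L) rather than the original spelling (Name.O), since MySQL column
// names are case-insensitive. A minimal standalone version of that check,
// assuming plain strings instead of model.CIStr:

package main

import (
	"fmt"
	"strings"
)

// firstDuplicate reports the first column name that repeats within one
// ALTER TABLE statement, comparing names case-insensitively.
func firstDuplicate(names []string) (string, bool) {
	seen := make(map[string]bool, len(names))
	for _, name := range names {
		key := strings.ToLower(name)
		if seen[key] {
			return name, true
		}
		seen[key] = true
	}
	return "", false
}

func main() {
	// "alter table t add column A int, add column a int" must be rejected.
	fmt.Println(firstDuplicate([]string{"A", "a"}))
	fmt.Println(firstDuplicate([]string{"a", "b"}))
}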
for _, spec := range specs { for _, specNewColumn := range spec.NewColumns { @@ -2378,9 +2377,10 @@ func (d *ddl) AddColumns(ctx sessionctx.Context, ti ast.Ident, specs []*ast.Alte if col == nil { break } - columns = append(columns, col) - positions = append(positions, spec.Position) - offsets = append(offsets, 0) + columns[index] = col + positions[index] = spec.Position + offsets[index] = 0 + index++ } } From c2bf0b22b7f3b9259e25830d0e41fd0cc1237fe4 Mon Sep 17 00:00:00 2001 From: gauss1314 Date: Thu, 26 Mar 2020 23:16:54 +0800 Subject: [PATCH 23/36] Update ddl/ddl_api.go Co-Authored-By: tangenta --- ddl/ddl_api.go | 1 - 1 file changed, 1 deletion(-) diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go index e5383cad42265..4fdd0ef14f1bb 100644 --- a/ddl/ddl_api.go +++ b/ddl/ddl_api.go @@ -2027,7 +2027,6 @@ func (d *ddl) AlterTable(ctx sessionctx.Context, ident ast.Ident, specs []*ast.A } return nil } - // Now we only allow one schema changing at the same time(except adding multi-columns). return errRunMultiSchemaChanges } From b744c867697ebf470fb24a1c97715c93207e4b3e Mon Sep 17 00:00:00 2001 From: gauss Date: Fri, 27 Mar 2020 10:29:11 +0800 Subject: [PATCH 24/36] add checkIsDroppableColumn function --- ddl/ddl_api.go | 67 +++++++++++++++++++++++++------------------------- 1 file changed, 33 insertions(+), 34 deletions(-) diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go index 4fdd0ef14f1bb..c335942cf4850 100644 --- a/ddl/ddl_api.go +++ b/ddl/ddl_api.go @@ -2588,26 +2588,14 @@ func (d *ddl) DropColumn(ctx sessionctx.Context, ti ast.Ident, spec *ast.AlterTa return errors.Trace(err) } - // Check whether dropped column has existed. - colName := spec.OldColumnName.Name - col := table.FindCol(t.VisibleCols(), colName.L) - if col == nil { - err = ErrCantDropFieldOrKey.GenWithStack("column %s doesn't exist", colName) - if spec.IfExists { - ctx.GetSessionVars().StmtCtx.AppendNote(err) - return nil - } + isDropable, err := checkIsDroppableColumn(ctx, t, spec) + if err != nil { return err } - - tblInfo := t.Meta() - if err = isDroppableColumn(tblInfo, colName); err != nil { - return errors.Trace(err) - } - // We don't support dropping column with PK handle covered now. - if col.IsPKHandleColumn(tblInfo) { - return errUnsupportedPKHandle + if !isDropable { + return nil } + colName := spec.OldColumnName.Name job := &model.Job{ SchemaID: schema.ID, @@ -2651,26 +2639,13 @@ func (d *ddl) DropColumns(ctx sessionctx.Context, ti ast.Ident, specs []*ast.Alt var colNames []model.CIStr for _, spec := range specs { - // Check whether dropped column has existed. - colName := spec.OldColumnName.Name - col := table.FindCol(t.VisibleCols(), colName.L) - if col == nil { - err = ErrCantDropFieldOrKey.GenWithStack("column %s doesn't exist", colName) - if spec.IfExists { - ctx.GetSessionVars().StmtCtx.AppendNote(err) - continue - } + isDropable, err := checkIsDroppableColumn(ctx, t, spec) + if err != nil { return err } - - if err = isDroppableColumn(tblInfo, colName); err != nil { - return errors.Trace(err) - } - // We don't support dropping column with PK handle covered now. 
- if col.IsPKHandleColumn(tblInfo) { - return errUnsupportedPKHandle + if isDropable { + colNames = append(colNames, spec.OldColumnName.Name) } - colNames = append(colNames, colName) } job := &model.Job{ @@ -2690,6 +2665,30 @@ func (d *ddl) DropColumns(ctx sessionctx.Context, ti ast.Ident, specs []*ast.Alt return errors.Trace(err) } +func checkIsDroppableColumn(ctx sessionctx.Context, t table.Table, spec *ast.AlterTableSpec) (isDrapable bool, err error) { + tblInfo := t.Meta() + // Check whether dropped column has existed. + colName := spec.OldColumnName.Name + col := table.FindCol(t.VisibleCols(), colName.L) + if col == nil { + err = ErrCantDropFieldOrKey.GenWithStack("column %s doesn't exist", colName) + if spec.IfExists { + ctx.GetSessionVars().StmtCtx.AppendNote(err) + return false, nil + } + return false, err + } + + if err = isDroppableColumn(tblInfo, colName); err != nil { + return false, errors.Trace(err) + } + // We don't support dropping column with PK handle covered now. + if col.IsPKHandleColumn(tblInfo) { + return false, errUnsupportedPKHandle + } + return true, nil +} + // checkModifyCharsetAndCollation returns error when the charset or collation is not modifiable. // needRewriteCollationData is used when trying to modify the collation of a column, it is true when the column is with // index because index of a string column is collation-aware. From 0f6c25a9bdc10fee2c73ea20af80bbf30b7ec12f Mon Sep 17 00:00:00 2001 From: gauss Date: Fri, 27 Mar 2020 15:57:21 +0800 Subject: [PATCH 25/36] support if_not_exists and if_exists, add integration test cases --- ddl/db_integration_test.go | 42 ++++++++++++++++++++++- ddl/ddl_api.go | 70 ++++++++++++++++++++++++++------------ 2 files changed, 89 insertions(+), 23 deletions(-) diff --git a/ddl/db_integration_test.go b/ddl/db_integration_test.go index 7a8e56fb0f619..3a3dd8eb0d877 100644 --- a/ddl/db_integration_test.go +++ b/ddl/db_integration_test.go @@ -357,6 +357,46 @@ func (s *testIntegrationSuite1) TestIssue4432(c *C) { tk.MustExec("drop table tx") } +func (s *testIntegrationSuite1) TestIssue5092(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + + tk.MustExec("create table t_issue_5092 (a int)") + tk.MustExec("alter table t_issue_5092 add column (b int, c int)") + tk.MustExec("alter table t_issue_5092 add column if not exists (b int, c int)") + tk.MustExec("alter table t_issue_5092 add column b1 int after b, add column c1 int after c") + // The following two statements are consistent with MariaDB. 
+ tk.MustGetErrCode("alter table t_issue_5092 add column if not exists d int, add column d int", errno.ErrDupFieldName) + tk.MustExec("alter table t_issue_5092 add column d int, add column if not exists d int") + tk.MustExec("alter table t_issue_5092 add column if not exists (d int, e int), add column f int") + tk.MustExec("alter table t_issue_5092 add column b2 int after b1, add column c2 int first") + tk.MustExec("drop table t_issue_5092") + + tk.MustExec("create table t_issue_5092 (a int default 1)") + tk.MustExec("alter table t_issue_5092 add column (b int default 2, c int default 3)") + tk.MustExec("alter table t_issue_5092 add column b1 int default 22 after b, add column c1 int default 33 after c") + tk.MustExec("insert into t_issue_5092 value ()") + tk.MustQuery("select * from t_issue_5092").Check(testkit.Rows("1 2 22 3 33")) + tk.MustExec("alter table t_issue_5092 add column d int default 4 after c1, add column aa int default 0 first") + tk.MustQuery("select * from t_issue_5092").Check(testkit.Rows("0 1 2 22 3 33 4")) + tk.MustExec("drop table t_issue_5092") + + tk.MustExec("create table t_issue_5092 (a int)") + tk.MustExec("alter table t_issue_5092 add column (b int, c int)") + tk.MustExec("alter table t_issue_5092 drop column b,drop column c") + tk.MustGetErrCode("alter table t_issue_5092 drop column c, drop column c", errno.ErrCantDropFieldOrKey) + tk.MustExec("alter table t_issue_5092 drop column if exists b,drop column if exists c") + tk.MustGetErrCode("alter table t_issue_5092 drop column g, drop column d", errno.ErrCantDropFieldOrKey) + tk.MustExec("drop table t_issue_5092") + + tk.MustExec("create table t_issue_5092 (a int)") + tk.MustExec("alter table t_issue_5092 add column (b int, c int)") + tk.MustGetErrCode("alter table t_issue_5092 drop column if exists a, drop column b, drop column c", errno.ErrCantRemoveAllFields) + tk.MustGetErrCode("alter table t_issue_5092 drop column if exists c, drop column c", errno.ErrCantDropFieldOrKey) + tk.MustExec("alter table t_issue_5092 drop column c, drop column if exists c") + tk.MustExec("drop table t_issue_5092") +} + func (s *testIntegrationSuite5) TestErrnoErrorCode(c *C) { tk := testkit.NewTestKit(c, s.store) tk.MustExec("use test_db") @@ -489,7 +529,7 @@ func (s *testIntegrationSuite5) TestErrnoErrorCode(c *C) { sql = "alter table test_drop_columns drop column c1, add column c2 int;" tk.MustGetErrCode(sql, errno.ErrUnsupportedDDLOperation) sql = "alter table test_drop_columns drop column c1, drop column c1;" - tk.MustGetErrCode(sql, errno.ErrDupFieldName) + tk.MustGetErrCode(sql, errno.ErrCantDropFieldOrKey) // add index sql = "alter table test_error_code_succ add index idx (c_not_exist)" tk.MustGetErrCode(sql, errno.ErrKeyColumnDoesNotExits) diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go index c335942cf4850..dfc49d86eef61 100644 --- a/ddl/ddl_api.go +++ b/ddl/ddl_api.go @@ -2346,42 +2346,53 @@ func (d *ddl) AddColumns(ctx sessionctx.Context, ti ast.Ident, specs []*ast.Alte } // Check all the columns at once. 
- newColumnsCount := 0 addingColumnNames := make(map[string]bool) + dupColumnNames := make(map[string]bool) for _, spec := range specs { for _, specNewColumn := range spec.NewColumns { - newColumnsCount++ if !addingColumnNames[specNewColumn.Name.Name.L] { addingColumnNames[specNewColumn.Name.Name.L] = true } else { + if spec.IfNotExists { + dupColumnNames[specNewColumn.Name.Name.L] = true + continue + } return errors.Trace(infoschema.ErrColumnExists.GenWithStackByArgs(specNewColumn.Name.Name.O)) } } } - if err = checkAddColumnTooManyColumns(len(t.Cols()) + newColumnsCount); err != nil { - return errors.Trace(err) - } - columns := make([]*table.Column, newColumnsCount) - positions := make([]*ast.ColumnPosition, newColumnsCount) - offsets := make([]int, newColumnsCount) - index := 0 + columns := make([]*table.Column, 0) + positions := make([]*ast.ColumnPosition, 0) + offsets := make([]int, 0) + newColumnsCount := 0 // Check the columns one by one. for _, spec := range specs { for _, specNewColumn := range spec.NewColumns { + if spec.IfNotExists && dupColumnNames[specNewColumn.Name.Name.L] { + err = infoschema.ErrColumnExists.GenWithStackByArgs(specNewColumn.Name.Name.O) + ctx.GetSessionVars().StmtCtx.AppendNote(err) + continue + } col, err := checkAndCreateNewColumn(ctx, ti, schema, spec, t, specNewColumn) if err != nil { return errors.Trace(err) } // Added column has existed and if_not_exists flag is true. - if col == nil { - break + if col == nil && spec.IfNotExists { + continue } - columns[index] = col - positions[index] = spec.Position - offsets[index] = 0 - index++ + columns = append(columns, col) + positions = append(positions, spec.Position) + offsets = append(offsets, 0) + newColumnsCount++ } } + if newColumnsCount == 0 { + return nil + } + if err = checkAddColumnTooManyColumns(len(t.Cols()) + newColumnsCount); err != nil { + return errors.Trace(err) + } job := &model.Job{ SchemaID: schema.ID, @@ -2625,27 +2636,42 @@ func (d *ddl) DropColumns(ctx sessionctx.Context, ti ast.Ident, specs []*ast.Alt tblInfo := t.Meta() dropingColumnNames := make(map[string]bool) + dupColumnNames := make(map[string]bool) for _, spec := range specs { if !dropingColumnNames[spec.OldColumnName.Name.L] { dropingColumnNames[spec.OldColumnName.Name.L] = true } else { - return errors.Trace(infoschema.ErrColumnExists.GenWithStackByArgs(spec.OldColumnName.Name.O)) + if spec.IfExists { + dupColumnNames[spec.OldColumnName.Name.L] = true + continue + } + return errors.Trace(ErrCantDropFieldOrKey.GenWithStack("column %s doesn't exist", spec.OldColumnName.Name.O)) } } - if len(tblInfo.Columns) == len(specs) { - return ErrCantRemoveAllFields.GenWithStack("can't drop all columns in table %s", - tblInfo.Name) - } var colNames []model.CIStr for _, spec := range specs { + if spec.IfExists && dupColumnNames[spec.OldColumnName.Name.L] { + err = ErrCantDropFieldOrKey.GenWithStack("column %s doesn't exist", spec.OldColumnName.Name.L) + ctx.GetSessionVars().StmtCtx.AppendNote(err) + continue + } isDropable, err := checkIsDroppableColumn(ctx, t, spec) if err != nil { return err } - if isDropable { - colNames = append(colNames, spec.OldColumnName.Name) + // Column can't drop and if_exists flag is true. 
+ if !isDropable && spec.IfExists { + continue } + colNames = append(colNames, spec.OldColumnName.Name) + } + if len(colNames) == 0 { + return nil + } + if len(tblInfo.Columns) == len(colNames) { + return ErrCantRemoveAllFields.GenWithStack("can't drop all columns in table %s", + tblInfo.Name) } job := &model.Job{ From 32d6358750dac5ce14253b1486539bea81b35edc Mon Sep 17 00:00:00 2001 From: gauss Date: Fri, 27 Mar 2020 17:37:25 +0800 Subject: [PATCH 26/36] improve readability --- ddl/column.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ddl/column.go b/ddl/column.go index 6851a4b337c7d..cd8680f70b2f2 100644 --- a/ddl/column.go +++ b/ddl/column.go @@ -332,10 +332,10 @@ func onAddColumns(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, err error case model.StateWriteReorganization: // reorganization -> public // Adjust table column offsets. - oldCols := tblInfo.Columns + oldCols := tblInfo.Columns[:len(tblInfo.Columns)-len(offsets)] + newCols := tblInfo.Columns[len(tblInfo.Columns)-len(offsets):] + tblInfo.Columns = oldCols for i := range offsets { - tmpCols := oldCols[:len(oldCols)-len(offsets)+i+1] - tblInfo.Columns = tmpCols // For multiple columns with after position, should adjust offsets. // e.g. create table t(a int); // alter table t add column b int after a, add column c int after a; @@ -348,8 +348,8 @@ func onAddColumns(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, err error } } } + tblInfo.Columns = append(tblInfo.Columns, newCols[i]) adjustColumnInfoInAddColumn(tblInfo, offsets[i]) - oldCols = append(tblInfo.Columns, oldCols[len(oldCols)-len(offsets)+i+1:]...) } setColumnsState(columnInfos, model.StatePublic) ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != columnInfos[0].State) From 00350ac4b027f17a355f0709e06e3dccb3f16b03 Mon Sep 17 00:00:00 2001 From: gauss Date: Mon, 30 Mar 2020 10:55:33 +0800 Subject: [PATCH 27/36] polish code --- ddl/column.go | 11 ++++------- ddl/ddl_api.go | 25 ++++++++++++++----------- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/ddl/column.go b/ddl/column.go index cd8680f70b2f2..7a8dee3e95d6b 100644 --- a/ddl/column.go +++ b/ddl/column.go @@ -251,12 +251,11 @@ func checkAddAndCreateColumnInfos(t *meta.Meta, job *model.Job) (*model.TableInf } } - var columnInfos []*model.ColumnInfo - + columnInfos := make([]*model.ColumnInfo, len(columns)) for i, col := range columns { columnInfo := model.FindColumnInfo(tblInfo.Columns, col.Name.L) if columnInfo != nil { - columnInfos = append(columnInfos, columnInfo) + columnInfos[i] = columnInfo if columnInfo.State == model.StatePublic { // We already have a column with the same column name. job.State = model.JobStateCancelled @@ -271,14 +270,12 @@ func checkAddAndCreateColumnInfos(t *meta.Meta, job *model.Job) (*model.TableInf logutil.BgLogger().Info("[ddl] run add columns job", zap.String("job", job.String()), zap.Reflect("columnInfo", *columnInfo), zap.Int("offset", offsets[i])) // Set offset arg to job. 
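// A self-contained sketch of the offset bookkeeping described in the
// comments above, using plain string slices instead of model.ColumnInfo and
// modeling only FIRST and AFTER: every new column records the slot it wanted
// relative to the original layout, and a later AFTER target is bumped once
// for every earlier FIRST column and for every earlier AFTER column whose
// target was strictly smaller.

package main

import "fmt"

// spec says where one new column goes: FIRST, or AFTER an existing column.
type spec struct {
	name  string
	first bool
	after string // anchor column; assumed to exist when first is false
}

func indexOf(cols []string, name string) int {
	for i, c := range cols {
		if c == name {
			return i
		}
	}
	return -1
}

// addColumns inserts the new columns one by one, bumping later targets as
// earlier columns from the same statement land in front of them.
func addColumns(base []string, specs []spec) []string {
	offsets := make([]int, len(specs))
	for i, s := range specs {
		if !s.first {
			offsets[i] = indexOf(base, s.after) + 1
		}
	}
	cols := append([]string(nil), base...)
	for i, s := range specs {
		off := offsets[i]
		if !s.first {
			for j := 0; j < i; j++ {
				if specs[j].first || offsets[j] < offsets[i] {
					off++
				}
			}
		}
		cols = append(cols, "")
		copy(cols[off+1:], cols[off:])
		cols[off] = s.name
	}
	return cols
}

func main() {
	// create table t(a int);
	// alter table t add column b int after a, add column c int after a;
	fmt.Println(addColumns([]string{"a"}, []spec{
		{name: "b", after: "a"},
		{name: "c", after: "a"},
	})) // [a c b]

	// alter table t add column a1 int after a, add column b1 int first;
	fmt.Println(addColumns([]string{"a", "c", "b"}, []spec{
		{name: "a1", after: "a"},
		{name: "b1", first: true},
	})) // [b1 a a1 c b]
}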
- if offset != 0 { - offsets[i] = offset - } + offsets[i] = offset if err = checkAddColumnTooManyColumns(len(tblInfo.Columns)); err != nil { job.State = model.JobStateCancelled return nil, nil, nil, nil, errors.Trace(err) } - columnInfos = append(columnInfos, columnInfo) + columnInfos[i] = columnInfo } } job.Args = []interface{}{columnInfos, positions, offsets} diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go index dfc49d86eef61..46cec7ebbe3e0 100644 --- a/ddl/ddl_api.go +++ b/ddl/ddl_api.go @@ -2352,18 +2352,17 @@ func (d *ddl) AddColumns(ctx sessionctx.Context, ti ast.Ident, specs []*ast.Alte for _, specNewColumn := range spec.NewColumns { if !addingColumnNames[specNewColumn.Name.Name.L] { addingColumnNames[specNewColumn.Name.Name.L] = true - } else { - if spec.IfNotExists { - dupColumnNames[specNewColumn.Name.Name.L] = true - continue - } + continue + } + if !spec.IfNotExists { return errors.Trace(infoschema.ErrColumnExists.GenWithStackByArgs(specNewColumn.Name.Name.O)) } + dupColumnNames[specNewColumn.Name.Name.L] = true } } - columns := make([]*table.Column, 0) - positions := make([]*ast.ColumnPosition, 0) - offsets := make([]int, 0) + columns := make([]*table.Column, len(addingColumnNames)) + positions := make([]*ast.ColumnPosition, len(addingColumnNames)) + offsets := make([]int, len(addingColumnNames)) newColumnsCount := 0 // Check the columns one by one. for _, spec := range specs { @@ -2381,15 +2380,19 @@ func (d *ddl) AddColumns(ctx sessionctx.Context, ti ast.Ident, specs []*ast.Alte if col == nil && spec.IfNotExists { continue } - columns = append(columns, col) - positions = append(positions, spec.Position) - offsets = append(offsets, 0) + columns[newColumnsCount] = col + positions[newColumnsCount] = spec.Position + offsets[newColumnsCount] = 0 newColumnsCount++ } } if newColumnsCount == 0 { return nil } + // Since the newColumnsCount can be less than len(addingColumnNames) + columns = columns[:newColumnsCount] + positions = positions[:newColumnsCount] + offsets = offsets[:newColumnsCount] if err = checkAddColumnTooManyColumns(len(t.Cols()) + newColumnsCount); err != nil { return errors.Trace(err) } From 1db980c693d31fad7015c0ffdff0be401cb01ef9 Mon Sep 17 00:00:00 2001 From: gauss Date: Tue, 31 Mar 2020 09:57:04 +0800 Subject: [PATCH 28/36] polish code --- ddl/column.go | 6 +++--- ddl/column_test.go | 20 ++++++++++---------- ddl/ddl_worker_test.go | 6 +++--- ddl/rollingback.go | 4 ++-- 4 files changed, 18 insertions(+), 18 deletions(-) diff --git a/ddl/column.go b/ddl/column.go index 7a8dee3e95d6b..0128df59c22e7 100644 --- a/ddl/column.go +++ b/ddl/column.go @@ -428,8 +428,8 @@ func checkDropColumns(t *meta.Meta, job *model.Job) (*model.TableInfo, []*model. return nil, nil, 0, errors.Trace(err) } - var colInfos []*model.ColumnInfo - for _, colName := range colNames { + colInfos := make([]*model.ColumnInfo, len(colNames)) + for i, colName := range colNames { colInfo := model.FindColumnInfo(tblInfo.Columns, colName.L) if colInfo == nil || colInfo.Hidden { job.State = model.JobStateCancelled @@ -439,7 +439,7 @@ func checkDropColumns(t *meta.Meta, job *model.Job) (*model.TableInfo, []*model. 
job.State = model.JobStateCancelled return nil, nil, 0, errors.Trace(err) } - colInfos = append(colInfos, colInfo) + colInfos[i] = colInfo } return tblInfo, colInfos, len(colNames), nil } diff --git a/ddl/column_test.go b/ddl/column_test.go index ac1f6029e5741..43f19cf2bdb74 100644 --- a/ddl/column_test.go +++ b/ddl/column_test.go @@ -90,11 +90,11 @@ func testCreateColumn(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo return job } -func buildCreateColumnJobs(dbInfo *model.DBInfo, tblInfo *model.TableInfo, colNames []string, +func buildCreateColumnsJob(dbInfo *model.DBInfo, tblInfo *model.TableInfo, colNames []string, positions []*ast.ColumnPosition, defaultValue interface{}) *model.Job { - var colInfos []*model.ColumnInfo + colInfos := make([]*model.ColumnInfo, len(colNames)) offsets := make([]int, len(colNames)) - for _, colName := range colNames { + for i, colName := range colNames { col := &model.ColumnInfo{ Name: model.NewCIStr(colName), Offset: len(tblInfo.Columns), @@ -103,7 +103,7 @@ func buildCreateColumnJobs(dbInfo *model.DBInfo, tblInfo *model.TableInfo, colNa } col.ID = allocateColumnID(tblInfo) col.FieldType = *types.NewFieldType(mysql.TypeLong) - colInfos = append(colInfos, col) + colInfos[i] = col } job := &model.Job{ @@ -118,7 +118,7 @@ func buildCreateColumnJobs(dbInfo *model.DBInfo, tblInfo *model.TableInfo, colNa func testCreateColumns(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, colNames []string, positions []*ast.ColumnPosition, defaultValue interface{}) *model.Job { - job := buildCreateColumnJobs(dbInfo, tblInfo, colNames, positions, defaultValue) + job := buildCreateColumnsJob(dbInfo, tblInfo, colNames, positions, defaultValue) err := d.doDDLJob(ctx, job) c.Assert(err, IsNil) v := getSchemaVer(c, ctx) @@ -149,10 +149,10 @@ func testDropColumn(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, return job } -func buildDropColumnJobs(dbInfo *model.DBInfo, tblInfo *model.TableInfo, colNames []string) *model.Job { - var columnNames []model.CIStr - for _, colName := range colNames { - columnNames = append(columnNames, model.NewCIStr(colName)) +func buildDropColumnsJob(dbInfo *model.DBInfo, tblInfo *model.TableInfo, colNames []string) *model.Job { + columnNames := make([]model.CIStr, len(colNames)) + for i, colName := range colNames { + columnNames[i] = model.NewCIStr(colName) } job := &model.Job{ SchemaID: dbInfo.ID, @@ -165,7 +165,7 @@ func buildDropColumnJobs(dbInfo *model.DBInfo, tblInfo *model.TableInfo, colName } func testDropColumns(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, colNames []string, isError bool) *model.Job { - job := buildDropColumnJobs(dbInfo, tblInfo, colNames) + job := buildDropColumnsJob(dbInfo, tblInfo, colNames) err := d.doDDLJob(ctx, job) if isError { c.Assert(err, NotNil) diff --git a/ddl/ddl_worker_test.go b/ddl/ddl_worker_test.go index 9c9a13e2d7a5b..721314412253f 100644 --- a/ddl/ddl_worker_test.go +++ b/ddl/ddl_worker_test.go @@ -878,8 +878,8 @@ func (s *testDDLSuite) TestCancelJob(c *C) { // for add columns updateTest(&tests[34]) addingColNames := []string{"colA", "colB", "colC", "colD", "colE", "colF"} - var cols []*table.Column - for _, addingColName := range addingColNames { + cols := make([]*table.Column, len(addingColNames)) + for i, addingColName := range addingColNames { newColumnDef := &ast.ColumnDef{ Name: &ast.ColumnName{Name: model.NewCIStr(addingColName)}, Tp: &types.FieldType{Tp: mysql.TypeLonglong}, @@ -887,7 +887,7 @@ func (s 
*testDDLSuite) TestCancelJob(c *C) { } col, _, err := buildColumnAndConstraint(ctx, 0, newColumnDef, nil, mysql.DefaultCharset, "", mysql.DefaultCharset, "") c.Assert(err, IsNil) - cols = append(cols, col) + cols[i] = col } offsets := make([]int, len(cols)) positions := make([]*ast.ColumnPosition, len(cols)) diff --git a/ddl/rollingback.go b/ddl/rollingback.go index 2ca087d12c534..63c6cfaa90994 100644 --- a/ddl/rollingback.go +++ b/ddl/rollingback.go @@ -133,7 +133,7 @@ func rollingbackAddColumns(t *meta.Meta, job *model.Job) (ver int64, err error) return ver, errors.Trace(err) } - var colNames []model.CIStr + colNames := make([]model.CIStr, len(columnInfos)) originalState := columnInfos[0].State for i, columnInfo := range columnInfos { if columnInfo == nil { @@ -141,7 +141,7 @@ func rollingbackAddColumns(t *meta.Meta, job *model.Job) (ver int64, err error) return ver, errCancelledDDLJob } columnInfos[i].State = model.StateDeleteOnly - colNames = append(colNames, columnInfo.Name) + colNames[i] = columnInfo.Name } job.SchemaState = model.StateDeleteOnly From 97f69330ce08d9d146b13571ba3543f959ceaaac Mon Sep 17 00:00:00 2001 From: gauss Date: Thu, 2 Apr 2020 11:20:01 +0800 Subject: [PATCH 29/36] polish code and add test cases --- ddl/column.go | 35 +++++++++++++++---------------- ddl/column_test.go | 19 ----------------- ddl/db_integration_test.go | 42 ++++++++++++++++++++++++++++++++++++-- ddl/db_test.go | 14 ++----------- ddl/ddl_api.go | 22 ++++++++++++-------- ddl/index.go | 4 ++-- 6 files changed, 73 insertions(+), 63 deletions(-) diff --git a/ddl/column.go b/ddl/column.go index 0128df59c22e7..eb317e40f0b3d 100644 --- a/ddl/column.go +++ b/ddl/column.go @@ -97,22 +97,25 @@ func adjustColumnInfoInDropColumn(tblInfo *model.TableInfo, offset int) { tblInfo.Columns = newCols } -func createColumnInfo(tblInfo *model.TableInfo, colInfo *model.ColumnInfo, pos *ast.ColumnPosition) (*model.ColumnInfo, int, error) { +func createColumnInfo(tblInfo *model.TableInfo, colInfo *model.ColumnInfo, pos *ast.ColumnPosition) (*model.ColumnInfo, *ast.ColumnPosition, int, error) { // Check column name duplicate. cols := tblInfo.Columns - position := len(cols) - - // Get column position. + offset := len(cols) + // Should initialize pos when it is nil. + if pos == nil { + pos = &ast.ColumnPosition{} + } + // Get column offset. if pos.Tp == ast.ColumnPositionFirst { - position = 0 + offset = 0 } else if pos.Tp == ast.ColumnPositionAfter { c := model.FindColumnInfo(cols, pos.RelativeColumn.Name.L) if c == nil { - return nil, 0, infoschema.ErrColumnNotExists.GenWithStackByArgs(pos.RelativeColumn, tblInfo.Name) + return nil, pos, 0, infoschema.ErrColumnNotExists.GenWithStackByArgs(pos.RelativeColumn, tblInfo.Name) } - // Insert position is after the mentioned column. - position = c.Offset + 1 + // Insert offset is after the mentioned column. + offset = c.Offset + 1 } colInfo.ID = allocateColumnID(tblInfo) colInfo.State = model.StateNone @@ -121,13 +124,13 @@ func createColumnInfo(tblInfo *model.TableInfo, colInfo *model.ColumnInfo, pos * colInfo.Offset = len(cols) // Append the column info to the end of the tblInfo.Columns. - // It will reorder to the right position in "Columns" when it state change to public. + // It will reorder to the right offset in "Columns" when it state change to public. newCols := make([]*model.ColumnInfo, 0, len(cols)+1) newCols = append(newCols, cols...) 
newCols = append(newCols, colInfo) tblInfo.Columns = newCols - return colInfo, position, nil + return colInfo, pos, offset, nil } func checkAddColumn(t *meta.Meta, job *model.Job) (*model.TableInfo, *model.ColumnInfo, *model.ColumnInfo, *ast.ColumnPosition, int, error) { @@ -177,7 +180,7 @@ func onAddColumn(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, err error) return ver, errors.Trace(err) } if columnInfo == nil { - columnInfo, offset, err = createColumnInfo(tblInfo, col, pos) + columnInfo, _, offset, err = createColumnInfo(tblInfo, col, pos) if err != nil { job.State = model.JobStateCancelled return ver, errors.Trace(err) @@ -244,12 +247,6 @@ func checkAddAndCreateColumnInfos(t *meta.Meta, job *model.Job) (*model.TableInf job.State = model.JobStateCancelled return nil, nil, nil, nil, errors.Trace(err) } - // Should initialize positions[i] when it is nil, otherwise the createColumnInfo function will panic. - for i := range positions { - if positions[i] == nil { - positions[i] = &ast.ColumnPosition{} - } - } columnInfos := make([]*model.ColumnInfo, len(columns)) for i, col := range columns { @@ -262,13 +259,13 @@ func checkAddAndCreateColumnInfos(t *meta.Meta, job *model.Job) (*model.TableInf return nil, nil, nil, nil, infoschema.ErrColumnExists.GenWithStackByArgs(col.Name) } } else { - columnInfo, offset, err := createColumnInfo(tblInfo, col, positions[i]) + columnInfo, pos, offset, err := createColumnInfo(tblInfo, col, positions[i]) if err != nil { job.State = model.JobStateCancelled return nil, nil, nil, nil, errors.Trace(err) } logutil.BgLogger().Info("[ddl] run add columns job", zap.String("job", job.String()), zap.Reflect("columnInfo", *columnInfo), zap.Int("offset", offsets[i])) - + positions[i] = pos // Set offset arg to job. offsets[i] = offset if err = checkAddColumnTooManyColumns(len(tblInfo.Columns)); err != nil { diff --git a/ddl/column_test.go b/ddl/column_test.go index 43f19cf2bdb74..52d0646cfa039 100644 --- a/ddl/column_test.go +++ b/ddl/column_test.go @@ -987,17 +987,8 @@ func (s *testColumnSuite) TestAddColumns(c *C) { c.Assert(errors.ErrorStack(hErr), Equals, "") c.Assert(ok, IsTrue) - err = ctx.NewTxn(context.Background()) - c.Assert(err, IsNil) - job = testDropTable(c, ctx, d, s.dbInfo, tblInfo) testCheckJobDone(c, d, job, false) - - txn, err = ctx.Txn(true) - c.Assert(err, IsNil) - err = txn.Commit(context.Background()) - c.Assert(err, IsNil) - d.Stop() } @@ -1118,7 +1109,6 @@ func (s *testColumnSuite) TestDropColumns(c *C) { return } for _, colName := range colNames { - col := table.FindCol(t.(*tables.TableCommon).Columns, colName) if col == nil { checkOK = true @@ -1138,17 +1128,8 @@ func (s *testColumnSuite) TestDropColumns(c *C) { c.Assert(hErr, IsNil) c.Assert(ok, IsTrue) - err = ctx.NewTxn(context.Background()) - c.Assert(err, IsNil) - job = testDropTable(c, ctx, d, s.dbInfo, tblInfo) testCheckJobDone(c, d, job, false) - - txn, err = ctx.Txn(true) - c.Assert(err, IsNil) - err = txn.Commit(context.Background()) - c.Assert(err, IsNil) - d.Stop() } diff --git a/ddl/db_integration_test.go b/ddl/db_integration_test.go index 3a3dd8eb0d877..14a5622644fa6 100644 --- a/ddl/db_integration_test.go +++ b/ddl/db_integration_test.go @@ -365,11 +365,40 @@ func (s *testIntegrationSuite1) TestIssue5092(c *C) { tk.MustExec("alter table t_issue_5092 add column (b int, c int)") tk.MustExec("alter table t_issue_5092 add column if not exists (b int, c int)") tk.MustExec("alter table t_issue_5092 add column b1 int after b, add column c1 int after c") + tk.MustExec("alter 
table t_issue_5092 add column d int after b, add column e int first, add column f int after c1, add column g int, add column h int first") + tk.MustQuery("show create table t_issue_5092").Check(testkit.Rows("t_issue_5092 CREATE TABLE `t_issue_5092` (\n" + + " `h` int(11) DEFAULT NULL,\n" + + " `e` int(11) DEFAULT NULL,\n" + + " `a` int(11) DEFAULT NULL,\n" + + " `b` int(11) DEFAULT NULL,\n" + + " `d` int(11) DEFAULT NULL,\n" + + " `b1` int(11) DEFAULT NULL,\n" + + " `c` int(11) DEFAULT NULL,\n" + + " `c1` int(11) DEFAULT NULL,\n" + + " `f` int(11) DEFAULT NULL,\n" + + " `g` int(11) DEFAULT NULL\n" + + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin")) // The following two statements are consistent with MariaDB. tk.MustGetErrCode("alter table t_issue_5092 add column if not exists d int, add column d int", errno.ErrDupFieldName) - tk.MustExec("alter table t_issue_5092 add column d int, add column if not exists d int") - tk.MustExec("alter table t_issue_5092 add column if not exists (d int, e int), add column f int") + tk.MustExec("alter table t_issue_5092 add column dd int, add column if not exists dd int") + tk.MustExec("alter table t_issue_5092 add column if not exists (d int, e int), add column ff text") tk.MustExec("alter table t_issue_5092 add column b2 int after b1, add column c2 int first") + tk.MustQuery("show create table t_issue_5092").Check(testkit.Rows("t_issue_5092 CREATE TABLE `t_issue_5092` (\n" + + " `c2` int(11) DEFAULT NULL,\n" + + " `h` int(11) DEFAULT NULL,\n" + + " `e` int(11) DEFAULT NULL,\n" + + " `a` int(11) DEFAULT NULL,\n" + + " `b` int(11) DEFAULT NULL,\n" + + " `d` int(11) DEFAULT NULL,\n" + + " `b1` int(11) DEFAULT NULL,\n" + + " `b2` int(11) DEFAULT NULL,\n" + + " `c` int(11) DEFAULT NULL,\n" + + " `c1` int(11) DEFAULT NULL,\n" + + " `f` int(11) DEFAULT NULL,\n" + + " `g` int(11) DEFAULT NULL,\n" + + " `dd` int(11) DEFAULT NULL,\n" + + " `ff` text DEFAULT NULL\n" + + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin")) tk.MustExec("drop table t_issue_5092") tk.MustExec("create table t_issue_5092 (a int default 1)") @@ -379,6 +408,15 @@ func (s *testIntegrationSuite1) TestIssue5092(c *C) { tk.MustQuery("select * from t_issue_5092").Check(testkit.Rows("1 2 22 3 33")) tk.MustExec("alter table t_issue_5092 add column d int default 4 after c1, add column aa int default 0 first") tk.MustQuery("select * from t_issue_5092").Check(testkit.Rows("0 1 2 22 3 33 4")) + tk.MustQuery("show create table t_issue_5092").Check(testkit.Rows("t_issue_5092 CREATE TABLE `t_issue_5092` (\n" + + " `aa` int(11) DEFAULT '0',\n" + + " `a` int(11) DEFAULT '1',\n" + + " `b` int(11) DEFAULT '2',\n" + + " `b1` int(11) DEFAULT '22',\n" + + " `c` int(11) DEFAULT '3',\n" + + " `c1` int(11) DEFAULT '33',\n" + + " `d` int(11) DEFAULT '4'\n" + + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin")) tk.MustExec("drop table t_issue_5092") tk.MustExec("create table t_issue_5092 (a int)") diff --git a/ddl/db_test.go b/ddl/db_test.go index 2b5d931d6640a..8175388689576 100644 --- a/ddl/db_test.go +++ b/ddl/db_test.go @@ -1445,19 +1445,9 @@ func (s *testDBSuite3) TestCancelDropColumns(c *C) { s.mustExec(c, "alter table test_drop_column add column c3 int, add column c4 int") } _, err1 = s.tk.Exec("alter table test_drop_column drop column c3, drop column c4") - var col3 *table.Column - var col4 *table.Column t := s.testGetTable(c, "test_drop_column") - for _, col := range t.Cols() { - if strings.EqualFold(col.Name.L, "c3") { - col3 = col - continue - } - if 
strings.EqualFold(col.Name.L, "c4") { - col4 = col - continue - } - } + col3 := table.FindCol(t.Cols(), "c3") + col4 := table.FindCol(t.Cols(), "c4") if testCase.cancelSucc { c.Assert(checkErr, IsNil) c.Assert(col3, NotNil) diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go index 46cec7ebbe3e0..fa676fbcbc2b6 100644 --- a/ddl/ddl_api.go +++ b/ddl/ddl_api.go @@ -2360,9 +2360,9 @@ func (d *ddl) AddColumns(ctx sessionctx.Context, ti ast.Ident, specs []*ast.Alte dupColumnNames[specNewColumn.Name.Name.L] = true } } - columns := make([]*table.Column, len(addingColumnNames)) - positions := make([]*ast.ColumnPosition, len(addingColumnNames)) - offsets := make([]int, len(addingColumnNames)) + columns := make([]*table.Column, 0, len(addingColumnNames)) + positions := make([]*ast.ColumnPosition, 0, len(addingColumnNames)) + offsets := make([]int, 0, len(addingColumnNames)) newColumnsCount := 0 // Check the columns one by one. for _, spec := range specs { @@ -2380,9 +2380,13 @@ func (d *ddl) AddColumns(ctx sessionctx.Context, ti ast.Ident, specs []*ast.Alte if col == nil && spec.IfNotExists { continue } - columns[newColumnsCount] = col - positions[newColumnsCount] = spec.Position - offsets[newColumnsCount] = 0 + columns = append(columns, col) + positions = append(positions, spec.Position) + offsets = append(offsets, 0) + + // columns[newColumnsCount] = col + // positions[newColumnsCount] = spec.Position + // offsets[newColumnsCount] = 0 newColumnsCount++ } } @@ -2390,9 +2394,9 @@ func (d *ddl) AddColumns(ctx sessionctx.Context, ti ast.Ident, specs []*ast.Alte return nil } // Since the newColumnsCount can be less than len(addingColumnNames) - columns = columns[:newColumnsCount] - positions = positions[:newColumnsCount] - offsets = offsets[:newColumnsCount] + // columns = columns[:newColumnsCount] + // positions = positions[:newColumnsCount] + // offsets = offsets[:newColumnsCount] if err = checkAddColumnTooManyColumns(len(t.Cols()) + newColumnsCount); err != nil { return errors.Trace(err) } diff --git a/ddl/index.go b/ddl/index.go index 6bf7ca76043c8..43ee64c5a7b22 100755 --- a/ddl/index.go +++ b/ddl/index.go @@ -44,7 +44,7 @@ import ( "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util" "github.com/pingcap/tidb/util/logutil" - "github.com/pingcap/tidb/util/rowDecoder" + decoder "github.com/pingcap/tidb/util/rowDecoder" "github.com/pingcap/tidb/util/timeutil" "go.uber.org/zap" ) @@ -417,7 +417,7 @@ func (w *worker) onCreateIndex(d *ddlCtx, t *meta.Meta, job *model.Job, isPK boo if len(hiddenCols) > 0 { pos := &ast.ColumnPosition{Tp: ast.ColumnPositionNone} for _, hiddenCol := range hiddenCols { - _, _, err = createColumnInfo(tblInfo, hiddenCol, pos) + _, _, _, err = createColumnInfo(tblInfo, hiddenCol, pos) if err != nil { job.State = model.JobStateCancelled return ver, errors.Trace(err) From d53c704c09809975ba837513abdcfa812e5ce919 Mon Sep 17 00:00:00 2001 From: gauss Date: Thu, 2 Apr 2020 13:02:26 +0800 Subject: [PATCH 30/36] remove useless code --- ddl/ddl_api.go | 8 -------- 1 file changed, 8 deletions(-) diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go index fa676fbcbc2b6..bb23625792213 100644 --- a/ddl/ddl_api.go +++ b/ddl/ddl_api.go @@ -2383,20 +2383,12 @@ func (d *ddl) AddColumns(ctx sessionctx.Context, ti ast.Ident, specs []*ast.Alte columns = append(columns, col) positions = append(positions, spec.Position) offsets = append(offsets, 0) - - // columns[newColumnsCount] = col - // positions[newColumnsCount] = spec.Position - // offsets[newColumnsCount] = 0 newColumnsCount++ } } if newColumnsCount 
== 0 { return nil } - // Since the newColumnsCount can be less than len(addingColumnNames) - // columns = columns[:newColumnsCount] - // positions = positions[:newColumnsCount] - // offsets = offsets[:newColumnsCount] if err = checkAddColumnTooManyColumns(len(t.Cols()) + newColumnsCount); err != nil { return errors.Trace(err) } From 2c6f0f5d3eaf63c3c5500071b63a07014b571023 Mon Sep 17 00:00:00 2001 From: gauss Date: Thu, 2 Apr 2020 16:33:08 +0800 Subject: [PATCH 31/36] [WIP]add test case in db_change_test.go --- ddl/db_change_test.go | 77 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 77 insertions(+) diff --git a/ddl/db_change_test.go b/ddl/db_change_test.go index c72378dddc0a9..5f2d0798558d5 100644 --- a/ddl/db_change_test.go +++ b/ddl/db_change_test.go @@ -528,6 +528,17 @@ func (s *testStateChangeSuite) TestWriteOnlyOnDupUpdate(c *C) { s.runTestInSchemaState(c, model.StateWriteReorganization, true, addColumnSQL, sqls, expectQuery) } +func (s *testStateChangeSuite) TestWriteOnlyOnDupUpdateForAddColumns(c *C) { + sqls := make([]sqlWithErr, 3) + sqls[0] = sqlWithErr{"delete from t", nil} + sqls[1] = sqlWithErr{"insert t set c1 = 'c1_dup', c3 = '2018-02-12', c4 = 2 on duplicate key update c1 = values(c1)", nil} + sqls[2] = sqlWithErr{"insert t set c1 = 'c1_new', c3 = '2019-02-12', c4 = 2 on duplicate key update c1 = values(c1)", nil} + addColumnsSQL := "alter table t add column c5 int not null default 1 after c4, add column c44 int not null default 1" + expectQuery := &expectQuery{"select c4, c5, c44 from t", []string{"2 1 1"}} + // TODO: This case should always fail in write-only state, but it doesn't. We use write-reorganization state here to keep it running stable. It need a double check. + s.runTestInSchemaState(c, model.StateWriteReorganization, true, addColumnsSQL, sqls, expectQuery) +} + // TestWriteOnly tests whether the correct columns is used in PhysicalIndexScan's ToPB function. func (s *testStateChangeSuite) TestWriteOnly(c *C) { sqls := make([]sqlWithErr, 3) @@ -538,6 +549,16 @@ func (s *testStateChangeSuite) TestWriteOnly(c *C) { s.runTestInSchemaState(c, model.StateWriteOnly, true, addColumnSQL, sqls, nil) } +// TestWriteOnlyForAddColumns tests whether the correct columns is used in PhysicalIndexScan's ToPB function. +func (s *testStateChangeSuite) TestWriteOnlyForAddColumns(c *C) { + sqls := make([]sqlWithErr, 3) + sqls[0] = sqlWithErr{"delete from t where c1 = 'a'", nil} + sqls[1] = sqlWithErr{"update t use index(idx2) set c1 = 'c1_update' where c1 = 'a'", nil} + sqls[2] = sqlWithErr{"insert t set c1 = 'c1_insert', c3 = '2018-02-12', c4 = 1", nil} + addColumnsSQL := "alter table t add column c5 int not null default 1 first, add column c6 int not null default 1" + s.runTestInSchemaState(c, model.StateWriteOnly, true, addColumnsSQL, sqls, nil) +} + // TestDeletaOnly tests whether the correct columns is used in PhysicalIndexScan's ToPB function. func (s *testStateChangeSuite) TestDeleteOnly(c *C) { sqls := make([]sqlWithErr, 1) @@ -547,6 +568,15 @@ func (s *testStateChangeSuite) TestDeleteOnly(c *C) { s.runTestInSchemaState(c, model.StateDeleteOnly, true, dropColumnSQL, sqls, nil) } +// TestDeleteOnlyForDropColumns tests whether the correct columns is used in PhysicalIndexScan's ToPB function. 
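// The tests above drive one multi-column ALTER TABLE through the intermediate
// schema states. As a toy illustration (plain ints, not TiDB's
// model.SchemaState), the whole batch of new columns moves through those
// states in lockstep, the way setColumnsState flips them all at once:

package main

import "fmt"

type schemaState int

const (
	stateNone schemaState = iota
	stateDeleteOnly
	stateWriteOnly
	stateWriteReorganization
	statePublic
)

func (s schemaState) String() string {
	return [...]string{"none", "delete only", "write only", "write reorganization", "public"}[s]
}

// advance moves every column of the batch forward by one step, so the columns
// added by a single statement are never in different states.
func advance(states []schemaState) bool {
	if states[0] == statePublic {
		return false
	}
	for i := range states {
		states[i]++
	}
	return true
}

func main() {
	batch := []schemaState{stateNone, stateNone} // e.g. c5 and c44 from the test above
	for advance(batch) {
		fmt.Println(batch[0], batch[1])
	}
}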
+func (s *testStateChangeSuite) TestDeleteOnlyForDropColumns(c *C) { + sqls := make([]sqlWithErr, 1) + sqls[0] = sqlWithErr{"insert t set c1 = 'c1_insert', c3 = '2018-02-12', c4 = 1", + errors.Errorf("Can't find column c1")} + dropColumnsSQL := "alter table t drop column c1, drop column c3" + s.runTestInSchemaState(c, model.StateDeleteOnly, true, dropColumnsSQL, sqls, nil) +} + func (s *testStateChangeSuite) TestWriteOnlyForDropColumn(c *C) { _, err := s.se.Execute(context.Background(), "use test_db_state") c.Assert(err, IsNil) @@ -567,6 +597,26 @@ func (s *testStateChangeSuite) TestWriteOnlyForDropColumn(c *C) { s.runTestInSchemaState(c, model.StateWriteOnly, false, dropColumnSQL, sqls, query) } +func (s *testStateChangeSuite) TestWriteOnlyForDropColumns(c *C) { + _, err := s.se.Execute(context.Background(), "use test_db_state") + c.Assert(err, IsNil) + _, err = s.se.Execute(context.Background(), `create table t_drop_columns (c1 int, c4 int)`) + c.Assert(err, IsNil) + _, err = s.se.Execute(context.Background(), "insert into t_drop_columns (c1, c4) values(8, 8)") + c.Assert(err, IsNil) + defer s.se.Execute(context.Background(), "drop table t_drop_columns") + + sqls := make([]sqlWithErr, 2) + sqls[0] = sqlWithErr{"update t set c1='5', c3='2020-03-01';", errors.New("[planner:1054]Unknown column 'c3' in 'field list'")} + sqls[1] = sqlWithErr{"update t t1, t_drop_columns t2 set t1.c1='5', t1.c3='2020-03-01', t2.c1='10' where t1.c4=t2.c4", + errors.New("[planner:1054]Unknown column 'c3' in 'field list'")} + // TODO: Fix the case of sqls[2]. + // sqls[2] = sqlWithErr{"update t set c1='5' where c3='2017-07-01';", errors.New("[planner:1054]Unknown column 'c3' in 'field list'")} + dropColumnsSQL := "alter table t drop column c3, drop column c1" + query := &expectQuery{sql: "select * from t;", rows: []string{"N 8"}} + s.runTestInSchemaState(c, model.StateWriteOnly, false, dropColumnsSQL, sqls, query) +} + func (s *testStateChangeSuiteBase) runTestInSchemaState(c *C, state model.SchemaState, isOnJobUpdated bool, alterTableSQL string, sqlWithErrs []sqlWithErr, expectQuery *expectQuery) { _, err := s.se.Execute(context.Background(), `create table t ( @@ -847,6 +897,15 @@ func (s *testStateChangeSuite) TestParallelDropColumn(c *C) { s.testControlParallelExecSQL(c, sql, sql, f) } +// func (s *testStateChangeSuite) TestParallelDropColumns(c *C) { +// sql := "ALTER TABLE t drop COLUMN c, drop COLUMN d;" +// f := func(c *C, err1, err2 error) { +// c.Assert(err1, IsNil) +// c.Assert(err2.Error(), Equals, "[ddl:1091]column c doesn't exist") +// } +// s.testControlParallelExecSQL(c, sql, sql, f) +// } + func (s *testStateChangeSuite) TestParallelDropIndex(c *C) { sql1 := "alter table t drop index idx1 ;" sql2 := "alter table t drop index idx2 ;" @@ -1084,6 +1143,9 @@ func (s *testStateChangeSuite) TestDDLIfNotExists(c *C) { // ADD COLUMN s.testParallelExecSQL(c, "alter table test_not_exists add column if not exists b int") + // ADD COLUMNS + // s.testParallelExecSQL(c, "alter table test_not_exists add column if not exists (c11 int, d11 int)") + // ADD INDEX s.testParallelExecSQL(c, "alter table test_not_exists add index if not exists idx_b (b)") @@ -1100,6 +1162,9 @@ func (s *testStateChangeSuite) TestDDLIfExists(c *C) { _, err := s.se.Execute(context.Background(), "create table if not exists test_exists (a int key, b int)") c.Assert(err, IsNil) + // DROP COLUMNS + // s.testParallelExecSQL(c, "alter table test_exists drop column if exists c11, drop column if exists d11") + // DROP COLUMN 
s.testParallelExecSQL(c, "alter table test_exists drop column if exists b") // only `a` exists now @@ -1234,6 +1299,18 @@ func (s *testStateChangeSuite) TestParallelTruncateTableAndAddColumn(c *C) { s.testControlParallelExecSQL(c, sql1, sql2, f) } +// TestParallelTruncateTableAndAddColumns tests add columns when truncate table. +func (s *testStateChangeSuite) TestParallelTruncateTableAndAddColumns(c *C) { + sql1 := "truncate table t" + sql2 := "alter table t add column c3 int, add column c4 int" + f := func(c *C, err1, err2 error) { + c.Assert(err1, IsNil) + c.Assert(err2, NotNil) + c.Assert(err2.Error(), Equals, "[domain:8028]Information schema is changed during the execution of the statement(for example, table definition may be updated by other DDL ran in parallel). If you see this error often, try increasing `tidb_max_delta_schema_count`. [try again later]") + } + s.testControlParallelExecSQL(c, sql1, sql2, f) +} + // TestParallelFlashbackTable tests parallel flashback table. func (s *serialTestStateChangeSuite) TestParallelFlashbackTable(c *C) { c.Assert(failpoint.Enable("github.com/pingcap/tidb/meta/autoid/mockAutoIDChange", `return(true)`), IsNil) From 2783bb0f2cbea3688751bb09fbbde9ea23f20991 Mon Sep 17 00:00:00 2001 From: gauss Date: Fri, 3 Apr 2020 09:33:53 +0800 Subject: [PATCH 32/36] fix TestParallelDropColumns --- ddl/db_change_test.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/ddl/db_change_test.go b/ddl/db_change_test.go index 5f2d0798558d5..8dc58d9ae17fb 100644 --- a/ddl/db_change_test.go +++ b/ddl/db_change_test.go @@ -897,14 +897,14 @@ func (s *testStateChangeSuite) TestParallelDropColumn(c *C) { s.testControlParallelExecSQL(c, sql, sql, f) } -// func (s *testStateChangeSuite) TestParallelDropColumns(c *C) { -// sql := "ALTER TABLE t drop COLUMN c, drop COLUMN d;" -// f := func(c *C, err1, err2 error) { -// c.Assert(err1, IsNil) -// c.Assert(err2.Error(), Equals, "[ddl:1091]column c doesn't exist") -// } -// s.testControlParallelExecSQL(c, sql, sql, f) -// } +func (s *testStateChangeSuite) TestParallelDropColumns(c *C) { + sql := "ALTER TABLE t drop COLUMN b, drop COLUMN c;" + f := func(c *C, err1, err2 error) { + c.Assert(err1, IsNil) + c.Assert(err2.Error(), Equals, "[ddl:1091]column b doesn't exist") + } + s.testControlParallelExecSQL(c, sql, sql, f) +} func (s *testStateChangeSuite) TestParallelDropIndex(c *C) { sql1 := "alter table t drop index idx1 ;" From 51b5cea2215d9b24784e52927568a13b67432c9b Mon Sep 17 00:00:00 2001 From: gauss Date: Tue, 7 Apr 2020 12:03:16 +0800 Subject: [PATCH 33/36] fix db_change_test.go --- ddl/column.go | 94 +++++++++++++++++++++++++++++------------- ddl/column_test.go | 6 ++- ddl/db_change_test.go | 13 +++++- ddl/ddl.go | 5 +++ ddl/ddl_api.go | 10 +++-- ddl/ddl_worker_test.go | 5 ++- ddl/rollingback.go | 13 +++--- 7 files changed, 102 insertions(+), 44 deletions(-) diff --git a/ddl/column.go b/ddl/column.go index eb317e40f0b3d..22abc7a08c545 100644 --- a/ddl/column.go +++ b/ddl/column.go @@ -233,50 +233,48 @@ func onAddColumn(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, err error) return ver, errors.Trace(err) } -func checkAddAndCreateColumnInfos(t *meta.Meta, job *model.Job) (*model.TableInfo, []*model.ColumnInfo, []*ast.ColumnPosition, []int, error) { +func checkAddColumns(t *meta.Meta, job *model.Job) (*model.TableInfo, []*model.ColumnInfo, []*model.ColumnInfo, []*ast.ColumnPosition, []int, []bool, error) { schemaID := job.SchemaID tblInfo, err := getTableInfoAndCancelFaultJob(t, job, 
schemaID) if err != nil { - return nil, nil, nil, nil, errors.Trace(err) + return nil, nil, nil, nil, nil, nil, errors.Trace(err) } columns := []*model.ColumnInfo{} positions := []*ast.ColumnPosition{} offsets := []int{} - err = job.DecodeArgs(&columns, &positions, &offsets) + ifNotExists := []bool{} + err = job.DecodeArgs(&columns, &positions, &offsets, &ifNotExists) if err != nil { job.State = model.JobStateCancelled - return nil, nil, nil, nil, errors.Trace(err) + return nil, nil, nil, nil, nil, nil, errors.Trace(err) } - columnInfos := make([]*model.ColumnInfo, len(columns)) + columnInfos := make([]*model.ColumnInfo, 0, len(columns)) + newColumns := make([]*model.ColumnInfo, 0, len(columns)) + newPositions := make([]*ast.ColumnPosition, 0, len(columns)) + newOffsets := make([]int, 0, len(columns)) + newIfNotExists := make([]bool, 0, len(columns)) for i, col := range columns { columnInfo := model.FindColumnInfo(tblInfo.Columns, col.Name.L) if columnInfo != nil { - columnInfos[i] = columnInfo if columnInfo.State == model.StatePublic { // We already have a column with the same column name. + if ifNotExists[i] { + // TODO: Should return a warning. + logutil.BgLogger().Warn(fmt.Sprintf("Duplicate column name %s", col.Name)) + continue + } job.State = model.JobStateCancelled - return nil, nil, nil, nil, infoschema.ErrColumnExists.GenWithStackByArgs(col.Name) - } - } else { - columnInfo, pos, offset, err := createColumnInfo(tblInfo, col, positions[i]) - if err != nil { - job.State = model.JobStateCancelled - return nil, nil, nil, nil, errors.Trace(err) - } - logutil.BgLogger().Info("[ddl] run add columns job", zap.String("job", job.String()), zap.Reflect("columnInfo", *columnInfo), zap.Int("offset", offsets[i])) - positions[i] = pos - // Set offset arg to job. - offsets[i] = offset - if err = checkAddColumnTooManyColumns(len(tblInfo.Columns)); err != nil { - job.State = model.JobStateCancelled - return nil, nil, nil, nil, errors.Trace(err) + return nil, nil, nil, nil, nil, nil, infoschema.ErrColumnExists.GenWithStackByArgs(col.Name) } - columnInfos[i] = columnInfo + columnInfos = append(columnInfos, columnInfo) } + newColumns = append(newColumns, columns[i]) + newPositions = append(newPositions, positions[i]) + newOffsets = append(newOffsets, offsets[i]) + newIfNotExists = append(newIfNotExists, ifNotExists[i]) } - job.Args = []interface{}{columnInfos, positions, offsets} - return tblInfo, columnInfos, positions, offsets, nil + return tblInfo, columnInfos, newColumns, newPositions, newOffsets, newIfNotExists, nil } func setColumnsState(columnInfos []*model.ColumnInfo, state model.SchemaState) { @@ -301,10 +299,33 @@ func onAddColumns(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, err error } }) - tblInfo, columnInfos, positions, offsets, err := checkAddAndCreateColumnInfos(t, job) + tblInfo, columnInfos, columns, positions, offsets, ifNotExists, err := checkAddColumns(t, job) if err != nil { return ver, errors.Trace(err) } + if len(columnInfos) == 0 { + if len(columns) == 0 { + job.State = model.JobStateCancelled + return ver, nil + } + for i := range columns { + columnInfo, pos, offset, err := createColumnInfo(tblInfo, columns[i], positions[i]) + if err != nil { + job.State = model.JobStateCancelled + return ver, errors.Trace(err) + } + logutil.BgLogger().Info("[ddl] run add columns job", zap.String("job", job.String()), zap.Reflect("columnInfo", *columnInfo), zap.Int("offset", offset)) + positions[i] = pos + // Set offset arg to job. 
+ offsets[i] = offset + if err = checkAddColumnTooManyColumns(len(tblInfo.Columns)); err != nil { + job.State = model.JobStateCancelled + return ver, errors.Trace(err) + } + columnInfos = append(columnInfos, columnInfo) + } + job.Args = []interface{}{columnInfos, positions, offsets, ifNotExists} + } originalState := columnInfos[0].State switch columnInfos[0].State { @@ -365,6 +386,10 @@ func onDropColumns(t *meta.Meta, job *model.Job) (ver int64, _ error) { if err != nil { return ver, errors.Trace(err) } + if len(colInfos) == 0 { + job.State = model.JobStateCancelled + return ver, nil + } originalState := colInfos[0].State switch colInfos[0].State { @@ -419,16 +444,24 @@ func checkDropColumns(t *meta.Meta, job *model.Job) (*model.TableInfo, []*model. } var colNames []model.CIStr - err = job.DecodeArgs(&colNames) + var ifExists []bool + err = job.DecodeArgs(&colNames, &ifExists) if err != nil { job.State = model.JobStateCancelled return nil, nil, 0, errors.Trace(err) } - colInfos := make([]*model.ColumnInfo, len(colNames)) + newColNames := make([]model.CIStr, 0, len(colNames)) + colInfos := make([]*model.ColumnInfo, 0, len(colNames)) + newIfExists := make([]bool, 0, len(colNames)) for i, colName := range colNames { colInfo := model.FindColumnInfo(tblInfo.Columns, colName.L) if colInfo == nil || colInfo.Hidden { + if ifExists[i] { + // TODO: Should return a warning. + logutil.BgLogger().Warn(fmt.Sprintf("column %s doesn't exist", colName)) + continue + } job.State = model.JobStateCancelled return nil, nil, 0, ErrCantDropFieldOrKey.GenWithStack("column %s doesn't exist", colName) } @@ -436,9 +469,12 @@ func checkDropColumns(t *meta.Meta, job *model.Job) (*model.TableInfo, []*model. job.State = model.JobStateCancelled return nil, nil, 0, errors.Trace(err) } - colInfos[i] = colInfo + newColNames = append(newColNames, colName) + newIfExists = append(newIfExists, ifExists[i]) + colInfos = append(colInfos, colInfo) } - return tblInfo, colInfos, len(colNames), nil + job.Args = []interface{}{newColNames, newIfExists} + return tblInfo, colInfos, len(colInfos), nil } func checkDropColumnForStatePublic(tblInfo *model.TableInfo, colInfo *model.ColumnInfo) (err error) { diff --git a/ddl/column_test.go b/ddl/column_test.go index 52d0646cfa039..c8962d6087108 100644 --- a/ddl/column_test.go +++ b/ddl/column_test.go @@ -94,6 +94,7 @@ func buildCreateColumnsJob(dbInfo *model.DBInfo, tblInfo *model.TableInfo, colNa positions []*ast.ColumnPosition, defaultValue interface{}) *model.Job { colInfos := make([]*model.ColumnInfo, len(colNames)) offsets := make([]int, len(colNames)) + ifNotExists := make([]bool, len(colNames)) for i, colName := range colNames { col := &model.ColumnInfo{ Name: model.NewCIStr(colName), @@ -111,7 +112,7 @@ func buildCreateColumnsJob(dbInfo *model.DBInfo, tblInfo *model.TableInfo, colNa TableID: tblInfo.ID, Type: model.ActionAddColumns, BinlogInfo: &model.HistoryInfo{}, - Args: []interface{}{colInfos, positions, offsets}, + Args: []interface{}{colInfos, positions, offsets, ifNotExists}, } return job } @@ -151,6 +152,7 @@ func testDropColumn(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, func buildDropColumnsJob(dbInfo *model.DBInfo, tblInfo *model.TableInfo, colNames []string) *model.Job { columnNames := make([]model.CIStr, len(colNames)) + ifExists := make([]bool, len(colNames)) for i, colName := range colNames { columnNames[i] = model.NewCIStr(colName) } @@ -159,7 +161,7 @@ func buildDropColumnsJob(dbInfo *model.DBInfo, tblInfo *model.TableInfo, colName TableID: tblInfo.ID, 
Type: model.ActionDropColumns, BinlogInfo: &model.HistoryInfo{}, - Args: []interface{}{columnNames}, + Args: []interface{}{columnNames, ifExists}, } return job } diff --git a/ddl/db_change_test.go b/ddl/db_change_test.go index 8dc58d9ae17fb..7e12acd4c3ed5 100644 --- a/ddl/db_change_test.go +++ b/ddl/db_change_test.go @@ -906,6 +906,15 @@ func (s *testStateChangeSuite) TestParallelDropColumns(c *C) { s.testControlParallelExecSQL(c, sql, sql, f) } +func (s *testStateChangeSuite) TestParallelDropIfExistsColumns(c *C) { + sql := "ALTER TABLE t drop COLUMN if exists b, drop COLUMN if exists c;" + f := func(c *C, err1, err2 error) { + c.Assert(err1, IsNil) + c.Assert(err2, IsNil) + } + s.testControlParallelExecSQL(c, sql, sql, f) +} + func (s *testStateChangeSuite) TestParallelDropIndex(c *C) { sql1 := "alter table t drop index idx1 ;" sql2 := "alter table t drop index idx2 ;" @@ -1144,7 +1153,7 @@ func (s *testStateChangeSuite) TestDDLIfNotExists(c *C) { s.testParallelExecSQL(c, "alter table test_not_exists add column if not exists b int") // ADD COLUMNS - // s.testParallelExecSQL(c, "alter table test_not_exists add column if not exists (c11 int, d11 int)") + s.testParallelExecSQL(c, "alter table test_not_exists add column if not exists (c11 int, d11 int)") // ADD INDEX s.testParallelExecSQL(c, "alter table test_not_exists add index if not exists idx_b (b)") @@ -1163,7 +1172,7 @@ func (s *testStateChangeSuite) TestDDLIfExists(c *C) { c.Assert(err, IsNil) // DROP COLUMNS - // s.testParallelExecSQL(c, "alter table test_exists drop column if exists c11, drop column if exists d11") + s.testParallelExecSQL(c, "alter table test_exists drop column if exists c, drop column if exists d") // DROP COLUMN s.testParallelExecSQL(c, "alter table test_exists drop column if exists b") // only `a` exists now diff --git a/ddl/ddl.go b/ddl/ddl.go index 31b55af1033fc..f95653b007953 100644 --- a/ddl/ddl.go +++ b/ddl/ddl.go @@ -493,6 +493,11 @@ func (d *ddl) doDDLJob(ctx sessionctx.Context, job *model.Job) error { if historyJob.Error != nil { return errors.Trace(historyJob.Error) } + // Only for JobStateCancelled job which is adding columns or drop columns. + if historyJob.IsCancelled() && (historyJob.Type == model.ActionAddColumns || historyJob.Type == model.ActionDropColumns) { + logutil.BgLogger().Info("[ddl] DDL job is cancelled", zap.Int64("jobID", jobID)) + return nil + } panic("When the state is JobStateRollbackDone or JobStateCancelled, historyJob.Error should never be nil") } } diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go index bb23625792213..38c12a8fc611f 100644 --- a/ddl/ddl_api.go +++ b/ddl/ddl_api.go @@ -2363,6 +2363,7 @@ func (d *ddl) AddColumns(ctx sessionctx.Context, ti ast.Ident, specs []*ast.Alte columns := make([]*table.Column, 0, len(addingColumnNames)) positions := make([]*ast.ColumnPosition, 0, len(addingColumnNames)) offsets := make([]int, 0, len(addingColumnNames)) + ifNotExists := make([]bool, 0, len(addingColumnNames)) newColumnsCount := 0 // Check the columns one by one. 
for _, spec := range specs { @@ -2383,6 +2384,7 @@ func (d *ddl) AddColumns(ctx sessionctx.Context, ti ast.Ident, specs []*ast.Alte columns = append(columns, col) positions = append(positions, spec.Position) offsets = append(offsets, 0) + ifNotExists = append(ifNotExists, spec.IfNotExists) newColumnsCount++ } } @@ -2399,7 +2401,7 @@ func (d *ddl) AddColumns(ctx sessionctx.Context, ti ast.Ident, specs []*ast.Alte SchemaName: schema.Name.L, Type: model.ActionAddColumns, BinlogInfo: &model.HistoryInfo{}, - Args: []interface{}{columns, positions, offsets}, + Args: []interface{}{columns, positions, offsets, ifNotExists}, } err = d.doDDLJob(ctx, job) @@ -2648,7 +2650,8 @@ func (d *ddl) DropColumns(ctx sessionctx.Context, ti ast.Ident, specs []*ast.Alt } } - var colNames []model.CIStr + ifExists := make([]bool, 0, len(specs)) + colNames := make([]model.CIStr, 0, len(specs)) for _, spec := range specs { if spec.IfExists && dupColumnNames[spec.OldColumnName.Name.L] { err = ErrCantDropFieldOrKey.GenWithStack("column %s doesn't exist", spec.OldColumnName.Name.L) @@ -2664,6 +2667,7 @@ func (d *ddl) DropColumns(ctx sessionctx.Context, ti ast.Ident, specs []*ast.Alt continue } colNames = append(colNames, spec.OldColumnName.Name) + ifExists = append(ifExists, spec.IfExists) } if len(colNames) == 0 { return nil @@ -2679,7 +2683,7 @@ func (d *ddl) DropColumns(ctx sessionctx.Context, ti ast.Ident, specs []*ast.Alt SchemaName: schema.Name.L, Type: model.ActionDropColumns, BinlogInfo: &model.HistoryInfo{}, - Args: []interface{}{colNames}, + Args: []interface{}{colNames, ifExists}, } err = d.doDDLJob(ctx, job) diff --git a/ddl/ddl_worker_test.go b/ddl/ddl_worker_test.go index 721314412253f..a0367382a0e16 100644 --- a/ddl/ddl_worker_test.go +++ b/ddl/ddl_worker_test.go @@ -319,7 +319,7 @@ func (s *testDDLSuite) TestColumnError(c *C) { doDDLJobErr(c, -1, tblInfo.ID, model.ActionDropColumns, []interface{}{col, pos, 0}, ctx, d) doDDLJobErr(c, dbInfo.ID, -1, model.ActionDropColumns, []interface{}{col, pos, 0}, ctx, d) doDDLJobErr(c, dbInfo.ID, tblInfo.ID, model.ActionDropColumns, []interface{}{0}, ctx, d) - doDDLJobErr(c, dbInfo.ID, tblInfo.ID, model.ActionDropColumns, []interface{}{[]model.CIStr{model.NewCIStr("c5"), model.NewCIStr("c6")}}, ctx, d) + doDDLJobErr(c, dbInfo.ID, tblInfo.ID, model.ActionDropColumns, []interface{}{[]model.CIStr{model.NewCIStr("c5"), model.NewCIStr("c6")}, make([]bool, 2)}, ctx, d) } func testCheckOwner(c *C, d *ddl, expectedVal bool) { @@ -894,8 +894,9 @@ func (s *testDDLSuite) TestCancelJob(c *C) { for i := range positions { positions[i] = &ast.ColumnPosition{Tp: ast.ColumnPositionNone} } + ifNotExists := make([]bool, len(cols)) - addColumnArgs = []interface{}{cols, positions, offsets} + addColumnArgs = []interface{}{cols, positions, offsets, ifNotExists} doDDLJobErrWithSchemaState(ctx, d, c, dbInfo.ID, tblInfo.ID, model.ActionAddColumns, addColumnArgs, &cancelState) c.Check(errors.ErrorStack(checkErr), Equals, "") s.checkAddColumns(c, d, dbInfo.ID, tblInfo.ID, addingColNames, false) diff --git a/ddl/rollingback.go b/ddl/rollingback.go index 63c6cfaa90994..2f9419ad59499 100644 --- a/ddl/rollingback.go +++ b/ddl/rollingback.go @@ -128,24 +128,25 @@ func rollingbackAddColumn(t *meta.Meta, job *model.Job) (ver int64, err error) { func rollingbackAddColumns(t *meta.Meta, job *model.Job) (ver int64, err error) { job.State = model.JobStateRollingback - tblInfo, columnInfos, _, _, err := checkAddAndCreateColumnInfos(t, job) + tblInfo, columnInfos, _, _, _, _, err := checkAddColumns(t, job) 
if err != nil { return ver, errors.Trace(err) } + if len(columnInfos) == 0 { + job.State = model.JobStateCancelled + return ver, errCancelledDDLJob + } colNames := make([]model.CIStr, len(columnInfos)) originalState := columnInfos[0].State for i, columnInfo := range columnInfos { - if columnInfo == nil { - job.State = model.JobStateCancelled - return ver, errCancelledDDLJob - } columnInfos[i].State = model.StateDeleteOnly colNames[i] = columnInfo.Name } + ifExists := make([]bool, len(columnInfos)) job.SchemaState = model.StateDeleteOnly - job.Args = []interface{}{colNames} + job.Args = []interface{}{colNames, ifExists} ver, err = updateVersionAndTableInfo(t, job, tblInfo, originalState != columnInfos[0].State) if err != nil { return ver, errors.Trace(err) From a2852000b0ca68459bffaaa2554c589ad983f45a Mon Sep 17 00:00:00 2001 From: gauss Date: Tue, 7 Apr 2020 12:21:32 +0800 Subject: [PATCH 34/36] fix conflict --- ddl/db_change_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ddl/db_change_test.go b/ddl/db_change_test.go index 28c4a40154348..98c911b69ade4 100644 --- a/ddl/db_change_test.go +++ b/ddl/db_change_test.go @@ -901,7 +901,7 @@ func (s *testStateChangeSuite) TestParallelDropColumns(c *C) { sql := "ALTER TABLE t drop COLUMN b, drop COLUMN c;" f := func(c *C, err1, err2 error) { c.Assert(err1, IsNil) - c.Assert(err2.Error(), Equals, "[ddl:1091]column b doesn't exist") + c.Assert(err2.Error(), Equals, "[ddl:1091]current error msg: Cancelled DDL job, original error msg: column b doesn't exist") } s.testControlParallelExecSQL(c, sql, sql, f) } From 165de08fa25beaa478251ca1d1c7ef07cbcddb32 Mon Sep 17 00:00:00 2001 From: gauss1314 Date: Wed, 8 Apr 2020 11:13:12 +0800 Subject: [PATCH 35/36] Update ddl/column.go Co-Authored-By: Lynn --- ddl/column.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ddl/column.go b/ddl/column.go index 889ba8cee84ce..f31d99a9bcb26 100644 --- a/ddl/column.go +++ b/ddl/column.go @@ -261,7 +261,7 @@ func checkAddColumns(t *meta.Meta, job *model.Job) (*model.TableInfo, []*model.C // We already have a column with the same column name. if ifNotExists[i] { // TODO: Should return a warning. - logutil.BgLogger().Warn(fmt.Sprintf("Duplicate column name %s", col.Name)) + logutil.BgLogger().Warn("[ddl] check add columns, duplicate column", zap.Stringer("col", col.Name)) continue } job.State = model.JobStateCancelled From 72b8867dd78563f3ceb3dcd45f063ca6ebe613a1 Mon Sep 17 00:00:00 2001 From: gauss Date: Wed, 8 Apr 2020 11:23:20 +0800 Subject: [PATCH 36/36] update comments --- ddl/column.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ddl/column.go b/ddl/column.go index f31d99a9bcb26..c97e3fa1a0dc9 100644 --- a/ddl/column.go +++ b/ddl/column.go @@ -316,7 +316,6 @@ func onAddColumns(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, err error } logutil.BgLogger().Info("[ddl] run add columns job", zap.String("job", job.String()), zap.Reflect("columnInfo", *columnInfo), zap.Int("offset", offset)) positions[i] = pos - // Set offset arg to job. offsets[i] = offset if err = checkAddColumnTooManyColumns(len(tblInfo.Columns)); err != nil { job.State = model.JobStateCancelled @@ -324,6 +323,7 @@ func onAddColumns(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, err error } columnInfos = append(columnInfos, columnInfo) } + // Set arg to job. job.Args = []interface{}{columnInfos, positions, offsets, ifNotExists} }
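
Note on the IF [NOT] EXISTS handling introduced in patch 33 and its follow-ups: the ActionAddColumns / ActionDropColumns job args carry the requested columns and their IF NOT EXISTS / IF EXISTS flags as parallel slices, and checkAddColumns / checkDropColumns filter out columns that already exist (for ADD) or are already gone (for DROP) when the flag is set, rewriting the job args so later schema states only see the surviving columns; when nothing survives, the job is cancelled, and the ddl.go change lets doDDLJob treat such a cancelled multi-column job as success. The snippet below is a minimal, standalone sketch of that filtering idea only; it deliberately avoids TiDB's real types (model.ColumnInfo, meta.Meta, the job args encoding), and the names columnSpec and filterAddColumns are illustrative, not part of the patch.

package main

import (
	"fmt"
	"strings"
)

// columnSpec stands in for the parallel (column, ifNotExists) entries
// that the real ActionAddColumns job args carry.
type columnSpec struct {
	name        string
	ifNotExists bool
}

// filterAddColumns mimics the filtering step: a requested column that
// already exists is silently skipped when IF NOT EXISTS was given and
// rejected otherwise; only the surviving specs stay in the job args.
func filterAddColumns(existing []string, specs []columnSpec) ([]columnSpec, error) {
	present := make(map[string]bool, len(existing))
	for _, name := range existing {
		present[strings.ToLower(name)] = true
	}
	kept := make([]columnSpec, 0, len(specs))
	for _, spec := range specs {
		if present[strings.ToLower(spec.name)] {
			if spec.ifNotExists {
				// The real code logs a warning and continues, so later
				// schema states never see this column at all.
				continue
			}
			return nil, fmt.Errorf("duplicate column name '%s'", spec.name)
		}
		kept = append(kept, spec)
	}
	return kept, nil
}

func main() {
	existing := []string{"a", "b"}
	specs := []columnSpec{
		{name: "b", ifNotExists: true},   // already present: skipped
		{name: "c11", ifNotExists: true}, // kept
		{name: "d11"},                    // kept
	}
	kept, err := filterAddColumns(existing, specs)
	fmt.Println(kept, err)
	// If kept came back empty, the patched DDL worker would cancel the
	// job (JobStateCancelled) instead of running an empty column change.
}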