Commit 3b281c3

Merge branch 'master' into autoid-test

tiancaiamao committed Nov 4, 2022
2 parents 3546191 + 2f03a8d

Showing 89 changed files with 1,308 additions and 494 deletions.
3 changes: 3 additions & 0 deletions .github/workflows/integration-test-br-compatibility.yml
@@ -22,6 +22,9 @@ concurrency:
   group: ${{ github.ref }}-${{ github.workflow }}
   cancel-in-progress: true
 
+permissions:
+  contents: read # to fetch code (actions/checkout)
+
 jobs:
   check:
     runs-on: ubuntu-latest
3 changes: 3 additions & 0 deletions .github/workflows/integration-test-compile-br.yml
@@ -36,6 +36,9 @@ concurrency:
   group: ${{ github.ref }}-${{ github.workflow }}
   cancel-in-progress: true
 
+permissions:
+  contents: read # to fetch code (actions/checkout)
+
 jobs:
   compile-windows:
     if: github.event_name == 'push' || github.event_name == 'pull_request' && github.event.label.name == 'action/run-br-cross-platform-build'
3 changes: 3 additions & 0 deletions .github/workflows/integration-test-dumpling.yml
@@ -40,6 +40,9 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: true
 
+permissions:
+  contents: read # to fetch code (actions/checkout)
+
 jobs:
   integration-test:
     strategy:
7 changes: 7 additions & 0 deletions .github/workflows/misc.yml
@@ -12,8 +12,15 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: true
 
+permissions:
+  contents: read # to fetch code (actions/checkout)
+
 jobs:
   check:
+    permissions:
+      contents: read # to fetch code (actions/checkout)
+      pull-requests: write # to comment on pull-requests
+
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
1 change: 1 addition & 0 deletions cmd/explaintest/main.go
@@ -724,6 +724,7 @@ func main() {
 		"set @@tidb_window_concurrency=4",
 		"set @@tidb_projection_concurrency=4",
 		"set @@tidb_distsql_scan_concurrency=15",
+		"set @@tidb_enable_clustered_index='int_only';",
 		"set @@global.tidb_enable_clustered_index=0;",
 		"set @@global.tidb_mem_quota_query=34359738368",
 		"set @@tidb_mem_quota_query=34359738368",
6 changes: 6 additions & 0 deletions config/config.go
@@ -278,6 +278,10 @@ type Config struct {
 	Plugin Plugin `toml:"plugin" json:"plugin"`
 	MaxServerConnections uint32 `toml:"max-server-connections" json:"max-server-connections"`
 	RunDDL bool `toml:"run-ddl" json:"run-ddl"`
+	// TiDBMaxReuseChunk indicates max cached chunk num
+	TiDBMaxReuseChunk uint32 `toml:"tidb-max-reuse-chunk" json:"tidb-max-reuse-chunk"`
+	// TiDBMaxReuseColumn indicates max cached column num
+	TiDBMaxReuseColumn uint32 `toml:"tidb-max-reuse-column" json:"tidb-max-reuse-column"`
 }
 
 // UpdateTempStoragePath is to update the `TempStoragePath` if port/statusPort was changed
@@ -975,6 +979,8 @@ var defaultConf = Config{
 	NewCollationsEnabledOnFirstBootstrap: true,
 	EnableGlobalKill: true,
 	TrxSummary: DefaultTrxSummary(),
+	TiDBMaxReuseChunk: 64,
+	TiDBMaxReuseColumn: 256,
 }
 
 var (
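The two fields above are plain top-level config entries with defaults of 64 and 256, so after loading they can be read like any other option. A minimal sketch, assuming the existing config.GetGlobalConfig() accessor; the program is illustrative only and not part of this commit:

package main

import (
	"fmt"

	"github.com/pingcap/tidb/config"
)

func main() {
	// Illustrative read of the chunk/column reuse limits added in this commit.
	// Without a config file, the values fall back to the new defaults (64 and 256).
	cfg := config.GetGlobalConfig()
	fmt.Println("tidb-max-reuse-chunk:", cfg.TiDBMaxReuseChunk)
	fmt.Println("tidb-max-reuse-column:", cfg.TiDBMaxReuseColumn)
}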
4 changes: 4 additions & 0 deletions config/config_test.go
@@ -730,6 +730,8 @@ enable-enum-length-limit = false
 stores-refresh-interval = 30
 enable-forwarding = true
 enable-global-kill = true
+tidb-max-reuse-chunk = 10
+tidb-max-reuse-column = 20
 [performance]
 txn-total-size-limit=2000
 tcp-no-delay = false
@@ -798,6 +800,8 @@ max_connections = 200
 	require.True(t, conf.RepairMode)
 	require.Equal(t, uint64(16), conf.TiKVClient.ResolveLockLiteThreshold)
 	require.Equal(t, uint32(200), conf.Instance.MaxConnections)
+	require.Equal(t, uint32(10), conf.TiDBMaxReuseChunk)
+	require.Equal(t, uint32(20), conf.TiDBMaxReuseColumn)
 	require.Equal(t, []string{"tiflash"}, conf.IsolationRead.Engines)
 	require.Equal(t, 3080, conf.MaxIndexLength)
 	require.Equal(t, 70, conf.IndexLimit)
82 changes: 80 additions & 2 deletions ddl/db_partition_test.go
@@ -4068,7 +4068,7 @@ func TestCreateAndAlterIntervalPartition(t *testing.T) {
 
 	tk.MustQuery("select count(*) from ipt").Check(testkit.Rows("27"))
 
-	tk.MustExec("create table idpt (id date primary key, val varchar(255), key (val)) partition by range COLUMNS (id) INTERVAL (1 week) FIRST PARTITION LESS THAN ('2022-02-01') LAST PARTITION LESS THAN ('2022-03-29') NULL PARTITION MAXVALUE PARTITION")
+	tk.MustExec("create table idpt (id date primary key nonclustered, val varchar(255), key (val)) partition by range COLUMNS (id) INTERVAL (1 week) FIRST PARTITION LESS THAN ('2022-02-01') LAST PARTITION LESS THAN ('2022-03-29') NULL PARTITION MAXVALUE PARTITION")
 	tk.MustQuery("SHOW CREATE TABLE idpt").Check(testkit.Rows(
 		"idpt CREATE TABLE `idpt` (\n" +
 		" `id` date NOT NULL,\n" +
@@ -4094,7 +4094,7 @@ func TestCreateAndAlterIntervalPartition(t *testing.T) {
 	// if using a month with 31 days.
 	// But managing partitions with the day-part of 29, 30 or 31 will be troublesome, since once the FIRST is not 31
 	// both the ALTER TABLE t FIRST PARTITION and MERGE FIRST PARTITION will have issues
-	tk.MustExec("create table t (id date primary key, val varchar(255), key (val)) partition by range COLUMNS (id) INTERVAL (1 MONTH) FIRST PARTITION LESS THAN ('2022-01-31') LAST PARTITION LESS THAN ('2022-05-31')")
+	tk.MustExec("create table t (id date primary key nonclustered, val varchar(255), key (val)) partition by range COLUMNS (id) INTERVAL (1 MONTH) FIRST PARTITION LESS THAN ('2022-01-31') LAST PARTITION LESS THAN ('2022-05-31')")
 	tk.MustQuery("show create table t").Check(testkit.Rows(
 		"t CREATE TABLE `t` (\n" +
 		" `id` date NOT NULL,\n" +
@@ -4662,4 +4662,82 @@ func TestAlterModifyColumnOnPartitionedTable(t *testing.T) {
 		"34 34💥",
 		"46 46",
 		"57 57"))
+	tk.MustGetErrCode(`alter table t modify a varchar(20)`, errno.ErrUnsupportedDDLOperation)
 }
+
+func TestAlterModifyColumnOnPartitionedTableFail(t *testing.T) {
+	store := testkit.CreateMockStore(t)
+	tk := testkit.NewTestKit(t, store)
+	schemaName := "modColPartFail"
+	tk.MustExec("create database " + schemaName)
+	tk.MustExec("use " + schemaName)
+	tk.MustExec(`create table t (a int unsigned, b varchar(255), key (b)) partition by range (a) (partition p0 values less than (10), partition p1 values less than (20), partition pMax values less than (MAXVALUE))`)
+	tk.MustExec(`insert into t values (7, "07"), (8, "08"),(23,"23"),(34,"34💥"),(46,"46"),(57,"57")`)
+	tk.MustGetErrCode(`alter table t modify a varchar(255)`, errno.ErrUnsupportedDDLOperation)
+	tk.MustGetErrCode(`alter table t modify a float`, mysql.ErrFieldTypeNotAllowedAsPartitionField)
+	tk.MustExec(`drop table t`)
+	tk.MustExec(`create table t (b int unsigned, a varchar(255), key (b)) partition by range columns (a) (partition p0 values less than (""), partition p1 values less than ("11111"), partition pMax values less than (MAXVALUE))`)
+	tk.MustExec(`insert into t values (7, "07"), (8, "08"),(23,"23"),(34,"34 💥💥Longer than 11111"),(46,"46"),(57,"57")`)
+	tk.MustExec(`alter table t modify a varchar(50)`)
+	tk.MustGetErrCode(`alter table t modify a float`, mysql.ErrFieldTypeNotAllowedAsPartitionField)
+	tk.MustGetErrCode(`alter table t modify a int`, errno.ErrUnsupportedDDLOperation)
+	tk.MustContainErrMsg(`alter table t modify a varchar(4)`, "[ddl:8200]New column does not match partition definitions: [ddl:1654]Partition column values of incorrect type")
+	tk.MustGetErrCode(`alter table t modify a varchar(5)`, errno.WarnDataTruncated)
+	tk.MustExec(`SET SQL_MODE = ''`)
+	tk.MustExec(`alter table t modify a varchar(5)`)
+	// fix https://github.com/pingcap/tidb/issues/38669 and update this
+	tk.MustQuery(`show warnings`).Check(testkit.Rows())
+	tk.MustExec(`SET SQL_MODE = DEFAULT`)
+	tk.MustQuery(`select * from t`).Sort().Check(testkit.Rows(""+
+		"23 23",
+		"34 34 💥💥",
+		"46 46",
+		"57 57",
+		"7 07",
+		"8 08"))
+	tStr := "" +
+		"CREATE TABLE `t` (\n" +
+		" `b` int(10) unsigned DEFAULT NULL,\n" +
+		" `a` varchar(5) DEFAULT NULL,\n" +
+		" KEY `b` (`b`)\n" +
+		") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin\n" +
+		"PARTITION BY RANGE COLUMNS(`a`)\n" +
+		"(PARTITION `p0` VALUES LESS THAN (''),\n" +
+		" PARTITION `p1` VALUES LESS THAN ('11111'),\n" +
+		" PARTITION `pMax` VALUES LESS THAN (MAXVALUE))"
+	tk.MustQuery(`show create table t`).Check(testkit.Rows("t " + tStr))
+	tk.MustExec(`drop table t`)
+	tk.MustExec(tStr)
+	tk.MustExec(`drop table t`)
+	tk.MustExec("create table t (a int, b varchar(255), key (b)) partition by range (a) (partition `p-300` values less than (-300), partition p0 values less than (0), partition p300 values less than (300))")
+	tk.MustExec(`insert into t values (-400, "-400"), (-100, "-100"), (0, "0"), (100, "100"), (290, "290")`)
+	tk.MustContainErrMsg(`alter table t modify a int unsigned`, "[ddl:8200]Unsupported modify column, decreasing length of int may result in truncation and change of partition")
+	tk.MustContainErrMsg(`alter table t modify a tinyint`, "[ddl:8200]Unsupported modify column, decreasing length of int may result in truncation and change of partition")
+	tk.MustExec(`set sql_mode = ''`)
+	tk.MustContainErrMsg(`alter table t modify a tinyint`, "[ddl:8200]Unsupported modify column, decreasing length of int may result in truncation and change of partition")
+	tk.MustQuery("select * from t partition (`p-300`)").Sort().Check(testkit.Rows("-400 -400"))
+	tk.MustExec(`set sql_mode = default`)
+	tk.MustContainErrMsg(`alter table t modify a smallint`, "[ddl:8200]Unsupported modify column, decreasing length of int may result in truncation and change of partition")
+	tk.MustExec(`alter table t modify a bigint`)
+	tk.MustExec(`drop table t`)
+	tk.MustExec("create table t (a int, b varchar(255), key (b)) partition by range columns (a) (partition `p-300` values less than (-300), partition p0 values less than (0), partition p300 values less than (300))")
+	tk.MustExec(`insert into t values (-400, "-400"), (-100, "-100"), (0, "0"), (100, "100"), (290, "290")`)
+	tk.MustContainErrMsg(`alter table t modify a int unsigned`, "[ddl:8200]Unsupported modify column: can't change the partitioning column, since it would require reorganize all partitions")
+	tk.MustContainErrMsg(`alter table t modify a tinyint`, "[ddl:8200]New column does not match partition definitions: [ddl:1654]Partition column values of incorrect type")
+	tk.MustExec(`set sql_mode = ''`)
+	tk.MustContainErrMsg(`alter table t modify a tinyint`, "[ddl:8200]New column does not match partition definitions: [ddl:1654]Partition column values of incorrect type")
+	tk.MustQuery("select * from t partition (`p-300`)").Sort().Check(testkit.Rows("-400 -400"))
+	tk.MustExec(`set sql_mode = default`)
+	// OK to decrease, since with RANGE COLUMNS, it will check the partition definition values against the new type
+	tk.MustExec(`alter table t modify a smallint`)
+	tk.MustExec(`alter table t modify a bigint`)
+
+	tk.MustExec(`drop table t`)
+
+	tk.MustExec(`create table t (a int, b varchar(255), key (b)) partition by list columns (b) (partition p1 values in ("1", "ab", "12345"), partition p2 values in ("2", "abc", "999999"))`)
+	tk.MustExec(`insert into t values (1, "1"), (2, "2"), (999999, "999999")`)
+	tk.MustContainErrMsg(`alter table t modify column b varchar(5)`, "[ddl:8200]New column does not match partition definitions: [ddl:1654]Partition column values of incorrect type")
+	tk.MustExec(`set sql_mode = ''`)
+	tk.MustContainErrMsg(`alter table t modify column b varchar(5)`, "[ddl:8200]New column does not match partition definitions: [ddl:1654]Partition column values of incorrect type")
+	tk.MustExec(`set sql_mode = default`)
+}
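The comment in the test above ("OK to decrease, since with RANGE COLUMNS, it will check the partition definition values against the new type") captures the key contrast: plain RANGE (expression) partitioning rejects any narrowing of the integer column, while RANGE COLUMNS re-validates the boundary values against the new type and only rejects narrowings that no longer fit. A condensed, hedged sketch in the same testkit style (table names and setup are illustrative; the real coverage is the test above):

package example_test

import (
	"testing"

	"github.com/pingcap/tidb/testkit"
)

// TestPartitionColumnNarrowingSketch condenses the contrast exercised above.
// Illustrative only; the exhaustive checks live in TestAlterModifyColumnOnPartitionedTableFail.
func TestPartitionColumnNarrowingSketch(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")

	// RANGE (expression): narrowing the INT is rejected outright.
	tk.MustExec("create table r (a int) partition by range (a) (partition p0 values less than (300))")
	tk.MustContainErrMsg("alter table r modify a tinyint",
		"decreasing length of int may result in truncation and change of partition")

	// RANGE COLUMNS: the boundary value 300 is re-checked against the new type,
	// so SMALLINT (which can hold 300) is accepted while TINYINT is not.
	tk.MustExec("create table rc (a int) partition by range columns (a) (partition p0 values less than (300))")
	tk.MustExec("alter table rc modify a smallint")
	tk.MustContainErrMsg("alter table rc modify a tinyint",
		"New column does not match partition definitions")
}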
112 changes: 101 additions & 11 deletions ddl/ddl_api.go
@@ -38,6 +38,7 @@ import (
 	"github.com/pingcap/tidb/infoschema"
 	"github.com/pingcap/tidb/kv"
 	"github.com/pingcap/tidb/meta/autoid"
+	"github.com/pingcap/tidb/parser"
 	"github.com/pingcap/tidb/parser/ast"
 	"github.com/pingcap/tidb/parser/charset"
 	"github.com/pingcap/tidb/parser/format"
@@ -2831,23 +2832,30 @@ func checkPartitionByList(ctx sessionctx.Context, tbInfo *model.TableInfo) error
 	return checkListPartitionValue(ctx, tbInfo)
 }
 
+func isColTypeAllowedAsPartitioningCol(fieldType types.FieldType) bool {
+	// The permitted data types are shown in the following list:
+	// All integer types
+	// DATE and DATETIME
+	// CHAR, VARCHAR, BINARY, and VARBINARY
+	// See https://dev.mysql.com/doc/mysql-partitioning-excerpt/5.7/en/partitioning-columns.html
+	// Note that also TIME is allowed in MySQL. Also see https://bugs.mysql.com/bug.php?id=84362
+	switch fieldType.GetType() {
+	case mysql.TypeTiny, mysql.TypeShort, mysql.TypeInt24, mysql.TypeLong, mysql.TypeLonglong:
+	case mysql.TypeDate, mysql.TypeDatetime, mysql.TypeDuration:
+	case mysql.TypeVarchar, mysql.TypeString:
+	default:
+		return false
+	}
+	return true
+}
+
 func checkColumnsPartitionType(tbInfo *model.TableInfo) error {
 	for _, col := range tbInfo.Partition.Columns {
 		colInfo := tbInfo.FindPublicColumnByName(col.L)
 		if colInfo == nil {
 			return errors.Trace(dbterror.ErrFieldNotFoundPart)
 		}
-		// The permitted data types are shown in the following list:
-		// All integer types
-		// DATE and DATETIME
-		// CHAR, VARCHAR, BINARY, and VARBINARY
-		// See https://dev.mysql.com/doc/mysql-partitioning-excerpt/5.7/en/partitioning-columns.html
-		// Note that also TIME is allowed in MySQL. Also see https://bugs.mysql.com/bug.php?id=84362
-		switch colInfo.FieldType.GetType() {
-		case mysql.TypeTiny, mysql.TypeShort, mysql.TypeInt24, mysql.TypeLong, mysql.TypeLonglong:
-		case mysql.TypeDate, mysql.TypeDatetime, mysql.TypeDuration:
-		case mysql.TypeVarchar, mysql.TypeString:
-		default:
+		if !isColTypeAllowedAsPartitioningCol(colInfo.FieldType) {
 			return dbterror.ErrNotAllowedTypeInPartition.GenWithStackByArgs(col.O)
 		}
 	}
@@ -4604,6 +4612,88 @@ func GetModifiableColumnJob(
 		}
 	}
 
+	// Check that the column change does not affect the partitioning column
+	// It must keep the same type, int [unsigned], [var]char, date[time]
+	if t.Meta().Partition != nil {
+		pt, ok := t.(table.PartitionedTable)
+		if !ok {
+			// Should never happen!
+			return nil, dbterror.ErrNotAllowedTypeInPartition.GenWithStackByArgs(newCol.Name.O)
+		}
+		isPartitioningColumn := false
+		for _, name := range pt.GetPartitionColumnNames() {
+			if strings.EqualFold(name.L, col.Name.L) {
+				isPartitioningColumn = true
+			}
+		}
+		if isPartitioningColumn {
+			if !isColTypeAllowedAsPartitioningCol(newCol.FieldType) {
+				return nil, dbterror.ErrNotAllowedTypeInPartition.GenWithStackByArgs(newCol.Name.O)
+			}
+			pi := pt.Meta().GetPartitionInfo()
+			if len(pi.Columns) == 0 {
+				// non COLUMNS partitioning, only checks INTs, not their actual range
+				// There are many edge cases, like when truncating SQL Mode is allowed
+				// which will change the partitioning expression value resulting in a
+				// different partition. Better be safe and not allow decreasing of length.
+				// TODO: Should we allow it in strict mode? Wait for a use case / request.
+				if newCol.FieldType.GetFlen() < col.FieldType.GetFlen() {
+					return nil, dbterror.ErrUnsupportedModifyCollation.GenWithStack("Unsupported modify column, decreasing length of int may result in truncation and change of partition")
+				}
+			}
+			// Basically only allow changes of the length/decimals for the column
+			// Note that enum is not allowed, so elems are not checked
+			// TODO: support partition by ENUM
+			if newCol.FieldType.EvalType() != col.FieldType.EvalType() ||
+				newCol.FieldType.GetFlag() != col.FieldType.GetFlag() ||
+				newCol.FieldType.GetCollate() != col.FieldType.GetCollate() ||
+				newCol.FieldType.GetCharset() != col.FieldType.GetCharset() {
+				return nil, dbterror.ErrUnsupportedModifyColumn.GenWithStackByArgs("can't change the partitioning column, since it would require reorganize all partitions")
+			}
+			// Generate a new PartitionInfo and validate it together with the new column definition
+			// Checks if all partition definition values are compatible.
+			// Similar to what buildRangePartitionDefinitions would do in terms of checks.
+
+			tblInfo := pt.Meta()
+			newTblInfo := *tblInfo
+			// Replace col with newCol and see if we can generate a new SHOW CREATE TABLE
+			// and reparse it and build new partition definitions (which will do additional
+			// checks columns vs partition definition values
+			newCols := make([]*model.ColumnInfo, 0, len(newTblInfo.Columns))
+			for _, c := range newTblInfo.Columns {
+				if c.ID == col.ID {
+					newCols = append(newCols, newCol.ColumnInfo)
+					continue
+				}
+				newCols = append(newCols, c)
+			}
+			newTblInfo.Columns = newCols
+
+			var buf bytes.Buffer
+			AppendPartitionInfo(tblInfo.GetPartitionInfo(), &buf, mysql.ModeNone)
+			// The parser supports ALTER TABLE ... PARTITION BY ... even if the ddl code does not yet :)
+			// Ignoring warnings
+			stmt, _, err := parser.New().ParseSQL("ALTER TABLE t " + buf.String())
+			if err != nil {
+				// Should never happen!
+				return nil, dbterror.ErrUnsupportedModifyColumn.GenWithStack("cannot parse generated PartitionInfo")
+			}
+			at, ok := stmt[0].(*ast.AlterTableStmt)
+			if !ok || len(at.Specs) != 1 || at.Specs[0].Partition == nil {
+				return nil, dbterror.ErrUnsupportedModifyColumn.GenWithStack("cannot parse generated PartitionInfo")
+			}
+			pAst := at.Specs[0].Partition
+			sv := sctx.GetSessionVars().StmtCtx
+			oldTruncAsWarn, oldIgnoreTrunc := sv.TruncateAsWarning, sv.IgnoreTruncate
+			sv.TruncateAsWarning, sv.IgnoreTruncate = false, false
+			_, err = buildPartitionDefinitionsInfo(sctx, pAst.Definitions, &newTblInfo)
+			sv.TruncateAsWarning, sv.IgnoreTruncate = oldTruncAsWarn, oldIgnoreTrunc
+			if err != nil {
+				return nil, dbterror.ErrUnsupportedModifyColumn.GenWithStack("New column does not match partition definitions: %s", err.Error())
+			}
+		}
+	}
+
 	// We don't support modifying column from not_auto_increment to auto_increment.
 	if !mysql.HasAutoIncrementFlag(col.GetFlag()) && mysql.HasAutoIncrementFlag(newCol.GetFlag()) {
 		return nil, dbterror.ErrUnsupportedModifyColumn.GenWithStackByArgs("can't set auto_increment")
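The guard added above reduces to a small allow-list over field types before the per-partition value checks run. A standalone, hedged sketch of that classification (the helper below mirrors isColTypeAllowedAsPartitioningCol but is illustrative, not the production code path):

package main

import (
	"fmt"

	"github.com/pingcap/tidb/parser/mysql"
	"github.com/pingcap/tidb/parser/types"
)

// allowedAsPartitioningCol mirrors the allow-list above: integer types,
// DATE/DATETIME/TIME, and CHAR/VARCHAR-style types pass; everything else is rejected.
func allowedAsPartitioningCol(ft *types.FieldType) bool {
	switch ft.GetType() {
	case mysql.TypeTiny, mysql.TypeShort, mysql.TypeInt24, mysql.TypeLong, mysql.TypeLonglong,
		mysql.TypeDate, mysql.TypeDatetime, mysql.TypeDuration,
		mysql.TypeVarchar, mysql.TypeString:
		return true
	}
	return false
}

func main() {
	fmt.Println(allowedAsPartitioningCol(types.NewFieldType(mysql.TypeLong)))    // true: INT stays a valid partitioning column
	fmt.Println(allowedAsPartitioningCol(types.NewFieldType(mysql.TypeFloat)))   // false: MODIFY COLUMN to FLOAT is rejected
	fmt.Println(allowedAsPartitioningCol(types.NewFieldType(mysql.TypeVarchar))) // true: VARCHAR changes then get checked against partition values
}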
4 changes: 2 additions & 2 deletions ddl/index_modify_test.go
@@ -816,7 +816,7 @@ func TestDropIndexes(t *testing.T) {
 	store := testkit.CreateMockStoreWithSchemaLease(t, indexModifyLease, mockstore.WithDDLChecker())
 
 	// drop multiple indexes
-	createSQL := "create table test_drop_indexes (id int, c1 int, c2 int, primary key(id), key i1(c1), key i2(c2));"
+	createSQL := "create table test_drop_indexes (id int, c1 int, c2 int, primary key(id) nonclustered, key i1(c1), key i2(c2));"
 	dropIdxSQL := "alter table test_drop_indexes drop index i1, drop index i2;"
 	idxNames := []string{"i1", "i2"}
 	testDropIndexes(t, store, createSQL, dropIdxSQL, idxNames)
@@ -826,7 +826,7 @@ func TestDropIndexes(t *testing.T) {
 	idxNames = []string{"primary", "i1"}
 	testDropIndexes(t, store, createSQL, dropIdxSQL, idxNames)
 
-	createSQL = "create table test_drop_indexes (uuid varchar(32), c1 int, c2 int, primary key(uuid), unique key i1(c1), key i2(c2));"
+	createSQL = "create table test_drop_indexes (uuid varchar(32), c1 int, c2 int, primary key(uuid) nonclustered, unique key i1(c1), key i2(c2));"
 	dropIdxSQL = "alter table test_drop_indexes drop primary key, drop index i1, drop index i2;"
 	idxNames = []string{"primary", "i1", "i2"}
 	testDropIndexes(t, store, createSQL, dropIdxSQL, idxNames)