diff --git a/br/pkg/lightning/errormanager/errormanager.go b/br/pkg/lightning/errormanager/errormanager.go index 9c841bb810beb..07070bdbef915 100644 --- a/br/pkg/lightning/errormanager/errormanager.go +++ b/br/pkg/lightning/errormanager/errormanager.go @@ -121,8 +121,13 @@ const ( ` insertIntoConflictErrorData = ` +<<<<<<< HEAD:br/pkg/lightning/errormanager/errormanager.go INSERT INTO %s.` + ConflictErrorTableName + ` (task_id, table_name, index_name, key_data, row_data, raw_key, raw_value, raw_handle, raw_row) +======= + INSERT IGNORE INTO %s.` + ConflictErrorTableName + ` + (task_id, table_name, index_name, key_data, row_data, raw_key, raw_value, raw_handle, raw_row, kv_type) +>>>>>>> 91beef4bb14 (*: disable insert null to not-null column for single-row insertion in non-strict mode (#55477)):pkg/lightning/errormanager/errormanager.go VALUES ` diff --git a/br/pkg/lightning/errormanager/errormanager_test.go b/br/pkg/lightning/errormanager/errormanager_test.go index 00ba702e55026..657217177fb1a 100644 --- a/br/pkg/lightning/errormanager/errormanager_test.go +++ b/br/pkg/lightning/errormanager/errormanager_test.go @@ -482,6 +482,7 @@ func TestReplaceConflictKeys(t *testing.T) { WillReturnResult(sqlmock.NewResult(1, 1)) mockDB.ExpectExec("CREATE TABLE IF NOT EXISTS `lightning_task_info`\\.conflict_error_v1.*"). WillReturnResult(sqlmock.NewResult(2, 1)) +<<<<<<< HEAD:br/pkg/lightning/errormanager/errormanager_test.go mockDB.ExpectQuery("\\QSELECT raw_key, index_name, raw_value, raw_handle FROM `lightning_task_info`.conflict_error_v1 WHERE table_name = ? AND index_name <> 'PRIMARY' ORDER BY raw_key\\E"). WillReturnRows(sqlmock.NewRows([]string{"raw_key", "index_name", "raw_value", "raw_handle"}). AddRow(data1IndexKey, "uni_b", data1IndexValue, data1RowKey). @@ -492,6 +493,42 @@ func TestReplaceConflictKeys(t *testing.T) { WillReturnRows(sqlmock.NewRows([]string{"raw_key", "raw_value", "raw_handle"}). AddRow(data1RowKey, data1RowValue, data1RowKey). AddRow(data1RowKey, data3RowValue, data1RowKey)) +======= + mockDB.ExpectExec("CREATE OR REPLACE VIEW `lightning_task_info`\\.conflict_view.*"). + WillReturnResult(sqlmock.NewResult(3, 1)) + mockDB.ExpectQuery("\\QSELECT _tidb_rowid, raw_key, index_name, raw_value, raw_handle FROM `lightning_task_info`.conflict_error_v3 WHERE table_name = ? AND kv_type = 0 AND _tidb_rowid >= ? and _tidb_rowid < ? ORDER BY _tidb_rowid LIMIT ?\\E"). + WillReturnRows(sqlmock.NewRows([]string{"_tidb_rowid", "raw_key", "index_name", "raw_value", "raw_handle"}). + AddRow(1, data1IndexKey, "uni_b", data1IndexValue, data1RowKey). + AddRow(2, data1IndexKey, "uni_b", data2IndexValue, data2RowKey). + AddRow(3, data3IndexKey, "uni_b", data3IndexValue, data3RowKey). + AddRow(4, data3IndexKey, "uni_b", data4IndexValue, data4RowKey)) + mockDB.ExpectBegin() + mockDB.ExpectExec("INSERT IGNORE INTO `lightning_task_info`\\.conflict_error_v3.*"). + WithArgs(0, "test", nil, nil, data2RowKey, data2RowValue, 2, + 0, "test", nil, nil, data4RowKey, data4RowValue, 2). + WillReturnResult(driver.ResultNoRows) + mockDB.ExpectCommit() + for i := 0; i < 2; i++ { + mockDB.ExpectQuery("\\QSELECT _tidb_rowid, raw_key, index_name, raw_value, raw_handle FROM `lightning_task_info`.conflict_error_v3 WHERE table_name = ? AND kv_type = 0 AND _tidb_rowid >= ? and _tidb_rowid < ? ORDER BY _tidb_rowid LIMIT ?\\E"). 
+			WillReturnRows(sqlmock.NewRows([]string{"_tidb_rowid", "raw_key", "index_name", "raw_value", "raw_handle"}))
+	}
+	mockDB.ExpectQuery("\\QSELECT _tidb_rowid, raw_key, raw_value FROM `lightning_task_info`.conflict_error_v3 WHERE table_name = ? AND kv_type <> 0 AND _tidb_rowid >= ? and _tidb_rowid < ? ORDER BY _tidb_rowid LIMIT ?\\E").
+		WillReturnRows(sqlmock.NewRows([]string{"_tidb_rowid", "raw_key", "raw_value"}).
+			AddRow(1, data1RowKey, data1RowValue).
+			AddRow(2, data1RowKey, data3RowValue))
+	for i := 0; i < 2; i++ {
+		mockDB.ExpectQuery("\\QSELECT _tidb_rowid, raw_key, raw_value FROM `lightning_task_info`.conflict_error_v3 WHERE table_name = ? AND kv_type <> 0 AND _tidb_rowid >= ? and _tidb_rowid < ? ORDER BY _tidb_rowid LIMIT ?\\E").
+			WillReturnRows(sqlmock.NewRows([]string{"_tidb_rowid", "raw_key", "raw_value"}))
+	}
+	mockDB.ExpectBegin()
+	mockDB.ExpectExec("DELETE FROM `lightning_task_info`\\.conflict_error_v3.*").
+		WillReturnResult(sqlmock.NewResult(0, 2))
+	mockDB.ExpectCommit()
+	mockDB.ExpectBegin()
+	mockDB.ExpectExec("DELETE FROM `lightning_task_info`\\.conflict_error_v3.*").
+		WillReturnResult(sqlmock.NewResult(0, 0))
+	mockDB.ExpectCommit()
+>>>>>>> 91beef4bb14 (*: disable insert null to not-null column for single-row insertion in non-strict mode (#55477)):pkg/lightning/errormanager/errormanager_test.go

 	cfg := config.NewConfig()
 	cfg.TikvImporter.DuplicateResolution = config.DupeResAlgReplace
diff --git a/pkg/ddl/reorg.go b/pkg/ddl/reorg.go
index 1c8573d8e9d08..500e78e5af771 100644
--- a/pkg/ddl/reorg.go
+++ b/pkg/ddl/reorg.go
@@ -95,8 +95,169 @@ func newContext(store kv.Storage) sessionctx.Context {

 const defaultWaitReorgTimeout = 10 * time.Second

+<<<<<<< HEAD
 // ReorgWaitTimeout is the timeout that wait ddl in write reorganization stage.
 var ReorgWaitTimeout = 5 * time.Second
+=======
+	ctx := newReorgExprCtx()
+	evalCtx := ctx.GetStaticEvalCtx().Apply(
+		exprstatic.WithSQLMode(reorgMeta.SQLMode),
+		exprstatic.WithLocation(loc),
+		exprstatic.WithTypeFlags(reorgTypeFlagsWithSQLMode(reorgMeta.SQLMode)),
+		exprstatic.WithErrLevelMap(reorgErrLevelsWithSQLMode(reorgMeta.SQLMode)),
+		exprstatic.WithWarnHandler(warnHandler),
+	)
+	return ctx.Apply(exprstatic.WithEvalCtx(evalCtx)), nil
+}
+
+// reorgTableMutateContext implements table.MutateContext for reorganization.
+type reorgTableMutateContext struct {
+	exprCtx            exprctx.ExprContext
+	encodingConfig     tblctx.RowEncodingConfig
+	mutateBuffers      *tblctx.MutateBuffers
+	shardID            *variable.RowIDShardGenerator
+	reservedRowIDAlloc stmtctx.ReservedRowIDAlloc
+}
+
+// AlternativeAllocators implements table.MutateContext.AlternativeAllocators.
+func (*reorgTableMutateContext) AlternativeAllocators(*model.TableInfo) (autoid.Allocators, bool) {
+	// Alternative allocators are only used by temporary tables, and reorganization never
+	// touches temporary tables (they do not have any data in TiKV), so none are returned.
+	return autoid.Allocators{}, false
+}
+
+// GetExprCtx implements table.MutateContext.GetExprCtx.
+func (ctx *reorgTableMutateContext) GetExprCtx() exprctx.ExprContext {
+	return ctx.exprCtx
+}
+
+// ConnectionID implements table.MutateContext.ConnectionID.
+func (*reorgTableMutateContext) ConnectionID() uint64 {
+	return 0
+}
+
+// InRestrictedSQL implements table.MutateContext.InRestrictedSQL.
+func (*reorgTableMutateContext) InRestrictedSQL() bool {
+	return false
+}
+
+// TxnAssertionLevel implements table.MutateContext.TxnAssertionLevel.
+func (*reorgTableMutateContext) TxnAssertionLevel() variable.AssertionLevel {
+	// Because only `index.Create` and `index.Delete` are invoked in reorganization, and neither uses this method,
+	// we can just return `AssertionLevelOff`.
+	return variable.AssertionLevelOff
+}
+
+// EnableMutationChecker implements table.MutateContext.EnableMutationChecker.
+func (*reorgTableMutateContext) EnableMutationChecker() bool {
+	// Because only `index.Create` and `index.Delete` are invoked in reorganization, and neither uses this method,
+	// we can just return false.
+	return false
+}
+
+// GetRowEncodingConfig implements table.MutateContext.GetRowEncodingConfig.
+func (ctx *reorgTableMutateContext) GetRowEncodingConfig() tblctx.RowEncodingConfig {
+	return ctx.encodingConfig
+}
+
+// GetMutateBuffers implements table.MutateContext.GetMutateBuffers.
+func (ctx *reorgTableMutateContext) GetMutateBuffers() *tblctx.MutateBuffers {
+	return ctx.mutateBuffers
+}
+
+// GetRowIDShardGenerator implements table.MutateContext.GetRowIDShardGenerator.
+func (ctx *reorgTableMutateContext) GetRowIDShardGenerator() *variable.RowIDShardGenerator {
+	return ctx.shardID
+}
+
+// GetReservedRowIDAlloc implements table.MutateContext.GetReservedRowIDAlloc.
+func (ctx *reorgTableMutateContext) GetReservedRowIDAlloc() (*stmtctx.ReservedRowIDAlloc, bool) {
+	return &ctx.reservedRowIDAlloc, true
+}
+
+// GetStatisticsSupport implements table.MutateContext.GetStatisticsSupport.
+func (*reorgTableMutateContext) GetStatisticsSupport() (tblctx.StatisticsSupport, bool) {
+	// We can just return `(nil, false)` because:
+	// - Only `index.Create` and `index.Delete` are invoked in reorganization, and neither uses this method.
+	// - DDL reorg does not need to collect statistics in this way.
+	return nil, false
+}
+
+// GetCachedTableSupport implements table.MutateContext.GetCachedTableSupport.
+func (*reorgTableMutateContext) GetCachedTableSupport() (tblctx.CachedTableSupport, bool) {
+	// We can just return `(nil, false)` because:
+	// - Only `index.Create` and `index.Delete` are invoked in reorganization, and neither uses this method.
+	// - It is not allowed to execute DDL on a cached table.
+	return nil, false
+}
+
+// GetTemporaryTableSupport implements table.MutateContext.GetTemporaryTableSupport.
+func (*reorgTableMutateContext) GetTemporaryTableSupport() (tblctx.TemporaryTableSupport, bool) {
+	// We can just return `(nil, false)` because:
+	// - Only `index.Create` and `index.Delete` are invoked in reorganization, and neither uses this method.
+	// - Temporary tables do not have any data in TiKV.
+	return nil, false
+}
+
+// GetExchangePartitionDMLSupport implements table.MutateContext.GetExchangePartitionDMLSupport.
+func (*reorgTableMutateContext) GetExchangePartitionDMLSupport() (tblctx.ExchangePartitionDMLSupport, bool) {
+	// We can just return `(nil, false)` because:
+	// - Only `index.Create` and `index.Delete` are invoked in reorganization, and neither uses this method.
+	return nil, false
+}
+
+// newReorgTableMutateContext creates a new table.MutateContext for reorganization.
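+// The returned context is deliberately minimal: only the pieces that `index.Create`
+// and `index.Delete` actually touch (expression context, row-encoding config, mutate
+// buffers, shard generator, reserved row-ID allocator) are populated; every optional
+// capability above reports "unsupported".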
+func newReorgTableMutateContext(exprCtx exprctx.ExprContext) table.MutateContext { + rowEncoder := &rowcodec.Encoder{ + Enable: variable.GetDDLReorgRowFormat() != variable.DefTiDBRowFormatV1, + } + + encodingConfig := tblctx.RowEncodingConfig{ + IsRowLevelChecksumEnabled: rowEncoder.Enable, + RowEncoder: rowEncoder, + } + + return &reorgTableMutateContext{ + exprCtx: exprCtx, + encodingConfig: encodingConfig, + mutateBuffers: tblctx.NewMutateBuffers(&variable.WriteStmtBufs{}), + // Though currently, `RowIDShardGenerator` is not required in DDL reorg, + // we still provide a valid one to keep the context complete and to avoid panic if it is used in the future. + shardID: variable.NewRowIDShardGenerator( + rand.New(rand.NewSource(time.Now().UnixNano())), // #nosec G404 + variable.DefTiDBShardAllocateStep, + ), + } +} + +func reorgTypeFlagsWithSQLMode(mode mysql.SQLMode) types.Flags { + return types.StrictFlags. + WithTruncateAsWarning(!mode.HasStrictMode()). + WithIgnoreInvalidDateErr(mode.HasAllowInvalidDatesMode()). + WithIgnoreZeroInDate(!mode.HasStrictMode() || mode.HasAllowInvalidDatesMode()). + WithCastTimeToYearThroughConcat(true) +} + +func reorgErrLevelsWithSQLMode(mode mysql.SQLMode) errctx.LevelMap { + return errctx.LevelMap{ + errctx.ErrGroupTruncate: errctx.ResolveErrLevel(false, !mode.HasStrictMode()), + errctx.ErrGroupBadNull: errctx.ResolveErrLevel(false, !mode.HasStrictMode()), + errctx.ErrGroupNoDefault: errctx.ResolveErrLevel(false, !mode.HasStrictMode()), + errctx.ErrGroupDividedByZero: errctx.ResolveErrLevel( + !mode.HasErrorForDivisionByZeroMode(), + !mode.HasStrictMode(), + ), + } +} + +func reorgTimeZoneWithTzLoc(tzLoc *model.TimeZoneLocation) (*time.Location, error) { + if tzLoc == nil { + // It is set to SystemLocation to be compatible with nil LocationInfo. + return timeutil.SystemLocation(), nil + } + return tzLoc.GetLocation() +} +>>>>>>> 91beef4bb14 (*: disable insert null to not-null column for single-row insertion in non-strict mode (#55477)) func (rc *reorgCtx) notifyJobState(state model.JobState) { atomic.StoreInt32((*int32)(&rc.jobState), int32(state)) diff --git a/pkg/errctx/context.go b/pkg/errctx/context.go new file mode 100644 index 0000000000000..34fae1304463c --- /dev/null +++ b/pkg/errctx/context.go @@ -0,0 +1,266 @@ +// Copyright 2023 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
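+
+// Package errctx maps each error group to a handling level, so callers can decide
+// whether an error is returned, downgraded to a warning, or ignored.
+//
+// A minimal usage sketch (the handler here is hypothetical; any
+// contextutil.WarnAppender works):
+//
+//	errCtx := errctx.NewContext(handler)
+//	errCtx = errCtx.WithErrGroupLevel(errctx.ErrGroupTruncate, errctx.LevelWarn)
+//	err = errCtx.HandleError(err) // truncate errors become warnings; others are returned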
+
+package errctx
+
+import (
+	"github.com/pingcap/errors"
+	"github.com/pingcap/tidb/pkg/errno"
+	contextutil "github.com/pingcap/tidb/pkg/util/context"
+	"github.com/pingcap/tidb/pkg/util/intest"
+)
+
+// Level defines the behavior for each error
+type Level uint8
+
+const (
+	// LevelError means the error will be returned
+	LevelError Level = iota
+	// LevelWarn means it will be regarded as a warning
+	LevelWarn
+	// LevelIgnore means the error will be ignored
+	LevelIgnore
+)
+
+// LevelMap indicates the map from `ErrGroup` to `Level`
+type LevelMap [errGroupCount]Level
+
+// Context defines how to handle an error
+type Context struct {
+	levelMap    LevelMap
+	warnHandler contextutil.WarnAppender
+}
+
+// LevelMap returns the `levelMap` of the context.
+func (ctx *Context) LevelMap() LevelMap {
+	return ctx.levelMap
+}
+
+// LevelForGroup returns the level for a specified group.
+func (ctx *Context) LevelForGroup(errGroup ErrGroup) Level {
+	return ctx.levelMap[errGroup]
+}
+
+// WithStrictErrGroupLevel makes the context return the error directly for all kinds of errors.
+func (ctx *Context) WithStrictErrGroupLevel() Context {
+	newCtx := Context{
+		warnHandler: ctx.warnHandler,
+	}
+
+	return newCtx
+}
+
+// WithErrGroupLevel sets a `Level` for an `ErrGroup`
+func (ctx *Context) WithErrGroupLevel(eg ErrGroup, l Level) Context {
+	newCtx := Context{
+		levelMap:    ctx.levelMap,
+		warnHandler: ctx.warnHandler,
+	}
+	newCtx.levelMap[eg] = l
+
+	return newCtx
+}
+
+// WithErrGroupLevels sets the whole `levelMap` for the context.
+func (ctx *Context) WithErrGroupLevels(levels LevelMap) Context {
+	return Context{
+		levelMap:    levels,
+		warnHandler: ctx.warnHandler,
+	}
+}
+
+// AppendWarning appends the error as a warning. If the inner `warnHandler` is nil, it does nothing.
+func (ctx *Context) AppendWarning(err error) {
+	intest.Assert(ctx.warnHandler != nil)
+	if w := ctx.warnHandler; w != nil {
+		// warnHandler should never be nil; check w != nil here just to keep the code safe.
+		w.AppendWarning(err)
+	}
+}
+
+// AppendNote appends the error as a warning with level 'Note'. If the inner `warnHandler` is nil, it does nothing.
+func (ctx *Context) AppendNote(err error) {
+	intest.Assert(ctx.warnHandler != nil)
+	if w := ctx.warnHandler; w != nil {
+		// warnHandler should never be nil; check w != nil here just to keep the code safe.
+		w.AppendNote(err)
+	}
+}
+
+// HandleError handles the error according to the context. See the comment of `HandleErrorWithAlias` for detailed logic.
+//
+// It also allows passing an `errors.ErrorGroup`; in this case, it'll handle each error in order, and return the first error
+// it finds.
+func (ctx *Context) HandleError(err error) error {
+	if err == nil {
+		return nil
+	}
+	// The function of handling `errors.ErrorGroup` is placed in `HandleError` but not in `HandleErrorWithAlias`, because
+	// it's hard to give a proper error and warn alias for an error group.
+	if errs, ok := err.(errors.ErrorGroup); ok {
+		for _, singleErr := range errs.Errors() {
+			singleErr = ctx.HandleError(singleErr)
+			// If one error is found, just return it.
+			// TODO: consider whether it's more appropriate to continue to handle other errors. For example, other errors
+			// may need to append warnings. The current behavior is the same as TiDB's original behavior before `errctx`
+			// was used to handle multiple errors.
+			if singleErr != nil {
+				return singleErr
+			}
+		}
+
+		return nil
+	}
+
+	return ctx.HandleErrorWithAlias(err, err, err)
+}
+
+// HandleErrorWithAlias handles the error according to the context.
+// 1. If the `internalErr` is not `"pingcap/errors".Error`, or the error code is not defined in the `errGroupMap`, or the error
+// level is set to `LevelError`(0), the `err` will be returned directly.
+// 2. If the error level is set to `LevelWarn`, the `warnErr` will be appended as a warning.
+// 3. If the error level is set to `LevelIgnore`, this function will return a `nil`.
+//
+// In most cases, these three should be the same. If there are many different kinds of error internally, but they are expected
+// to give the same error to users, the `err` can be different from `internalErr`. Also, if the warning is expected to be
+// different from the initial error, you can also use the `warnErr` argument.
+//
+// TODO: is it good to give an error code for internal only errors? Or should we use another way to distinguish different
+// group of errors?
+// TODO: both `types.Context` and `errctx.Context` can handle truncate error now. Refactor them.
+func (ctx *Context) HandleErrorWithAlias(internalErr error, err error, warnErr error) error {
+	if internalErr == nil {
+		return nil
+	}
+
+	internalErr = errors.Cause(internalErr)
+
+	e, ok := internalErr.(*errors.Error)
+	if !ok {
+		return err
+	}
+
+	eg, ok := errGroupMap[e.Code()]
+	if !ok {
+		return err
+	}
+
+	switch ctx.levelMap[eg] {
+	case LevelError:
+		return err
+	case LevelWarn:
+		ctx.AppendWarning(warnErr)
+	case LevelIgnore:
+	}
+
+	return nil
+}
+
+// NewContext creates an error context to handle the errors and warnings
+func NewContext(handler contextutil.WarnAppender) Context {
+	return NewContextWithLevels(LevelMap{}, handler)
+}
+
+// NewContextWithLevels creates an error context to handle the errors and warnings
+func NewContextWithLevels(levels LevelMap, handler contextutil.WarnAppender) Context {
+	intest.Assert(handler != nil)
+	return Context{
+		warnHandler: handler,
+		levelMap:    levels,
+	}
+}
+
+// StrictNoWarningContext returns all errors directly, and ignores all warnings
+var StrictNoWarningContext = NewContext(contextutil.IgnoreWarn)
+
+var errGroupMap = make(map[errors.ErrCode]ErrGroup)
+
+// ErrGroup groups the error according to the behavior of handling errors
+type ErrGroup int
+
+const (
+	// ErrGroupTruncate is the group of truncated errors
+	ErrGroupTruncate ErrGroup = iota
+	// ErrGroupDupKey is the group of duplicate key errors
+	ErrGroupDupKey
+	// ErrGroupBadNull is the group of bad null errors
+	ErrGroupBadNull
+	// ErrGroupNoDefault is the group of no default value errors
+	ErrGroupNoDefault
+	// ErrGroupDividedByZero is the group of divided by zero errors
+	ErrGroupDividedByZero
+	// ErrGroupAutoIncReadFailed is the group of auto increment read failed errors
+	ErrGroupAutoIncReadFailed
+	// ErrGroupNoMatchedPartition is the group of no partition is matched errors.
+	ErrGroupNoMatchedPartition
+	// errGroupCount is the count of all `ErrGroup`. Please leave it at the end of the list.
+ errGroupCount +) + +func init() { + group2Errors := map[ErrGroup][]errors.ErrCode{ + ErrGroupTruncate: { + errno.ErrTruncatedWrongValue, + errno.ErrDataTooLong, + errno.ErrTruncatedWrongValueForField, + errno.ErrWarnDataOutOfRange, + errno.ErrDataOutOfRange, + errno.ErrBadNumber, + errno.ErrWrongValueForType, + errno.ErrDatetimeFunctionOverflow, + errno.WarnDataTruncated, + errno.ErrIncorrectDatetimeValue, + }, + ErrGroupBadNull: { + errno.ErrBadNull, + errno.ErrWarnNullToNotnull, + }, + ErrGroupNoDefault: { + errno.ErrNoDefaultForField, + }, + ErrGroupDividedByZero: { + errno.ErrDivisionByZero, + }, + ErrGroupAutoIncReadFailed: { + errno.ErrAutoincReadFailed, + }, + ErrGroupNoMatchedPartition: { + errno.ErrNoPartitionForGivenValue, + errno.ErrRowDoesNotMatchGivenPartitionSet, + }, + ErrGroupDupKey: { + errno.ErrDupEntry, + }, + } + + for group, codes := range group2Errors { + for _, errCode := range codes { + errGroupMap[errCode] = group + } + } +} + +// ResolveErrLevel resolves the error level according to the `ignore` and `warn` flags +// if ignore is true, it will return `LevelIgnore` to ignore the error, +// otherwise, it will return `LevelWarn` or `LevelError` according to the `warn` flag +// Only one of `ignore` and `warn` can be true. +func ResolveErrLevel(ignore bool, warn bool) Level { + if ignore { + return LevelIgnore + } + if warn { + return LevelWarn + } + return LevelError +} diff --git a/pkg/executor/BUILD.bazel b/pkg/executor/BUILD.bazel index a05c81d88a3ce..dda6609234d25 100644 --- a/pkg/executor/BUILD.bazel +++ b/pkg/executor/BUILD.bazel @@ -419,6 +419,7 @@ go_test( "//pkg/store/helper", "//pkg/store/mockstore", "//pkg/store/mockstore/unistore", + "//pkg/table", "//pkg/table/tables", "//pkg/tablecodec", "//pkg/testkit", diff --git a/pkg/executor/executor_pkg_test.go b/pkg/executor/executor_pkg_test.go index 7f00b7785e7b8..7a763e67b13ee 100644 --- a/pkg/executor/executor_pkg_test.go +++ b/pkg/executor/executor_pkg_test.go @@ -338,6 +338,7 @@ func TestSortSpillDisk(t *testing.T) { require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/pkg/executor/testSortedRowContainerSpill")) }() ctx := mock.NewContext() +<<<<<<< HEAD ctx.GetSessionVars().MemQuota.MemQuotaQuery = 1 ctx.GetSessionVars().InitChunkSize = variable.DefMaxChunkSize ctx.GetSessionVars().MaxChunkSize = variable.DefMaxChunkSize @@ -350,6 +351,166 @@ func TestSortSpillDisk(t *testing.T) { rows: cas.rows, ctx: cas.ctx, ndvs: cas.ndvs, +======= + domain.BindDomain(ctx, &domain.Domain{}) + + cases := []struct { + name string + sqlMode mysql.SQLMode + stmt []ast.StmtNode + levels errctx.LevelMap + }{ + { + name: "strict,write", + sqlMode: mysql.ModeStrictAllTables | mysql.ModeErrorForDivisionByZero, + stmt: []ast.StmtNode{&ast.InsertStmt{}, &ast.UpdateStmt{}, &ast.DeleteStmt{}}, + levels: func() (l errctx.LevelMap) { + l[errctx.ErrGroupTruncate] = errctx.LevelError + l[errctx.ErrGroupDupKey] = errctx.LevelError + l[errctx.ErrGroupBadNull] = errctx.LevelError + l[errctx.ErrGroupNoDefault] = errctx.LevelError + l[errctx.ErrGroupDividedByZero] = errctx.LevelError + l[errctx.ErrGroupAutoIncReadFailed] = errctx.LevelError + l[errctx.ErrGroupNoMatchedPartition] = errctx.LevelError + return + }(), + }, + { + name: "non-strict,write", + sqlMode: mysql.ModeErrorForDivisionByZero, + stmt: []ast.StmtNode{&ast.InsertStmt{}, &ast.UpdateStmt{}, &ast.DeleteStmt{}}, + levels: func() (l errctx.LevelMap) { + l[errctx.ErrGroupTruncate] = errctx.LevelWarn + l[errctx.ErrGroupDupKey] = errctx.LevelError + l[errctx.ErrGroupBadNull] 
= errctx.LevelWarn + l[errctx.ErrGroupNoDefault] = errctx.LevelWarn + l[errctx.ErrGroupDividedByZero] = errctx.LevelWarn + l[errctx.ErrGroupAutoIncReadFailed] = errctx.LevelError + l[errctx.ErrGroupNoMatchedPartition] = errctx.LevelError + return + }(), + }, + { + name: "strict,insert ignore", + sqlMode: mysql.ModeStrictAllTables | mysql.ModeErrorForDivisionByZero, + stmt: []ast.StmtNode{&ast.InsertStmt{IgnoreErr: true}}, + levels: func() (l errctx.LevelMap) { + l[errctx.ErrGroupTruncate] = errctx.LevelWarn + l[errctx.ErrGroupDupKey] = errctx.LevelWarn + l[errctx.ErrGroupBadNull] = errctx.LevelWarn + l[errctx.ErrGroupNoDefault] = errctx.LevelWarn + l[errctx.ErrGroupDividedByZero] = errctx.LevelWarn + l[errctx.ErrGroupAutoIncReadFailed] = errctx.LevelWarn + l[errctx.ErrGroupNoMatchedPartition] = errctx.LevelWarn + return + }(), + }, + { + name: "strict,update ignore", + sqlMode: mysql.ModeStrictAllTables | mysql.ModeErrorForDivisionByZero, + stmt: []ast.StmtNode{&ast.UpdateStmt{IgnoreErr: true}}, + levels: func() (l errctx.LevelMap) { + l[errctx.ErrGroupTruncate] = errctx.LevelWarn + l[errctx.ErrGroupDupKey] = errctx.LevelWarn + l[errctx.ErrGroupBadNull] = errctx.LevelWarn + l[errctx.ErrGroupNoDefault] = errctx.LevelWarn + l[errctx.ErrGroupDividedByZero] = errctx.LevelWarn + l[errctx.ErrGroupAutoIncReadFailed] = errctx.LevelError + l[errctx.ErrGroupNoMatchedPartition] = errctx.LevelWarn + return + }(), + }, + { + name: "strict,delete ignore", + sqlMode: mysql.ModeStrictAllTables | mysql.ModeErrorForDivisionByZero, + stmt: []ast.StmtNode{&ast.DeleteStmt{IgnoreErr: true}}, + levels: func() (l errctx.LevelMap) { + l[errctx.ErrGroupTruncate] = errctx.LevelWarn + l[errctx.ErrGroupDupKey] = errctx.LevelWarn + l[errctx.ErrGroupBadNull] = errctx.LevelWarn + l[errctx.ErrGroupNoDefault] = errctx.LevelWarn + l[errctx.ErrGroupDividedByZero] = errctx.LevelWarn + l[errctx.ErrGroupAutoIncReadFailed] = errctx.LevelError + l[errctx.ErrGroupNoMatchedPartition] = errctx.LevelError + return + }(), + }, + { + name: "strict without error_for_division_by_zero,write", + sqlMode: mysql.ModeStrictAllTables, + stmt: []ast.StmtNode{&ast.InsertStmt{}, &ast.UpdateStmt{}, &ast.DeleteStmt{}}, + levels: func() (l errctx.LevelMap) { + l[errctx.ErrGroupTruncate] = errctx.LevelError + l[errctx.ErrGroupDupKey] = errctx.LevelError + l[errctx.ErrGroupBadNull] = errctx.LevelError + l[errctx.ErrGroupNoDefault] = errctx.LevelError + l[errctx.ErrGroupDividedByZero] = errctx.LevelIgnore + l[errctx.ErrGroupAutoIncReadFailed] = errctx.LevelError + l[errctx.ErrGroupNoMatchedPartition] = errctx.LevelError + return + }(), + }, + { + name: "strict,select/union", + sqlMode: mysql.ModeStrictAllTables | mysql.ModeErrorForDivisionByZero, + stmt: []ast.StmtNode{&ast.SelectStmt{}, &ast.SetOprStmt{}}, + levels: func() (l errctx.LevelMap) { + l[errctx.ErrGroupTruncate] = errctx.LevelWarn + l[errctx.ErrGroupDupKey] = errctx.LevelError + l[errctx.ErrGroupBadNull] = errctx.LevelError + l[errctx.ErrGroupNoDefault] = errctx.LevelError + l[errctx.ErrGroupDividedByZero] = errctx.LevelWarn + l[errctx.ErrGroupAutoIncReadFailed] = errctx.LevelError + l[errctx.ErrGroupNoMatchedPartition] = errctx.LevelError + return + }(), + }, + { + name: "non-strict,select/union", + sqlMode: mysql.ModeStrictAllTables | mysql.ModeErrorForDivisionByZero, + stmt: []ast.StmtNode{&ast.SelectStmt{}, &ast.SetOprStmt{}}, + levels: func() (l errctx.LevelMap) { + l[errctx.ErrGroupTruncate] = errctx.LevelWarn + l[errctx.ErrGroupDupKey] = errctx.LevelError + l[errctx.ErrGroupBadNull] 
= errctx.LevelError + l[errctx.ErrGroupNoDefault] = errctx.LevelError + l[errctx.ErrGroupDividedByZero] = errctx.LevelWarn + l[errctx.ErrGroupAutoIncReadFailed] = errctx.LevelError + l[errctx.ErrGroupNoMatchedPartition] = errctx.LevelError + return + }(), + }, + { + name: "strict,load_data", + sqlMode: mysql.ModeStrictAllTables | mysql.ModeErrorForDivisionByZero, + stmt: []ast.StmtNode{&ast.LoadDataStmt{}}, + levels: func() (l errctx.LevelMap) { + l[errctx.ErrGroupTruncate] = errctx.LevelError + l[errctx.ErrGroupDupKey] = errctx.LevelError + l[errctx.ErrGroupBadNull] = errctx.LevelError + l[errctx.ErrGroupNoDefault] = errctx.LevelError + l[errctx.ErrGroupDividedByZero] = errctx.LevelWarn + l[errctx.ErrGroupAutoIncReadFailed] = errctx.LevelError + l[errctx.ErrGroupNoMatchedPartition] = errctx.LevelWarn + return + }(), + }, + { + name: "non-strict,load_data", + sqlMode: mysql.SQLMode(0), + stmt: []ast.StmtNode{&ast.LoadDataStmt{}}, + levels: func() (l errctx.LevelMap) { + l[errctx.ErrGroupTruncate] = errctx.LevelError + l[errctx.ErrGroupDupKey] = errctx.LevelError + l[errctx.ErrGroupBadNull] = errctx.LevelError + l[errctx.ErrGroupNoDefault] = errctx.LevelError + l[errctx.ErrGroupDividedByZero] = errctx.LevelWarn + l[errctx.ErrGroupAutoIncReadFailed] = errctx.LevelError + l[errctx.ErrGroupNoMatchedPartition] = errctx.LevelWarn + return + }(), + }, +>>>>>>> 91beef4bb14 (*: disable insert null to not-null column for single-row insertion in non-strict mode (#55477)) } dataSource := buildMockDataSource(opt) exe := &SortExec{ diff --git a/pkg/executor/insert_test.go b/pkg/executor/insert_test.go index 6cfa7fb258ad8..016c3645ae874 100644 --- a/pkg/executor/insert_test.go +++ b/pkg/executor/insert_test.go @@ -27,6 +27,7 @@ import ( "github.com/pingcap/tidb/pkg/executor" "github.com/pingcap/tidb/pkg/meta/autoid" "github.com/pingcap/tidb/pkg/sessionctx/variable" + "github.com/pingcap/tidb/pkg/table" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/util" "github.com/pingcap/tidb/pkg/util/execdetails" @@ -1628,3 +1629,30 @@ func TestUnsignedDecimalFloatInsertNegative(t *testing.T) { tk.MustExec("insert into tf values('-100')") tk.MustQuery("select * from tf").Check(testkit.Rows("0")) } + +func TestInsertNullInNonStrictMode(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("create table t1 (id int primary key, col1 varchar(10) not null default '')") + tk.MustExec("create table t2 (id int primary key, col1 varchar(10))") + tk.MustExec("insert into t2 values (1, null)") + tk.MustExec("insert ignore into t1 values(5, null)") + + tk.MustExec("set session sql_mode = ''") + + err := tk.ExecToErr("insert into t1 values(1, null)") + require.EqualError(t, err, table.ErrColumnCantNull.GenWithStackByArgs("col1").Error()) + + err = tk.ExecToErr("insert into t1 set id = 1, col1 = null") + require.EqualError(t, err, table.ErrColumnCantNull.GenWithStackByArgs("col1").Error()) + + err = tk.ExecToErr("insert t1 VALUES (5, 5) ON DUPLICATE KEY UPDATE col1 = null") + require.EqualError(t, err, table.ErrColumnCantNull.GenWithStackByArgs("col1").Error()) + + tk.MustExec("insert into t1 select * from t2") + tk.MustExec("insert into t1 values(2, null), (3, 3), (4, 4)") + tk.MustExec("update t1 set col1 = null where id = 3") + tk.MustExec("insert ignore t1 VALUES (4, 4) ON DUPLICATE KEY UPDATE col1 = null") + tk.MustQuery("select * from t1").Check(testkit.RowsWithSep("|", "1|", "2|", "3|", "4|", "5|")) +} diff --git 
a/pkg/executor/load_data.go b/pkg/executor/load_data.go index e882fae1cbe9e..8a3e423ff7883 100644 --- a/pkg/executor/load_data.go +++ b/pkg/executor/load_data.go @@ -98,9 +98,18 @@ type LoadDataWorker struct { func setNonRestrictiveFlags(stmtCtx *stmtctx.StatementContext) { // TODO: DupKeyAsWarning represents too many "ignore error" paths, the // meaning of this flag is not clear. I can only reuse it here. +<<<<<<< HEAD stmtCtx.DupKeyAsWarning = true stmtCtx.TruncateAsWarning = true stmtCtx.BadNullAsWarning = true +======= + levels := stmtCtx.ErrLevels() + levels[errctx.ErrGroupDupKey] = errctx.LevelWarn + levels[errctx.ErrGroupBadNull] = errctx.LevelWarn + levels[errctx.ErrGroupNoDefault] = errctx.LevelWarn + stmtCtx.SetErrLevels(levels) + stmtCtx.SetTypeFlags(stmtCtx.TypeFlags().WithTruncateAsWarning(true)) +>>>>>>> 91beef4bb14 (*: disable insert null to not-null column for single-row insertion in non-strict mode (#55477)) } // NewLoadDataWorker creates a new LoadDataWorker that is ready to work. diff --git a/pkg/executor/select.go b/pkg/executor/select.go new file mode 100644 index 0000000000000..c27b7e498047c --- /dev/null +++ b/pkg/executor/select.go @@ -0,0 +1,1273 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package executor + +import ( + "context" + stderrors "errors" + "runtime/pprof" + "strings" + "sync/atomic" + "time" + + "github.com/opentracing/opentracing-go" + "github.com/pingcap/errors" + "github.com/pingcap/failpoint" + "github.com/pingcap/kvproto/pkg/kvrpcpb" + "github.com/pingcap/tidb/pkg/config" + "github.com/pingcap/tidb/pkg/ddl/schematracker" + "github.com/pingcap/tidb/pkg/domain" + "github.com/pingcap/tidb/pkg/errctx" + "github.com/pingcap/tidb/pkg/executor/aggregate" + "github.com/pingcap/tidb/pkg/executor/internal/exec" + "github.com/pingcap/tidb/pkg/executor/internal/pdhelper" + "github.com/pingcap/tidb/pkg/executor/sortexec" + "github.com/pingcap/tidb/pkg/expression" + "github.com/pingcap/tidb/pkg/infoschema" + "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" + "github.com/pingcap/tidb/pkg/parser/ast" + "github.com/pingcap/tidb/pkg/parser/mysql" + "github.com/pingcap/tidb/pkg/parser/terror" + plannercore "github.com/pingcap/tidb/pkg/planner/core" + "github.com/pingcap/tidb/pkg/planner/core/base" + "github.com/pingcap/tidb/pkg/planner/core/operator/logicalop" + "github.com/pingcap/tidb/pkg/planner/planctx" + plannerutil "github.com/pingcap/tidb/pkg/planner/util" + "github.com/pingcap/tidb/pkg/planner/util/fixcontrol" + "github.com/pingcap/tidb/pkg/sessionctx" + "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" + "github.com/pingcap/tidb/pkg/sessionctx/variable" + "github.com/pingcap/tidb/pkg/sessiontxn" + "github.com/pingcap/tidb/pkg/table" + "github.com/pingcap/tidb/pkg/tablecodec" + "github.com/pingcap/tidb/pkg/types" + "github.com/pingcap/tidb/pkg/util" + "github.com/pingcap/tidb/pkg/util/chunk" + "github.com/pingcap/tidb/pkg/util/dbterror/exeerrors" + "github.com/pingcap/tidb/pkg/util/deadlockhistory" + "github.com/pingcap/tidb/pkg/util/disk" + "github.com/pingcap/tidb/pkg/util/execdetails" + "github.com/pingcap/tidb/pkg/util/intest" + "github.com/pingcap/tidb/pkg/util/logutil" + "github.com/pingcap/tidb/pkg/util/memory" + "github.com/pingcap/tidb/pkg/util/syncutil" + "github.com/pingcap/tidb/pkg/util/topsql" + topsqlstate "github.com/pingcap/tidb/pkg/util/topsql/state" + "github.com/pingcap/tidb/pkg/util/tracing" + tikverr "github.com/tikv/client-go/v2/error" + tikvstore "github.com/tikv/client-go/v2/kv" + tikvutil "github.com/tikv/client-go/v2/util" + "go.uber.org/zap" +) + +var ( + _ exec.Executor = &aggregate.HashAggExec{} + _ exec.Executor = &IndexLookUpExecutor{} + _ exec.Executor = &IndexReaderExecutor{} + _ exec.Executor = &LimitExec{} + _ exec.Executor = &MaxOneRowExec{} + _ exec.Executor = &ProjectionExec{} + _ exec.Executor = &SelectionExec{} + _ exec.Executor = &SelectLockExec{} + _ exec.Executor = &sortexec.SortExec{} + _ exec.Executor = &aggregate.StreamAggExec{} + _ exec.Executor = &TableDualExec{} + _ exec.Executor = &TableReaderExecutor{} + _ exec.Executor = &TableScanExec{} + _ exec.Executor = &sortexec.TopNExec{} + + // GlobalMemoryUsageTracker is the ancestor of all the Executors' memory tracker and GlobalMemory Tracker + GlobalMemoryUsageTracker *memory.Tracker + // GlobalDiskUsageTracker is the ancestor of all the Executors' disk tracker + GlobalDiskUsageTracker *disk.Tracker + // GlobalAnalyzeMemoryTracker is the ancestor of all the Analyze jobs' memory tracker and child of global Tracker + GlobalAnalyzeMemoryTracker *memory.Tracker +) + +var ( + _ dataSourceExecutor = &TableReaderExecutor{} + _ dataSourceExecutor = &IndexReaderExecutor{} + _ dataSourceExecutor = &IndexLookUpExecutor{} + _ dataSourceExecutor = 
&IndexMergeReaderExecutor{} + + // CheckTableFastBucketSize is the bucket size of fast check table. + CheckTableFastBucketSize = atomic.Int64{} +) + +// dataSourceExecutor is a table DataSource converted Executor. +// Currently, there are TableReader/IndexReader/IndexLookUp/IndexMergeReader. +// Note, partition reader is special and the caller should handle it carefully. +type dataSourceExecutor interface { + exec.Executor + Table() table.Table +} + +const ( + // globalPanicStorageExceed represents the panic message when out of storage quota. + globalPanicStorageExceed string = "Out Of Quota For Local Temporary Space!" + // globalPanicMemoryExceed represents the panic message when out of memory limit. + globalPanicMemoryExceed string = "Out Of Global Memory Limit!" + // globalPanicAnalyzeMemoryExceed represents the panic message when out of analyze memory limit. + globalPanicAnalyzeMemoryExceed string = "Out Of Global Analyze Memory Limit!" +) + +// globalPanicOnExceed panics when GlobalDisTracker storage usage exceeds storage quota. +type globalPanicOnExceed struct { + memory.BaseOOMAction + mutex syncutil.Mutex // For synchronization. +} + +func init() { + action := &globalPanicOnExceed{} + GlobalMemoryUsageTracker = memory.NewGlobalTracker(memory.LabelForGlobalMemory, -1) + GlobalMemoryUsageTracker.SetActionOnExceed(action) + GlobalDiskUsageTracker = disk.NewGlobalTrcaker(memory.LabelForGlobalStorage, -1) + GlobalDiskUsageTracker.SetActionOnExceed(action) + GlobalAnalyzeMemoryTracker = memory.NewTracker(memory.LabelForGlobalAnalyzeMemory, -1) + GlobalAnalyzeMemoryTracker.SetActionOnExceed(action) + // register quota funcs + variable.SetMemQuotaAnalyze = GlobalAnalyzeMemoryTracker.SetBytesLimit + variable.GetMemQuotaAnalyze = GlobalAnalyzeMemoryTracker.GetBytesLimit + // TODO: do not attach now to avoid impact to global, will attach later when analyze memory track is stable + //GlobalAnalyzeMemoryTracker.AttachToGlobalTracker(GlobalMemoryUsageTracker) + + schematracker.ConstructResultOfShowCreateDatabase = ConstructResultOfShowCreateDatabase + schematracker.ConstructResultOfShowCreateTable = ConstructResultOfShowCreateTable + + // CheckTableFastBucketSize is used to set the fast analyze bucket size for check table. + CheckTableFastBucketSize.Store(1024) +} + +// Start the backend components +func Start() { + pdhelper.GlobalPDHelper.Start() +} + +// Stop the backend components +func Stop() { + pdhelper.GlobalPDHelper.Stop() +} + +// Action panics when storage usage exceeds storage quota. +func (a *globalPanicOnExceed) Action(t *memory.Tracker) { + a.mutex.Lock() + defer a.mutex.Unlock() + msg := "" + switch t.Label() { + case memory.LabelForGlobalStorage: + msg = globalPanicStorageExceed + case memory.LabelForGlobalMemory: + msg = globalPanicMemoryExceed + case memory.LabelForGlobalAnalyzeMemory: + msg = globalPanicAnalyzeMemoryExceed + default: + msg = "Out of Unknown Resource Quota!" + } + // TODO(hawkingrei): should return error instead. + panic(msg) +} + +// GetPriority get the priority of the Action +func (*globalPanicOnExceed) GetPriority() int64 { + return memory.DefPanicPriority +} + +// SelectLockExec represents a select lock executor. +// It is built from the "SELECT .. FOR UPDATE" or the "SELECT .. LOCK IN SHARE MODE" statement. +// For "SELECT .. FOR UPDATE" statement, it locks every row key from source Executor. +// After the execution, the keys are buffered in transaction, and will be sent to KV +// when doing commit. 
If there is any key already locked by another transaction,
+// the transaction will roll back and retry.
+type SelectLockExec struct {
+	exec.BaseExecutor
+
+	Lock *ast.SelectLockInfo
+	keys []kv.Key
+
+	// The children may be a join of multiple tables, so we need a map.
+	tblID2Handle map[int64][]plannerutil.HandleCols
+
+	// When SelectLock works on a partitioned table, we need the partition ID
+	// (Physical Table ID) instead of the 'logical' table ID to calculate
+	// the lock KV. In that case, the Physical Table ID is extracted
+	// from the row key in the store and returned as an extra column in the chunk row.
+
+	// tblID2PhyTblIDCol is used for partitioned tables.
+	// The child executor needs to return an extra column containing
+	// the Physical Table ID (i.e., which partition the row came from).
+	// Used during building.
+	tblID2PhysTblIDCol map[int64]*expression.Column
+
+	// Used during execution.
+	// Map from logical table ID to the column index where the physical table ID is stored.
+	// For dynamic prune mode, model.ExtraPhysTblID columns are requested from
+	// storage and used for the physical table ID.
+	// For static prune mode, model.ExtraPhysTblID is still sent to storage/Protobuf
+	// but could be filled in by the partition's TableReaderExecutor
+	// due to issues with chunk handling between the TableReaderExecutor and the
+	// SelectReader result.
+	tblID2PhysTblIDColIdx map[int64]int
+}
+
+// Open implements the Executor Open interface.
+func (e *SelectLockExec) Open(ctx context.Context) error {
+	if len(e.tblID2PhysTblIDCol) > 0 {
+		e.tblID2PhysTblIDColIdx = make(map[int64]int)
+		cols := e.Schema().Columns
+		for i := len(cols) - 1; i >= 0; i-- {
+			if cols[i].ID == model.ExtraPhysTblID {
+				for tblID, col := range e.tblID2PhysTblIDCol {
+					if cols[i].UniqueID == col.UniqueID {
+						e.tblID2PhysTblIDColIdx[tblID] = i
+						break
+					}
+				}
+			}
+		}
+	}
+	return e.BaseExecutor.Open(ctx)
+}
+
+// Next implements the Executor Next interface.
+func (e *SelectLockExec) Next(ctx context.Context, req *chunk.Chunk) error {
+	req.GrowAndReset(e.MaxChunkSize())
+	err := exec.Next(ctx, e.Children(0), req)
+	if err != nil {
+		return err
+	}
+	// If there's no handle or it's not a `SELECT FOR UPDATE` or `SELECT FOR SHARE` statement.
+	if len(e.tblID2Handle) == 0 || (!logicalop.IsSupportedSelectLockType(e.Lock.LockType)) {
+		return nil
+	}
+
+	if req.NumRows() > 0 {
+		iter := chunk.NewIterator4Chunk(req)
+		for row := iter.Begin(); row != iter.End(); row = iter.Next() {
+			for tblID, cols := range e.tblID2Handle {
+				for _, col := range cols {
+					handle, err := col.BuildHandle(row)
+					if err != nil {
+						return err
+					}
+					physTblID := tblID
+					if physTblColIdx, ok := e.tblID2PhysTblIDColIdx[tblID]; ok {
+						physTblID = row.GetInt64(physTblColIdx)
+						if physTblID == 0 {
+							// select * from t1 left join t2 on t1.c = t2.c for update
+							// In a left join, the right side might be padded with NULLs.
+							// In that case, physTblID is 0, so skip adding the lock.
+							//
+							// Note: we can't distinguish whether it's the left-join case,
+							// or a bug where TiKV returns rows without a correct physical ID column.
+ continue + } + } + e.keys = append(e.keys, tablecodec.EncodeRowKeyWithHandle(physTblID, handle)) + } + } + } + return nil + } + lockWaitTime := e.Ctx().GetSessionVars().LockWaitTimeout + if e.Lock.LockType == ast.SelectLockForUpdateNoWait || e.Lock.LockType == ast.SelectLockForShareNoWait { + lockWaitTime = tikvstore.LockNoWait + } else if e.Lock.LockType == ast.SelectLockForUpdateWaitN { + lockWaitTime = int64(e.Lock.WaitSec) * 1000 + } + + for id := range e.tblID2Handle { + e.UpdateDeltaForTableID(id) + } + lockCtx, err := newLockCtx(e.Ctx(), lockWaitTime, len(e.keys)) + if err != nil { + return err + } + return doLockKeys(ctx, e.Ctx(), lockCtx, e.keys...) +} + +func newLockCtx(sctx sessionctx.Context, lockWaitTime int64, numKeys int) (*tikvstore.LockCtx, error) { + seVars := sctx.GetSessionVars() + forUpdateTS, err := sessiontxn.GetTxnManager(sctx).GetStmtForUpdateTS() + if err != nil { + return nil, err + } + lockCtx := tikvstore.NewLockCtx(forUpdateTS, lockWaitTime, seVars.StmtCtx.GetLockWaitStartTime()) + lockCtx.Killed = &seVars.SQLKiller.Signal + lockCtx.PessimisticLockWaited = &seVars.StmtCtx.PessimisticLockWaited + lockCtx.LockKeysDuration = &seVars.StmtCtx.LockKeysDuration + lockCtx.LockKeysCount = &seVars.StmtCtx.LockKeysCount + lockCtx.LockExpired = &seVars.TxnCtx.LockExpire + lockCtx.ResourceGroupTagger = func(req *kvrpcpb.PessimisticLockRequest) []byte { + if req == nil { + return nil + } + if len(req.Mutations) == 0 { + return nil + } + if mutation := req.Mutations[0]; mutation != nil { + normalized, digest := seVars.StmtCtx.SQLDigest() + if len(normalized) == 0 { + return nil + } + _, planDigest := seVars.StmtCtx.GetPlanDigest() + + return kv.NewResourceGroupTagBuilder(). + SetPlanDigest(planDigest). + SetSQLDigest(digest). + EncodeTagWithKey(mutation.Key) + } + return nil + } + lockCtx.OnDeadlock = func(deadlock *tikverr.ErrDeadlock) { + cfg := config.GetGlobalConfig() + if deadlock.IsRetryable && !cfg.PessimisticTxn.DeadlockHistoryCollectRetryable { + return + } + rec := deadlockhistory.ErrDeadlockToDeadlockRecord(deadlock) + deadlockhistory.GlobalDeadlockHistory.Push(rec) + } + if lockCtx.ForUpdateTS > 0 && seVars.AssertionLevel != variable.AssertionLevelOff { + lockCtx.InitCheckExistence(numKeys) + } + return lockCtx, nil +} + +// doLockKeys is the main entry for pessimistic lock keys +// waitTime means the lock operation will wait in milliseconds if target key is already +// locked by others. used for (select for update nowait) situation +func doLockKeys(ctx context.Context, se sessionctx.Context, lockCtx *tikvstore.LockCtx, keys ...kv.Key) error { + sessVars := se.GetSessionVars() + sctx := sessVars.StmtCtx + if !sctx.InUpdateStmt && !sctx.InDeleteStmt { + atomic.StoreUint32(&se.GetSessionVars().TxnCtx.ForUpdate, 1) + } + // Lock keys only once when finished fetching all results. + txn, err := se.Txn(true) + if err != nil { + return err + } + + // Skip the temporary table keys. + keys = filterTemporaryTableKeys(sessVars, keys) + + keys = filterLockTableKeys(sessVars.StmtCtx, keys) + var lockKeyStats *tikvutil.LockKeysDetails + ctx = context.WithValue(ctx, tikvutil.LockKeysDetailCtxKey, &lockKeyStats) + err = txn.LockKeys(tikvutil.SetSessionID(ctx, se.GetSessionVars().ConnectionID), lockCtx, keys...) 
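+	// Merge the lock-wait details collected by the KV client (through the context
+	// value installed above) back into the statement context's execution details.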
+ if lockKeyStats != nil { + sctx.MergeLockKeysExecDetails(lockKeyStats) + } + return err +} + +func filterTemporaryTableKeys(vars *variable.SessionVars, keys []kv.Key) []kv.Key { + txnCtx := vars.TxnCtx + if txnCtx == nil || txnCtx.TemporaryTables == nil { + return keys + } + + newKeys := keys[:0:len(keys)] + for _, key := range keys { + tblID := tablecodec.DecodeTableID(key) + if _, ok := txnCtx.TemporaryTables[tblID]; !ok { + newKeys = append(newKeys, key) + } + } + return newKeys +} + +func filterLockTableKeys(stmtCtx *stmtctx.StatementContext, keys []kv.Key) []kv.Key { + if len(stmtCtx.LockTableIDs) == 0 { + return keys + } + newKeys := keys[:0:len(keys)] + for _, key := range keys { + tblID := tablecodec.DecodeTableID(key) + if _, ok := stmtCtx.LockTableIDs[tblID]; ok { + newKeys = append(newKeys, key) + } + } + return newKeys +} + +// LimitExec represents limit executor +// It ignores 'Offset' rows from src, then returns 'Count' rows at maximum. +type LimitExec struct { + exec.BaseExecutor + + begin uint64 + end uint64 + cursor uint64 + + // meetFirstBatch represents whether we have met the first valid Chunk from child. + meetFirstBatch bool + + childResult *chunk.Chunk + + // columnIdxsUsedByChild keep column indexes of child executor used for inline projection + columnIdxsUsedByChild []int + columnSwapHelper *chunk.ColumnSwapHelper + + // Log the close time when opentracing is enabled. + span opentracing.Span +} + +// Next implements the Executor Next interface. +func (e *LimitExec) Next(ctx context.Context, req *chunk.Chunk) error { + req.Reset() + if e.cursor >= e.end { + return nil + } + for !e.meetFirstBatch { + // transfer req's requiredRows to childResult and then adjust it in childResult + e.childResult = e.childResult.SetRequiredRows(req.RequiredRows(), e.MaxChunkSize()) + err := exec.Next(ctx, e.Children(0), e.adjustRequiredRows(e.childResult)) + if err != nil { + return err + } + batchSize := uint64(e.childResult.NumRows()) + // no more data. + if batchSize == 0 { + return nil + } + if newCursor := e.cursor + batchSize; newCursor >= e.begin { + e.meetFirstBatch = true + begin, end := e.begin-e.cursor, batchSize + if newCursor > e.end { + end = e.end - e.cursor + } + e.cursor += end + if begin == end { + break + } + if e.columnIdxsUsedByChild != nil { + req.Append(e.childResult.Prune(e.columnIdxsUsedByChild), int(begin), int(end)) + } else { + req.Append(e.childResult, int(begin), int(end)) + } + return nil + } + e.cursor += batchSize + } + e.childResult.Reset() + e.childResult = e.childResult.SetRequiredRows(req.RequiredRows(), e.MaxChunkSize()) + e.adjustRequiredRows(e.childResult) + err := exec.Next(ctx, e.Children(0), e.childResult) + if err != nil { + return err + } + batchSize := uint64(e.childResult.NumRows()) + // no more data. + if batchSize == 0 { + return nil + } + if e.cursor+batchSize > e.end { + e.childResult.TruncateTo(int(e.end - e.cursor)) + batchSize = e.end - e.cursor + } + e.cursor += batchSize + + if e.columnIdxsUsedByChild != nil { + err = e.columnSwapHelper.SwapColumns(e.childResult, req) + if err != nil { + return err + } + } else { + req.SwapColumns(e.childResult) + } + return nil +} + +// Open implements the Executor Open interface. 
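+// It resets the cursor, pre-allocates the child result chunk, and marks the first
+// batch as already met when there is no offset (begin == 0). It also captures the
+// opentracing span, if any, so a slow Close can be logged against it.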
+func (e *LimitExec) Open(ctx context.Context) error { + if err := e.BaseExecutor.Open(ctx); err != nil { + return err + } + e.childResult = exec.TryNewCacheChunk(e.Children(0)) + e.cursor = 0 + e.meetFirstBatch = e.begin == 0 + if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil { + e.span = span + } + return nil +} + +// Close implements the Executor Close interface. +func (e *LimitExec) Close() error { + start := time.Now() + + e.childResult = nil + err := e.BaseExecutor.Close() + + elapsed := time.Since(start) + if elapsed > time.Millisecond { + logutil.BgLogger().Info("limit executor close takes a long time", + zap.Duration("elapsed", elapsed)) + if e.span != nil { + span1 := e.span.Tracer().StartSpan("limitExec.Close", opentracing.ChildOf(e.span.Context()), opentracing.StartTime(start)) + defer span1.Finish() + } + } + return err +} + +func (e *LimitExec) adjustRequiredRows(chk *chunk.Chunk) *chunk.Chunk { + // the limit of maximum number of rows the LimitExec should read + limitTotal := int(e.end - e.cursor) + + var limitRequired int + if e.cursor < e.begin { + // if cursor is less than begin, it have to read (begin-cursor) rows to ignore + // and then read chk.RequiredRows() rows to return, + // so the limit is (begin-cursor)+chk.RequiredRows(). + limitRequired = int(e.begin) - int(e.cursor) + chk.RequiredRows() + } else { + // if cursor is equal or larger than begin, just read chk.RequiredRows() rows to return. + limitRequired = chk.RequiredRows() + } + + return chk.SetRequiredRows(min(limitTotal, limitRequired), e.MaxChunkSize()) +} + +func init() { + // While doing optimization in the plan package, we need to execute uncorrelated subquery, + // but the plan package cannot import the executor package because of the dependency cycle. + // So we assign a function implemented in the executor package to the plan package to avoid the dependency cycle. + plannercore.EvalSubqueryFirstRow = func(ctx context.Context, p base.PhysicalPlan, is infoschema.InfoSchema, pctx planctx.PlanContext) ([]types.Datum, error) { + if fixcontrol.GetBoolWithDefault(pctx.GetSessionVars().OptimizerFixControl, fixcontrol.Fix43817, false) { + return nil, errors.NewNoStackError("evaluate non-correlated sub-queries during optimization phase is not allowed by fix-control 43817") + } + + defer func(begin time.Time) { + s := pctx.GetSessionVars() + s.StmtCtx.SetSkipPlanCache("query has uncorrelated sub-queries is un-cacheable") + s.RewritePhaseInfo.PreprocessSubQueries++ + s.RewritePhaseInfo.DurationPreprocessSubQuery += time.Since(begin) + }(time.Now()) + + r, ctx := tracing.StartRegionEx(ctx, "executor.EvalSubQuery") + defer r.End() + + sctx, err := plannercore.AsSctx(pctx) + intest.AssertNoError(err) + if err != nil { + return nil, err + } + + e := newExecutorBuilder(sctx, is) + executor := e.build(p) + if e.err != nil { + return nil, e.err + } + err = exec.Open(ctx, executor) + defer func() { terror.Log(exec.Close(executor)) }() + if err != nil { + return nil, err + } + if pi, ok := sctx.(processinfoSetter); ok { + // Before executing the sub-query, we need update the processinfo to make the progress bar more accurate. + // because the sub-query may take a long time. 
+ pi.UpdateProcessInfo() + } + chk := exec.TryNewCacheChunk(executor) + err = exec.Next(ctx, executor, chk) + if err != nil { + return nil, err + } + if chk.NumRows() == 0 { + return nil, nil + } + row := chk.GetRow(0).GetDatumRow(exec.RetTypes(executor)) + return row, err + } +} + +// TableDualExec represents a dual table executor. +type TableDualExec struct { + exec.BaseExecutorV2 + + // numDualRows can only be 0 or 1. + numDualRows int + numReturned int +} + +// Open implements the Executor Open interface. +func (e *TableDualExec) Open(context.Context) error { + e.numReturned = 0 + return nil +} + +// Next implements the Executor Next interface. +func (e *TableDualExec) Next(_ context.Context, req *chunk.Chunk) error { + req.Reset() + if e.numReturned >= e.numDualRows { + return nil + } + if e.Schema().Len() == 0 { + req.SetNumVirtualRows(1) + } else { + for i := range e.Schema().Columns { + req.AppendNull(i) + } + } + e.numReturned = e.numDualRows + return nil +} + +type selectionExecutorContext struct { + stmtMemTracker *memory.Tracker + evalCtx expression.EvalContext + enableVectorizedExpression bool +} + +func newSelectionExecutorContext(sctx sessionctx.Context) selectionExecutorContext { + return selectionExecutorContext{ + stmtMemTracker: sctx.GetSessionVars().StmtCtx.MemTracker, + evalCtx: sctx.GetExprCtx().GetEvalCtx(), + enableVectorizedExpression: sctx.GetSessionVars().EnableVectorizedExpression, + } +} + +// SelectionExec represents a filter executor. +type SelectionExec struct { + selectionExecutorContext + exec.BaseExecutorV2 + + batched bool + filters []expression.Expression + selected []bool + inputIter *chunk.Iterator4Chunk + inputRow chunk.Row + childResult *chunk.Chunk + + memTracker *memory.Tracker +} + +// Open implements the Executor Open interface. +func (e *SelectionExec) Open(ctx context.Context) error { + if err := e.BaseExecutorV2.Open(ctx); err != nil { + return err + } + failpoint.Inject("mockSelectionExecBaseExecutorOpenReturnedError", func(val failpoint.Value) { + if val.(bool) { + failpoint.Return(errors.New("mock SelectionExec.baseExecutor.Open returned error")) + } + }) + return e.open(ctx) +} + +func (e *SelectionExec) open(context.Context) error { + if e.memTracker != nil { + e.memTracker.Reset() + } else { + e.memTracker = memory.NewTracker(e.ID(), -1) + } + e.memTracker.AttachTo(e.stmtMemTracker) + e.childResult = exec.TryNewCacheChunk(e.Children(0)) + e.memTracker.Consume(e.childResult.MemoryUsage()) + e.batched = expression.Vectorizable(e.filters) + if e.batched { + e.selected = make([]bool, 0, chunk.InitialCapacity) + } + e.inputIter = chunk.NewIterator4Chunk(e.childResult) + e.inputRow = e.inputIter.End() + return nil +} + +// Close implements plannercore.Plan Close interface. +func (e *SelectionExec) Close() error { + if e.childResult != nil { + e.memTracker.Consume(-e.childResult.MemoryUsage()) + e.childResult = nil + } + e.selected = nil + return e.BaseExecutorV2.Close() +} + +// Next implements the Executor Next interface. 
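+// When the filter expressions are vectorizable, it filters whole chunks via
+// expression.VectorizedFilter; otherwise it falls back to unBatchedNext and
+// evaluates the filters row by row.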
+func (e *SelectionExec) Next(ctx context.Context, req *chunk.Chunk) error { + req.GrowAndReset(e.MaxChunkSize()) + + if !e.batched { + return e.unBatchedNext(ctx, req) + } + + for { + for ; e.inputRow != e.inputIter.End(); e.inputRow = e.inputIter.Next() { + if req.IsFull() { + return nil + } + + if !e.selected[e.inputRow.Idx()] { + continue + } + + req.AppendRow(e.inputRow) + } + mSize := e.childResult.MemoryUsage() + err := exec.Next(ctx, e.Children(0), e.childResult) + e.memTracker.Consume(e.childResult.MemoryUsage() - mSize) + if err != nil { + return err + } + // no more data. + if e.childResult.NumRows() == 0 { + return nil + } + e.selected, err = expression.VectorizedFilter(e.evalCtx, e.enableVectorizedExpression, e.filters, e.inputIter, e.selected) + if err != nil { + return err + } + e.inputRow = e.inputIter.Begin() + } +} + +// unBatchedNext filters input rows one by one and returns once an input row is selected. +// For sql with "SETVAR" in filter and "GETVAR" in projection, for example: "SELECT @a FROM t WHERE (@a := 2) > 0", +// we have to set batch size to 1 to do the evaluation of filter and projection. +func (e *SelectionExec) unBatchedNext(ctx context.Context, chk *chunk.Chunk) error { + evalCtx := e.evalCtx + for { + for ; e.inputRow != e.inputIter.End(); e.inputRow = e.inputIter.Next() { + selected, _, err := expression.EvalBool(evalCtx, e.filters, e.inputRow) + if err != nil { + return err + } + if selected { + chk.AppendRow(e.inputRow) + e.inputRow = e.inputIter.Next() + return nil + } + } + mSize := e.childResult.MemoryUsage() + err := exec.Next(ctx, e.Children(0), e.childResult) + e.memTracker.Consume(e.childResult.MemoryUsage() - mSize) + if err != nil { + return err + } + e.inputRow = e.inputIter.Begin() + // no more data. + if e.childResult.NumRows() == 0 { + return nil + } + } +} + +// TableScanExec is a table scan executor without result fields. +type TableScanExec struct { + exec.BaseExecutor + + t table.Table + columns []*model.ColumnInfo + virtualTableChunkList *chunk.List + virtualTableChunkIdx int +} + +// Next implements the Executor Next interface. +func (e *TableScanExec) Next(ctx context.Context, req *chunk.Chunk) error { + req.GrowAndReset(e.MaxChunkSize()) + return e.nextChunk4InfoSchema(ctx, req) +} + +func (e *TableScanExec) nextChunk4InfoSchema(ctx context.Context, chk *chunk.Chunk) error { + chk.GrowAndReset(e.MaxChunkSize()) + if e.virtualTableChunkList == nil { + e.virtualTableChunkList = chunk.NewList(exec.RetTypes(e), e.InitCap(), e.MaxChunkSize()) + columns := make([]*table.Column, e.Schema().Len()) + for i, colInfo := range e.columns { + columns[i] = table.ToColumn(colInfo) + } + mutableRow := chunk.MutRowFromTypes(exec.RetTypes(e)) + type tableIter interface { + IterRecords(ctx context.Context, sctx sessionctx.Context, cols []*table.Column, fn table.RecordIterFunc) error + } + err := (e.t.(tableIter)).IterRecords(ctx, e.Ctx(), columns, func(_ kv.Handle, rec []types.Datum, _ []*table.Column) (bool, error) { + mutableRow.SetDatums(rec...) + e.virtualTableChunkList.AppendRow(mutableRow.ToRow()) + return true, nil + }) + if err != nil { + return err + } + } + // no more data. + if e.virtualTableChunkIdx >= e.virtualTableChunkList.NumChunks() { + return nil + } + virtualTableChunk := e.virtualTableChunkList.GetChunk(e.virtualTableChunkIdx) + e.virtualTableChunkIdx++ + chk.SwapColumns(virtualTableChunk) + return nil +} + +// Open implements the Executor Open interface. 
+func (e *TableScanExec) Open(context.Context) error { + e.virtualTableChunkList = nil + return nil +} + +// MaxOneRowExec checks if the number of rows that a query returns is at maximum one. +// It's built from subquery expression. +type MaxOneRowExec struct { + exec.BaseExecutor + + evaluated bool +} + +// Open implements the Executor Open interface. +func (e *MaxOneRowExec) Open(ctx context.Context) error { + if err := e.BaseExecutor.Open(ctx); err != nil { + return err + } + e.evaluated = false + return nil +} + +// Next implements the Executor Next interface. +func (e *MaxOneRowExec) Next(ctx context.Context, req *chunk.Chunk) error { + req.Reset() + if e.evaluated { + return nil + } + e.evaluated = true + err := exec.Next(ctx, e.Children(0), req) + if err != nil { + return err + } + + if num := req.NumRows(); num == 0 { + for i := range e.Schema().Columns { + req.AppendNull(i) + } + return nil + } else if num != 1 { + return exeerrors.ErrSubqueryMoreThan1Row + } + + childChunk := exec.TryNewCacheChunk(e.Children(0)) + err = exec.Next(ctx, e.Children(0), childChunk) + if err != nil { + return err + } + if childChunk.NumRows() != 0 { + return exeerrors.ErrSubqueryMoreThan1Row + } + + return nil +} + +// ResetContextOfStmt resets the StmtContext and session variables. +// Before every execution, we must clear statement context. +func ResetContextOfStmt(ctx sessionctx.Context, s ast.StmtNode) (err error) { + defer func() { + if r := recover(); r != nil { + logutil.BgLogger().Warn("ResetContextOfStmt panicked", zap.Stack("stack"), zap.Any("recover", r), zap.Error(err)) + if err != nil { + err = stderrors.Join(err, util.GetRecoverError(r)) + } else { + err = util.GetRecoverError(r) + } + } + }() + vars := ctx.GetSessionVars() + for name, val := range vars.StmtCtx.SetVarHintRestore { + err := vars.SetSystemVar(name, val) + if err != nil { + logutil.BgLogger().Warn("Failed to restore the variable after SET_VAR hint", zap.String("variable name", name), zap.String("expected value", val)) + } + } + vars.StmtCtx.SetVarHintRestore = nil + var sc *stmtctx.StatementContext + if vars.TxnCtx.CouldRetry || vars.HasStatusFlag(mysql.ServerStatusCursorExists) { + // Must construct new statement context object, the retry history need context for every statement. + // TODO: Maybe one day we can get rid of transaction retry, then this logic can be deleted. 
+ sc = stmtctx.NewStmtCtx() + } else { + sc = vars.InitStatementContext() + } + sc.SetTimeZone(vars.Location()) + sc.TaskID = stmtctx.AllocateTaskID() + if sc.CTEStorageMap == nil { + sc.CTEStorageMap = map[int]*CTEStorages{} + } else { + clear(sc.CTEStorageMap.(map[int]*CTEStorages)) + } + if sc.LockTableIDs == nil { + sc.LockTableIDs = make(map[int64]struct{}) + } else { + clear(sc.LockTableIDs) + } + if sc.TableStats == nil { + sc.TableStats = make(map[int64]any) + } else { + clear(sc.TableStats) + } + if sc.MDLRelatedTableIDs == nil { + sc.MDLRelatedTableIDs = make(map[int64]struct{}) + } else { + clear(sc.MDLRelatedTableIDs) + } + if sc.TblInfo2UnionScan == nil { + sc.TblInfo2UnionScan = make(map[*model.TableInfo]bool) + } else { + clear(sc.TblInfo2UnionScan) + } + sc.IsStaleness = false + sc.EnableOptimizeTrace = false + sc.OptimizeTracer = nil + sc.OptimizerCETrace = nil + sc.IsSyncStatsFailed = false + sc.IsExplainAnalyzeDML = false + sc.ResourceGroupName = vars.ResourceGroupName + // Firstly we assume that UseDynamicPruneMode can be enabled according session variable, then we will check other conditions + // in PlanBuilder.buildDataSource + if ctx.GetSessionVars().IsDynamicPartitionPruneEnabled() { + sc.UseDynamicPruneMode = true + } else { + sc.UseDynamicPruneMode = false + } + + sc.StatsLoad.Timeout = 0 + sc.StatsLoad.NeededItems = nil + sc.StatsLoad.ResultCh = nil + + sc.SysdateIsNow = ctx.GetSessionVars().SysdateIsNow + + vars.MemTracker.Detach() + vars.MemTracker.UnbindActions() + vars.MemTracker.SetBytesLimit(vars.MemQuotaQuery) + vars.MemTracker.ResetMaxConsumed() + vars.DiskTracker.Detach() + vars.DiskTracker.ResetMaxConsumed() + vars.MemTracker.SessionID.Store(vars.ConnectionID) + vars.MemTracker.Killer = &vars.SQLKiller + vars.DiskTracker.Killer = &vars.SQLKiller + vars.SQLKiller.Reset() + vars.SQLKiller.ConnID.Store(vars.ConnectionID) + + isAnalyze := false + if execStmt, ok := s.(*ast.ExecuteStmt); ok { + prepareStmt, err := plannercore.GetPreparedStmt(execStmt, vars) + if err != nil { + return err + } + _, isAnalyze = prepareStmt.PreparedAst.Stmt.(*ast.AnalyzeTableStmt) + } else if _, ok := s.(*ast.AnalyzeTableStmt); ok { + isAnalyze = true + } + if isAnalyze { + sc.InitMemTracker(memory.LabelForAnalyzeMemory, -1) + vars.MemTracker.SetBytesLimit(-1) + vars.MemTracker.AttachTo(GlobalAnalyzeMemoryTracker) + } else { + sc.InitMemTracker(memory.LabelForSQLText, -1) + } + logOnQueryExceedMemQuota := domain.GetDomain(ctx).ExpensiveQueryHandle().LogOnQueryExceedMemQuota + switch variable.OOMAction.Load() { + case variable.OOMActionCancel: + action := &memory.PanicOnExceed{ConnID: vars.ConnectionID, Killer: vars.MemTracker.Killer} + action.SetLogHook(logOnQueryExceedMemQuota) + vars.MemTracker.SetActionOnExceed(action) + case variable.OOMActionLog: + fallthrough + default: + action := &memory.LogOnExceed{ConnID: vars.ConnectionID} + action.SetLogHook(logOnQueryExceedMemQuota) + vars.MemTracker.SetActionOnExceed(action) + } + sc.MemTracker.SessionID.Store(vars.ConnectionID) + sc.MemTracker.AttachTo(vars.MemTracker) + sc.InitDiskTracker(memory.LabelForSQLText, -1) + globalConfig := config.GetGlobalConfig() + if variable.EnableTmpStorageOnOOM.Load() && sc.DiskTracker != nil { + sc.DiskTracker.AttachTo(vars.DiskTracker) + if GlobalDiskUsageTracker != nil { + vars.DiskTracker.AttachTo(GlobalDiskUsageTracker) + } + } + if execStmt, ok := s.(*ast.ExecuteStmt); ok { + prepareStmt, err := plannercore.GetPreparedStmt(execStmt, vars) + if err != nil { + return err + } + s = 
prepareStmt.PreparedAst.Stmt + sc.InitSQLDigest(prepareStmt.NormalizedSQL, prepareStmt.SQLDigest) + // For `execute stmt` SQL, should reset the SQL digest with the prepare SQL digest. + goCtx := context.Background() + if variable.EnablePProfSQLCPU.Load() && len(prepareStmt.NormalizedSQL) > 0 { + goCtx = pprof.WithLabels(goCtx, pprof.Labels("sql", FormatSQL(prepareStmt.NormalizedSQL).String())) + pprof.SetGoroutineLabels(goCtx) + } + if topsqlstate.TopSQLEnabled() && prepareStmt.SQLDigest != nil { + sc.IsSQLRegistered.Store(true) + topsql.AttachAndRegisterSQLInfo(goCtx, prepareStmt.NormalizedSQL, prepareStmt.SQLDigest, vars.InRestrictedSQL) + } + if s, ok := prepareStmt.PreparedAst.Stmt.(*ast.SelectStmt); ok { + if s.LockInfo == nil { + sc.WeakConsistency = isWeakConsistencyRead(ctx, execStmt) + } + } + } + // execute missed stmtID uses empty sql + sc.OriginalSQL = s.Text() + if explainStmt, ok := s.(*ast.ExplainStmt); ok { + sc.InExplainStmt = true + sc.ExplainFormat = explainStmt.Format + sc.InExplainAnalyzeStmt = explainStmt.Analyze + sc.IgnoreExplainIDSuffix = strings.ToLower(explainStmt.Format) == types.ExplainFormatBrief + sc.InVerboseExplain = strings.ToLower(explainStmt.Format) == types.ExplainFormatVerbose + s = explainStmt.Stmt + } else { + sc.ExplainFormat = "" + } + if explainForStmt, ok := s.(*ast.ExplainForStmt); ok { + sc.InExplainStmt = true + sc.InExplainAnalyzeStmt = true + sc.InVerboseExplain = strings.ToLower(explainForStmt.Format) == types.ExplainFormatVerbose + } + + // TODO: Many same bool variables here. + // We should set only two variables ( + // IgnoreErr and StrictSQLMode) to avoid setting the same bool variables and + // pushing them down to TiKV as flags. + + sc.InRestrictedSQL = vars.InRestrictedSQL + strictSQLMode := vars.SQLMode.HasStrictMode() + + errLevels := sc.ErrLevels() + errLevels[errctx.ErrGroupDividedByZero] = errctx.LevelWarn + switch stmt := s.(type) { + // `ResetUpdateStmtCtx` and `ResetDeleteStmtCtx` may modify the flags, so we'll need to store them. + case *ast.UpdateStmt: + ResetUpdateStmtCtx(sc, stmt, vars) + errLevels = sc.ErrLevels() + case *ast.DeleteStmt: + ResetDeleteStmtCtx(sc, stmt, vars) + errLevels = sc.ErrLevels() + case *ast.InsertStmt: + sc.InInsertStmt = true + // For insert statement (not for update statement), disabling the StrictSQLMode + // should make TruncateAsWarning and DividedByZeroAsWarning, + // but should not make DupKeyAsWarning. + if stmt.IgnoreErr { + errLevels[errctx.ErrGroupDupKey] = errctx.LevelWarn + errLevels[errctx.ErrGroupAutoIncReadFailed] = errctx.LevelWarn + errLevels[errctx.ErrGroupNoMatchedPartition] = errctx.LevelWarn + } + // For single-row INSERT statements, ignore non-strict mode + // See https://dev.mysql.com/doc/refman/5.7/en/constraint-invalid-data.html + isSingleInsert := len(stmt.Lists) == 1 + errLevels[errctx.ErrGroupBadNull] = errctx.ResolveErrLevel(false, (!strictSQLMode && !isSingleInsert) || stmt.IgnoreErr) + errLevels[errctx.ErrGroupNoDefault] = errctx.ResolveErrLevel(false, !strictSQLMode || stmt.IgnoreErr) + errLevels[errctx.ErrGroupDividedByZero] = errctx.ResolveErrLevel( + !vars.SQLMode.HasErrorForDivisionByZeroMode(), + !strictSQLMode || stmt.IgnoreErr, + ) + sc.Priority = stmt.Priority + sc.SetTypeFlags(sc.TypeFlags(). + WithTruncateAsWarning(!strictSQLMode || stmt.IgnoreErr). + WithIgnoreInvalidDateErr(vars.SQLMode.HasAllowInvalidDatesMode()). 
+ WithIgnoreZeroInDate(!vars.SQLMode.HasNoZeroInDateMode() || + !vars.SQLMode.HasNoZeroDateMode() || !strictSQLMode || stmt.IgnoreErr || + vars.SQLMode.HasAllowInvalidDatesMode())) + case *ast.CreateTableStmt, *ast.AlterTableStmt: + sc.InCreateOrAlterStmt = true + sc.SetTypeFlags(sc.TypeFlags(). + WithTruncateAsWarning(!strictSQLMode). + WithIgnoreInvalidDateErr(vars.SQLMode.HasAllowInvalidDatesMode()). + WithIgnoreZeroInDate(!vars.SQLMode.HasNoZeroInDateMode() || !strictSQLMode || + vars.SQLMode.HasAllowInvalidDatesMode()). + WithIgnoreZeroDateErr(!vars.SQLMode.HasNoZeroDateMode() || !strictSQLMode)) + + case *ast.LoadDataStmt: + sc.InLoadDataStmt = true + // return warning instead of error when load data meet no partition for value + errLevels[errctx.ErrGroupNoMatchedPartition] = errctx.LevelWarn + case *ast.SelectStmt: + sc.InSelectStmt = true + + // Return warning for truncate error in selection. + sc.SetTypeFlags(sc.TypeFlags(). + WithTruncateAsWarning(true). + WithIgnoreZeroInDate(true). + WithIgnoreInvalidDateErr(vars.SQLMode.HasAllowInvalidDatesMode())) + if opts := stmt.SelectStmtOpts; opts != nil { + sc.Priority = opts.Priority + sc.NotFillCache = !opts.SQLCache + } + sc.WeakConsistency = isWeakConsistencyRead(ctx, stmt) + case *ast.SetOprStmt: + sc.InSelectStmt = true + sc.SetTypeFlags(sc.TypeFlags(). + WithTruncateAsWarning(true). + WithIgnoreZeroInDate(true). + WithIgnoreInvalidDateErr(vars.SQLMode.HasAllowInvalidDatesMode())) + case *ast.ShowStmt: + sc.SetTypeFlags(sc.TypeFlags(). + WithIgnoreTruncateErr(true). + WithIgnoreZeroInDate(true). + WithIgnoreInvalidDateErr(vars.SQLMode.HasAllowInvalidDatesMode())) + if stmt.Tp == ast.ShowWarnings || stmt.Tp == ast.ShowErrors || stmt.Tp == ast.ShowSessionStates { + sc.InShowWarning = true + sc.SetWarnings(vars.StmtCtx.GetWarnings()) + } + case *ast.SplitRegionStmt: + sc.SetTypeFlags(sc.TypeFlags(). + WithIgnoreTruncateErr(false). + WithIgnoreZeroInDate(true). + WithIgnoreInvalidDateErr(vars.SQLMode.HasAllowInvalidDatesMode())) + case *ast.SetSessionStatesStmt: + sc.InSetSessionStatesStmt = true + sc.SetTypeFlags(sc.TypeFlags(). + WithIgnoreTruncateErr(true). + WithIgnoreZeroInDate(true). + WithIgnoreInvalidDateErr(vars.SQLMode.HasAllowInvalidDatesMode())) + default: + sc.SetTypeFlags(sc.TypeFlags(). + WithIgnoreTruncateErr(true). + WithIgnoreZeroInDate(true). + WithIgnoreInvalidDateErr(vars.SQLMode.HasAllowInvalidDatesMode())) + } + + if errLevels != sc.ErrLevels() { + sc.SetErrLevels(errLevels) + } + + sc.SetTypeFlags(sc.TypeFlags(). + WithSkipUTF8Check(vars.SkipUTF8Check). + WithSkipSACIICheck(vars.SkipASCIICheck). + WithSkipUTF8MB4Check(!globalConfig.Instance.CheckMb4ValueInUTF8.Load()). + // WithAllowNegativeToUnsigned with false value indicates values less than 0 should be clipped to 0 for unsigned integer types. + // This is the case for `insert`, `update`, `alter table`, `create table` and `load data infile` statements, when not in strict SQL mode. 
+ // see https://dev.mysql.com/doc/refman/5.7/en/out-of-range-and-overflow.html + WithAllowNegativeToUnsigned(!sc.InInsertStmt && !sc.InLoadDataStmt && !sc.InUpdateStmt && !sc.InCreateOrAlterStmt), + ) + + vars.PlanCacheParams.Reset() + if priority := mysql.PriorityEnum(atomic.LoadInt32(&variable.ForcePriority)); priority != mysql.NoPriority { + sc.Priority = priority + } + if vars.StmtCtx.LastInsertID > 0 { + sc.PrevLastInsertID = vars.StmtCtx.LastInsertID + } else { + sc.PrevLastInsertID = vars.StmtCtx.PrevLastInsertID + } + sc.PrevAffectedRows = 0 + if vars.StmtCtx.InUpdateStmt || vars.StmtCtx.InDeleteStmt || vars.StmtCtx.InInsertStmt || vars.StmtCtx.InSetSessionStatesStmt { + sc.PrevAffectedRows = int64(vars.StmtCtx.AffectedRows()) + } else if vars.StmtCtx.InSelectStmt { + sc.PrevAffectedRows = -1 + } + if globalConfig.Instance.EnableCollectExecutionInfo.Load() { + // In ExplainFor case, RuntimeStatsColl should not be reset for reuse, + // because ExplainFor need to display the last statement information. + reuseObj := vars.StmtCtx.RuntimeStatsColl + if _, ok := s.(*ast.ExplainForStmt); ok { + reuseObj = nil + } + sc.RuntimeStatsColl = execdetails.NewRuntimeStatsColl(reuseObj) + + // also enable index usage collector + if sc.IndexUsageCollector == nil { + sc.IndexUsageCollector = ctx.NewStmtIndexUsageCollector() + } else { + sc.IndexUsageCollector.Reset() + } + } else { + // turn off the index usage collector + sc.IndexUsageCollector = nil + } + + sc.SetForcePlanCache(fixcontrol.GetBoolWithDefault(vars.OptimizerFixControl, fixcontrol.Fix49736, false)) + sc.SetAlwaysWarnSkipCache(sc.InExplainStmt && sc.ExplainFormat == "plan_cache") + errCount, warnCount := vars.StmtCtx.NumErrorWarnings() + vars.SysErrorCount = errCount + vars.SysWarningCount = warnCount + vars.ExchangeChunkStatus() + vars.StmtCtx = sc + vars.PrevFoundInPlanCache = vars.FoundInPlanCache + vars.FoundInPlanCache = false + vars.PrevFoundInBinding = vars.FoundInBinding + vars.FoundInBinding = false + vars.DurationWaitTS = 0 + vars.CurrInsertBatchExtraCols = nil + vars.CurrInsertValues = chunk.Row{} + + return +} + +// ResetUpdateStmtCtx resets statement context for UpdateStmt. +func ResetUpdateStmtCtx(sc *stmtctx.StatementContext, stmt *ast.UpdateStmt, vars *variable.SessionVars) { + strictSQLMode := vars.SQLMode.HasStrictMode() + sc.InUpdateStmt = true + errLevels := sc.ErrLevels() + errLevels[errctx.ErrGroupDupKey] = errctx.ResolveErrLevel(false, stmt.IgnoreErr) + errLevels[errctx.ErrGroupBadNull] = errctx.ResolveErrLevel(false, !strictSQLMode || stmt.IgnoreErr) + errLevels[errctx.ErrGroupNoDefault] = errLevels[errctx.ErrGroupBadNull] + errLevels[errctx.ErrGroupDividedByZero] = errctx.ResolveErrLevel( + !vars.SQLMode.HasErrorForDivisionByZeroMode(), + !strictSQLMode || stmt.IgnoreErr, + ) + errLevels[errctx.ErrGroupNoMatchedPartition] = errctx.ResolveErrLevel(false, stmt.IgnoreErr) + sc.SetErrLevels(errLevels) + sc.Priority = stmt.Priority + sc.SetTypeFlags(sc.TypeFlags(). + WithTruncateAsWarning(!strictSQLMode || stmt.IgnoreErr). + WithIgnoreInvalidDateErr(vars.SQLMode.HasAllowInvalidDatesMode()). + WithIgnoreZeroInDate(!vars.SQLMode.HasNoZeroInDateMode() || !vars.SQLMode.HasNoZeroDateMode() || + !strictSQLMode || stmt.IgnoreErr || vars.SQLMode.HasAllowInvalidDatesMode())) +} + +// ResetDeleteStmtCtx resets statement context for DeleteStmt. 
+func ResetDeleteStmtCtx(sc *stmtctx.StatementContext, stmt *ast.DeleteStmt, vars *variable.SessionVars) { + strictSQLMode := vars.SQLMode.HasStrictMode() + sc.InDeleteStmt = true + errLevels := sc.ErrLevels() + errLevels[errctx.ErrGroupDupKey] = errctx.ResolveErrLevel(false, stmt.IgnoreErr) + errLevels[errctx.ErrGroupBadNull] = errctx.ResolveErrLevel(false, !strictSQLMode || stmt.IgnoreErr) + errLevels[errctx.ErrGroupNoDefault] = errLevels[errctx.ErrGroupBadNull] + errLevels[errctx.ErrGroupDividedByZero] = errctx.ResolveErrLevel( + !vars.SQLMode.HasErrorForDivisionByZeroMode(), + !strictSQLMode || stmt.IgnoreErr, + ) + sc.SetErrLevels(errLevels) + sc.Priority = stmt.Priority + sc.SetTypeFlags(sc.TypeFlags(). + WithTruncateAsWarning(!strictSQLMode || stmt.IgnoreErr). + WithIgnoreInvalidDateErr(vars.SQLMode.HasAllowInvalidDatesMode()). + WithIgnoreZeroInDate(!vars.SQLMode.HasNoZeroInDateMode() || !vars.SQLMode.HasNoZeroDateMode() || + !strictSQLMode || stmt.IgnoreErr || vars.SQLMode.HasAllowInvalidDatesMode())) +} + +func setOptionForTopSQL(sc *stmtctx.StatementContext, snapshot kv.Snapshot) { + if snapshot == nil { + return + } + // pipelined dml may already flush in background, don't touch it to avoid race. + if txn, ok := snapshot.(kv.Transaction); ok && txn.IsPipelined() { + return + } + snapshot.SetOption(kv.ResourceGroupTagger, sc.GetResourceGroupTagger()) + if sc.KvExecCounter != nil { + snapshot.SetOption(kv.RPCInterceptor, sc.KvExecCounter.RPCInterceptor()) + } +} + +func isWeakConsistencyRead(ctx sessionctx.Context, node ast.Node) bool { + sessionVars := ctx.GetSessionVars() + return sessionVars.ConnectionID > 0 && sessionVars.ReadConsistency.IsWeak() && + plannercore.IsAutoCommitTxn(sessionVars) && plannercore.IsReadOnly(node, sessionVars) +} diff --git a/pkg/executor/test/writetest/write_test.go b/pkg/executor/test/writetest/write_test.go index 07bd89cb9206f..c9afceb3d22c5 100644 --- a/pkg/executor/test/writetest/write_test.go +++ b/pkg/executor/test/writetest/write_test.go @@ -1311,9 +1311,17 @@ func TestIssue18681(t *testing.T) { require.NotNil(t, ld) deleteSQL := "delete from load_data_test" +<<<<<<< HEAD selectSQL := "select bin(a), bin(b), bin(c), bin(d) from load_data_test;" ctx.GetSessionVars().StmtCtx.DupKeyAsWarning = true ctx.GetSessionVars().StmtCtx.BadNullAsWarning = true +======= + selectSQL := "select bin(a), bin(b), bin(c), bin(d), bin(e), bin(f) from load_data_test;" + levels := ctx.GetSessionVars().StmtCtx.ErrLevels() + levels[errctx.ErrGroupDupKey] = errctx.LevelWarn + levels[errctx.ErrGroupBadNull] = errctx.LevelWarn + levels[errctx.ErrGroupNoDefault] = errctx.LevelWarn +>>>>>>> 91beef4bb14 (*: disable insert null to not-null column for single-row insertion in non-strict mode (#55477)) sc := ctx.GetSessionVars().StmtCtx originIgnoreTruncate := sc.IgnoreTruncate.Load() diff --git a/pkg/expression/builtin_miscellaneous_vec_test.go b/pkg/expression/builtin_miscellaneous_vec_test.go index 91da4c219c536..e7db3278fc0b5 100644 --- a/pkg/expression/builtin_miscellaneous_vec_test.go +++ b/pkg/expression/builtin_miscellaneous_vec_test.go @@ -151,7 +151,14 @@ func TestSleepVectorized(t *testing.T) { warnCnt := counter{} // non-strict model +<<<<<<< HEAD sessVars.StmtCtx.BadNullAsWarning = true +======= + var levels errctx.LevelMap + levels[errctx.ErrGroupBadNull] = errctx.LevelWarn + levels[errctx.ErrGroupNoDefault] = errctx.LevelWarn + sessVars.StmtCtx.SetErrLevels(levels) +>>>>>>> 91beef4bb14 (*: disable insert null to not-null column for single-row insertion in 
non-strict mode (#55477)) input.AppendFloat64(0, 1) err = f.vecEvalInt(input, result) require.NoError(t, err) @@ -184,7 +191,13 @@ func TestSleepVectorized(t *testing.T) { require.Equal(t, uint16(warnCnt.add(2)), sessVars.StmtCtx.WarningCount()) // for error case under the strict model +<<<<<<< HEAD sessVars.StmtCtx.BadNullAsWarning = false +======= + levels[errctx.ErrGroupBadNull] = errctx.LevelError + levels[errctx.ErrGroupNoDefault] = errctx.LevelError + sessVars.StmtCtx.SetErrLevels(levels) +>>>>>>> 91beef4bb14 (*: disable insert null to not-null column for single-row insertion in non-strict mode (#55477)) input.Reset() input.AppendNull(0) err = f.vecEvalInt(input, result) diff --git a/pkg/expression/evaluator_test.go b/pkg/expression/evaluator_test.go index daeefbfacff8b..d5a343fc48afa 100644 --- a/pkg/expression/evaluator_test.go +++ b/pkg/expression/evaluator_test.go @@ -104,7 +104,14 @@ func TestSleep(t *testing.T) { fc := funcs[ast.Sleep] // non-strict model +<<<<<<< HEAD sessVars.StmtCtx.BadNullAsWarning = true +======= + var levels errctx.LevelMap + levels[errctx.ErrGroupBadNull] = errctx.LevelWarn + levels[errctx.ErrGroupNoDefault] = errctx.LevelWarn + sessVars.StmtCtx.SetErrLevels(levels) +>>>>>>> 91beef4bb14 (*: disable insert null to not-null column for single-row insertion in non-strict mode (#55477)) d := make([]types.Datum, 1) f, err := fc.getFunction(ctx, datumsToConstants(d)) require.NoError(t, err) @@ -121,7 +128,13 @@ func TestSleep(t *testing.T) { require.Equal(t, int64(0), ret) // for error case under the strict model +<<<<<<< HEAD sessVars.StmtCtx.BadNullAsWarning = false +======= + levels[errctx.ErrGroupBadNull] = errctx.LevelError + levels[errctx.ErrGroupNoDefault] = errctx.LevelError + sessVars.StmtCtx.SetErrLevels(levels) +>>>>>>> 91beef4bb14 (*: disable insert null to not-null column for single-row insertion in non-strict mode (#55477)) d[0].SetNull() _, err = fc.getFunction(ctx, datumsToConstants(d)) require.NoError(t, err) diff --git a/pkg/expression/exprstatic/evalctx_test.go b/pkg/expression/exprstatic/evalctx_test.go new file mode 100644 index 0000000000000..0a792e4ef3e87 --- /dev/null +++ b/pkg/expression/exprstatic/evalctx_test.go @@ -0,0 +1,670 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
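Editor's note: the write_test.go, builtin_miscellaneous_vec_test.go, and evaluator_test.go hunks above all perform the same migration, replacing the removed StmtCtx.BadNullAsWarning boolean with errctx.LevelMap entries applied through SetErrLevels. A minimal, self-contained sketch of the pattern (illustrative only, not part of the patch):

package main

import (
	"fmt"

	"github.com/pingcap/tidb/pkg/errctx"
)

func main() {
	// Non-strict mode: downgrade bad-NULL and missing-default errors to
	// warnings, the equivalent of the old "BadNullAsWarning = true".
	var levels errctx.LevelMap
	levels[errctx.ErrGroupBadNull] = errctx.LevelWarn
	levels[errctx.ErrGroupNoDefault] = errctx.LevelWarn
	fmt.Println(levels[errctx.ErrGroupBadNull] == errctx.LevelWarn) // true
	// The tests install the map via sessVars.StmtCtx.SetErrLevels(levels);
	// strict mode swaps the same two groups back to errctx.LevelError.
}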
+ +package exprstatic + +import ( + "fmt" + "strings" + "testing" + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/pkg/errctx" + "github.com/pingcap/tidb/pkg/expression/exprctx" + "github.com/pingcap/tidb/pkg/expression/expropt" + infoschema "github.com/pingcap/tidb/pkg/infoschema/context" + "github.com/pingcap/tidb/pkg/parser/auth" + "github.com/pingcap/tidb/pkg/parser/mysql" + "github.com/pingcap/tidb/pkg/sessionctx/variable" + "github.com/pingcap/tidb/pkg/types" + contextutil "github.com/pingcap/tidb/pkg/util/context" + "github.com/pingcap/tidb/pkg/util/deeptest" + "github.com/stretchr/testify/require" +) + +func TestNewStaticEvalCtx(t *testing.T) { + // default context + prevID := contextutil.GenContextID() + ctx := NewEvalContext() + require.Equal(t, prevID+1, ctx.CtxID()) + checkDefaultStaticEvalCtx(t, ctx) + + // with options + prevID = ctx.CtxID() + options, stateForTest := getEvalCtxOptionsForTest(t) + ctx = NewEvalContext(options...) + require.Equal(t, prevID+1, ctx.CtxID()) + checkOptionsStaticEvalCtx(t, ctx, stateForTest) +} + +func checkDefaultStaticEvalCtx(t *testing.T, ctx *EvalContext) { + mode, err := mysql.GetSQLMode(mysql.DefaultSQLMode) + require.NoError(t, err) + require.Equal(t, mode, ctx.SQLMode()) + require.Same(t, time.UTC, ctx.Location()) + require.Equal(t, types.NewContext(types.StrictFlags, time.UTC, ctx), ctx.TypeCtx()) + require.Equal(t, errctx.NewContextWithLevels(errctx.LevelMap{}, ctx), ctx.ErrCtx()) + require.Equal(t, "", ctx.CurrentDB()) + require.Equal(t, variable.DefMaxAllowedPacket, ctx.GetMaxAllowedPacket()) + require.Equal(t, variable.DefDefaultWeekFormat, ctx.GetDefaultWeekFormatMode()) + require.Equal(t, variable.DefDivPrecisionIncrement, ctx.GetDivPrecisionIncrement()) + require.Empty(t, ctx.AllParamValues()) + require.Equal(t, variable.NewUserVars(), ctx.GetUserVarsReader()) + require.True(t, ctx.GetOptionalPropSet().IsEmpty()) + p, ok := ctx.GetOptionalPropProvider(exprctx.OptPropAdvisoryLock) + require.Nil(t, p) + require.False(t, ok) + + tm, err := ctx.CurrentTime() + require.NoError(t, err) + require.Same(t, time.UTC, tm.Location()) + require.InDelta(t, time.Now().Unix(), tm.Unix(), 5) + + warnHandler, ok := ctx.warnHandler.(*contextutil.StaticWarnHandler) + require.True(t, ok) + require.Equal(t, 0, warnHandler.WarningCount()) +} + +type evalCtxOptionsTestState struct { + now time.Time + loc *time.Location + warnHandler *contextutil.StaticWarnHandler + userVars *variable.UserVars + ddlOwner bool +} + +func getEvalCtxOptionsForTest(t *testing.T) ([]EvalCtxOption, *evalCtxOptionsTestState) { + loc, err := time.LoadLocation("US/Eastern") + require.NoError(t, err) + s := &evalCtxOptionsTestState{ + now: time.Now(), + loc: loc, + warnHandler: contextutil.NewStaticWarnHandler(8), + userVars: variable.NewUserVars(), + } + + provider1 := expropt.CurrentUserPropProvider(func() (*auth.UserIdentity, []*auth.RoleIdentity) { + return &auth.UserIdentity{Username: "user1", Hostname: "host1"}, + []*auth.RoleIdentity{{Username: "role1", Hostname: "host2"}} + }) + + provider2 := expropt.DDLOwnerInfoProvider(func() bool { + return s.ddlOwner + }) + + return []EvalCtxOption{ + WithWarnHandler(s.warnHandler), + WithSQLMode(mysql.ModeNoZeroDate | mysql.ModeStrictTransTables), + WithTypeFlags(types.FlagAllowNegativeToUnsigned | types.FlagSkipASCIICheck), + WithErrLevelMap(errctx.LevelMap{ + errctx.ErrGroupBadNull: errctx.LevelError, + errctx.ErrGroupNoDefault: errctx.LevelError, + errctx.ErrGroupDividedByZero: errctx.LevelWarn, + }), + 
WithLocation(loc),
+		WithCurrentDB("db1"),
+		WithCurrentTime(func() (time.Time, error) {
+			return s.now, nil
+		}),
+		WithMaxAllowedPacket(12345),
+		WithDefaultWeekFormatMode("3"),
+		WithDivPrecisionIncrement(5),
+		WithUserVarsReader(s.userVars),
+		WithOptionalProperty(provider1, provider2),
+	}, s
+}
+
+func checkOptionsStaticEvalCtx(t *testing.T, ctx *EvalContext, s *evalCtxOptionsTestState) {
+	require.Same(t, ctx.warnHandler, s.warnHandler)
+	require.Equal(t, mysql.ModeNoZeroDate|mysql.ModeStrictTransTables, ctx.SQLMode())
+	require.Equal(t,
+		types.NewContext(types.FlagAllowNegativeToUnsigned|types.FlagSkipASCIICheck, s.loc, ctx),
+		ctx.TypeCtx(),
+	)
+	require.Equal(t, errctx.NewContextWithLevels(errctx.LevelMap{
+		errctx.ErrGroupBadNull:       errctx.LevelError,
+		errctx.ErrGroupNoDefault:     errctx.LevelError,
+		errctx.ErrGroupDividedByZero: errctx.LevelWarn,
+	}, ctx), ctx.ErrCtx())
+	require.Same(t, s.loc, ctx.Location())
+	require.Equal(t, "db1", ctx.CurrentDB())
+	current, err := ctx.CurrentTime()
+	require.NoError(t, err)
+	require.Equal(t, current.UnixNano(), s.now.UnixNano())
+	require.Same(t, s.loc, current.Location())
+	require.Equal(t, uint64(12345), ctx.GetMaxAllowedPacket())
+	require.Equal(t, "3", ctx.GetDefaultWeekFormatMode())
+	require.Equal(t, 5, ctx.GetDivPrecisionIncrement())
+	require.Same(t, s.userVars, ctx.GetUserVarsReader())
+
+	var optSet exprctx.OptionalEvalPropKeySet
+	optSet = optSet.Add(exprctx.OptPropCurrentUser).Add(exprctx.OptPropDDLOwnerInfo)
+	require.Equal(t, optSet, ctx.GetOptionalPropSet())
+	p, ok := ctx.GetOptionalPropProvider(exprctx.OptPropCurrentUser)
+	require.True(t, ok)
+	user, roles := p.(expropt.CurrentUserPropProvider)()
+	require.Equal(t, &auth.UserIdentity{Username: "user1", Hostname: "host1"}, user)
+	require.Equal(t, []*auth.RoleIdentity{{Username: "role1", Hostname: "host2"}}, roles)
+	p, ok = ctx.GetOptionalPropProvider(exprctx.OptPropDDLOwnerInfo)
+	s.ddlOwner = true
+	require.True(t, ok)
+	require.True(t, p.(expropt.DDLOwnerInfoProvider)())
+	s.ddlOwner = false
+	require.False(t, p.(expropt.DDLOwnerInfoProvider)())
+	p, ok = ctx.GetOptionalPropProvider(exprctx.OptPropInfoSchema)
+	require.False(t, ok)
+	require.Nil(t, p)
+}
+
+func TestStaticEvalCtxCurrentTime(t *testing.T) {
+	loc1, err := time.LoadLocation("US/Eastern")
+	require.NoError(t, err)
+
+	tm := time.UnixMicro(123456789).In(loc1)
+	calls := 0
+	getTime := func() (time.Time, error) {
+		defer func() {
+			calls++
+		}()
+
+		if calls < 2 {
+			return time.Time{}, errors.NewNoStackError(fmt.Sprintf("err%d", calls))
+		}
+
+		if calls == 2 {
+			return tm, nil
+		}
+
+		return time.Time{}, errors.NewNoStackError("should not reach here")
+	}
+
+	ctx := NewEvalContext(WithCurrentTime(getTime))
+
+	// the first two calls to get the time should fail
+	got, err := ctx.CurrentTime()
+	require.EqualError(t, err, "err0")
+	require.Equal(t, time.Time{}, got)
+
+	got, err = ctx.CurrentTime()
+	require.EqualError(t, err, "err1")
+	require.Equal(t, time.Time{}, got)
+
+	// the third call will succeed
+	got, err = ctx.CurrentTime()
+	require.Nil(t, err)
+	require.Equal(t, tm.UnixNano(), got.UnixNano())
+	require.Same(t, time.UTC, got.Location())
+	require.Equal(t, 3, calls)
+
+	// subsequent calls should return the cached time without calling the inner function again
+	got, err = ctx.CurrentTime()
+	require.Nil(t, err)
+	require.Equal(t, tm.UnixNano(), got.UnixNano())
+	require.Same(t, time.UTC, got.Location())
+	require.Equal(t, 3, calls)
+
+	// CurrentTime should have the same location as `ctx.Location()`
+	loc2, err := time.LoadLocation("Australia/Sydney")
+	require.NoError(t, err)
+	ctx = NewEvalContext(
+		WithLocation(loc2),
+		WithCurrentTime(func() (time.Time, error) {
+			return tm, nil
+		}),
+	)
+	got, err = ctx.CurrentTime()
+	require.NoError(t, err)
+	require.Equal(t, tm.UnixNano(), got.UnixNano())
+	require.Same(t, loc2, got.Location())
+
+	// Apply should copy the current time
+	ctx2 := ctx.Apply()
+	got, err = ctx2.CurrentTime()
+	require.NoError(t, err)
+	require.Equal(t, tm.UnixNano(), got.UnixNano())
+	require.Same(t, loc2, got.Location())
+
+	// Apply with location should change the current time's location
+	ctx2 = ctx.Apply(WithLocation(loc1))
+	got, err = ctx2.CurrentTime()
+	require.NoError(t, err)
+	require.Equal(t, tm.UnixNano(), got.UnixNano())
+	require.Same(t, loc1, got.Location())
+
+	// Apply will not affect the previous context's current time
+	got, err = ctx.CurrentTime()
+	require.NoError(t, err)
+	require.Equal(t, tm.UnixNano(), got.UnixNano())
+	require.Same(t, loc2, got.Location())
+
+	// Apply with a different current time func
+	ctx2 = ctx.Apply(WithCurrentTime(func() (time.Time, error) {
+		return time.UnixMicro(987654321), nil
+	}))
+	got, err = ctx2.CurrentTime()
+	require.NoError(t, err)
+	require.Equal(t, int64(987654321), got.UnixMicro())
+	require.Same(t, loc2, got.Location())
+
+	// Apply will not affect the previous context's current time
+	got, err = ctx.CurrentTime()
+	require.NoError(t, err)
+	require.Equal(t, tm.UnixNano(), got.UnixNano())
+	require.Same(t, loc2, got.Location())
+}
+
+func TestStaticEvalCtxWarnings(t *testing.T) {
+	// the default context should have an empty StaticWarnHandler
+	ctx := NewEvalContext()
+	h, ok := ctx.warnHandler.(*contextutil.StaticWarnHandler)
+	require.True(t, ok)
+	require.Equal(t, 0, h.WarningCount())
+
+	// WithWarnHandler should work
+	ignoreHandler := contextutil.IgnoreWarn
+	ctx = NewEvalContext(WithWarnHandler(ignoreHandler))
+	require.True(t, ctx.warnHandler == ignoreHandler)
+
+	// All contexts should use the same warning handler
+	h = contextutil.NewStaticWarnHandler(8)
+	ctx = NewEvalContext(WithWarnHandler(h))
+	tc, ec := ctx.TypeCtx(), ctx.ErrCtx()
+	h.AppendWarning(errors.NewNoStackError("warn0"))
+	ctx.AppendWarning(errors.NewNoStackError("warn1"))
+	ctx.AppendNote(errors.NewNoStackError("note1"))
+	tc.AppendWarning(errors.NewNoStackError("warn2"))
+	ec.AppendWarning(errors.NewNoStackError("warn3"))
+	require.Equal(t, 5, h.WarningCount())
+	require.Equal(t, h.WarningCount(), ctx.WarningCount())
+
+	// ctx.CopyWarnings
+	warnings := ctx.CopyWarnings(nil)
+	require.Equal(t, []contextutil.SQLWarn{
+		{Level: contextutil.WarnLevelWarning, Err: errors.NewNoStackError("warn0")},
+		{Level: contextutil.WarnLevelWarning, Err: errors.NewNoStackError("warn1")},
+		{Level: contextutil.WarnLevelNote, Err: errors.NewNoStackError("note1")},
+		{Level: contextutil.WarnLevelWarning, Err: errors.NewNoStackError("warn2")},
+		{Level: contextutil.WarnLevelWarning, Err: errors.NewNoStackError("warn3")},
+	}, warnings)
+	require.Equal(t, 5, h.WarningCount())
+	require.Equal(t, h.WarningCount(), ctx.WarningCount())
+
+	// ctx.TruncateWarnings
+	warnings = ctx.TruncateWarnings(2)
+	require.Equal(t, []contextutil.SQLWarn{
+		{Level: contextutil.WarnLevelNote, Err: errors.NewNoStackError("note1")},
+		{Level: contextutil.WarnLevelWarning, Err: errors.NewNoStackError("warn2")},
+		{Level: contextutil.WarnLevelWarning, Err: errors.NewNoStackError("warn3")},
+	}, warnings)
+	require.Equal(t, 2, h.WarningCount())
+	require.Equal(t, h.WarningCount(), ctx.WarningCount())
+	warnings =
ctx.CopyWarnings(nil) + require.Equal(t, []contextutil.SQLWarn{ + {Level: contextutil.WarnLevelWarning, Err: errors.NewNoStackError("warn0")}, + {Level: contextutil.WarnLevelWarning, Err: errors.NewNoStackError("warn1")}, + }, warnings) + + // Apply should use the old warning handler by default + ctx2 := ctx.Apply() + require.NotSame(t, ctx, ctx2) + require.True(t, ctx.warnHandler == ctx2.warnHandler) + require.True(t, ctx.warnHandler == h) + + // Apply with `WithWarnHandler` + h2 := contextutil.NewStaticWarnHandler(16) + ctx2 = ctx.Apply(WithWarnHandler(h2)) + require.True(t, ctx2.warnHandler == h2) + require.True(t, ctx.warnHandler == h) + + // The type context and error context should use the new handler. + ctx.TruncateWarnings(0) + tc, ec = ctx.TypeCtx(), ctx.ErrCtx() + tc2, ec2 := ctx2.TypeCtx(), ctx2.ErrCtx() + tc2.AppendWarning(errors.NewNoStackError("warn4")) + ec2.AppendWarning(errors.NewNoStackError("warn5")) + tc.AppendWarning(errors.NewNoStackError("warn6")) + ec.AppendWarning(errors.NewNoStackError("warn7")) + require.Equal(t, []contextutil.SQLWarn{ + {Level: contextutil.WarnLevelWarning, Err: errors.NewNoStackError("warn4")}, + {Level: contextutil.WarnLevelWarning, Err: errors.NewNoStackError("warn5")}, + }, ctx2.CopyWarnings(nil)) + require.Equal(t, []contextutil.SQLWarn{ + {Level: contextutil.WarnLevelWarning, Err: errors.NewNoStackError("warn6")}, + {Level: contextutil.WarnLevelWarning, Err: errors.NewNoStackError("warn7")}, + }, ctx.CopyWarnings(nil)) +} + +func TestStaticEvalContextOptionalProps(t *testing.T) { + ctx := NewEvalContext() + require.True(t, ctx.GetOptionalPropSet().IsEmpty()) + + ctx2 := ctx.Apply(WithOptionalProperty( + expropt.CurrentUserPropProvider(func() (u *auth.UserIdentity, r []*auth.RoleIdentity) { return }), + )) + var emptySet exprctx.OptionalEvalPropKeySet + require.Equal(t, emptySet, ctx.GetOptionalPropSet()) + require.Equal(t, emptySet.Add(exprctx.OptPropCurrentUser), ctx2.GetOptionalPropSet()) + + // Apply should override all optional properties + ctx3 := ctx2.Apply(WithOptionalProperty( + expropt.DDLOwnerInfoProvider(func() bool { return true }), + expropt.InfoSchemaPropProvider(func(isDomain bool) infoschema.MetaOnlyInfoSchema { return nil }), + )) + require.Equal(t, + emptySet.Add(exprctx.OptPropDDLOwnerInfo).Add(exprctx.OptPropInfoSchema), + ctx3.GetOptionalPropSet(), + ) + require.Equal(t, emptySet, ctx.GetOptionalPropSet()) + require.Equal(t, emptySet.Add(exprctx.OptPropCurrentUser), ctx2.GetOptionalPropSet()) +} + +func TestUpdateStaticEvalContext(t *testing.T) { + oldCtx := NewEvalContext() + ctx := oldCtx.Apply() + + // Should return a different instance + require.NotSame(t, oldCtx, ctx) + + // CtxID should be different + require.Greater(t, ctx.CtxID(), oldCtx.CtxID()) + + // inner state should not be the same address + require.NotSame(t, &oldCtx.evalCtxState, &ctx.evalCtxState) + + // compare a state object by excluding some changed fields + excludeChangedFields := func(s *evalCtxState) evalCtxState { + state := *s + state.typeCtx = types.DefaultStmtNoWarningContext + state.errCtx = errctx.StrictNoWarningContext + state.currentTime = nil + return state + } + require.Equal(t, excludeChangedFields(&oldCtx.evalCtxState), excludeChangedFields(&ctx.evalCtxState)) + + // check fields + checkDefaultStaticEvalCtx(t, ctx) + + // apply options + opts, optState := getEvalCtxOptionsForTest(t) + ctx2 := oldCtx.Apply(opts...) 
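+	// (Editorial aside, not part of the patch.) The assertions in this test pin
+	// down the copy-on-Apply contract of the static EvalContext: Apply never
+	// mutates its receiver; it returns a fresh context with a larger ctx ID. A
+	// minimal sketch of calling code relying on that contract, using only the
+	// exported exprstatic API shown in this diff:
+	//
+	//	base := exprstatic.NewEvalContext()
+	//	loc, err := time.LoadLocation("US/Eastern")
+	//	if err != nil {
+	//		panic(err)
+	//	}
+	//	derived := base.Apply(exprstatic.WithLocation(loc))
+	//	fmt.Println(base.CtxID() != derived.CtxID()) // true: a new context
+	//	fmt.Println(base.Location())                 // still UTC (the default)
+	//	fmt.Println(derived.Location())              // US/Eastern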
+ require.Greater(t, ctx2.CtxID(), ctx.CtxID()) + checkOptionsStaticEvalCtx(t, ctx2, optState) + + // old ctx aren't affected + checkDefaultStaticEvalCtx(t, oldCtx) + + // create with options + opts, optState = getEvalCtxOptionsForTest(t) + ctx3 := NewEvalContext(opts...) + require.Greater(t, ctx3.CtxID(), ctx2.CtxID()) + checkOptionsStaticEvalCtx(t, ctx3, optState) +} + +func TestParamList(t *testing.T) { + paramList := variable.NewPlanCacheParamList() + paramList.Append(types.NewDatum(1)) + paramList.Append(types.NewDatum(2)) + paramList.Append(types.NewDatum(3)) + ctx := NewEvalContext( + WithParamList(paramList), + ) + for i := 0; i < 3; i++ { + val, err := ctx.GetParamValue(i) + require.NoError(t, err) + require.Equal(t, int64(i+1), val.GetInt64()) + } + + // after reset the paramList and append new one, the value is still persisted + paramList.Reset() + paramList.Append(types.NewDatum(4)) + for i := 0; i < 3; i++ { + val, err := ctx.GetParamValue(i) + require.NoError(t, err) + require.Equal(t, int64(i+1), val.GetInt64()) + } +} + +func TestMakeEvalContextStatic(t *testing.T) { + // This test is to ensure that the `MakeEvalContextStatic` function works as expected. + // It requires the developers to create a special `EvalContext`, whose every fields + // are non-empty. Then, the `MakeEvalContextStatic` function is called to create a new + // clone of it. Finally, the new clone is compared with the original one to ensure that + // the fields are correctly copied. + paramList := variable.NewPlanCacheParamList() + paramList.Append(types.NewDatum(1)) + + userVars := variable.NewUserVars() + userVars.SetUserVarVal("a", types.NewStringDatum("v1")) + userVars.SetUserVarVal("b", types.NewIntDatum(2)) + + provider := expropt.DDLOwnerInfoProvider(func() bool { + return true + }) + + obj := NewEvalContext( + WithWarnHandler(contextutil.NewStaticWarnHandler(16)), + WithSQLMode(mysql.ModeNoZeroDate|mysql.ModeStrictTransTables), + WithTypeFlags(types.FlagAllowNegativeToUnsigned|types.FlagSkipASCIICheck), + WithErrLevelMap(errctx.LevelMap{}), + WithLocation(time.UTC), + WithCurrentDB("db1"), + WithCurrentTime(func() (time.Time, error) { + return time.Now(), nil + }), + WithMaxAllowedPacket(12345), + WithDefaultWeekFormatMode("3"), + WithDivPrecisionIncrement(5), + WithParamList(paramList), + WithUserVarsReader(userVars), + WithOptionalProperty(provider), + WithEnableRedactLog("test"), + ) + obj.AppendWarning(errors.New("test warning")) + + ignorePath := []string{ + "$.evalCtxState.warnHandler.**", + "$.evalCtxState.typeCtx.**", + "$.evalCtxState.errCtx.**", + "$.evalCtxState.currentTime.**", + "$.evalCtxState.userVars.lock", + "$.evalCtxState.props", + "$.id", + } + deeptest.AssertRecursivelyNotEqual(t, obj, NewEvalContext(), + deeptest.WithIgnorePath(ignorePath), + ) + + staticObj := MakeEvalContextStatic(obj) + + deeptest.AssertDeepClonedEqual(t, obj, staticObj, + deeptest.WithIgnorePath(ignorePath), + deeptest.WithPointerComparePath([]string{ + "$.evalCtxState.warnHandler", + "$.evalCtxState.paramList*.b", + }), + ) + + require.Equal(t, obj.GetWarnHandler(), staticObj.GetWarnHandler()) + require.Equal(t, obj.typeCtx.Flags(), staticObj.typeCtx.Flags()) + require.Equal(t, obj.errCtx.LevelMap(), staticObj.errCtx.LevelMap()) + + oldT, err := obj.CurrentTime() + require.NoError(t, err) + newT, err := staticObj.CurrentTime() + require.NoError(t, err) + require.Equal(t, oldT.Unix(), newT.Unix()) + + require.NotEqual(t, obj.GetOptionalPropSet(), staticObj.GetOptionalPropSet()) + // Now, it didn't copy 
any optional properties. + require.Equal(t, exprctx.OptionalEvalPropKeySet(0), staticObj.GetOptionalPropSet()) +} + +func TestEvalCtxLoadSystemVars(t *testing.T) { + vars := []struct { + name string + val string + field string + assert func(ctx *EvalContext, vars *variable.SessionVars) + }{ + { + name: "time_zone", + val: "Europe/Berlin", + field: "$.typeCtx.loc", + assert: func(ctx *EvalContext, vars *variable.SessionVars) { + require.Equal(t, "Europe/Berlin", ctx.Location().String()) + require.Equal(t, vars.Location().String(), ctx.Location().String()) + }, + }, + { + name: "sql_mode", + val: "ALLOW_INVALID_DATES,ONLY_FULL_GROUP_BY", + field: "$.sqlMode", + assert: func(ctx *EvalContext, vars *variable.SessionVars) { + require.Equal(t, mysql.ModeAllowInvalidDates|mysql.ModeOnlyFullGroupBy, ctx.SQLMode()) + require.Equal(t, vars.SQLMode, ctx.SQLMode()) + }, + }, + { + name: "timestamp", + val: "1234567890.123456", + field: "$.currentTime", + assert: func(ctx *EvalContext, vars *variable.SessionVars) { + currentTime, err := ctx.CurrentTime() + require.NoError(t, err) + require.Equal(t, int64(1234567890123456), currentTime.UnixMicro()) + require.Equal(t, vars.Location().String(), currentTime.Location().String()) + }, + }, + { + name: strings.ToUpper("max_allowed_packet"), // test for settings an upper case variable + val: "524288", + field: "$.maxAllowedPacket", + assert: func(ctx *EvalContext, vars *variable.SessionVars) { + require.Equal(t, uint64(524288), ctx.GetMaxAllowedPacket()) + require.Equal(t, vars.MaxAllowedPacket, ctx.GetMaxAllowedPacket()) + }, + }, + { + name: strings.ToUpper("tidb_redact_log"), // test for settings an upper case variable + val: "on", + field: "$.enableRedactLog", + assert: func(ctx *EvalContext, vars *variable.SessionVars) { + require.Equal(t, "ON", ctx.GetTiDBRedactLog()) + require.Equal(t, vars.EnableRedactLog, ctx.GetTiDBRedactLog()) + }, + }, + { + name: "default_week_format", + val: "5", + field: "$.defaultWeekFormatMode", + assert: func(ctx *EvalContext, vars *variable.SessionVars) { + require.Equal(t, "5", ctx.GetDefaultWeekFormatMode()) + mode, ok := vars.GetSystemVar(variable.DefaultWeekFormat) + require.True(t, ok) + require.Equal(t, mode, ctx.GetDefaultWeekFormatMode()) + }, + }, + { + name: "div_precision_increment", + val: "12", + field: "$.divPrecisionIncrement", + assert: func(ctx *EvalContext, vars *variable.SessionVars) { + require.Equal(t, 12, ctx.GetDivPrecisionIncrement()) + require.Equal(t, vars.DivPrecisionIncrement, ctx.GetDivPrecisionIncrement()) + }, + }, + } + + // nonVarRelatedFields means the fields not related to any system variables. + // To make sure that all the variables which affect the context state are covered in the above test list, + // we need to test all inner fields except those in `nonVarRelatedFields` are changed after `LoadSystemVars`. 
+	nonVarRelatedFields := []string{
+		"$.warnHandler",
+		"$.typeCtx.flags",
+		"$.typeCtx.warnHandler",
+		"$.errCtx",
+		"$.currentDB",
+		"$.paramList",
+		"$.userVars",
+		"$.props",
+	}
+
+	// varsRelatedFields means the fields related to the system variables in the test list above.
+	varsRelatedFields := make([]string, 0, len(vars))
+	varsMap := make(map[string]string)
+	sessionVars := variable.NewSessionVars(nil)
+	for _, sysVar := range vars {
+		varsMap[sysVar.name] = sysVar.val
+		if sysVar.field != "" {
+			varsRelatedFields = append(varsRelatedFields, sysVar.field)
+		}
+		require.NoError(t, sessionVars.SetSystemVar(sysVar.name, sysVar.val))
+	}
+
+	defaultEvalCtx := NewEvalContext()
+	ctx, err := defaultEvalCtx.LoadSystemVars(varsMap)
+	require.NoError(t, err)
+	require.Greater(t, ctx.CtxID(), defaultEvalCtx.CtxID())
+
+	// Check that all fields except those in `nonVarRelatedFields` are changed after `LoadSystemVars` to make
+	// sure all system-variable-related fields are covered in the test list.
+	deeptest.AssertRecursivelyNotEqual(
+		t,
+		defaultEvalCtx.evalCtxState,
+		ctx.evalCtxState,
+		deeptest.WithIgnorePath(nonVarRelatedFields),
+		deeptest.WithPointerComparePath([]string{"$.currentTime"}),
+	)
+
+	// We need to compare the new context again with an empty one to make sure those values are set from sys vars,
+	// not inherited from the Go zero value.
+	deeptest.AssertRecursivelyNotEqual(
+		t,
+		evalCtxState{},
+		ctx.evalCtxState,
+		deeptest.WithIgnorePath(nonVarRelatedFields),
+		deeptest.WithPointerComparePath([]string{"$.currentTime"}),
+	)
+
+	// Check that all fields unrelated to system vars are not changed after `LoadSystemVars`.
+	deeptest.AssertDeepClonedEqual(
+		t,
+		defaultEvalCtx.evalCtxState,
+		ctx.evalCtxState,
+		deeptest.WithIgnorePath(append(
+			varsRelatedFields,
+			// Do not check warnHandler in `typeCtx` and `errCtx` because they should be changed too, even if
+			// they are not related to any system variable.
+			"$.typeCtx.warnHandler",
+			"$.errCtx.warnHandler",
+		)),
+		// LoadSystemVars only does a shallow copy of `EvalContext`, so we just need to compare the pointers.
+		deeptest.WithPointerComparePath(nonVarRelatedFields),
+	)
+
+	for _, sysVar := range vars {
+		sysVar.assert(ctx, sessionVars)
+	}
+
+	// An additional check for @@timestamp:
+	// setting it to `variable.DefTimestamp` should return the current timestamp.
+	ctx, err = defaultEvalCtx.LoadSystemVars(map[string]string{
+		"timestamp": variable.DefTimestamp,
+	})
+	require.NoError(t, err)
+	tm, err := ctx.CurrentTime()
+	require.NoError(t, err)
+	require.InDelta(t, time.Now().Unix(), tm.Unix(), 5)
+}
diff --git a/pkg/expression/sessionexpr/sessionctx_test.go b/pkg/expression/sessionexpr/sessionctx_test.go
new file mode 100644
index 0000000000000..d1ef9d7c64402
--- /dev/null
+++ b/pkg/expression/sessionexpr/sessionctx_test.go
@@ -0,0 +1,333 @@
+// Copyright 2024 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
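Editor's note: TestEvalCtxLoadSystemVars above drives LoadSystemVars through a table of variables. For readers of the API, a compact usage sketch (illustrative only; the chosen variables and values are arbitrary):

package main

import (
	"fmt"

	"github.com/pingcap/tidb/pkg/expression/exprstatic"
)

func main() {
	base := exprstatic.NewEvalContext()
	// LoadSystemVars returns a new context with the listed system variables
	// applied; the receiver is shallow-copied and left untouched.
	ctx, err := base.LoadSystemVars(map[string]string{
		"time_zone":               "Europe/Berlin",
		"div_precision_increment": "12",
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(ctx.Location())                 // Europe/Berlin
	fmt.Println(ctx.GetDivPrecisionIncrement()) // 12
}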
+ +package sessionexpr_test + +import ( + "sync/atomic" + "testing" + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/pkg/errctx" + "github.com/pingcap/tidb/pkg/expression/exprctx" + "github.com/pingcap/tidb/pkg/expression/expropt" + "github.com/pingcap/tidb/pkg/expression/sessionexpr" + "github.com/pingcap/tidb/pkg/parser/auth" + "github.com/pingcap/tidb/pkg/parser/mysql" + "github.com/pingcap/tidb/pkg/privilege" + "github.com/pingcap/tidb/pkg/types" + contextutil "github.com/pingcap/tidb/pkg/util/context" + "github.com/pingcap/tidb/pkg/util/mathutil" + "github.com/pingcap/tidb/pkg/util/mock" + tmock "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/tikv/client-go/v2/oracle" +) + +func TestSessionEvalContextBasic(t *testing.T) { + ctx := mock.NewContext() + vars := ctx.GetSessionVars() + sc := vars.StmtCtx + impl := sessionexpr.NewEvalContext(ctx) + require.True(t, impl.GetOptionalPropSet().IsFull()) + + // should contain all the optional properties + for i := 0; i < exprctx.OptPropsCnt; i++ { + provider, ok := impl.GetOptionalPropProvider(exprctx.OptionalEvalPropKey(i)) + require.True(t, ok) + require.NotNil(t, provider) + require.Same(t, exprctx.OptionalEvalPropKey(i).Desc(), provider.Desc()) + } + + ctx.ResetSessionAndStmtTimeZone(time.FixedZone("UTC+11", 11*3600)) + vars.SQLMode = mysql.ModeStrictTransTables | mysql.ModeNoZeroDate + sc.SetTypeFlags(types.FlagIgnoreInvalidDateErr | types.FlagSkipUTF8Check) + sc.SetErrLevels(errctx.LevelMap{ + errctx.ErrGroupDupKey: errctx.LevelWarn, + errctx.ErrGroupBadNull: errctx.LevelIgnore, + errctx.ErrGroupNoDefault: errctx.LevelIgnore, + }) + vars.CurrentDB = "db1" + vars.MaxAllowedPacket = 123456 + + // basic fields + tc, ec := impl.TypeCtx(), sc.ErrCtx() + require.Equal(t, tc, sc.TypeCtx()) + require.Equal(t, ec, impl.ErrCtx()) + require.Equal(t, vars.SQLMode, impl.SQLMode()) + require.Same(t, vars.Location(), impl.Location()) + require.Same(t, sc.TimeZone(), impl.Location()) + require.Same(t, tc.Location(), impl.Location()) + require.Equal(t, "db1", impl.CurrentDB()) + require.Equal(t, uint64(123456), impl.GetMaxAllowedPacket()) + require.Equal(t, "0", impl.GetDefaultWeekFormatMode()) + require.NoError(t, ctx.GetSessionVars().SetSystemVar("default_week_format", "5")) + require.Equal(t, "5", impl.GetDefaultWeekFormatMode()) + require.Same(t, vars.UserVars, impl.GetUserVarsReader()) + + // handle warnings + require.Equal(t, 0, impl.WarningCount()) + impl.AppendWarning(errors.New("err1")) + require.Equal(t, 1, impl.WarningCount()) + tc.AppendWarning(errors.New("err2")) + require.Equal(t, 2, impl.WarningCount()) + ec.AppendWarning(errors.New("err3")) + require.Equal(t, 3, impl.WarningCount()) + + for _, dst := range [][]contextutil.SQLWarn{ + nil, + make([]contextutil.SQLWarn, 1), + make([]contextutil.SQLWarn, 3), + make([]contextutil.SQLWarn, 0, 3), + } { + warnings := impl.CopyWarnings(dst) + require.Equal(t, 3, len(warnings)) + require.Equal(t, contextutil.WarnLevelWarning, warnings[0].Level) + require.Equal(t, contextutil.WarnLevelWarning, warnings[1].Level) + require.Equal(t, contextutil.WarnLevelWarning, warnings[2].Level) + require.Equal(t, "err1", warnings[0].Err.Error()) + require.Equal(t, "err2", warnings[1].Err.Error()) + require.Equal(t, "err3", warnings[2].Err.Error()) + } + + warnings := impl.TruncateWarnings(1) + require.Equal(t, 2, len(warnings)) + require.Equal(t, contextutil.WarnLevelWarning, warnings[0].Level) + require.Equal(t, contextutil.WarnLevelWarning, 
warnings[1].Level) + require.Equal(t, "err2", warnings[0].Err.Error()) + require.Equal(t, "err3", warnings[1].Err.Error()) + + warnings = impl.TruncateWarnings(0) + require.Equal(t, 1, len(warnings)) + require.Equal(t, contextutil.WarnLevelWarning, warnings[0].Level) + require.Equal(t, "err1", warnings[0].Err.Error()) +} + +func TestSessionEvalContextCurrentTime(t *testing.T) { + ctx := mock.NewContext() + vars := ctx.GetSessionVars() + sc := vars.StmtCtx + impl := sessionexpr.NewEvalContext(ctx) + + var now atomic.Pointer[time.Time] + sc.SetStaleTSOProvider(func() (uint64, error) { + v := time.UnixMilli(123456789) + // should only be called once + require.True(t, now.CompareAndSwap(nil, &v)) + return oracle.GoTimeToTS(v), nil + }) + + // now should return the stable TSO if set + tm, err := impl.CurrentTime() + require.NoError(t, err) + v := now.Load() + require.NotNil(t, v) + require.Equal(t, v.UnixNano(), tm.UnixNano()) + + // The second call should return the same value + tm, err = impl.CurrentTime() + require.NoError(t, err) + require.Equal(t, v.UnixNano(), tm.UnixNano()) + + // now should return the system variable if "timestamp" is set + sc.SetStaleTSOProvider(nil) + sc.Reset() + require.NoError(t, vars.SetSystemVar("timestamp", "7654321.875")) + tm, err = impl.CurrentTime() + require.NoError(t, err) + require.Equal(t, int64(7654321_875_000_000), tm.UnixNano()) + + // The second call should return the same value + tm, err = impl.CurrentTime() + require.NoError(t, err) + require.Equal(t, int64(7654321_875_000_000), tm.UnixNano()) + + // now should return the system current time if not stale TSO or "timestamp" is set + require.NoError(t, vars.SetSystemVar("timestamp", "0")) + sc.Reset() + tm, err = impl.CurrentTime() + require.NoError(t, err) + require.InDelta(t, time.Now().Unix(), tm.Unix(), 5) + + // The second call should return the same value + tm2, err := impl.CurrentTime() + require.NoError(t, err) + require.Equal(t, tm.UnixNano(), tm2.UnixNano()) +} + +type mockPrivManager struct { + tmock.Mock + privilege.Manager +} + +func (m *mockPrivManager) RequestVerification( + activeRole []*auth.RoleIdentity, db, table, column string, priv mysql.PrivilegeType, +) bool { + return m.Called(activeRole, db, table, column, priv).Bool(0) +} + +func (m *mockPrivManager) RequestDynamicVerification( + activeRoles []*auth.RoleIdentity, privName string, grantable bool, +) bool { + return m.Called(activeRoles, privName, grantable).Bool(0) +} + +func TestSessionEvalContextPrivilegeCheck(t *testing.T) { + ctx := mock.NewContext() + impl := sessionexpr.NewEvalContext(ctx) + activeRoles := []*auth.RoleIdentity{ + {Username: "role1", Hostname: "host1"}, + {Username: "role2", Hostname: "host2"}, + } + ctx.GetSessionVars().ActiveRoles = activeRoles + + // no privilege manager should always return true for privilege check + privilege.BindPrivilegeManager(ctx, nil) + require.True(t, impl.RequestVerification("test", "tbl1", "col1", mysql.SuperPriv)) + require.True(t, impl.RequestDynamicVerification("RESTRICTED_TABLES_ADMIN", true)) + require.True(t, impl.RequestDynamicVerification("RESTRICTED_TABLES_ADMIN", false)) + + // if privilege manager bound, it should return the privilege manager value + mgr := &mockPrivManager{} + privilege.BindPrivilegeManager(ctx, mgr) + mgr.On("RequestVerification", activeRoles, "db1", "t1", "c1", mysql.CreatePriv). 
+ Return(true).Once() + require.True(t, impl.RequestVerification("db1", "t1", "c1", mysql.CreatePriv)) + mgr.AssertExpectations(t) + + mgr.On("RequestVerification", activeRoles, "db2", "t2", "c2", mysql.SuperPriv). + Return(false).Once() + require.False(t, impl.RequestVerification("db2", "t2", "c2", mysql.SuperPriv)) + mgr.AssertExpectations(t) + + mgr.On("RequestDynamicVerification", activeRoles, "RESTRICTED_USER_ADMIN", false). + Return(true).Once() + require.True(t, impl.RequestDynamicVerification("RESTRICTED_USER_ADMIN", false)) + + mgr.On("RequestDynamicVerification", activeRoles, "RESTRICTED_CONNECTION_ADMIN", true). + Return(false).Once() + require.False(t, impl.RequestDynamicVerification("RESTRICTED_CONNECTION_ADMIN", true)) +} + +func getProvider[T exprctx.OptionalEvalPropProvider]( + t *testing.T, + impl *sessionexpr.EvalContext, + key exprctx.OptionalEvalPropKey, +) T { + val, ok := impl.GetOptionalPropProvider(key) + require.True(t, ok) + p, ok := val.(T) + require.True(t, ok) + require.Equal(t, key, p.Desc().Key()) + return p +} + +func TestSessionEvalContextOptProps(t *testing.T) { + ctx := mock.NewContext() + impl := sessionexpr.NewEvalContext(ctx) + + // test for OptPropCurrentUser + ctx.GetSessionVars().User = &auth.UserIdentity{Username: "user1", Hostname: "host1"} + ctx.GetSessionVars().ActiveRoles = []*auth.RoleIdentity{ + {Username: "role1", Hostname: "host1"}, + {Username: "role2", Hostname: "host2"}, + } + user, roles := getProvider[expropt.CurrentUserPropProvider](t, impl, exprctx.OptPropCurrentUser)() + require.Equal(t, ctx.GetSessionVars().User, user) + require.Equal(t, ctx.GetSessionVars().ActiveRoles, roles) + + // test for OptPropSessionVars + sessVarsProvider := getProvider[*expropt.SessionVarsPropProvider](t, impl, exprctx.OptPropSessionVars) + require.NotNil(t, sessVarsProvider) + gotVars, err := expropt.SessionVarsPropReader{}.GetSessionVars(impl) + require.NoError(t, err) + require.Same(t, ctx.GetSessionVars(), gotVars) + + // test for OptPropAdvisoryLock + lockProvider := getProvider[*expropt.AdvisoryLockPropProvider](t, impl, exprctx.OptPropAdvisoryLock) + gotCtx, ok := lockProvider.AdvisoryLockContext.(*mock.Context) + require.True(t, ok) + require.Same(t, ctx, gotCtx) + + // test for OptPropDDLOwnerInfo + ddlInfoProvider := getProvider[expropt.DDLOwnerInfoProvider](t, impl, exprctx.OptPropDDLOwnerInfo) + require.False(t, ddlInfoProvider()) + ctx.SetIsDDLOwner(true) + require.True(t, ddlInfoProvider()) + + // test for OptPropPrivilegeChecker + privCheckerProvider := getProvider[expropt.PrivilegeCheckerProvider](t, impl, exprctx.OptPropPrivilegeChecker) + privChecker := privCheckerProvider() + require.NotNil(t, privChecker) + require.Same(t, impl, privChecker) +} + +func TestSessionBuildContext(t *testing.T) { + ctx := mock.NewContext() + impl := sessionexpr.NewExprContext(ctx) + evalCtx, ok := impl.GetEvalCtx().(*sessionexpr.EvalContext) + require.True(t, ok) + require.Same(t, evalCtx, impl.EvalContext) + require.True(t, evalCtx.GetOptionalPropSet().IsFull()) + require.Same(t, ctx, evalCtx.Sctx()) + + // charset and collation + vars := ctx.GetSessionVars() + err := vars.SetSystemVar("character_set_connection", "gbk") + require.NoError(t, err) + err = vars.SetSystemVar("collation_connection", "gbk_chinese_ci") + require.NoError(t, err) + vars.DefaultCollationForUTF8MB4 = "utf8mb4_0900_ai_ci" + + charset, collate := impl.GetCharsetInfo() + require.Equal(t, "gbk", charset) + require.Equal(t, "gbk_chinese_ci", collate) + require.Equal(t, 
"utf8mb4_0900_ai_ci", impl.GetDefaultCollationForUTF8MB4()) + + // SysdateIsNow + vars.SysdateIsNow = true + require.True(t, impl.GetSysdateIsNow()) + + // NoopFuncsMode + vars.NoopFuncsMode = 2 + require.Equal(t, 2, impl.GetNoopFuncsMode()) + + // Rng + vars.Rng = mathutil.NewWithSeed(123) + require.Same(t, vars.Rng, impl.Rng()) + + // PlanCache + vars.StmtCtx.EnablePlanCache() + require.True(t, impl.IsUseCache()) + impl.SetSkipPlanCache("mockReason") + require.False(t, impl.IsUseCache()) + + // Alloc column id + prevID := vars.PlanColumnID.Load() + colID := impl.AllocPlanColumnID() + require.Equal(t, colID, prevID+1) + colID = impl.AllocPlanColumnID() + require.Equal(t, colID, prevID+2) + vars.AllocPlanColumnID() + colID = impl.AllocPlanColumnID() + require.Equal(t, colID, prevID+4) + + // InNullRejectCheck + require.False(t, impl.IsInNullRejectCheck()) + + // ConnID + vars.ConnectionID = 123 + require.Equal(t, uint64(123), impl.ConnectionID()) +} diff --git a/pkg/lightning/backend/kv/context.go b/pkg/lightning/backend/kv/context.go new file mode 100644 index 0000000000000..14c1963723c21 --- /dev/null +++ b/pkg/lightning/backend/kv/context.go @@ -0,0 +1,255 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kv + +import ( + "maps" + "math/rand" + "sync" + "time" + + "github.com/pingcap/tidb/pkg/errctx" + "github.com/pingcap/tidb/pkg/expression/exprctx" + "github.com/pingcap/tidb/pkg/expression/exprstatic" + "github.com/pingcap/tidb/pkg/meta/autoid" + "github.com/pingcap/tidb/pkg/meta/model" + "github.com/pingcap/tidb/pkg/parser/mysql" + "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" + "github.com/pingcap/tidb/pkg/sessionctx/variable" + "github.com/pingcap/tidb/pkg/table" + "github.com/pingcap/tidb/pkg/table/tblctx" + "github.com/pingcap/tidb/pkg/types" + contextutil "github.com/pingcap/tidb/pkg/util/context" + "github.com/pingcap/tidb/pkg/util/intest" + "github.com/pingcap/tidb/pkg/util/timeutil" +) + +var _ exprctx.ExprContext = &litExprContext{} + +// litExprContext implements the `exprctx.ExprContext` interface for lightning import. +// It provides the context to build and evaluate expressions, furthermore, it allows to set user variables +// for `IMPORT INTO ...` statements. +type litExprContext struct { + *exprstatic.ExprContext + userVars *variable.UserVars +} + +// NewExpressionContext creates a new `*ExprContext` for lightning import. +func newLitExprContext(sqlMode mysql.SQLMode, sysVars map[string]string, timestamp int64) (*litExprContext, error) { + flags := types.DefaultStmtFlags. + WithTruncateAsWarning(!sqlMode.HasStrictMode()). + WithIgnoreInvalidDateErr(sqlMode.HasAllowInvalidDatesMode()). 
+		WithIgnoreZeroInDate(!sqlMode.HasStrictMode() || sqlMode.HasAllowInvalidDatesMode() ||
+			!sqlMode.HasNoZeroInDateMode() || !sqlMode.HasNoZeroDateMode())
+
+	errLevels := stmtctx.DefaultStmtErrLevels
+	errLevels[errctx.ErrGroupTruncate] = errctx.ResolveErrLevel(flags.IgnoreTruncateErr(), flags.TruncateAsWarning())
+	errLevels[errctx.ErrGroupBadNull] = errctx.ResolveErrLevel(false, !sqlMode.HasStrictMode())
+	errLevels[errctx.ErrGroupNoDefault] = errctx.ResolveErrLevel(false, !sqlMode.HasStrictMode())
+	errLevels[errctx.ErrGroupDividedByZero] =
+		errctx.ResolveErrLevel(!sqlMode.HasErrorForDivisionByZeroMode(), !sqlMode.HasStrictMode())
+
+	userVars := variable.NewUserVars()
+	evalCtx := exprstatic.NewEvalContext(
+		exprstatic.WithSQLMode(sqlMode),
+		exprstatic.WithTypeFlags(flags),
+		exprstatic.WithLocation(timeutil.SystemLocation()),
+		exprstatic.WithErrLevelMap(errLevels),
+		exprstatic.WithUserVarsReader(userVars),
+	)
+
+	// No need to build for the plan cache.
+	planCacheTracker := contextutil.NewPlanCacheTracker(contextutil.IgnoreWarn)
+	intest.Assert(!planCacheTracker.UseCache())
+	ctx := exprstatic.NewExprContext(
+		exprstatic.WithEvalCtx(evalCtx),
+		exprstatic.WithPlanCacheTracker(&planCacheTracker),
+	)
+
+	if len(sysVars) > 0 {
+		var err error
+		ctx, err = ctx.LoadSystemVars(sysVars)
+		if err != nil {
+			return nil, err
+		}
+		evalCtx = ctx.GetStaticEvalCtx()
+	}
+
+	currentTime := func() (time.Time, error) { return time.Now(), nil }
+	if timestamp > 0 {
+		currentTime = func() (time.Time, error) { return time.Unix(timestamp, 0), nil }
+	}
+
+	evalCtx = evalCtx.Apply(exprstatic.WithCurrentTime(currentTime))
+	ctx = ctx.Apply(exprstatic.WithEvalCtx(evalCtx))
+
+	return &litExprContext{
+		ExprContext: ctx,
+		userVars:    userVars,
+	}, nil
+}
+
+// setUserVarVal sets the value of a user variable.
+func (ctx *litExprContext) setUserVarVal(name string, dt types.Datum) {
+	ctx.userVars.SetUserVarVal(name, dt)
+}
+
+// unsetUserVar unsets a user variable.
+func (ctx *litExprContext) unsetUserVar(varName string) {
+	ctx.userVars.UnsetUserVar(varName)
+}
+
+var _ table.MutateContext = &litTableMutateContext{}
+
+// litTableMutateContext implements the `table.MutateContext` interface for lightning import.
+type litTableMutateContext struct {
+	exprCtx               *litExprContext
+	encodingConfig        tblctx.RowEncodingConfig
+	mutateBuffers         *tblctx.MutateBuffers
+	shardID               *variable.RowIDShardGenerator
+	reservedRowIDAlloc    stmtctx.ReservedRowIDAlloc
+	enableMutationChecker bool
+	assertionLevel        variable.AssertionLevel
+	tableDelta            struct {
+		sync.Mutex
+		// tblID -> (colID -> deltaSize)
+		m map[int64]map[int64]int64
+	}
+}
+
+// AlternativeAllocators implements the `table.MutateContext` interface.
+func (*litTableMutateContext) AlternativeAllocators(*model.TableInfo) (autoid.Allocators, bool) {
+	// lightning does not support temporary tables, so we don't need to provide alternative allocators.
+	return autoid.Allocators{}, false
+}
+
+// GetExprCtx implements the `table.MutateContext` interface.
+func (ctx *litTableMutateContext) GetExprCtx() exprctx.ExprContext {
+	return ctx.exprCtx
+}
+
+// ConnectionID implements the `table.MutateContext` interface.
+func (*litTableMutateContext) ConnectionID() uint64 {
+	// Just return 0 because lightning import does not run in any client connection.
+	return 0
+}
+
+// InRestrictedSQL implements the `table.MutateContext` interface.
+func (*litTableMutateContext) InRestrictedSQL() bool {
+	// Just return false because lightning import does not run in any SQL statement.
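+	// (In TiDB, "restricted SQL" generally refers to internally executed SQL
+	// statements; an import never goes through the SQL layer at all, so false
+	// is the only meaningful answer here.)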
+	return false
+}
+
+// TxnAssertionLevel implements the `table.MutateContext` interface.
+func (ctx *litTableMutateContext) TxnAssertionLevel() variable.AssertionLevel {
+	return ctx.assertionLevel
+}
+
+// EnableMutationChecker implements the `table.MutateContext` interface.
+func (ctx *litTableMutateContext) EnableMutationChecker() bool {
+	return ctx.enableMutationChecker
+}
+
+// GetRowEncodingConfig implements the `table.MutateContext` interface.
+func (ctx *litTableMutateContext) GetRowEncodingConfig() tblctx.RowEncodingConfig {
+	return ctx.encodingConfig
+}
+
+// GetMutateBuffers implements the `table.MutateContext` interface.
+func (ctx *litTableMutateContext) GetMutateBuffers() *tblctx.MutateBuffers {
+	return ctx.mutateBuffers
+}
+
+// GetRowIDShardGenerator implements the `table.MutateContext` interface.
+func (ctx *litTableMutateContext) GetRowIDShardGenerator() *variable.RowIDShardGenerator {
+	return ctx.shardID
+}
+
+// GetReservedRowIDAlloc implements the `table.MutateContext` interface.
+func (ctx *litTableMutateContext) GetReservedRowIDAlloc() (*stmtctx.ReservedRowIDAlloc, bool) {
+	return &ctx.reservedRowIDAlloc, true
+}
+
+// GetStatisticsSupport implements the `table.MutateContext` interface.
+func (ctx *litTableMutateContext) GetStatisticsSupport() (tblctx.StatisticsSupport, bool) {
+	return ctx, true
+}
+
+// UpdatePhysicalTableDelta implements the `table.StatisticsSupport` interface.
+func (ctx *litTableMutateContext) UpdatePhysicalTableDelta(
+	physicalTableID int64, _ int64,
+	_ int64, cols variable.DeltaCols,
+) {
+	ctx.tableDelta.Lock()
+	defer ctx.tableDelta.Unlock()
+	if ctx.tableDelta.m == nil {
+		ctx.tableDelta.m = make(map[int64]map[int64]int64)
+	}
+	tableMap := ctx.tableDelta.m
+	colSize := tableMap[physicalTableID]
+	tableMap[physicalTableID] = cols.UpdateColSizeMap(colSize)
+}
+
+// GetColumnSize returns the column size map (colID -> deltaSize) for the given table ID.
+func (ctx *litTableMutateContext) GetColumnSize(tblID int64) (ret map[int64]int64) {
+	ctx.tableDelta.Lock()
+	defer ctx.tableDelta.Unlock()
+	return maps.Clone(ctx.tableDelta.m[tblID])
+}
+
+// GetCachedTableSupport implements the `table.MutateContext` interface.
+func (*litTableMutateContext) GetCachedTableSupport() (tblctx.CachedTableSupport, bool) {
+	// lightning import does not support cached tables.
+	return nil, false
+}
+
+// GetTemporaryTableSupport implements the `table.MutateContext` interface.
+func (*litTableMutateContext) GetTemporaryTableSupport() (tblctx.TemporaryTableSupport, bool) {
+	// lightning import does not support temporary tables.
+	return nil, false
+}
+
+// GetExchangePartitionDMLSupport implements the `table.MutateContext` interface.
+func (*litTableMutateContext) GetExchangePartitionDMLSupport() (tblctx.ExchangePartitionDMLSupport, bool) {
+	// lightning import does not run inside a DML query, so we do not need to support it.
+	return nil, false
+}
+
+// newLitTableMutateContext creates a new `*litTableMutateContext` for lightning import.
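+// A note on the construction below: a throwaway `variable.SessionVars` is built
+// solely to parse `sysVars`, and only the mutation-related fields (row encoding
+// config, mutate buffers, shard step, mutation checker, assertion level) are
+// then copied into the returned context.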
+func newLitTableMutateContext(exprCtx *litExprContext, sysVars map[string]string) (*litTableMutateContext, error) {
+	intest.AssertNotNil(exprCtx)
+	sessVars := variable.NewSessionVars(nil)
+	for k, v := range sysVars {
+		if err := sessVars.SetSystemVar(k, v); err != nil {
+			return nil, err
+		}
+	}
+
+	return &litTableMutateContext{
+		exprCtx: exprCtx,
+		encodingConfig: tblctx.RowEncodingConfig{
+			IsRowLevelChecksumEnabled: sessVars.IsRowLevelChecksumEnabled(),
+			RowEncoder:                &sessVars.RowEncoder,
+		},
+		mutateBuffers: tblctx.NewMutateBuffers(sessVars.GetWriteStmtBufs()),
+		// Though the row ID is generated by lightning itself and `GetRowIDShardGenerator` is
+		// not actually used, we still provide a valid object to keep the context complete and
+		// to avoid potential panics if the usage changes in the future.
+		shardID: variable.NewRowIDShardGenerator(
+			rand.New(rand.NewSource(time.Now().UnixNano())), // #nosec G404
+			int(sessVars.ShardAllocateStep),
+		),
+		enableMutationChecker: sessVars.EnableMutationChecker,
+		assertionLevel:        sessVars.AssertionLevel,
+	}, nil
+}
diff --git a/pkg/lightning/backend/kv/context_test.go b/pkg/lightning/backend/kv/context_test.go
new file mode 100644
index 0000000000000..bb04a4f3d5bbe
--- /dev/null
+++ b/pkg/lightning/backend/kv/context_test.go
@@ -0,0 +1,315 @@
+// Copyright 2024 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package kv + +import ( + "strconv" + "strings" + "testing" + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/pkg/errctx" + "github.com/pingcap/tidb/pkg/expression/exprctx" + "github.com/pingcap/tidb/pkg/meta/model" + "github.com/pingcap/tidb/pkg/parser/mysql" + "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" + "github.com/pingcap/tidb/pkg/sessionctx/variable" + "github.com/pingcap/tidb/pkg/table/tblctx" + "github.com/pingcap/tidb/pkg/types" + contextutil "github.com/pingcap/tidb/pkg/util/context" + "github.com/pingcap/tidb/pkg/util/rowcodec" + "github.com/pingcap/tidb/pkg/util/timeutil" + "github.com/stretchr/testify/require" +) + +func TestLitExprContext(t *testing.T) { + cases := []struct { + sqlMode mysql.SQLMode + sysVars map[string]string + timestamp int64 + checkFlags types.Flags + checkErrLevel errctx.LevelMap + check func(types.Flags, errctx.LevelMap) + }{ + { + sqlMode: mysql.ModeNone, + timestamp: 1234567, + checkFlags: types.DefaultStmtFlags | types.FlagTruncateAsWarning | types.FlagIgnoreZeroInDateErr, + checkErrLevel: func() errctx.LevelMap { + m := stmtctx.DefaultStmtErrLevels + m[errctx.ErrGroupTruncate] = errctx.LevelWarn + m[errctx.ErrGroupBadNull] = errctx.LevelWarn + m[errctx.ErrGroupNoDefault] = errctx.LevelWarn + m[errctx.ErrGroupDividedByZero] = errctx.LevelIgnore + return m + }(), + sysVars: map[string]string{ + "max_allowed_packet": "10240", + "div_precision_increment": "5", + "time_zone": "Europe/Berlin", + "default_week_format": "2", + "block_encryption_mode": "aes-128-ofb", + "group_concat_max_len": "2048", + }, + }, + { + sqlMode: mysql.ModeStrictTransTables | mysql.ModeNoZeroDate | mysql.ModeNoZeroInDate | + mysql.ModeErrorForDivisionByZero, + checkFlags: types.DefaultStmtFlags, + checkErrLevel: func() errctx.LevelMap { + m := stmtctx.DefaultStmtErrLevels + m[errctx.ErrGroupTruncate] = errctx.LevelError + m[errctx.ErrGroupBadNull] = errctx.LevelError + m[errctx.ErrGroupNoDefault] = errctx.LevelError + m[errctx.ErrGroupDividedByZero] = errctx.LevelError + return m + }(), + }, + { + sqlMode: mysql.ModeNoZeroDate | mysql.ModeNoZeroInDate | mysql.ModeErrorForDivisionByZero, + checkFlags: types.DefaultStmtFlags | types.FlagTruncateAsWarning | types.FlagIgnoreZeroInDateErr, + checkErrLevel: func() errctx.LevelMap { + m := stmtctx.DefaultStmtErrLevels + m[errctx.ErrGroupTruncate] = errctx.LevelWarn + m[errctx.ErrGroupBadNull] = errctx.LevelWarn + m[errctx.ErrGroupNoDefault] = errctx.LevelWarn + m[errctx.ErrGroupDividedByZero] = errctx.LevelWarn + return m + }(), + }, + { + sqlMode: mysql.ModeStrictTransTables | mysql.ModeNoZeroInDate, + checkFlags: types.DefaultStmtFlags | types.FlagIgnoreZeroInDateErr, + checkErrLevel: func() errctx.LevelMap { + m := stmtctx.DefaultStmtErrLevels + m[errctx.ErrGroupTruncate] = errctx.LevelError + m[errctx.ErrGroupBadNull] = errctx.LevelError + m[errctx.ErrGroupNoDefault] = errctx.LevelError + m[errctx.ErrGroupDividedByZero] = errctx.LevelIgnore + return m + }(), + }, + { + sqlMode: mysql.ModeStrictTransTables | mysql.ModeNoZeroDate, + checkFlags: types.DefaultStmtFlags | types.FlagIgnoreZeroInDateErr, + checkErrLevel: func() errctx.LevelMap { + m := stmtctx.DefaultStmtErrLevels + m[errctx.ErrGroupTruncate] = errctx.LevelError + m[errctx.ErrGroupBadNull] = errctx.LevelError + m[errctx.ErrGroupNoDefault] = errctx.LevelError + m[errctx.ErrGroupDividedByZero] = errctx.LevelIgnore + return m + }(), + }, + { + sqlMode: mysql.ModeStrictTransTables | mysql.ModeAllowInvalidDates, + checkFlags: types.DefaultStmtFlags | 
types.FlagIgnoreZeroInDateErr | types.FlagIgnoreInvalidDateErr, + checkErrLevel: func() errctx.LevelMap { + m := stmtctx.DefaultStmtErrLevels + m[errctx.ErrGroupTruncate] = errctx.LevelError + m[errctx.ErrGroupBadNull] = errctx.LevelError + m[errctx.ErrGroupNoDefault] = errctx.LevelError + m[errctx.ErrGroupDividedByZero] = errctx.LevelIgnore + return m + }(), + }, + } + + for i, c := range cases { + t.Run("case-"+strconv.Itoa(i), func(t *testing.T) { + ctx, err := newLitExprContext(c.sqlMode, c.sysVars, c.timestamp) + require.NoError(t, err) + evalCtx := ctx.GetEvalCtx() + require.Equal(t, c.sqlMode, evalCtx.SQLMode()) + tc, ec := evalCtx.TypeCtx(), evalCtx.ErrCtx() + require.Same(t, evalCtx.Location(), tc.Location()) + require.Equal(t, c.checkFlags, tc.Flags()) + require.Equal(t, c.checkErrLevel, ec.LevelMap()) + + // shares the same warning handler + warns := []contextutil.SQLWarn{ + {Level: contextutil.WarnLevelWarning, Err: errors.New("mockErr1")}, + {Level: contextutil.WarnLevelWarning, Err: errors.New("mockErr2")}, + {Level: contextutil.WarnLevelWarning, Err: errors.New("mockErr3")}, + } + require.Equal(t, 0, evalCtx.WarningCount()) + evalCtx.AppendWarning(warns[0].Err) + tc.AppendWarning(warns[1].Err) + ec.AppendWarning(warns[2].Err) + require.Equal(t, warns, evalCtx.CopyWarnings(nil)) + + // system vars + timeZone := "SYSTEM" + expectedMaxAllowedPacket := variable.DefMaxAllowedPacket + expectedDivPrecisionInc := variable.DefDivPrecisionIncrement + expectedDefaultWeekFormat := variable.DefDefaultWeekFormat + expectedBlockEncryptionMode := variable.DefBlockEncryptionMode + expectedGroupConcatMaxLen := variable.DefGroupConcatMaxLen + for k, v := range c.sysVars { + switch strings.ToLower(k) { + case "time_zone": + timeZone = v + case "max_allowed_packet": + expectedMaxAllowedPacket, err = strconv.ParseUint(v, 10, 64) + case "div_precision_increment": + expectedDivPrecisionInc, err = strconv.Atoi(v) + case "default_week_format": + expectedDefaultWeekFormat = v + case "block_encryption_mode": + expectedBlockEncryptionMode = v + case "group_concat_max_len": + expectedGroupConcatMaxLen, err = strconv.ParseUint(v, 10, 64) + } + require.NoError(t, err) + } + if strings.ToLower(timeZone) == "system" { + require.Same(t, timeutil.SystemLocation(), evalCtx.Location()) + } else { + require.Equal(t, timeZone, evalCtx.Location().String()) + } + require.Equal(t, expectedMaxAllowedPacket, evalCtx.GetMaxAllowedPacket()) + require.Equal(t, expectedDivPrecisionInc, evalCtx.GetDivPrecisionIncrement()) + require.Equal(t, expectedDefaultWeekFormat, evalCtx.GetDefaultWeekFormatMode()) + require.Equal(t, expectedBlockEncryptionMode, ctx.GetBlockEncryptionMode()) + require.Equal(t, expectedGroupConcatMaxLen, ctx.GetGroupConcatMaxLen()) + + now := time.Now() + tm, err := evalCtx.CurrentTime() + require.NoError(t, err) + require.Same(t, evalCtx.Location(), tm.Location()) + if c.timestamp == 0 { + // timestamp == 0 means use the current time. 
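+				// (the 2-second delta below tolerates wall-clock movement between
+				// building the context and reading CurrentTime)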
+				require.InDelta(t, now.Unix(), tm.Unix(), 2)
+			} else {
+				require.Equal(t, c.timestamp*1000000000, tm.UnixNano())
+			}
+			// CurrentTime returns the same value
+			tm2, err := evalCtx.CurrentTime()
+			require.NoError(t, err)
+			require.Equal(t, tm.Nanosecond(), tm2.Nanosecond())
+			require.Same(t, tm.Location(), tm2.Location())
+
+			// currently we don't support optional properties
+			require.Equal(t, exprctx.OptionalEvalPropKeySet(0), evalCtx.GetOptionalPropSet())
+			// not built for plan cache
+			require.False(t, ctx.IsUseCache())
+			// rng not nil
+			require.NotNil(t, ctx.Rng())
+			// ConnectionID
+			require.Equal(t, uint64(0), ctx.ConnectionID())
+			// user vars
+			userVars := evalCtx.GetUserVarsReader()
+			_, ok := userVars.GetUserVarVal("a")
+			require.False(t, ok)
+			ctx.setUserVarVal("a", types.NewIntDatum(123))
+			d, ok := userVars.GetUserVarVal("a")
+			require.True(t, ok)
+			require.Equal(t, types.NewIntDatum(123), d)
+			ctx.unsetUserVar("a")
+			_, ok = userVars.GetUserVarVal("a")
+			require.False(t, ok)
+		})
+	}
+}
+
+func TestLitTableMutateContext(t *testing.T) {
+	exprCtx, err := newLitExprContext(mysql.ModeNone, nil, 0)
+	require.NoError(t, err)
+
+	checkCommon := func(t *testing.T, tblCtx *litTableMutateContext) {
+		require.Same(t, exprCtx, tblCtx.GetExprCtx())
+		_, ok := tblCtx.AlternativeAllocators(&model.TableInfo{ID: 1})
+		require.False(t, ok)
+		require.Equal(t, uint64(0), tblCtx.ConnectionID())
+		require.Equal(t, tblCtx.GetExprCtx().ConnectionID(), tblCtx.ConnectionID())
+		require.False(t, tblCtx.InRestrictedSQL())
+		require.NotNil(t, tblCtx.GetMutateBuffers())
+		require.NotNil(t, tblCtx.GetMutateBuffers().GetWriteStmtBufs())
+		alloc, ok := tblCtx.GetReservedRowIDAlloc()
+		require.True(t, ok)
+		require.NotNil(t, alloc)
+		require.Equal(t, &stmtctx.ReservedRowIDAlloc{}, alloc)
+		require.True(t, alloc.Exhausted())
+		_, ok = tblCtx.GetCachedTableSupport()
+		require.False(t, ok)
+		_, ok = tblCtx.GetTemporaryTableSupport()
+		require.False(t, ok)
+		stats, ok := tblCtx.GetStatisticsSupport()
+		require.True(t, ok)
+		// test for `UpdatePhysicalTableDelta` and `GetColumnSize`
+		stats.UpdatePhysicalTableDelta(123, 5, 2, variable.DeltaColsMap{1: 2, 3: 4})
+		r := tblCtx.GetColumnSize(123)
+		require.Equal(t, map[int64]int64{1: 2, 3: 4}, r)
+		stats.UpdatePhysicalTableDelta(123, 8, 2, variable.DeltaColsMap{3: 5, 4: 3})
+		r = tblCtx.GetColumnSize(123)
+		require.Equal(t, map[int64]int64{1: 2, 3: 9, 4: 3}, r)
+		// the result should be a cloned value
+		r[1] = 100
+		require.Equal(t, map[int64]int64{1: 2, 3: 9, 4: 3}, tblCtx.GetColumnSize(123))
+		// test getting a non-existent table
+		require.Empty(t, tblCtx.GetColumnSize(456))
+	}
+
+	// test for default
+	tblCtx, err := newLitTableMutateContext(exprCtx, nil)
+	require.NoError(t, err)
+	checkCommon(t, tblCtx)
+	require.Equal(t, variable.AssertionLevelOff, tblCtx.TxnAssertionLevel())
+	require.Equal(t, variable.DefTiDBEnableMutationChecker, tblCtx.EnableMutationChecker())
+	require.False(t, tblCtx.EnableMutationChecker())
+	require.Equal(t, tblctx.RowEncodingConfig{
+		IsRowLevelChecksumEnabled: false,
+		RowEncoder:                &rowcodec.Encoder{Enable: false},
+	}, tblCtx.GetRowEncodingConfig())
+	g := tblCtx.GetRowIDShardGenerator()
+	require.NotNil(t, g)
+	require.Equal(t, variable.DefTiDBShardAllocateStep, g.GetShardStep())
+
+	// test for load vars
+	sysVars := map[string]string{
+		"tidb_txn_assertion_level":     "STRICT",
+		"tidb_enable_mutation_checker": "ON",
+		"tidb_row_format_version":      "2",
+		"tidb_shard_allocate_step":     "1234567",
+	}
+	tblCtx, err =
newLitTableMutateContext(exprCtx, sysVars)
+	require.NoError(t, err)
+	checkCommon(t, tblCtx)
+	require.Equal(t, variable.AssertionLevelStrict, tblCtx.TxnAssertionLevel())
+	require.True(t, tblCtx.EnableMutationChecker())
+	require.Equal(t, tblctx.RowEncodingConfig{
+		IsRowLevelChecksumEnabled: false,
+		RowEncoder:                &rowcodec.Encoder{Enable: true},
+	}, tblCtx.GetRowEncodingConfig())
+	g = tblCtx.GetRowIDShardGenerator()
+	require.NotNil(t, g)
+	require.NotEqual(t, variable.DefTiDBShardAllocateStep, g.GetShardStep())
+	require.Equal(t, 1234567, g.GetShardStep())
+
+	// test for `RowEncodingConfig.IsRowLevelChecksumEnabled`, which should be loaded from the global variable.
+	require.False(t, variable.EnableRowLevelChecksum.Load())
+	defer variable.EnableRowLevelChecksum.Store(false)
+	variable.EnableRowLevelChecksum.Store(true)
+	sysVars = map[string]string{
+		"tidb_row_format_version": "2",
+	}
+	tblCtx, err = newLitTableMutateContext(exprCtx, sysVars)
+	require.NoError(t, err)
+	require.Equal(t, tblctx.RowEncodingConfig{
+		IsRowLevelChecksumEnabled: true,
+		RowEncoder:                &rowcodec.Encoder{Enable: true},
+	}, tblCtx.GetRowEncodingConfig())
+}
diff --git a/pkg/lightning/errormanager/resolveconflict_test.go b/pkg/lightning/errormanager/resolveconflict_test.go
new file mode 100644
index 0000000000000..7dee78af9a715
--- /dev/null
+++ b/pkg/lightning/errormanager/resolveconflict_test.go
@@ -0,0 +1,848 @@
+// Copyright 2021 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package errormanager_test + +import ( + "bytes" + "context" + "database/sql/driver" + "fmt" + "testing" + + "github.com/DATA-DOG/go-sqlmock" + "github.com/pingcap/tidb/pkg/ddl" + "github.com/pingcap/tidb/pkg/lightning/backend/encode" + tidbkv "github.com/pingcap/tidb/pkg/lightning/backend/kv" + "github.com/pingcap/tidb/pkg/lightning/config" + "github.com/pingcap/tidb/pkg/lightning/errormanager" + "github.com/pingcap/tidb/pkg/lightning/log" + "github.com/pingcap/tidb/pkg/meta/model" + "github.com/pingcap/tidb/pkg/parser" + "github.com/pingcap/tidb/pkg/parser/ast" + "github.com/pingcap/tidb/pkg/parser/mysql" + "github.com/pingcap/tidb/pkg/sessionctx/variable" + "github.com/pingcap/tidb/pkg/table/tables" + "github.com/pingcap/tidb/pkg/types" + "github.com/pingcap/tidb/pkg/util" + "github.com/pingcap/tidb/pkg/util/mock" + "github.com/stretchr/testify/require" + tikverr "github.com/tikv/client-go/v2/error" + "go.uber.org/atomic" +) + +func TestReplaceConflictMultipleKeysNonclusteredPk(t *testing.T) { + p := parser.New() + node, _, err := p.ParseSQL("create table a (a int primary key nonclustered, b int not null, c int not null, d text, key key_b(b), key key_c(c));") + require.NoError(t, err) + mockSctx := mock.NewContext() + mockSctx.GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOff + info, err := ddl.MockTableInfo(mockSctx, node[0].(*ast.CreateTableStmt), 108) + require.NoError(t, err) + info.State = model.StatePublic + require.False(t, info.PKIsHandle) + tbl, err := tables.TableFromMeta(tidbkv.NewPanickingAllocators(info.SepAutoInc()), info) + require.NoError(t, err) + require.False(t, tbl.Meta().HasClusteredIndex()) + + sessionOpts := encode.SessionOptions{ + SQLMode: mysql.ModeStrictAllTables, + Timestamp: 1234567890, + } + + encoder, err := tidbkv.NewBaseKVEncoder(&encode.EncodingConfig{ + Table: tbl, + SessionOptions: sessionOpts, + Logger: log.L(), + }) + require.NoError(t, err) + encoder.SessionCtx.GetTableCtx().GetRowEncodingConfig().RowEncoder.Enable = true + + data1 := []types.Datum{ + types.NewIntDatum(1), + types.NewIntDatum(1), + types.NewIntDatum(1), + types.NewStringDatum("1.csv"), + types.NewIntDatum(1), + } + data2 := []types.Datum{ + types.NewIntDatum(1), + types.NewIntDatum(1), + types.NewIntDatum(2), + types.NewStringDatum("2.csv"), + types.NewIntDatum(2), + } + data3 := []types.Datum{ + types.NewIntDatum(2), + types.NewIntDatum(2), + types.NewIntDatum(3), + types.NewStringDatum("3.csv"), + types.NewIntDatum(3), + } + data4 := []types.Datum{ + types.NewIntDatum(3), + types.NewIntDatum(3), + types.NewIntDatum(3), + types.NewStringDatum("3.csv"), + types.NewIntDatum(4), + } + data5 := []types.Datum{ + types.NewIntDatum(3), + types.NewIntDatum(3), + types.NewIntDatum(3), + types.NewStringDatum("4.csv"), + types.NewIntDatum(5), + } + data6 := []types.Datum{ + types.NewIntDatum(4), + types.NewIntDatum(4), + types.NewIntDatum(4), + types.NewStringDatum("4.csv"), + types.NewIntDatum(6), + } + data7 := []types.Datum{ + types.NewIntDatum(5), + types.NewIntDatum(4), + types.NewIntDatum(5), + types.NewStringDatum("5.csv"), + types.NewIntDatum(7), + } + _, err = encoder.AddRecord(data1) + require.NoError(t, err) + _, err = encoder.AddRecord(data2) + require.NoError(t, err) + _, err = encoder.AddRecord(data3) + require.NoError(t, err) + _, err = encoder.AddRecord(data4) + require.NoError(t, err) + _, err = encoder.AddRecord(data5) + require.NoError(t, err) + _, err = encoder.AddRecord(data6) + require.NoError(t, err) + _, err = encoder.AddRecord(data7) + 
require.NoError(t, err) + kvPairs := encoder.SessionCtx.TakeKvPairs() + + data2IndexKey := kvPairs.Pairs[5].Key + data2IndexValue := kvPairs.Pairs[5].Val + data6IndexKey := kvPairs.Pairs[17].Key + + data1RowKey := kvPairs.Pairs[0].Key + data2RowKey := kvPairs.Pairs[3].Key + data2RowValue := kvPairs.Pairs[3].Val + data3RowKey := kvPairs.Pairs[6].Key + data3RowValue := kvPairs.Pairs[6].Val + data5RowKey := kvPairs.Pairs[12].Key + data6RowKey := kvPairs.Pairs[15].Key + data6RowValue := kvPairs.Pairs[15].Val + data7RowKey := kvPairs.Pairs[18].Key + data7RowValue := kvPairs.Pairs[18].Val + + data2NonclusteredKey := kvPairs.Pairs[4].Key + data2NonclusteredValue := kvPairs.Pairs[4].Val + data3NonclusteredValue := kvPairs.Pairs[7].Val + data6NonclusteredKey := kvPairs.Pairs[16].Key + data6NonclusteredValue := kvPairs.Pairs[16].Val + data7NonclusteredValue := kvPairs.Pairs[19].Val + + db, mockDB, err := sqlmock.New() + require.NoError(t, err) + defer func() { + _ = db.Close() + }() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mockDB.ExpectExec("CREATE SCHEMA IF NOT EXISTS `lightning_task_info`"). + WillReturnResult(sqlmock.NewResult(1, 1)) + mockDB.ExpectExec("CREATE TABLE IF NOT EXISTS `lightning_task_info`\\.conflict_error_v3.*"). + WillReturnResult(sqlmock.NewResult(2, 1)) + mockDB.ExpectExec("CREATE OR REPLACE VIEW `lightning_task_info`\\.conflict_view.*"). + WillReturnResult(sqlmock.NewResult(3, 1)) + mockDB.ExpectQuery("\\QSELECT _tidb_rowid, raw_key, index_name, raw_value, raw_handle FROM `lightning_task_info`.conflict_error_v3 WHERE table_name = ? AND kv_type = 0 AND _tidb_rowid >= ? and _tidb_rowid < ? ORDER BY _tidb_rowid LIMIT ?\\E"). + WillReturnRows(sqlmock.NewRows([]string{"_tidb_rowid", "raw_key", "index_name", "raw_value", "raw_handle"}). + AddRow(1, data2RowKey, "PRIMARY", data2RowValue, data1RowKey). + AddRow(2, data2RowKey, "PRIMARY", data3NonclusteredValue, data2NonclusteredKey). + AddRow(3, data6RowKey, "PRIMARY", data6RowValue, data5RowKey). + AddRow(4, data6RowKey, "PRIMARY", data7NonclusteredValue, data6NonclusteredKey)) + mockDB.ExpectBegin() + mockDB.ExpectExec("INSERT IGNORE INTO `lightning_task_info`\\.conflict_error_v3.*"). + WithArgs(0, "a", nil, nil, data2NonclusteredKey, data2NonclusteredValue, 2, + 0, "a", nil, nil, data6NonclusteredKey, data6NonclusteredValue, 2). + WillReturnResult(driver.ResultNoRows) + mockDB.ExpectCommit() + for i := 0; i < 2; i++ { + mockDB.ExpectQuery("\\QSELECT _tidb_rowid, raw_key, index_name, raw_value, raw_handle FROM `lightning_task_info`.conflict_error_v3 WHERE table_name = ? AND kv_type = 0 AND _tidb_rowid >= ? and _tidb_rowid < ? ORDER BY _tidb_rowid LIMIT ?\\E"). + WillReturnRows(sqlmock.NewRows([]string{"_tidb_rowid", "raw_key", "index_name", "raw_value", "raw_handle"})) + } + mockDB.ExpectQuery("\\QSELECT _tidb_rowid, raw_key, raw_value FROM `lightning_task_info`.conflict_error_v3 WHERE table_name = ? AND kv_type <> 0 AND _tidb_rowid >= ? and _tidb_rowid < ? ORDER BY _tidb_rowid LIMIT ?\\E"). + WillReturnRows(sqlmock.NewRows([]string{"_tidb_rowid", "raw_key", "raw_value"}). + AddRow(1, data2NonclusteredKey, data2NonclusteredValue). + AddRow(2, data6NonclusteredKey, data6NonclusteredValue)) + for i := 0; i < 2; i++ { + mockDB.ExpectQuery("\\QSELECT _tidb_rowid, raw_key, raw_value FROM `lightning_task_info`.conflict_error_v3 WHERE table_name = ? AND kv_type <> 0 AND _tidb_rowid >= ? and _tidb_rowid < ? ORDER BY _tidb_rowid LIMIT ?\\E"). 
+ WillReturnRows(sqlmock.NewRows([]string{"_tidb_rowid", "raw_key", "raw_value"})) + } + mockDB.ExpectBegin() + mockDB.ExpectExec("DELETE FROM `lightning_task_info`\\.conflict_error_v3.*"). + WillReturnResult(sqlmock.NewResult(0, 2)) + mockDB.ExpectCommit() + mockDB.ExpectBegin() + mockDB.ExpectExec("DELETE FROM `lightning_task_info`\\.conflict_error_v3.*"). + WillReturnResult(sqlmock.NewResult(0, 0)) + mockDB.ExpectCommit() + + cfg := config.NewConfig() + cfg.Conflict.Strategy = config.ReplaceOnDup + cfg.TikvImporter.Backend = config.BackendLocal + cfg.App.TaskInfoSchemaName = "lightning_task_info" + em := errormanager.New(db, cfg, log.L()) + err = em.Init(ctx) + require.NoError(t, err) + + fnGetLatestCount := atomic.NewInt32(0) + fnDeleteKeyCount := atomic.NewInt32(0) + pool := util.NewWorkerPool(16, "resolve duplicate rows by replace") + err = em.ReplaceConflictKeys( + ctx, tbl, "a", pool, + func(ctx context.Context, key []byte) ([]byte, error) { + fnGetLatestCount.Add(1) + switch { + case bytes.Equal(key, data2RowKey): + return data2RowValue, nil + case bytes.Equal(key, data2NonclusteredKey): + if fnGetLatestCount.String() == "3" { + return data2NonclusteredValue, nil + } + return nil, tikverr.ErrNotExist + case bytes.Equal(key, data6RowKey): + return data6RowValue, nil + case bytes.Equal(key, data6NonclusteredKey): + if fnGetLatestCount.String() == "6" { + return data6NonclusteredValue, nil + } + return nil, tikverr.ErrNotExist + case bytes.Equal(key, data2IndexKey): + return data2IndexValue, nil + case bytes.Equal(key, data3RowKey): + return data3RowValue, nil + case bytes.Equal(key, data6IndexKey): + return data3RowValue, nil + case bytes.Equal(key, data7RowKey): + return data7RowValue, nil + default: + return nil, fmt.Errorf("key %v is not expected", key) + } + }, + func(ctx context.Context, keys [][]byte) error { + fnDeleteKeyCount.Add(int32(len(keys))) + for _, key := range keys { + if !bytes.Equal(key, data2NonclusteredKey) && !bytes.Equal(key, data6NonclusteredKey) && !bytes.Equal(key, data2IndexKey) && !bytes.Equal(key, data3RowKey) && !bytes.Equal(key, data6IndexKey) && !bytes.Equal(key, data7RowKey) { + return fmt.Errorf("key %v is not expected", key) + } + } + return nil + }, + ) + require.NoError(t, err) + require.Equal(t, int32(16), fnGetLatestCount.Load()) + require.Equal(t, int32(6), fnDeleteKeyCount.Load()) + err = mockDB.ExpectationsWereMet() + require.NoError(t, err) +} + +func TestReplaceConflictOneKeyNonclusteredPk(t *testing.T) { + p := parser.New() + node, _, err := p.ParseSQL("create table a (a int primary key nonclustered, b int not null, c text, key key_b(b));") + require.NoError(t, err) + mockSctx := mock.NewContext() + mockSctx.GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOff + info, err := ddl.MockTableInfo(mockSctx, node[0].(*ast.CreateTableStmt), 108) + require.NoError(t, err) + info.State = model.StatePublic + require.False(t, info.PKIsHandle) + tbl, err := tables.TableFromMeta(tidbkv.NewPanickingAllocators(info.SepAutoInc()), info) + require.NoError(t, err) + require.False(t, tbl.Meta().HasClusteredIndex()) + + sessionOpts := encode.SessionOptions{ + SQLMode: mysql.ModeStrictAllTables, + Timestamp: 1234567890, + } + + encoder, err := tidbkv.NewBaseKVEncoder(&encode.EncodingConfig{ + Table: tbl, + SessionOptions: sessionOpts, + Logger: log.L(), + }) + require.NoError(t, err) + encoder.SessionCtx.GetTableCtx().GetRowEncodingConfig().RowEncoder.Enable = true + + data1 := []types.Datum{ + types.NewIntDatum(1), + 
types.NewIntDatum(6), + types.NewStringDatum("1.csv"), + types.NewIntDatum(1), + } + data2 := []types.Datum{ + types.NewIntDatum(2), + types.NewIntDatum(6), + types.NewStringDatum("2.csv"), + types.NewIntDatum(2), + } + data3 := []types.Datum{ + types.NewIntDatum(3), + types.NewIntDatum(3), + types.NewStringDatum("3.csv"), + types.NewIntDatum(3), + } + data4 := []types.Datum{ + types.NewIntDatum(3), + types.NewIntDatum(4), + types.NewStringDatum("4.csv"), + types.NewIntDatum(4), + } + data5 := []types.Datum{ + types.NewIntDatum(5), + types.NewIntDatum(4), + types.NewStringDatum("5.csv"), + types.NewIntDatum(5), + } + _, err = encoder.AddRecord(data1) + require.NoError(t, err) + _, err = encoder.AddRecord(data2) + require.NoError(t, err) + _, err = encoder.AddRecord(data3) + require.NoError(t, err) + _, err = encoder.AddRecord(data4) + require.NoError(t, err) + _, err = encoder.AddRecord(data5) + require.NoError(t, err) + kvPairs := encoder.SessionCtx.TakeKvPairs() + + data3IndexKey := kvPairs.Pairs[8].Key + data3IndexValue := kvPairs.Pairs[8].Val + data4IndexValue := kvPairs.Pairs[11].Val + data3RowKey := kvPairs.Pairs[6].Key + data4RowKey := kvPairs.Pairs[9].Key + data4RowValue := kvPairs.Pairs[9].Val + data4NonclusteredKey := kvPairs.Pairs[10].Key + data4NonclusteredValue := kvPairs.Pairs[10].Val + + db, mockDB, err := sqlmock.New() + require.NoError(t, err) + defer func() { + _ = db.Close() + }() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mockDB.ExpectExec("CREATE SCHEMA IF NOT EXISTS `lightning_task_info`"). + WillReturnResult(sqlmock.NewResult(1, 1)) + mockDB.ExpectExec("CREATE TABLE IF NOT EXISTS `lightning_task_info`\\.conflict_error_v3.*"). + WillReturnResult(sqlmock.NewResult(2, 1)) + mockDB.ExpectExec("CREATE OR REPLACE VIEW `lightning_task_info`\\.conflict_view.*"). + WillReturnResult(sqlmock.NewResult(3, 1)) + mockDB.ExpectQuery("\\QSELECT _tidb_rowid, raw_key, index_name, raw_value, raw_handle FROM `lightning_task_info`.conflict_error_v3 WHERE table_name = ? AND kv_type = 0 AND _tidb_rowid >= ? and _tidb_rowid < ? ORDER BY _tidb_rowid LIMIT ?\\E"). + WillReturnRows(sqlmock.NewRows([]string{"_tidb_rowid", "raw_key", "index_name", "raw_value", "raw_handle"}). + AddRow(1, data3IndexKey, "PRIMARY", data3IndexValue, data3RowKey). + AddRow(2, data3IndexKey, "PRIMARY", data4IndexValue, data4RowKey)) + mockDB.ExpectBegin() + mockDB.ExpectExec("INSERT IGNORE INTO `lightning_task_info`\\.conflict_error_v3.*"). + WithArgs(0, "a", nil, nil, data4RowKey, data4RowValue, 2). + WillReturnResult(driver.ResultNoRows) + mockDB.ExpectCommit() + for i := 0; i < 2; i++ { + mockDB.ExpectQuery("\\QSELECT _tidb_rowid, raw_key, index_name, raw_value, raw_handle FROM `lightning_task_info`.conflict_error_v3 WHERE table_name = ? AND kv_type = 0 AND _tidb_rowid >= ? and _tidb_rowid < ? ORDER BY _tidb_rowid LIMIT ?\\E"). + WillReturnRows(sqlmock.NewRows([]string{"_tidb_rowid", "raw_key", "index_name", "raw_value", "raw_handle"})) + } + mockDB.ExpectQuery("\\QSELECT _tidb_rowid, raw_key, raw_value FROM `lightning_task_info`.conflict_error_v3 WHERE table_name = ? AND kv_type <> 0 AND _tidb_rowid >= ? and _tidb_rowid < ? ORDER BY _tidb_rowid LIMIT ?\\E"). + WillReturnRows(sqlmock.NewRows([]string{"_tidb_rowid", "raw_key", "raw_value"}). + AddRow(1, data4RowKey, data4RowValue)) + for i := 0; i < 2; i++ { + mockDB.ExpectQuery("\\QSELECT _tidb_rowid, raw_key, raw_value FROM `lightning_task_info`.conflict_error_v3 WHERE table_name = ? AND kv_type <> 0 AND _tidb_rowid >= ? 
and _tidb_rowid < ? ORDER BY _tidb_rowid LIMIT ?\\E"). + WillReturnRows(sqlmock.NewRows([]string{"_tidb_rowid", "raw_key", "raw_value"})) + } + mockDB.ExpectBegin() + mockDB.ExpectExec("DELETE FROM `lightning_task_info`\\.conflict_error_v3.*"). + WillReturnResult(sqlmock.NewResult(0, 1)) + mockDB.ExpectCommit() + mockDB.ExpectBegin() + mockDB.ExpectExec("DELETE FROM `lightning_task_info`\\.conflict_error_v3.*"). + WillReturnResult(sqlmock.NewResult(0, 0)) + mockDB.ExpectCommit() + + cfg := config.NewConfig() + cfg.Conflict.Strategy = config.ReplaceOnDup + cfg.TikvImporter.Backend = config.BackendLocal + cfg.App.TaskInfoSchemaName = "lightning_task_info" + em := errormanager.New(db, cfg, log.L()) + err = em.Init(ctx) + require.NoError(t, err) + + fnGetLatestCount := atomic.NewInt32(0) + fnDeleteKeyCount := atomic.NewInt32(0) + pool := util.NewWorkerPool(16, "resolve duplicate rows by replace") + err = em.ReplaceConflictKeys( + ctx, tbl, "a", pool, + func(ctx context.Context, key []byte) ([]byte, error) { + fnGetLatestCount.Add(1) + switch { + case bytes.Equal(key, data3IndexKey): + return data3IndexValue, nil + case bytes.Equal(key, data4RowKey): + if fnGetLatestCount.String() == "3" { + return data4RowValue, nil + } + return nil, tikverr.ErrNotExist + case bytes.Equal(key, data4NonclusteredKey): + return data4NonclusteredValue, nil + default: + return nil, fmt.Errorf("key %v is not expected", key) + } + }, + func(ctx context.Context, keys [][]byte) error { + fnDeleteKeyCount.Add(int32(len(keys))) + for _, key := range keys { + if !bytes.Equal(key, data4RowKey) && !bytes.Equal(key, data4NonclusteredKey) { + return fmt.Errorf("key %v is not expected", key) + } + } + return nil + }, + ) + require.NoError(t, err) + require.Equal(t, int32(7), fnGetLatestCount.Load()) + require.Equal(t, int32(2), fnDeleteKeyCount.Load()) + err = mockDB.ExpectationsWereMet() + require.NoError(t, err) +} + +func TestReplaceConflictOneUniqueKeyNonclusteredPk(t *testing.T) { + p := parser.New() + node, _, err := p.ParseSQL("create table a (a int primary key nonclustered, b int not null, c text, unique key uni_b(b));") + require.NoError(t, err) + mockSctx := mock.NewContext() + mockSctx.GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOff + info, err := ddl.MockTableInfo(mockSctx, node[0].(*ast.CreateTableStmt), 108) + require.NoError(t, err) + info.State = model.StatePublic + require.False(t, info.PKIsHandle) + tbl, err := tables.TableFromMeta(tidbkv.NewPanickingAllocators(info.SepAutoInc()), info) + require.NoError(t, err) + require.False(t, tbl.Meta().HasClusteredIndex()) + + sessionOpts := encode.SessionOptions{ + SQLMode: mysql.ModeStrictAllTables, + Timestamp: 1234567890, + } + + encoder, err := tidbkv.NewBaseKVEncoder(&encode.EncodingConfig{ + Table: tbl, + SessionOptions: sessionOpts, + Logger: log.L(), + }) + require.NoError(t, err) + encoder.SessionCtx.GetTableCtx().GetRowEncodingConfig().RowEncoder.Enable = true + + data1 := []types.Datum{ + types.NewIntDatum(1), + types.NewIntDatum(6), + types.NewStringDatum("1.csv"), + types.NewIntDatum(1), + } + data2 := []types.Datum{ + types.NewIntDatum(2), + types.NewIntDatum(6), + types.NewStringDatum("2.csv"), + types.NewIntDatum(2), + } + data3 := []types.Datum{ + types.NewIntDatum(3), + types.NewIntDatum(3), + types.NewStringDatum("3.csv"), + types.NewIntDatum(3), + } + data4 := []types.Datum{ + types.NewIntDatum(3), + types.NewIntDatum(4), + types.NewStringDatum("4.csv"), + types.NewIntDatum(4), + } + data5 := []types.Datum{ + 
types.NewIntDatum(5), + types.NewIntDatum(4), + types.NewStringDatum("5.csv"), + types.NewIntDatum(5), + } + _, err = encoder.AddRecord(data1) + require.NoError(t, err) + _, err = encoder.AddRecord(data2) + require.NoError(t, err) + _, err = encoder.AddRecord(data3) + require.NoError(t, err) + _, err = encoder.AddRecord(data4) + require.NoError(t, err) + _, err = encoder.AddRecord(data5) + require.NoError(t, err) + kvPairs := encoder.SessionCtx.TakeKvPairs() + + data1RowKey := kvPairs.Pairs[0].Key + data2RowKey := kvPairs.Pairs[3].Key + data2RowValue := kvPairs.Pairs[3].Val + data3RowKey := kvPairs.Pairs[6].Key + data4RowKey := kvPairs.Pairs[9].Key + data4RowValue := kvPairs.Pairs[9].Val + data5RowKey := kvPairs.Pairs[12].Key + data5RowValue := kvPairs.Pairs[12].Val + + data2IndexKey := kvPairs.Pairs[5].Key + data2IndexValue := kvPairs.Pairs[5].Val + data3IndexKey := kvPairs.Pairs[8].Key + data3IndexValue := kvPairs.Pairs[8].Val + data5IndexKey := kvPairs.Pairs[14].Key + data5IndexValue := kvPairs.Pairs[14].Val + + data1NonclusteredKey := kvPairs.Pairs[1].Key + data1NonclusteredValue := kvPairs.Pairs[1].Val + data2NonclusteredValue := kvPairs.Pairs[4].Val + data4NonclusteredKey := kvPairs.Pairs[10].Key + data4NonclusteredValue := kvPairs.Pairs[10].Val + data5NonclusteredValue := kvPairs.Pairs[13].Val + + db, mockDB, err := sqlmock.New() + require.NoError(t, err) + defer func() { + _ = db.Close() + }() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mockDB.ExpectExec("CREATE SCHEMA IF NOT EXISTS `lightning_task_info`"). + WillReturnResult(sqlmock.NewResult(1, 1)) + mockDB.ExpectExec("CREATE TABLE IF NOT EXISTS `lightning_task_info`\\.conflict_error_v3.*"). + WillReturnResult(sqlmock.NewResult(2, 1)) + mockDB.ExpectExec("CREATE OR REPLACE VIEW `lightning_task_info`\\.conflict_view.*"). + WillReturnResult(sqlmock.NewResult(3, 1)) + mockDB.ExpectQuery("\\QSELECT _tidb_rowid, raw_key, index_name, raw_value, raw_handle FROM `lightning_task_info`.conflict_error_v3 WHERE table_name = ? AND kv_type = 0 AND _tidb_rowid >= ? and _tidb_rowid < ? ORDER BY _tidb_rowid LIMIT ?\\E"). + WillReturnRows(sqlmock.NewRows([]string{"_tidb_rowid", "raw_key", "index_name", "raw_value", "raw_handle"}). + AddRow(1, data4NonclusteredKey, "uni_b", data4NonclusteredValue, data4RowKey). + AddRow(2, data4NonclusteredKey, "uni_b", data5NonclusteredValue, data5RowKey). + AddRow(3, data1NonclusteredKey, "uni_b", data1NonclusteredValue, data1RowKey). + AddRow(4, data1NonclusteredKey, "uni_b", data2NonclusteredValue, data2RowKey). + AddRow(5, data3IndexKey, "PRIMARY", data3IndexValue, data3RowKey). + AddRow(6, data3IndexKey, "PRIMARY", data4NonclusteredValue, data4RowKey)) + mockDB.ExpectBegin() + mockDB.ExpectExec("INSERT IGNORE INTO `lightning_task_info`\\.conflict_error_v3.*"). + WithArgs(0, "a", nil, nil, data5RowKey, data5RowValue, 2, + 0, "a", nil, nil, data2RowKey, data2RowValue, 2, + 0, "a", nil, nil, data4RowKey, data4RowValue, 2). + WillReturnResult(driver.ResultNoRows) + mockDB.ExpectCommit() + for i := 0; i < 2; i++ { + mockDB.ExpectQuery("\\QSELECT _tidb_rowid, raw_key, index_name, raw_value, raw_handle FROM `lightning_task_info`.conflict_error_v3 WHERE table_name = ? AND kv_type = 0 AND _tidb_rowid >= ? and _tidb_rowid < ? ORDER BY _tidb_rowid LIMIT ?\\E"). 
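+		// (conflict rows are scanned in pages keyed by _tidb_rowid range; the same
+		// queries are expected again below with empty results once the pages drain)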
+ WillReturnRows(sqlmock.NewRows([]string{"_tidb_rowid", "raw_key", "index_name", "raw_value", "raw_handle"})) + } + mockDB.ExpectQuery("\\QSELECT _tidb_rowid, raw_key, raw_value FROM `lightning_task_info`.conflict_error_v3 WHERE table_name = ? AND kv_type <> 0 AND _tidb_rowid >= ? and _tidb_rowid < ? ORDER BY _tidb_rowid LIMIT ?\\E"). + WillReturnRows(sqlmock.NewRows([]string{"_tidb_rowid", "raw_key", "raw_value"}). + AddRow(1, data5RowKey, data5RowValue). + AddRow(2, data2RowKey, data2RowValue). + AddRow(3, data4RowKey, data4RowValue)) + for i := 0; i < 2; i++ { + mockDB.ExpectQuery("\\QSELECT _tidb_rowid, raw_key, raw_value FROM `lightning_task_info`.conflict_error_v3 WHERE table_name = ? AND kv_type <> 0 AND _tidb_rowid >= ? and _tidb_rowid < ? ORDER BY _tidb_rowid LIMIT ?\\E"). + WillReturnRows(sqlmock.NewRows([]string{"_tidb_rowid", "raw_key", "raw_value"})) + } + mockDB.ExpectBegin() + mockDB.ExpectExec("DELETE FROM `lightning_task_info`\\.conflict_error_v3.*"). + WillReturnResult(sqlmock.NewResult(0, 3)) + mockDB.ExpectCommit() + mockDB.ExpectBegin() + mockDB.ExpectExec("DELETE FROM `lightning_task_info`\\.conflict_error_v3.*"). + WillReturnResult(sqlmock.NewResult(0, 0)) + mockDB.ExpectCommit() + + cfg := config.NewConfig() + cfg.Conflict.Strategy = config.ReplaceOnDup + cfg.TikvImporter.Backend = config.BackendLocal + cfg.App.TaskInfoSchemaName = "lightning_task_info" + em := errormanager.New(db, cfg, log.L()) + err = em.Init(ctx) + require.NoError(t, err) + + fnGetLatestCount := atomic.NewInt32(0) + fnDeleteKeyCount := atomic.NewInt32(0) + pool := util.NewWorkerPool(16, "resolve duplicate rows by replace") + err = em.ReplaceConflictKeys( + ctx, tbl, "a", pool, + func(ctx context.Context, key []byte) ([]byte, error) { + fnGetLatestCount.Add(1) + switch { + case bytes.Equal(key, data4NonclusteredKey): + if fnGetLatestCount.String() != "20" { + return data4NonclusteredValue, nil + } + return nil, tikverr.ErrNotExist + case bytes.Equal(key, data5RowKey): + if fnGetLatestCount.String() == "3" { + return data5RowValue, nil + } + return nil, tikverr.ErrNotExist + case bytes.Equal(key, data1NonclusteredKey): + return data1NonclusteredValue, nil + case bytes.Equal(key, data2RowKey): + if fnGetLatestCount.String() == "6" { + return data2RowValue, nil + } + return nil, tikverr.ErrNotExist + case bytes.Equal(key, data3IndexKey): + return data3IndexValue, nil + case bytes.Equal(key, data4RowKey): + return data4RowValue, nil + case bytes.Equal(key, data2IndexKey): + return data2IndexValue, nil + case bytes.Equal(key, data5IndexKey): + return data5IndexValue, nil + default: + return nil, fmt.Errorf("key %x is not expected", key) + } + }, + func(ctx context.Context, keys [][]byte) error { + fnDeleteKeyCount.Add(int32(len(keys))) + for _, key := range keys { + if !bytes.Equal(key, data5RowKey) && !bytes.Equal(key, data2RowKey) && !bytes.Equal(key, data4RowKey) && !bytes.Equal(key, data2IndexKey) && !bytes.Equal(key, data4NonclusteredKey) && !bytes.Equal(key, data5IndexKey) { + return fmt.Errorf("key %v is not expected", key) + } + } + return nil + }, + ) + require.NoError(t, err) + require.Equal(t, int32(18), fnGetLatestCount.Load()) + require.Equal(t, int32(5), fnDeleteKeyCount.Load()) + err = mockDB.ExpectationsWereMet() + require.NoError(t, err) +} + +func TestReplaceConflictOneUniqueKeyNonclusteredVarcharPk(t *testing.T) { + p := parser.New() + node, _, err := p.ParseSQL("create table a (a varchar(20) primary key nonclustered, b int not null, c text, unique key uni_b(b));") + 
require.NoError(t, err) + mockSctx := mock.NewContext() + mockSctx.GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOff + info, err := ddl.MockTableInfo(mockSctx, node[0].(*ast.CreateTableStmt), 108) + require.NoError(t, err) + info.State = model.StatePublic + require.False(t, info.PKIsHandle) + tbl, err := tables.TableFromMeta(tidbkv.NewPanickingAllocators(info.SepAutoInc()), info) + require.NoError(t, err) + require.False(t, tbl.Meta().HasClusteredIndex()) + + sessionOpts := encode.SessionOptions{ + SQLMode: mysql.ModeStrictAllTables, + Timestamp: 1234567890, + } + + encoder, err := tidbkv.NewBaseKVEncoder(&encode.EncodingConfig{ + Table: tbl, + SessionOptions: sessionOpts, + Logger: log.L(), + }) + require.NoError(t, err) + encoder.SessionCtx.GetTableCtx().GetRowEncodingConfig().RowEncoder.Enable = true + + data1 := []types.Datum{ + types.NewStringDatum("x"), + types.NewIntDatum(6), + types.NewStringDatum("1.csv"), + types.NewIntDatum(1), + } + data2 := []types.Datum{ + types.NewStringDatum("y"), + types.NewIntDatum(6), + types.NewStringDatum("2.csv"), + types.NewIntDatum(2), + } + data3 := []types.Datum{ + types.NewStringDatum("z"), + types.NewIntDatum(3), + types.NewStringDatum("3.csv"), + types.NewIntDatum(3), + } + data4 := []types.Datum{ + types.NewStringDatum("z"), + types.NewIntDatum(4), + types.NewStringDatum("4.csv"), + types.NewIntDatum(4), + } + data5 := []types.Datum{ + types.NewStringDatum("t"), + types.NewIntDatum(4), + types.NewStringDatum("5.csv"), + types.NewIntDatum(5), + } + _, err = encoder.AddRecord(data1) + require.NoError(t, err) + _, err = encoder.AddRecord(data2) + require.NoError(t, err) + _, err = encoder.AddRecord(data3) + require.NoError(t, err) + _, err = encoder.AddRecord(data4) + require.NoError(t, err) + _, err = encoder.AddRecord(data5) + require.NoError(t, err) + kvPairs := encoder.SessionCtx.TakeKvPairs() + + data1RowKey := kvPairs.Pairs[0].Key + data2RowKey := kvPairs.Pairs[3].Key + data2RowValue := kvPairs.Pairs[3].Val + data3RowKey := kvPairs.Pairs[6].Key + data4RowKey := kvPairs.Pairs[9].Key + data4RowValue := kvPairs.Pairs[9].Val + data5RowKey := kvPairs.Pairs[12].Key + data5RowValue := kvPairs.Pairs[12].Val + + data2IndexKey := kvPairs.Pairs[5].Key + data2IndexValue := kvPairs.Pairs[5].Val + data3IndexKey := kvPairs.Pairs[8].Key + data3IndexValue := kvPairs.Pairs[8].Val + data4IndexValue := kvPairs.Pairs[11].Val + data5IndexKey := kvPairs.Pairs[14].Key + data5IndexValue := kvPairs.Pairs[14].Val + + data1NonclusteredKey := kvPairs.Pairs[1].Key + data1NonclusteredValue := kvPairs.Pairs[1].Val + data2NonclusteredValue := kvPairs.Pairs[4].Val + data4NonclusteredKey := kvPairs.Pairs[10].Key + data4NonclusteredValue := kvPairs.Pairs[10].Val + data5NonclusteredValue := kvPairs.Pairs[13].Val + + db, mockDB, err := sqlmock.New() + require.NoError(t, err) + defer func() { + _ = db.Close() + }() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mockDB.ExpectExec("CREATE SCHEMA IF NOT EXISTS `lightning_task_info`"). + WillReturnResult(sqlmock.NewResult(1, 1)) + mockDB.ExpectExec("CREATE TABLE IF NOT EXISTS `lightning_task_info`\\.conflict_error_v3.*"). + WillReturnResult(sqlmock.NewResult(2, 1)) + mockDB.ExpectExec("CREATE OR REPLACE VIEW `lightning_task_info`\\.conflict_view.*"). + WillReturnResult(sqlmock.NewResult(3, 1)) + mockDB.ExpectQuery("\\QSELECT _tidb_rowid, raw_key, index_name, raw_value, raw_handle FROM `lightning_task_info`.conflict_error_v3 WHERE table_name = ? 
AND kv_type = 0 AND _tidb_rowid >= ? and _tidb_rowid < ? ORDER BY _tidb_rowid LIMIT ?\\E"). + WillReturnRows(sqlmock.NewRows([]string{"_tidb_rowid", "raw_key", "index_name", "raw_value", "raw_handle"}). + AddRow(1, data4NonclusteredKey, "uni_b", data4NonclusteredValue, data4RowKey). + AddRow(2, data4NonclusteredKey, "uni_b", data5NonclusteredValue, data5RowKey). + AddRow(3, data1NonclusteredKey, "uni_b", data1NonclusteredValue, data1RowKey). + AddRow(4, data1NonclusteredKey, "uni_b", data2NonclusteredValue, data2RowKey). + AddRow(5, data3IndexKey, "PRIMARY", data3IndexValue, data3RowKey). + AddRow(6, data3IndexKey, "PRIMARY", data4IndexValue, data4RowKey)) + mockDB.ExpectBegin() + mockDB.ExpectExec("INSERT IGNORE INTO `lightning_task_info`\\.conflict_error_v3.*"). + WithArgs(0, "a", nil, nil, data5RowKey, data5RowValue, 2, + 0, "a", nil, nil, data2RowKey, data2RowValue, 2, + 0, "a", nil, nil, data4RowKey, data4RowValue, 2). + WillReturnResult(driver.ResultNoRows) + mockDB.ExpectCommit() + for i := 0; i < 2; i++ { + mockDB.ExpectQuery("\\QSELECT _tidb_rowid, raw_key, index_name, raw_value, raw_handle FROM `lightning_task_info`.conflict_error_v3 WHERE table_name = ? AND kv_type = 0 AND _tidb_rowid >= ? and _tidb_rowid < ? ORDER BY _tidb_rowid LIMIT ?\\E"). + WillReturnRows(sqlmock.NewRows([]string{"_tidb_rowid", "raw_key", "index_name", "raw_value", "raw_handle"})) + } + mockDB.ExpectQuery("\\QSELECT _tidb_rowid, raw_key, raw_value FROM `lightning_task_info`.conflict_error_v3 WHERE table_name = ? AND kv_type <> 0 AND _tidb_rowid >= ? and _tidb_rowid < ? ORDER BY _tidb_rowid LIMIT ?\\E"). + WillReturnRows(sqlmock.NewRows([]string{"_tidb_rowid", "raw_key", "raw_value"}). + AddRow(1, data5RowKey, data5RowValue). + AddRow(2, data2RowKey, data2RowValue). + AddRow(3, data4RowKey, data4RowValue)) + for i := 0; i < 2; i++ { + mockDB.ExpectQuery("\\QSELECT _tidb_rowid, raw_key, raw_value FROM `lightning_task_info`.conflict_error_v3 WHERE table_name = ? AND kv_type <> 0 AND _tidb_rowid >= ? and _tidb_rowid < ? ORDER BY _tidb_rowid LIMIT ?\\E"). + WillReturnRows(sqlmock.NewRows([]string{"_tidb_rowid", "raw_key", "raw_value"})) + } + mockDB.ExpectBegin() + mockDB.ExpectExec("DELETE FROM `lightning_task_info`\\.conflict_error_v3.*"). + WillReturnResult(sqlmock.NewResult(0, 3)) + mockDB.ExpectCommit() + mockDB.ExpectBegin() + mockDB.ExpectExec("DELETE FROM `lightning_task_info`\\.conflict_error_v3.*"). 
+ WillReturnResult(sqlmock.NewResult(0, 0)) + mockDB.ExpectCommit() + + cfg := config.NewConfig() + cfg.Conflict.Strategy = config.ReplaceOnDup + cfg.TikvImporter.Backend = config.BackendLocal + cfg.App.TaskInfoSchemaName = "lightning_task_info" + em := errormanager.New(db, cfg, log.L()) + err = em.Init(ctx) + require.NoError(t, err) + + fnGetLatestCount := atomic.NewInt32(0) + fnDeleteKeyCount := atomic.NewInt32(0) + pool := util.NewWorkerPool(16, "resolve duplicate rows by replace") + err = em.ReplaceConflictKeys( + ctx, tbl, "a", pool, + func(ctx context.Context, key []byte) ([]byte, error) { + fnGetLatestCount.Add(1) + switch { + case bytes.Equal(key, data4NonclusteredKey): + if fnGetLatestCount.String() != "20" { + return data4NonclusteredValue, nil + } + return nil, tikverr.ErrNotExist + case bytes.Equal(key, data5RowKey): + if fnGetLatestCount.String() == "3" { + return data5RowValue, nil + } + return nil, tikverr.ErrNotExist + case bytes.Equal(key, data1NonclusteredKey): + return data1NonclusteredValue, nil + case bytes.Equal(key, data2RowKey): + if fnGetLatestCount.String() == "6" { + return data2RowValue, nil + } + return nil, tikverr.ErrNotExist + case bytes.Equal(key, data3IndexKey): + return data3IndexValue, nil + case bytes.Equal(key, data4RowKey): + if fnGetLatestCount.String() == "9" { + return data4RowValue, nil + } + return nil, tikverr.ErrNotExist + case bytes.Equal(key, data2IndexKey): + return data2IndexValue, nil + case bytes.Equal(key, data5IndexKey): + return data5IndexValue, nil + default: + return nil, fmt.Errorf("key %x is not expected", key) + } + }, + func(ctx context.Context, keys [][]byte) error { + fnDeleteKeyCount.Add(int32(len(keys))) + for _, key := range keys { + if !bytes.Equal(key, data5RowKey) && !bytes.Equal(key, data2RowKey) && !bytes.Equal(key, data4RowKey) && !bytes.Equal(key, data2IndexKey) && !bytes.Equal(key, data4NonclusteredKey) && !bytes.Equal(key, data5IndexKey) { + return fmt.Errorf("key %v is not expected", key) + } + } + return nil + }, + ) + require.NoError(t, err) + require.Equal(t, int32(21), fnGetLatestCount.Load()) + require.Equal(t, int32(5), fnDeleteKeyCount.Load()) + err = mockDB.ExpectationsWereMet() + require.NoError(t, err) +} diff --git a/pkg/table/column.go b/pkg/table/column.go index 7c315fe318a41..00541d0145cef 100644 --- a/pkg/table/column.go +++ b/pkg/table/column.go @@ -491,7 +491,14 @@ func (c *Column) CheckNotNull(data *types.Datum, rowCntInLoadData uint64) error // error is ErrWarnNullToNotnull. // Otherwise, the error is ErrColumnCantNull. // If BadNullAsWarning is true, it will append the error as a warning, else return the error. +<<<<<<< HEAD func (c *Column) HandleBadNull(d *types.Datum, sc *stmtctx.StatementContext, rowCntInLoadData uint64) error { +======= +func (c *Column) HandleBadNull( + ec errctx.Context, + d *types.Datum, + rowCntInLoadData uint64) error { +>>>>>>> 91beef4bb14 (*: disable insert null to not-null column for single-row insertion in non-strict mode (#55477)) if err := c.CheckNotNull(d, rowCntInLoadData); err != nil { if sc.BadNullAsWarning { sc.AppendWarning(err) @@ -535,7 +542,12 @@ func GetColOriginDefaultValueWithoutStrictSQLMode(ctx sessionctx.Context, col *m // But CheckNoDefaultValueForInsert logic should only check before insert. 
func CheckNoDefaultValueForInsert(sc *stmtctx.StatementContext, col *model.ColumnInfo) error { if mysql.HasNoDefaultValueFlag(col.GetFlag()) && !col.DefaultIsExpr && col.GetDefaultValue() == nil && col.GetType() != mysql.TypeEnum { +<<<<<<< HEAD if !sc.BadNullAsWarning { +======= + ignoreErr := sc.ErrGroupLevel(errctx.ErrGroupNoDefault) != errctx.LevelError + if !ignoreErr { +>>>>>>> 91beef4bb14 (*: disable insert null to not-null column for single-row insertion in non-strict mode (#55477)) return ErrNoDefaultValue.GenWithStackByArgs(col.Name) } if !mysql.HasNotNullFlag(col.GetFlag()) { diff --git a/pkg/table/column_test.go b/pkg/table/column_test.go index 48caa4257539d..dce01eb86a58e 100644 --- a/pkg/table/column_test.go +++ b/pkg/table/column_test.go @@ -464,7 +464,19 @@ func TestGetDefaultValue(t *testing.T) { }() for _, tt := range tests { +<<<<<<< HEAD ctx.GetSessionVars().StmtCtx.BadNullAsWarning = !tt.strict +======= + sc := ctx.GetSessionVars().StmtCtx + if tt.strict { + ctx.GetSessionVars().SQLMode = defaultMode + } else { + ctx.GetSessionVars().SQLMode = mysql.DelSQLMode(defaultMode, mysql.ModeStrictAllTables|mysql.ModeStrictTransTables) + } + levels := sc.ErrLevels() + levels[errctx.ErrGroupNoDefault] = errctx.ResolveErrLevel(false, !tt.strict) + sc.SetErrLevels(levels) +>>>>>>> 91beef4bb14 (*: disable insert null to not-null column for single-row insertion in non-strict mode (#55477)) val, err := GetColDefaultValue(ctx, tt.colInfo) if err != nil { require.Errorf(t, tt.err, "%v", err) @@ -478,7 +490,19 @@ func TestGetDefaultValue(t *testing.T) { } for _, tt := range tests { +<<<<<<< HEAD ctx.GetSessionVars().StmtCtx.BadNullAsWarning = !tt.strict +======= + sc := ctx.GetSessionVars().StmtCtx + if tt.strict { + ctx.GetSessionVars().SQLMode = defaultMode + } else { + ctx.GetSessionVars().SQLMode = mysql.DelSQLMode(defaultMode, mysql.ModeStrictAllTables|mysql.ModeStrictTransTables) + } + levels := sc.ErrLevels() + levels[errctx.ErrGroupNoDefault] = errctx.ResolveErrLevel(false, !tt.strict) + sc.SetErrLevels(levels) +>>>>>>> 91beef4bb14 (*: disable insert null to not-null column for single-row insertion in non-strict mode (#55477)) val, err := GetColOriginDefaultValue(ctx, tt.colInfo) if err != nil { require.Errorf(t, tt.err, "%v", err) diff --git a/tests/integrationtest/r/executor/executor.result b/tests/integrationtest/r/executor/executor.result index 7ea042a6cd496..8249bd99511d8 100644 --- a/tests/integrationtest/r/executor/executor.result +++ b/tests/integrationtest/r/executor/executor.result @@ -2797,3 +2797,1791 @@ select -10*a from t; select a/-2 from t; a/-2 -61.56 +<<<<<<< HEAD +======= +drop table if exists t1, t2, t3; +create table t1(a int, b int); +create table t2(a int, b varchar(20)); +create table t3(a int, b decimal(30,10)); +insert into t1 values (1,1),(1,1),(2,2),(3,3),(null,null); +insert into t2 values (1,'1'),(2,'2'),(null,null),(null,'3'); +insert into t3 values (2,2.1),(3,3); +explain format = 'brief' select * from t3 union select * from t1; +id estRows task access object operator info +HashAgg 16000.00 root group by:Column#7, Column#8, funcs:firstrow(Column#7)->Column#7, funcs:firstrow(Column#8)->Column#8 +└─Union 20000.00 root + ├─TableReader 10000.00 root data:TableFullScan + │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo + └─Projection 10000.00 root executor__executor.t1.a->Column#7, cast(executor__executor.t1.b, decimal(30,10) BINARY)->Column#8 + └─TableReader 10000.00 root data:TableFullScan + └─TableFullScan 10000.00 
diff --git a/pkg/table/column_test.go b/pkg/table/column_test.go
index 48caa4257539d..dce01eb86a58e 100644
--- a/pkg/table/column_test.go
+++ b/pkg/table/column_test.go
@@ -464,7 +464,19 @@ func TestGetDefaultValue(t *testing.T) {
 	}()
 
 	for _, tt := range tests {
+<<<<<<< HEAD
 		ctx.GetSessionVars().StmtCtx.BadNullAsWarning = !tt.strict
+=======
+		sc := ctx.GetSessionVars().StmtCtx
+		if tt.strict {
+			ctx.GetSessionVars().SQLMode = defaultMode
+		} else {
+			ctx.GetSessionVars().SQLMode = mysql.DelSQLMode(defaultMode, mysql.ModeStrictAllTables|mysql.ModeStrictTransTables)
+		}
+		levels := sc.ErrLevels()
+		levels[errctx.ErrGroupNoDefault] = errctx.ResolveErrLevel(false, !tt.strict)
+		sc.SetErrLevels(levels)
+>>>>>>> 91beef4bb14 (*: disable insert null to not-null column for single-row insertion in non-strict mode (#55477))
 		val, err := GetColDefaultValue(ctx, tt.colInfo)
 		if err != nil {
 			require.Errorf(t, tt.err, "%v", err)
@@ -478,7 +490,19 @@ func TestGetDefaultValue(t *testing.T) {
 	}
 
 	for _, tt := range tests {
+<<<<<<< HEAD
 		ctx.GetSessionVars().StmtCtx.BadNullAsWarning = !tt.strict
+=======
+		sc := ctx.GetSessionVars().StmtCtx
+		if tt.strict {
+			ctx.GetSessionVars().SQLMode = defaultMode
+		} else {
+			ctx.GetSessionVars().SQLMode = mysql.DelSQLMode(defaultMode, mysql.ModeStrictAllTables|mysql.ModeStrictTransTables)
+		}
+		levels := sc.ErrLevels()
+		levels[errctx.ErrGroupNoDefault] = errctx.ResolveErrLevel(false, !tt.strict)
+		sc.SetErrLevels(levels)
+>>>>>>> 91beef4bb14 (*: disable insert null to not-null column for single-row insertion in non-strict mode (#55477))
 		val, err := GetColOriginDefaultValue(ctx, tt.colInfo)
 		if err != nil {
 			require.Errorf(t, tt.err, "%v", err)
diff --git a/tests/integrationtest/r/executor/executor.result b/tests/integrationtest/r/executor/executor.result
index 7ea042a6cd496..8249bd99511d8 100644
--- a/tests/integrationtest/r/executor/executor.result
+++ b/tests/integrationtest/r/executor/executor.result
@@ -2797,3 +2797,1791 @@ select -10*a from t;
 select a/-2 from t;
 a/-2
 -61.56
+<<<<<<< HEAD
+=======
+drop table if exists t1, t2, t3;
+create table t1(a int, b int);
+create table t2(a int, b varchar(20));
+create table t3(a int, b decimal(30,10));
+insert into t1 values (1,1),(1,1),(2,2),(3,3),(null,null);
+insert into t2 values (1,'1'),(2,'2'),(null,null),(null,'3');
+insert into t3 values (2,2.1),(3,3);
+explain format = 'brief' select * from t3 union select * from t1;
+id estRows task access object operator info
+HashAgg 16000.00 root group by:Column#7, Column#8, funcs:firstrow(Column#7)->Column#7, funcs:firstrow(Column#8)->Column#8
+└─Union 20000.00 root
+  ├─TableReader 10000.00 root data:TableFullScan
+  │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo
+  └─Projection 10000.00 root executor__executor.t1.a->Column#7, cast(executor__executor.t1.b, decimal(30,10) BINARY)->Column#8
+    └─TableReader 10000.00 root data:TableFullScan
+      └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
+select * from t3 union select * from t1;
+a b
+NULL NULL
+1 1.0000000000
+2 2.0000000000
+2 2.1000000000
+3 3.0000000000
+explain format = 'brief' select * from t2 union all select * from t1;
+id estRows task access object operator info
+Union 20000.00 root
+├─TableReader 10000.00 root data:TableFullScan
+│ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo
+└─Projection 10000.00 root executor__executor.t1.a->Column#7, cast(executor__executor.t1.b, varchar(20) BINARY CHARACTER SET utf8mb4 COLLATE utf8mb4_bin)->Column#8
+  └─TableReader 10000.00 root data:TableFullScan
+    └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
+select * from t2 union all select * from t1;
+a b
+NULL NULL
+NULL NULL
+NULL 3
+1 1
+1 1
+1 1
+2 2
+2 2
+3 3
+explain format = 'brief' select * from t1 except select * from t3;
+id estRows task access object operator info
+HashJoin 6400.00 root anti semi join, equal:[nulleq(executor__executor.t1.a, executor__executor.t3.a)], other cond:nulleq(cast(executor__executor.t1.b, decimal(10,0) BINARY), executor__executor.t3.b)
+├─TableReader(Build) 10000.00 root data:TableFullScan
+│ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo
+└─HashAgg(Probe) 8000.00 root group by:executor__executor.t1.a, executor__executor.t1.b, funcs:firstrow(executor__executor.t1.a)->executor__executor.t1.a, funcs:firstrow(executor__executor.t1.b)->executor__executor.t1.b
+  └─TableReader 8000.00 root data:HashAgg
+    └─HashAgg 8000.00 cop[tikv] group by:executor__executor.t1.a, executor__executor.t1.b,
+      └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
+select * from t1 except select * from t3;
+a b
+NULL NULL
+1 1
+2 2
+explain format = 'brief' select * from t1 intersect select * from t2;
+id estRows task access object operator info
+HashJoin 6400.00 root semi join, equal:[nulleq(executor__executor.t1.a, executor__executor.t2.a)], other cond:nulleq(cast(executor__executor.t1.b, double BINARY), cast(executor__executor.t2.b, double BINARY))
+├─TableReader(Build) 10000.00 root data:TableFullScan
+│ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo
+└─HashAgg(Probe) 8000.00 root group by:executor__executor.t1.a, executor__executor.t1.b, funcs:firstrow(executor__executor.t1.a)->executor__executor.t1.a, funcs:firstrow(executor__executor.t1.b)->executor__executor.t1.b
+  └─TableReader 8000.00 root data:HashAgg
+    └─HashAgg 8000.00 cop[tikv] group by:executor__executor.t1.a, executor__executor.t1.b,
+      └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
+select * from t1 intersect select * from t2;
+a b
+NULL NULL
+1 1
+2 2
+explain format = 'brief' select * from t1 union all select * from t2 union all select * from t3;
+id estRows task access object operator info
+Union 30000.00 root
+├─Projection 10000.00 root executor__executor.t1.a->Column#10, cast(executor__executor.t1.b, varchar(30) BINARY CHARACTER SET utf8mb4 COLLATE utf8mb4_bin)->Column#11
+│ └─TableReader 10000.00 root data:TableFullScan
+│   └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
+├─Projection 10000.00 root executor__executor.t2.a->Column#10, cast(executor__executor.t2.b, varchar(30) BINARY CHARACTER SET utf8mb4 COLLATE utf8mb4_bin)->Column#11
+│ └─TableReader 10000.00 root data:TableFullScan
+│   └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo
+└─Projection 10000.00 root executor__executor.t3.a->Column#10, cast(executor__executor.t3.b, varchar(30) BINARY CHARACTER SET utf8mb4 COLLATE utf8mb4_bin)->Column#11
+  └─TableReader 10000.00 root data:TableFullScan
+    └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo
+select * from t1 union all select * from t2 union all select * from t3;
+a b
+NULL NULL
+NULL NULL
+NULL 3
+1 1
+1 1
+1 1
+2 2
+2 2
+2 2.1000000000
+3 3
+3 3.0000000000
+explain format = 'brief' select * from t1 union all select * from t2 except select * from t3;
+id estRows task access object operator info
+HashJoin 12800.00 root anti semi join, equal:[nulleq(Column#10, executor__executor.t3.a)], other cond:nulleq(cast(Column#11, double BINARY), cast(executor__executor.t3.b, double BINARY))
+├─TableReader(Build) 10000.00 root data:TableFullScan
+│ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo
+└─HashAgg(Probe) 16000.00 root group by:Column#10, Column#11, funcs:firstrow(Column#10)->Column#10, funcs:firstrow(Column#11)->Column#11
+  └─Union 20000.00 root
+    ├─Projection 10000.00 root executor__executor.t1.a->Column#10, cast(executor__executor.t1.b, varchar(20) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin)->Column#11
+    │ └─TableReader 10000.00 root data:TableFullScan
+    │   └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
+    └─TableReader 10000.00 root data:TableFullScan
+      └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo
+select * from t1 union all select * from t2 except select * from t3;
+a b
+NULL NULL
+NULL 3
+1 1
+2 2
+explain format = 'brief' select * from t1 intersect select * from t2 intersect select * from t1;
+id estRows task access object operator info
+HashJoin 5120.00 root semi join, equal:[nulleq(executor__executor.t1.a, executor__executor.t1.a) nulleq(executor__executor.t1.b, executor__executor.t1.b)]
+├─TableReader(Build) 10000.00 root data:TableFullScan
+│ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
+└─HashJoin(Probe) 6400.00 root semi join, equal:[nulleq(executor__executor.t1.a, executor__executor.t2.a)], other cond:nulleq(cast(executor__executor.t1.b, double BINARY), cast(executor__executor.t2.b, double BINARY))
+  ├─TableReader(Build) 10000.00 root data:TableFullScan
+  │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo
+  └─HashAgg(Probe) 8000.00 root group by:executor__executor.t1.a, executor__executor.t1.b, funcs:firstrow(executor__executor.t1.a)->executor__executor.t1.a, funcs:firstrow(executor__executor.t1.b)->executor__executor.t1.b
+    └─TableReader 8000.00 root data:HashAgg
+      └─HashAgg 8000.00 cop[tikv] group by:executor__executor.t1.a, executor__executor.t1.b,
+        └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
+select * from t1 intersect select * from t2 intersect select * from t1;
+a b
+NULL NULL
+1 1
+2 2
+explain format = 'brief' select * from t1 union all select * from t2 intersect select * from t3;
+id estRows task access object operator info
+Union 16400.00 root
+├─Projection 10000.00 root executor__executor.t1.a->Column#10, cast(executor__executor.t1.b, varchar(20) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin)->Column#11
+│ └─TableReader 10000.00 root data:TableFullScan
+│   └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
+└─HashJoin 6400.00 root semi join, equal:[nulleq(executor__executor.t2.a, executor__executor.t3.a)], other cond:nulleq(cast(executor__executor.t2.b, double BINARY), cast(executor__executor.t3.b, double BINARY))
+  ├─TableReader(Build) 10000.00 root data:TableFullScan
+  │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo
+  └─HashAgg(Probe) 8000.00 root group by:executor__executor.t2.a, executor__executor.t2.b, funcs:firstrow(executor__executor.t2.a)->executor__executor.t2.a, funcs:firstrow(executor__executor.t2.b)->executor__executor.t2.b
+    └─TableReader 8000.00 root data:HashAgg
+      └─HashAgg 8000.00 cop[tikv] group by:executor__executor.t2.a, executor__executor.t2.b,
+        └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo
+select * from t1 union all select * from t2 intersect select * from t3;
+a b
+NULL NULL
+1 1
+1 1
+2 2
+3 3
+explain format = 'brief' select * from t1 except select * from t2 intersect select * from t3;
+id estRows task access object operator info
+HashJoin 6400.00 root anti semi join, equal:[nulleq(executor__executor.t1.a, executor__executor.t2.a)], other cond:nulleq(cast(executor__executor.t1.b, double BINARY), cast(executor__executor.t2.b, double BINARY))
+├─HashJoin(Build) 6400.00 root semi join, equal:[nulleq(executor__executor.t2.a, executor__executor.t3.a)], other cond:nulleq(cast(executor__executor.t2.b, double BINARY), cast(executor__executor.t3.b, double BINARY))
+│ ├─TableReader(Build) 10000.00 root data:TableFullScan
+│ │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo
+│ └─HashAgg(Probe) 8000.00 root group by:executor__executor.t2.a, executor__executor.t2.b, funcs:firstrow(executor__executor.t2.a)->executor__executor.t2.a, funcs:firstrow(executor__executor.t2.b)->executor__executor.t2.b
+│   └─TableReader 8000.00 root data:HashAgg
+│     └─HashAgg 8000.00 cop[tikv] group by:executor__executor.t2.a, executor__executor.t2.b,
+│       └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo
+└─HashAgg(Probe) 8000.00 root group by:executor__executor.t1.a, executor__executor.t1.b, funcs:firstrow(executor__executor.t1.a)->executor__executor.t1.a, funcs:firstrow(executor__executor.t1.b)->executor__executor.t1.b
+  └─TableReader 8000.00 root data:HashAgg
+    └─HashAgg 8000.00 cop[tikv] group by:executor__executor.t1.a, executor__executor.t1.b,
+      └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
+select * from t1 except select * from t2 intersect select * from t3;
+a b
+NULL NULL
+1 1
+2 2
+3 3
+set tidb_cost_model_version=2;
+drop table if exists t;
+create table t (c1 year(4), c2 int, key(c1));
+insert into t values(2001, 1);
+explain format = 'brief' select t1.c1, t2.c1 from t as t1 inner join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL;
+id estRows task access object operator info
+MergeJoin 0.00 root inner join, left key:executor__executor.t.c1, right key:executor__executor.t.c1
+├─TableDual(Build) 0.00 root rows:0
+└─TableDual(Probe) 0.00 root rows:0
+select t1.c1, t2.c1 from t as t1 inner join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL;
+c1 c1
+explain format = 'brief' select * from t as t1 inner join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL;
+id estRows task access object operator info
+MergeJoin 0.00 root inner join, left key:executor__executor.t.c1, right key:executor__executor.t.c1
+├─TableDual(Build) 0.00 root rows:0
+└─TableDual(Probe) 0.00 root rows:0
+select * from t as t1 inner join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL;
+c1 c2 c1 c2
+explain format = 'brief' select count(*) from t as t1 inner join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL;
+id estRows task access object operator info
+StreamAgg 1.00 root funcs:count(1)->Column#7
+└─MergeJoin 0.00 root inner join, left key:executor__executor.t.c1, right key:executor__executor.t.c1
+  ├─TableDual(Build) 0.00 root rows:0
+  └─TableDual(Probe) 0.00 root rows:0
+select count(*) from t as t1 inner join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL;
+count(*)
+0
+explain format = 'brief' select t1.c1, t2.c1 from t as t1 left join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL;
+id estRows task access object operator info
+MergeJoin 0.00 root left outer join, left key:executor__executor.t.c1, right key:executor__executor.t.c1
+├─TableDual(Build) 0.00 root rows:0
+└─TableDual(Probe) 0.00 root rows:0
+select t1.c1, t2.c1 from t as t1 left join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL;
+c1 c1
+explain format = 'brief' select * from t as t1 left join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL;
+id estRows task access object operator info
+MergeJoin 0.00 root left outer join, left key:executor__executor.t.c1, right key:executor__executor.t.c1
+├─TableDual(Build) 0.00 root rows:0
+└─TableDual(Probe) 0.00 root rows:0
+select * from t as t1 left join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL;
+c1 c2 c1 c2
+explain format = 'brief' select count(*) from t as t1 left join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL;
+id estRows task access object operator info
+StreamAgg 1.00 root funcs:count(1)->Column#7
+└─MergeJoin 0.00 root left outer join, left key:executor__executor.t.c1, right key:executor__executor.t.c1
+  ├─TableDual(Build) 0.00 root rows:0
+  └─TableDual(Probe) 0.00 root rows:0
+select count(*) from t as t1 left join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL;
+count(*)
+0
+explain format = 'brief' select * from t as t1 left join t as t2 on t1.c1 = t2.c1 where t1.c1 is not NULL;
+id estRows task access object operator info
+HashJoin 12487.50 root left outer join, equal:[eq(executor__executor.t.c1, executor__executor.t.c1)]
+├─TableReader(Build) 9990.00 root data:Selection
+│ └─Selection 9990.00 cop[tikv] not(isnull(executor__executor.t.c1))
+│   └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo
+└─TableReader(Probe) 9990.00 root data:Selection
+  └─Selection 9990.00 cop[tikv] not(isnull(executor__executor.t.c1))
+    └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
+select * from t as t1 left join t as t2 on t1.c1 = t2.c1 where t1.c1 is not NULL;
+c1 c2 c1 c2
+2001 1 2001 1
+set tidb_cost_model_version=2;
+drop table if exists t1, t2, t3;
+create table t1(a int);
+create table t2 like t1;
+create table t3 like t1;
+insert into t1 values (1),(1),(2),(3),(null);
+insert into t2 values (1),(2),(null),(null);
+insert into t3 values (2),(3);
+explain format='brief' select * from t3 union select * from t1;
+id estRows task access object operator info
+HashAgg 16000.00 root group by:Column#5, funcs:firstrow(Column#5)->Column#5
+└─Union 20000.00 root
+  ├─TableReader 10000.00 root data:TableFullScan
+  │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo
+  └─TableReader 10000.00 root data:TableFullScan
+    └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
+select * from t3 union select * from t1;
+a
+NULL
+1
+2
+3
+explain format='brief' select * from t2 union all select * from t1;
+id estRows task access object operator info
+Union 20000.00 root
+├─TableReader 10000.00 root data:TableFullScan
+│ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo
+└─TableReader 10000.00 root data:TableFullScan
+  └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
+select * from t2 union all select * from t1;
+a
+NULL
+NULL
+NULL
+1
+1
+1
+2
+2
+3
+explain format='brief' select * from t1 except select * from t3;
+id estRows task access object operator info
+HashJoin 6400.00 root anti semi join, equal:[nulleq(executor__executor.t1.a, executor__executor.t3.a)]
+├─TableReader(Build) 10000.00 root data:TableFullScan
+│ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo
+└─HashAgg(Probe) 8000.00 root group by:executor__executor.t1.a, funcs:firstrow(executor__executor.t1.a)->executor__executor.t1.a
+  └─TableReader 8000.00 root data:HashAgg
+    └─HashAgg 8000.00 cop[tikv] group by:executor__executor.t1.a,
+      └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
+select * from t1 except select * from t3;
+a
+NULL
+1
+explain format='brief' select * from t1 intersect select * from t2;
+id estRows task access object operator info
+HashJoin 6400.00 root semi join, equal:[nulleq(executor__executor.t1.a, executor__executor.t2.a)]
+├─TableReader(Build) 10000.00 root data:TableFullScan
+│ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo
+└─HashAgg(Probe) 8000.00 root group by:executor__executor.t1.a, funcs:firstrow(executor__executor.t1.a)->executor__executor.t1.a
+  └─TableReader 8000.00 root data:HashAgg
+    └─HashAgg 8000.00 cop[tikv] group by:executor__executor.t1.a,
+      └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
+select * from t1 intersect select * from t2;
+a
+NULL
+1
+2
+explain format='brief' select * from t1 union all select * from t2 union all select * from t3;
+id estRows task access object operator info
+Union 30000.00 root
+├─TableReader 10000.00 root data:TableFullScan
+│ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
+├─TableReader 10000.00 root data:TableFullScan
+│ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo
+└─TableReader 10000.00 root data:TableFullScan
+  └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo
+select * from t1 union all select * from t2 union all select * from t3;
+a
+NULL
+NULL
+NULL
+1
+1
+1
+2
+2
+2
+3
+3
+explain format='brief' select * from t1 union all select * from t2 except select * from t3;
+id estRows task access object operator info
+HashJoin 12800.00 root anti semi join, equal:[nulleq(Column#7, executor__executor.t3.a)]
+├─TableReader(Build) 10000.00 root data:TableFullScan
+│ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo
+└─HashAgg(Probe) 16000.00 root group by:Column#7, funcs:firstrow(Column#7)->Column#7
+  └─Union 20000.00 root
+    ├─TableReader 10000.00 root data:TableFullScan
+    │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
+    └─TableReader 10000.00 root data:TableFullScan
+      └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo
+select * from t1 union all select * from t2 except select * from t3;
+a
+NULL
+1
+explain format='brief' select * from t1 intersect select * from t2 intersect select * from t1;
+id estRows task access object operator info
+HashJoin 5120.00 root semi join, equal:[nulleq(executor__executor.t1.a, executor__executor.t1.a)]
+├─TableReader(Build) 10000.00 root data:TableFullScan
+│ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
+└─HashJoin(Probe) 6400.00 root semi join, equal:[nulleq(executor__executor.t1.a, executor__executor.t2.a)]
+  ├─TableReader(Build) 10000.00 root data:TableFullScan
+  │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo
+  └─HashAgg(Probe) 8000.00 root group by:executor__executor.t1.a, funcs:firstrow(executor__executor.t1.a)->executor__executor.t1.a
+    └─TableReader 8000.00 root data:HashAgg
+      └─HashAgg 8000.00 cop[tikv] group by:executor__executor.t1.a,
+        └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
+select * from t1 intersect select * from t2 intersect select * from t1;
+a
+NULL
+1
+2
+explain format='brief' select * from t1 union all select * from t2 intersect select * from t3;
+id estRows task access object operator info
+Union 16400.00 root
+├─TableReader 10000.00 root data:TableFullScan
+│ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
+└─HashJoin 6400.00 root semi join, equal:[nulleq(executor__executor.t2.a, executor__executor.t3.a)]
+  ├─TableReader(Build) 10000.00 root data:TableFullScan
+  │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo
+  └─HashAgg(Probe) 8000.00 root group by:executor__executor.t2.a, funcs:firstrow(executor__executor.t2.a)->executor__executor.t2.a
+    └─TableReader 8000.00 root data:HashAgg
+      └─HashAgg 8000.00 cop[tikv] group by:executor__executor.t2.a,
+        └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo
+select * from t1 union all select * from t2 intersect select * from t3;
+a
+NULL
+1
+1
+2
+2
+3
+explain format='brief' select * from t1 except select * from t2 intersect select * from t3;
+id estRows task access object operator info
+HashJoin 6400.00 root anti semi join, equal:[nulleq(executor__executor.t1.a, executor__executor.t2.a)]
+├─HashJoin(Build) 6400.00 root semi join, equal:[nulleq(executor__executor.t2.a, executor__executor.t3.a)]
+│ ├─TableReader(Build) 10000.00 root data:TableFullScan
+│ │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo
+│ └─HashAgg(Probe) 8000.00 root group by:executor__executor.t2.a, funcs:firstrow(executor__executor.t2.a)->executor__executor.t2.a
+│   └─TableReader 8000.00 root data:HashAgg
+│     └─HashAgg 8000.00 cop[tikv] group by:executor__executor.t2.a,
+│       └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo
+└─HashAgg(Probe) 8000.00 root group by:executor__executor.t1.a, funcs:firstrow(executor__executor.t1.a)->executor__executor.t1.a
+  └─TableReader 8000.00 root data:HashAgg
+    └─HashAgg 8000.00 cop[tikv] group by:executor__executor.t1.a,
+      └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
+select * from t1 except select * from t2 intersect select * from t3;
+a
+NULL
+1
+3
+explain format='brief' select * from t1 intersect (select * from t2 except (select * from t3));
+id estRows task access object operator info
+HashJoin 6400.00 root semi join, equal:[nulleq(executor__executor.t1.a, executor__executor.t2.a)]
+├─HashJoin(Build) 6400.00 root anti semi join, equal:[nulleq(executor__executor.t2.a, executor__executor.t3.a)]
+│ ├─TableReader(Build) 10000.00 root data:TableFullScan
+│ │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo
+│ └─HashAgg(Probe) 8000.00 root group by:executor__executor.t2.a, funcs:firstrow(executor__executor.t2.a)->executor__executor.t2.a
+│   └─TableReader 8000.00 root data:HashAgg
+│     └─HashAgg 8000.00 cop[tikv] group by:executor__executor.t2.a,
+│       └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo
+└─HashAgg(Probe) 8000.00 root group by:executor__executor.t1.a, funcs:firstrow(executor__executor.t1.a)->executor__executor.t1.a
+  └─TableReader 8000.00 root data:HashAgg
+    └─HashAgg 8000.00 cop[tikv] group by:executor__executor.t1.a,
+      └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
+select * from t1 intersect (select * from t2 except (select * from t3));
+a
+NULL
+1
+explain format='brief' select * from t1 union all (select * from t2 except select * from t3);
+id estRows task access object operator info
+Union 16400.00 root
+├─TableReader 10000.00 root data:TableFullScan
+│ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
+└─HashJoin 6400.00 root anti semi join, equal:[nulleq(executor__executor.t2.a, executor__executor.t3.a)]
+  ├─TableReader(Build) 10000.00 root data:TableFullScan
+  │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo
+  └─HashAgg(Probe) 8000.00 root group by:executor__executor.t2.a, funcs:firstrow(executor__executor.t2.a)->executor__executor.t2.a
+    └─TableReader 8000.00 root data:HashAgg
+      └─HashAgg 8000.00 cop[tikv] group by:executor__executor.t2.a,
+        └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo
+select * from t1 union all (select * from t2 except select * from t3);
+a
+NULL
+NULL
+1
+1
+1
+2
+3
+explain format='brief' select * from t1 union (select * from t2 union all select * from t3);
+id estRows task access object operator info
+HashAgg 24000.00 root group by:Column#8, funcs:firstrow(Column#8)->Column#8
+└─Union 30000.00 root
+  ├─TableReader 10000.00 root data:TableFullScan
+  │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
+  └─Union 20000.00 root
+    ├─TableReader 10000.00 root data:TableFullScan
+    │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo
+    └─TableReader 10000.00 root data:TableFullScan
+      └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo
+select * from t1 union (select * from t2 union all select * from t3);
+a
+NULL
+1
+2
+3
+explain format='brief' (select * from t1 intersect select * from t1) except (select * from t2 union select * from t3);
+id estRows task access object operator info
+HashJoin 5120.00 root anti semi join, equal:[nulleq(executor__executor.t1.a, Column#9)]
+├─HashAgg(Build) 16000.00 root group by:Column#9, funcs:firstrow(Column#9)->Column#9
+│ └─Union 20000.00 root
+│   ├─TableReader 10000.00 root data:TableFullScan
+│   │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo
+│   └─TableReader 10000.00 root data:TableFullScan
+│     └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo
+└─HashJoin(Probe) 6400.00 root semi join, equal:[nulleq(executor__executor.t1.a, executor__executor.t1.a)]
+  ├─TableReader(Build) 10000.00 root data:TableFullScan
+  │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
+  └─HashAgg(Probe) 8000.00 root group by:executor__executor.t1.a, funcs:firstrow(executor__executor.t1.a)->executor__executor.t1.a
+    └─TableReader 8000.00 root data:HashAgg
+      └─HashAgg 8000.00 cop[tikv] group by:executor__executor.t1.a,
+        └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
+(select * from t1 intersect select * from t1) except (select * from t2 union select * from t3);
+a
+drop table if exists issue40279;
+CREATE TABLE `issue40279` (`a` char(155) NOT NULL DEFAULT 'on1unvbxp5sko6mbetn3ku26tuiyju7w3wc0olzto9ew7gsrx',`b` mediumint(9) NOT NULL DEFAULT '2525518',PRIMARY KEY (`b`,`a`) /*T![clustered_index] CLUSTERED */);
+insert into `issue40279` values ();
+( select `issue40279`.`b` as r0 , from_base64( `issue40279`.`a` ) as r1 from `issue40279` ) except ( select `issue40279`.`a` as r0 , elt(2, `issue40279`.`a` , `issue40279`.`a` ) as r1 from `issue40279`);
+r0 r1
+2525518 NULL
+drop table if exists t2;
+CREATE TABLE `t2` ( `a` varchar(20) CHARACTER SET gbk COLLATE gbk_chinese_ci DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
+insert into t2 values(0xCED2);
+(select elt(2,t2.a,t2.a) from t2) except (select 0xCED2 from t2);
+elt(2,t2.a,t2.a)
+drop table if exists t;
+create table t(a datetime, b bigint, c bigint);
+insert into t values(cast('2023-08-09 00:00:00' as datetime), 20230809, 20231310);
+select a > 20230809 from t;
+a > 20230809
+0
+select a = 20230809 from t;
+a = 20230809
+1
+select a < 20230810 from t;
+a < 20230810
+1
+select a < 20231310 from t;
+a < 20231310
+0
+select 20230809 < a from t;
+20230809 < a
+0
+select 20230809 = a from t;
+20230809 = a
+1
+select 20230810 > a from t;
+20230810 > a
+1
+select 20231310 > a from t;
+20231310 > a
+0
+select cast('2023-08-09 00:00:00' as datetime) > 20230809 from t;
+cast('2023-08-09 00:00:00' as datetime) > 20230809
+1
+select cast('2023-08-09 00:00:00' as datetime) = 20230809 from t;
+cast('2023-08-09 00:00:00' as datetime) = 20230809
+0
+select cast('2023-08-09 00:00:00' as datetime) < 20230810 from t;
+cast('2023-08-09 00:00:00' as datetime) < 20230810
+0
+select cast('2023-08-09 00:00:00' as datetime) < 20231310 from t;
+cast('2023-08-09 00:00:00' as datetime) < 20231310
+0
+select 20230809 < cast('2023-08-09 00:00:00' as datetime) from t;
+20230809 < cast('2023-08-09 00:00:00' as datetime)
+1
+select 20230809 = cast('2023-08-09 00:00:00' as datetime) from t;
+20230809 = cast('2023-08-09 00:00:00' as datetime)
+0
+select 20230810 > cast('2023-08-09 00:00:00' as datetime) from t;
+20230810 > cast('2023-08-09 00:00:00' as datetime)
+0
+select 20231310 > cast('2023-08-09 00:00:00' as datetime) from t;
+20231310 > cast('2023-08-09 00:00:00' as datetime)
+0
+select a > b from t;
+a > b
+1
+select a = b from t;
+a = b
+0
+select a < b + 1 from t;
+a < b + 1
+0
+select a < c from t;
+a < c
+0
+select b < a from t;
+b < a
+1
+select b = a from t;
+b = a
+0
+select b > a from t;
+b > a
+0
+select c > a from t;
+c > a
+0
+load stats;
+Error 1064 (42000): You have an error in your SQL syntax; check the manual that corresponds to your TiDB version for the right syntax to use line 1 column 11 near ";"
+load stats ./xxx.json;
+Error 1064 (42000): You have an error in your SQL syntax; check the manual that corresponds to your TiDB version for the right syntax to use line 1 column 12 near "./xxx.json;"
+drop database if exists test_show;
+create database test_show;
+use test_show;
+show engines;
+Engine Support Comment Transactions XA Savepoints
+InnoDB DEFAULT Supports transactions, row-level locking, and foreign keys YES YES YES
+drop table if exists t;
+create table t(a int primary key);
+show index in t;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment Visible Expression Clustered Global
+t 0 PRIMARY 1 a A 0 NULL NULL BTREE YES NULL YES NO
+show index from t;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment Visible Expression Clustered Global
+t 0 PRIMARY 1 a A 0 NULL NULL BTREE YES NULL YES NO
+show master status;
+File Position Binlog_Do_DB Binlog_Ignore_DB Executed_Gtid_Set
+tidb-binlog 0
+show create database test_show;
+Database Create Database
+test_show CREATE DATABASE `test_show` /*!40100 DEFAULT CHARACTER SET utf8mb4 */
+show privileges;
+Privilege Context Comment
+Alter Tables To alter the table
+Alter routine Functions,Procedures To alter or drop stored functions/procedures
+Config Server Admin To use SHOW CONFIG and SET CONFIG statements
+Create Databases,Tables,Indexes To create new databases and tables
+Create routine Databases To use CREATE FUNCTION/PROCEDURE
+Create role Server Admin To create new roles
+Create temporary tables Databases To use CREATE TEMPORARY TABLE
+Create view Tables To create new views
+Create user Server Admin To create new users
+Delete Tables To delete existing rows
+Drop Databases,Tables To drop databases, tables, and views
+Drop role Server Admin To drop roles
+Event Server Admin To create, alter, drop and execute events
+Execute Functions,Procedures To execute stored routines
+File File access on server To read and write files on the server
+Grant option Databases,Tables,Functions,Procedures To give to other users those privileges you possess
+Index Tables To create or drop indexes
+Insert Tables To insert data into tables
+Lock tables Databases To use LOCK TABLES (together with SELECT privilege)
+Process Server Admin To view the plain text of currently executing queries
+Proxy Server Admin To make proxy user possible
+References Databases,Tables To have references on tables
+Reload Server Admin To reload or refresh tables, logs and privileges
+Replication client Server Admin To ask where the slave or master servers are
+Replication slave Server Admin To read binary log events from the master
+Select Tables To retrieve rows from table
+Show databases Server Admin To see all databases with SHOW DATABASES
+Show view Tables To see views with SHOW CREATE VIEW
+Shutdown Server Admin To shut down the server
+Super Server Admin To use KILL thread, SET GLOBAL, CHANGE MASTER, etc.
+Trigger Tables To use triggers
+Create tablespace Server Admin To create/alter/drop tablespaces
+Update Tables To update existing rows
+Usage Server Admin No privileges - allow connect only
+BACKUP_ADMIN Server Admin
+RESTORE_ADMIN Server Admin
+SYSTEM_USER Server Admin
+SYSTEM_VARIABLES_ADMIN Server Admin
+ROLE_ADMIN Server Admin
+CONNECTION_ADMIN Server Admin
+PLACEMENT_ADMIN Server Admin
+DASHBOARD_CLIENT Server Admin
+RESTRICTED_TABLES_ADMIN Server Admin
+RESTRICTED_STATUS_ADMIN Server Admin
+RESTRICTED_VARIABLES_ADMIN Server Admin
+RESTRICTED_USER_ADMIN Server Admin
+RESTRICTED_CONNECTION_ADMIN Server Admin
+RESTRICTED_REPLICA_WRITER_ADMIN Server Admin
+RESOURCE_GROUP_ADMIN Server Admin
+RESOURCE_GROUP_USER Server Admin
+show table status;
+Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
+t InnoDB 10 Compact 0 0 0 0 0 0 NULL 0 NULL NULL utf8mb4_bin
+drop database test_show;
+use executor__executor;
+select \N;
+NULL
+NULL
+select "\N";
+N
+N
+drop table if exists test;
+create table test (`\N` int);
+insert into test values (1);
+select * from test;
+\N
+1
+select \N from test;
+NULL
+NULL
+select (\N) from test;
+NULL
+NULL
+select `\N` from test;
+\N
+1
+select (`\N`) from test;
+\N
+1
+select '\N' from test;
+N
+N
+select ('\N') from test;
+N
+N
+select nUll;
+NULL
+NULL
+select (null);
+NULL
+NULL
+select null+NULL;
+null+NULL
+NULL
+select 'abc';
+abc
+abc
+select (('abc'));
+abc
+abc
+select 'abc'+'def';
+'abc'+'def'
+0
+select '\n';
+
+
+
+select '\t col';
+col
+ col
+select '\t Col';
+Col
+ Col
+select '\n\t 中文 col';
+中文 col
+
+ 中文 col
+select ' \r\n .col';
+.col
+
+ .col
+select ' 😆col';
+😆col
+ 😆col
+select 'abc ';
+abc
+abc
+select ' abc 123 ';
+abc 123
+ abc 123
+select 'a' ' ' 'string';
+a
+a string
+select 'a' " " "string";
+a
+a string
+select 'string' 'string';
+string
+stringstring
+select "ss" "a";
+ss
+ssa
+select "ss" "a" "b";
+ss
+ssab
+select "ss" "a" ' ' "b";
+ss
+ssa b
+select "ss" "a" ' ' "b" ' ' "d";
+ss
+ssa b d
+drop table if exists a, b;
+create table a (k1 int, k2 int, v int);
+create table b (a int not null, k1 int, k2 int, v int, primary key(k1, k2) );
+insert into a values (1, 1, 1), (2, 2, 2);
+insert into b values (2, 2, 2, 2);
+update a left join b on a.k1 = b.k1 and a.k2 = b.k2 set a.v = 20, b.v = 100, a.k1 = a.k1 + 1, b.k1 = b.k1 + 1, a.k2 = a.k2 + 2, b.k2 = b.k2 + 2;
+select * from b;
+a k1 k2 v
+2 3 4 100
+select * from a;
+k1 k2 v
+2 3 20
+3 4 20
+admin check table a;
+admin check table b;
+drop table if exists a, b;
+create table a (k1 int, k2 int, v int);
+create table b (a int not null, k1 int, k2 int, v int, primary key(k1, k2) );
+insert into a values (1, 1, 1), (2, 2, 2);
+insert into b values (2, 2, 2, 2);
+update a left join b on a.k1 = b.k1 and a.k2 = b.k2 set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, b.k1 = b.k1 + 1, b.k2 = b.k2 + 2, a.v = 20, b.v = 100;
+select * from b;
+a k1 k2 v
+2 3 4 100
+select * from a;
+k1 k2 v
+2 3 20
+3 4 20
+admin check table a;
+admin check table b;
+drop table if exists a, b;
+create table a (k1 varchar(100), k2 varchar(100), v varchar(100));
+create table b (a varchar(100) not null, k1 varchar(100), k2 varchar(100), v varchar(100), primary key(k1(1), k2(1)) , key kk1(k1(1), v(1)));
+insert into a values ('11', '11', '11'), ('22', '22', '22');
+insert into b values ('22', '22', '22', '22');
+update a left join b on a.k1 = b.k1 and a.k2 = b.k2 set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, b.k1 = b.k1 + 1, b.k2 = b.k2 + 2, a.v = 20, b.v = 100;
+select * from b;
+a k1 k2 v
+22 23 24 100
+select * from a;
+k1 k2 v
+12 13 20
+23 24 20
+admin check table a;
+admin check table b;
+drop table if exists a, b;
+create table a (k1 varchar(100), k2 varchar(100), v varchar(100));
+create table b (a varchar(100) not null, k1 varchar(100), k2 varchar(100), v varchar(100), primary key(k1(1), k2(1)) , key kk1(k1(1), v(1)));
+insert into a values ('11', '11', '11'), ('22', '22', '22');
+insert into b values ('22', '22', '22', '22');
+update b right join a on a.k1 = b.k1 and a.k2 = b.k2 set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, b.k1 = b.k1 + 1, b.k2 = b.k2 + 2, a.v = 20, b.v = 100;
+select * from b;
+a k1 k2 v
+22 23 24 100
+select * from a;
+k1 k2 v
+12 13 20
+23 24 20
+admin check table a;
+admin check table b;
+drop table if exists a, b;
+create table a (k1 varchar(100), k2 varchar(100), v varchar(100));
+create table b (a varchar(100) not null, k1 varchar(100), k2 varchar(100), v varchar(100), primary key(k1(1), k2(1)) , key kk1(k1(1), v(1)));
+insert into a values ('11', '11', '11'), ('22', '22', '22');
+insert into b values ('22', '22', '22', '22');
+update b join a on a.k1 = b.k1 and a.k2 = b.k2 set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, b.k1 = b.k1 + 1, b.k2 = b.k2 + 2, a.v = 20, b.v = 100;
+select * from b;
+a k1 k2 v
+22 23 24 100
+select * from a;
+k1 k2 v
+11 11 11
+23 24 20
+admin check table a;
+admin check table b;
+drop table if exists a, b;
+create table a (k1 varchar(100), k2 varchar(100), v varchar(100));
+create table b (a varchar(100) not null, k1 varchar(100), k2 varchar(100), v varchar(100), primary key(k1(1), k2(1)) , key kk1(k1(1), v(1)));
+insert into a values ('11', '11', '11'), ('22', '22', '22');
+insert into b values ('22', '22', '22', '22');
+update a set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, a.v = 20 where exists (select 1 from b where a.k1 = b.k1 and a.k2 = b.k2);
+select * from b;
+a k1 k2 v
+22 22 22 22
+select * from a;
+k1 k2 v
+11 11 11
+23 24 20
+admin check table a;
+admin check table b;
+drop table if exists a, b;
+create table a (k1 int, k2 int, v int);
+create table b (a int not null, k1 int, k2 int, v int, primary key(k1, k2) clustered);
+insert into a values (1, 1, 1), (2, 2, 2);
+insert into b values (2, 2, 2, 2);
+update a left join b on a.k1 = b.k1 and a.k2 = b.k2 set a.v = 20, b.v = 100, a.k1 = a.k1 + 1, b.k1 = b.k1 + 1, a.k2 = a.k2 + 2, b.k2 = b.k2 + 2;
+select * from b;
+a k1 k2 v
+2 3 4 100
+select * from a;
+k1 k2 v
+2 3 20
+3 4 20
+admin check table a;
+admin check table b;
+drop table if exists a, b;
+create table a (k1 int, k2 int, v int);
+create table b (a int not null, k1 int, k2 int, v int, primary key(k1, k2) clustered);
+insert into a values (1, 1, 1), (2, 2, 2);
+insert into b values (2, 2, 2, 2);
+update a left join b on a.k1 = b.k1 and a.k2 = b.k2 set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, b.k1 = b.k1 + 1, b.k2 = b.k2 + 2, a.v = 20, b.v = 100;
+select * from b;
+a k1 k2 v
+2 3 4 100
+select * from a;
+k1 k2 v
+2 3 20
+3 4 20
+admin check table a;
+admin check table b;
+drop table if exists a, b;
+create table a (k1 varchar(100), k2 varchar(100), v varchar(100));
+create table b (a varchar(100) not null, k1 varchar(100), k2 varchar(100), v varchar(100), primary key(k1(1), k2(1)) clustered, key kk1(k1(1), v(1)));
+insert into a values ('11', '11', '11'), ('22', '22', '22');
+insert into b values ('22', '22', '22', '22');
+update a left join b on a.k1 = b.k1 and a.k2 = b.k2 set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, b.k1 = b.k1 + 1, b.k2 = b.k2 + 2, a.v = 20, b.v = 100;
+select * from b;
+a k1 k2 v
+22 23 24 100
+select * from a;
+k1 k2 v
+12 13 20
+23 24 20
+admin check table a;
+admin check table b;
+drop table if exists a, b;
+create table a (k1 varchar(100), k2 varchar(100), v varchar(100));
+create table b (a varchar(100) not null, k1 varchar(100), k2 varchar(100), v varchar(100), primary key(k1(1), k2(1)) clustered, key kk1(k1(1), v(1)));
+insert into a values ('11', '11', '11'), ('22', '22', '22');
+insert into b values ('22', '22', '22', '22');
+update b right join a on a.k1 = b.k1 and a.k2 = b.k2 set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, b.k1 = b.k1 + 1, b.k2 = b.k2 + 2, a.v = 20, b.v = 100;
+select * from b;
+a k1 k2 v
+22 23 24 100
+select * from a;
+k1 k2 v
+12 13 20
+23 24 20
+admin check table a;
+admin check table b;
+drop table if exists a, b;
+create table a (k1 varchar(100), k2 varchar(100), v varchar(100));
+create table b (a varchar(100) not null, k1 varchar(100), k2 varchar(100), v varchar(100), primary key(k1(1), k2(1)) clustered, key kk1(k1(1), v(1)));
+insert into a values ('11', '11', '11'), ('22', '22', '22');
+insert into b values ('22', '22', '22', '22');
+update b join a on a.k1 = b.k1 and a.k2 = b.k2 set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, b.k1 = b.k1 + 1, b.k2 = b.k2 + 2, a.v = 20, b.v = 100;
+select * from b;
+a k1 k2 v
+22 23 24 100
+select * from a;
+k1 k2 v
+11 11 11
+23 24 20
+admin check table a;
+admin check table b;
+drop table if exists a, b;
+create table a (k1 varchar(100), k2 varchar(100), v varchar(100));
+create table b (a varchar(100) not null, k1 varchar(100), k2 varchar(100), v varchar(100), primary key(k1(1), k2(1)) clustered, key kk1(k1(1), v(1)));
+insert into a values ('11', '11', '11'), ('22', '22', '22');
+insert into b values ('22', '22', '22', '22');
+update a set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, a.v = 20 where exists (select 1 from b where a.k1 = b.k1 and a.k2 = b.k2);
+select * from b;
+a k1 k2 v
+22 22 22 22
+select * from a;
+k1 k2 v
+11 11 11
+23 24 20
+admin check table a;
+admin check table b;
+set @@tidb_enable_clustered_index=On;
+drop table if exists t;
+create table t (a int, b int, c int, primary key(a,b));
+explain format = 'brief' select t1.a from t t1 left join t t2 on t1.a = t2.a and t1.b = t2.b;
+id estRows task access object operator info
+TableReader 10000.00 root data:TableFullScan
+└─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
+set @@tidb_enable_clustered_index=default;
+drop table if exists t;
+create table t (c1 bit(2));
+insert into t values (0), (1), (2), (3);
+insert into t values (4);
+Error 1406 (22001): Data too long for column 'c1' at row 1
+insert into t values ('a');
+Error 1406 (22001): Data too long for column 'c1' at row 1
+select hex(c1) from t where c1 = 2;
+hex(c1)
+2
+drop table if exists t;
+create table t (c1 bit(31));
+insert into t values (0x7fffffff);
+insert into t values (0x80000000);
+Error 1406 (22001): Data too long for column 'c1' at row 1
+insert into t values (0xffffffff);
+Error 1406 (22001): Data too long for column 'c1' at row 1
+insert into t values ('123');
+insert into t values ('1234');
+insert into t values ('12345);
+Error 1064 (42000): You have an error in your SQL syntax; check the manual that corresponds to your TiDB version for the right syntax to use line 1 column 30 near "'12345);"
+drop table if exists t;
+create table t (c1 bit(62));
+insert into t values ('12345678');
+drop table if exists t;
+create table t (c1 bit(61));
+insert into t values ('12345678');
+Error 1406 (22001): Data too long for column 'c1' at row 1
+drop table if exists t;
+create table t (c1 bit(32));
+insert into t values (0x7fffffff);
+insert into t values (0xffffffff);
+insert into t values (0x1ffffffff);
+Error 1406 (22001): Data too long for column 'c1' at row 1
+insert into t values ('1234');
+insert into t values ('12345');
+Error 1406 (22001): Data too long for column 'c1' at row 1
+drop table if exists t;
+create table t (c1 bit(64));
+insert into t values (0xffffffffffffffff);
+insert into t values ('12345678');
+insert into t values ('123456789');
+Error 1366 (HY000): Incorrect bit value: '123456789' for column 'c1' at row 1
+drop table if exists t;
+create table t (c1 bit(64));
+insert into t values (0xffffffffffffffff);
+insert into t values ('12345678');
+select hex(c1) from t where c1;
+hex(c1)
+FFFFFFFFFFFFFFFF
+3132333435363738
+drop table if exists t, t1;
+create table t (ts timestamp);
+set time_zone = '+00:00';
+insert into t values ('2017-04-27 22:40:42');
+set time_zone = '+10:00';
+select * from t;
+ts
+2017-04-28 08:40:42
+set time_zone = '-6:00';
+select * from t;
+ts
+2017-04-27 16:40:42
+drop table if exists t1;
+CREATE TABLE t1 (
+id bigint(20) NOT NULL AUTO_INCREMENT,
+uid int(11) DEFAULT NULL,
+datetime timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ip varchar(128) DEFAULT NULL,
+PRIMARY KEY (id),
+KEY i_datetime (datetime),
+KEY i_userid (uid)
+);
+INSERT INTO t1 VALUES (123381351,1734,"2014-03-31 08:57:10","127.0.0.1");
+select datetime from t1;
+datetime
+2014-03-31 08:57:10
+select datetime from t1 where datetime='2014-03-31 08:57:10';
+datetime
+2014-03-31 08:57:10
+select * from t1 where datetime='2014-03-31 08:57:10';
+id uid datetime ip
+123381351 1734 2014-03-31 08:57:10 127.0.0.1
+set time_zone = 'Asia/Shanghai';
+drop table if exists t1;
+CREATE TABLE t1 (
+id bigint(20) NOT NULL AUTO_INCREMENT,
+datetime timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
+PRIMARY KEY (id)
+);
+INSERT INTO t1 VALUES (123381351,"2014-03-31 08:57:10");
+select * from t1 where datetime="2014-03-31 08:57:10";
+id datetime
+123381351 2014-03-31 08:57:10
+alter table t1 add key i_datetime (datetime);
+select * from t1 where datetime="2014-03-31 08:57:10";
+id datetime
+123381351 2014-03-31 08:57:10
+select * from t1;
+id datetime
+123381351 2014-03-31 08:57:10
+select datetime from t1 where datetime='2014-03-31 08:57:10';
+datetime
+2014-03-31 08:57:10
+set time_zone=default;
+drop table if exists t2;
+create table t2(a int, b int, c int);
+insert into t2 values (11, 8, (select not b));
+Error 1054 (42S22): Unknown column 'b' in 'field list'
+insert into t2 set a = 11, b = 8, c = (select b));
+Error 1064 (42000): You have an error in your SQL syntax; check the manual that corresponds to your TiDB version for the right syntax to use line 1 column 49 near ");"
+insert into t2 values(1, 1, (select b from t2));
+select * from t2;
+a b c
+1 1 NULL
+insert into t2 set a = 1, b = 1, c = (select b+1 from t2);
+select * from t2;
+a b c
+1 1 NULL
+1 1 2
+delete from t2;
+insert into t2 values(2, 4, a);
+select * from t2;
+a b c
+2 4 2
+insert into t2 set a = 3, b = 5, c = b;
+select * from t2;
+a b c
+2 4 2
+3 5 5
+drop table if exists t;
+create table t(a int, b int);
+insert into t values ( 81, ( select ( SELECT '1' AS `c0` WHERE '1' >= `subq_0`.`c0` ) as `c1` FROM ( SELECT '1' AS `c0` ) AS `subq_0` ) );
+Error 1105 (HY000): Insert's SET operation or VALUES_LIST doesn't support complex subqueries now
+insert into t set a = 81, b = (select ( SELECT '1' AS `c0` WHERE '1' >= `subq_0`.`c0` ) as `c1` FROM ( SELECT '1' AS `c0` ) AS `subq_0` );
+Error 1105 (HY000): Insert's SET operation or VALUES_LIST doesn't support complex subqueries now
+drop table if exists t2;
+drop table if exists t;
+create table t (id bit(16), key id(id));
+insert into t values (65);
+select * from t where id not in (-1,2);
+id
+A
+select * from t where id in (-1, -2);
+Error 1582 (42000): Incorrect parameter count in the call to native function 'in'
+drop table if exists t;
+drop table if exists t1;
+create table t(k1 int, v bit(34) DEFAULT b'111010101111001001100111101111111', primary key(k1) clustered);
+create table t1(k1 int, v bit(34) DEFAULT b'111010101111001001100111101111111', primary key(k1) nonclustered);
+insert into t(k1) select 1;
+insert into t1(k1) select 1;
+set @@tidb_enable_vectorized_expression = 0;
+(select k1, hex(v) from t where false) union(select k1, hex(v) from t for update);
+k1 hex(v)
+1 1D5E4CF7F
+(select k1, hex(v) from t1 where false) union(select k1, hex(v) from t1 for update);
+k1 hex(v)
+1 1D5E4CF7F
+set @@tidb_enable_vectorized_expression = 1;
+(select k1, hex(v) from t where false) union(select k1, hex(v) from t for update);
+k1 hex(v)
+1 1D5E4CF7F
+(select k1, hex(v) from t1 where false) union(select k1, hex(v) from t1 for update);
+k1 hex(v)
+1 1D5E4CF7F
+set @@tidb_enable_vectorized_expression = default;
+drop table if exists t;
+drop view if exists v;
+create table t(a int);
+insert into t values(1), (2), (3);
+create definer='root'@'localhost' view v as select count(*) as c1 from t;
+select * from v;
+c1
+3
+drop view v;
+create definer='root'@'localhost' view v as select * from (select count(*) from t) s;
+select * from v order by 1;
+count(*)
+3
+drop view v;
+create definer='root'@'localhost' view v as select * from (select avg(a) from t group by a) s;
+select * from v order by 1;
+avg(a)
+1.0000
+2.0000
+3.0000
+drop view v;
+create definer='root'@'localhost' view v as select * from (select sum(a) from t group by a) s;
+select * from v order by 1;
+sum(a)
+1
+2
+3
+drop view v;
+create definer='root'@'localhost' view v as select * from (select group_concat(a) from t group by a) s;
+select * from v order by 1;
+group_concat(a)
+1
+2
+3
+drop view v;
+create definer='root'@'localhost' view v as select * from (select count(0) as c1 from t) s;
+select * from v order by 1;
+c1
+3
+drop view v;
+create definer='root'@'localhost' view v as select * from (select count(*) as c1 from t) s;
+select * from v order by 1;
+c1
+3
+drop view v;
+create definer='root'@'localhost' view v as select * from (select group_concat(a) as `concat(a)` from t group by a) s;
+select * from v order by 1;
+concat(a)
+1
+2
+3
+drop view v;
+create definer='root'@'localhost' view v as select * from (select a from t group by a) s;
+select * from v order by 1;
+a
+1
+2
+3
+SELECT `s`.`count(a)` FROM (SELECT COUNT(`a`) FROM `executor__executor`.`t`) AS `s`;
+Error 1054 (42S22): Unknown column 's.count(a)' in 'field list'
+drop view v;
+create definer='root'@'localhost' view v as select * from (select count(a) from t) s;
+select * from v;
+count(a)
+3
+drop table if exists t;
+create table t(c1 int);
+insert into t values(111), (222), (333);
+drop view if exists v;
+create definer='root'@'localhost' view v as (select * from (select row_number() over (order by c1) from t) s);
+select * from v;
+row_number() over (order by c1)
+1
+2
+3
+drop view if exists v;
+create definer='root'@'localhost' view v as (select * from (select c1, row_number() over (order by c1) from t) s);
+select * from v;
+c1 row_number() over (order by c1)
+111 1
+222 2
+333 3
+drop view if exists v;
+create definer='root'@'localhost' view v as (select * from (select c1 or 0 from t) s);
+select * from v;
+c1 or 0
+1
+1
+1
+select `c1 or 0` from v;
+c1 or 0
+1
+1
+1
+drop view v;
+drop table if exists t, t1, t2;
+create table t (a int(11) default null,b int(11) default null,key b (b),key ba (b));
+create table t1 (a int(11) default null,b int(11) default null,key idx_ab (a,b),key idx_a (a),key idx_b (b));
+create table t2 (a int(11) default null,b int(11) default null,key idx_ab (a,b),key idx_a (a),key idx_b (b));
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+DROP TABLE IF EXISTS admin_checksum_partition_test;
+CREATE TABLE admin_checksum_partition_test (a INT) PARTITION BY HASH(a) PARTITIONS 4;
+INSERT INTO admin_checksum_partition_test VALUES (1), (2);
+ADMIN CHECKSUM TABLE admin_checksum_partition_test;
+drop table if exists t;
+create table t (a tinyint not null);
+set sql_mode = 'STRICT_TRANS_TABLES';
+insert t values ();
+Error 1364 (HY000): Field 'a' doesn't have a default value
+insert t values ('1000');
+Error 1264 (22003): Out of range value for column 'a' at row 1
+create table if not exists tdouble (a double(3,2));
+insert tdouble values (10.23);
+Error 1264 (22003): Out of range value for column 'a' at row 1
+set sql_mode = '';
+insert t values ();
+show warnings;
+Level Code Message
+Warning 1364 Field 'a' doesn't have a default value
+insert t values (null);
+Error 1048 (23000): Column 'a' cannot be null
+insert ignore t values (null);
+show warnings;
+Level Code Message
+Warning 1048 Column 'a' cannot be null
+insert t select null;
+show warnings;
+Level Code Message
+Warning 1048 Column 'a' cannot be null
+insert t values (1000);
+select * from t order by a;
+a
+0
+0
+0
+127
+insert tdouble values (10.23);
+select * from tdouble;
+a
+9.99
+set sql_mode = 'STRICT_TRANS_TABLES';
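The block above is the behavioral core of #55477: with strict mode off, a single-row INSERT of NULL into a NOT NULL column is now rejected outright, while INSERT IGNORE and insert-from-select still degrade the error to warning 1048. A sketch of the same assertions in Go, assuming the usual testkit helpers (tk := testkit.NewTestKit(t, store)); this is not a test from the patch:

```go
tk.MustExec("set sql_mode = ''")                  // non-strict session
tk.MustGetErrCode("insert t values (null)", 1048) // single-row insert: still a hard error
tk.MustExec("insert ignore t values (null)")      // IGNORE: downgraded to warning 1048
tk.MustExec("insert t select null")               // insert ... select: downgraded to warning 1048
```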
+set @@global.sql_mode = '';
+drop table if exists t2;
+create table t2 (a varchar(3));
+insert t2 values ('abcd');
+select * from t2;
+a
+abc
+insert t2 values ('abcd');
+Error 1406 (22001): Data too long for column 'a' at row 1
+set sql_mode = default;
+set @@global.sql_mode = default;
+use information_schema;
+select count(*)>=4 from schemata;
+count(*)>=4
+1
+create database mytest;
+use information_schema;
+select * from schemata where schema_name = 'mysql';
+CATALOG_NAME SCHEMA_NAME DEFAULT_CHARACTER_SET_NAME DEFAULT_COLLATION_NAME SQL_PATH TIDB_PLACEMENT_POLICY_NAME
+def mysql utf8mb4 utf8mb4_bin NULL NULL
+select * from schemata where schema_name like 'my%';
+CATALOG_NAME SCHEMA_NAME DEFAULT_CHARACTER_SET_NAME DEFAULT_COLLATION_NAME SQL_PATH TIDB_PLACEMENT_POLICY_NAME
+def mysql utf8mb4 utf8mb4_bin NULL NULL
+def mytest utf8mb4 utf8mb4_bin NULL NULL
+select 1 from tables limit 1;
+1
+1
+use executor__executor;
+set @@sql_mode='NO_ZERO_DATE';
+select date_add('2001-01-00', interval -2 hour);
+date_add('2001-01-00', interval -2 hour)
+NULL
+show warnings;
+Level Code Message
+Warning 1292 Incorrect datetime value: '2001-01-00'
+set @@sql_mode=default;
+set @@sql_mode='NO_ZERO_DATE';
+drop table if exists t1;
+SELECT STR_TO_DATE('0000-1-01', '%Y-%m-%d');
+STR_TO_DATE('0000-1-01', '%Y-%m-%d')
+NULL
+show warnings;
+Level Code Message
+Warning 1411 Incorrect datetime value: '0000-1-01' for function str_to_date
+SELECT CAST('4#,8?Q' AS DATE);
+CAST('4#,8?Q' AS DATE)
+NULL
+show warnings;
+Level Code Message
+Warning 8034 Incorrect datetime value: '4#,8?Q'
+CREATE TABLE t1 (c1 INT, c2 TEXT);
+INSERT INTO t1 VALUES (1833458842, '0.3503490908550797');
+SELECT CAST(t1.c2 AS DATE) FROM t1;
+CAST(t1.c2 AS DATE)
+NULL
+show warnings;
+Level Code Message
+Warning 1292 Incorrect datetime value: '0.3503490908550797'
+set @@sql_mode=default;
+drop table if exists t;
+create table t(a decimal(10,2) unsigned);
+insert into t values (-1);
+Error 1264 (22003): Out of range value for column 'a' at row 1
+insert into t values ("-1.1e-1");
+Error 1264 (22003): Out of range value for column 'a' at row 1
+insert into t values (-1.1);
+Error 1264 (22003): Out of range value for column 'a' at row 1
+insert into t values (-0);
+set sql_mode='';
+delete from t;
+insert into t values (-1);
+select a from t limit 1;
+a
+0.00
+set sql_mode=default;
+drop table if exists t;
+create table t(a int);
+do 1 in (select * from t);
+insert into t values(1);
+do 1 in (select * from t);
+drop table if exists t;
+create table t(j JSON);
+insert into t values('2010');
+insert into t values('2011');
+insert into t values('2012');
+insert into t values('2010.000');
+insert into t values(cast(18446744073709551615 as JSON));
+insert into t values(cast(18446744073709551616.000000 as JSON));
+select count(distinct j) from t;
+count(distinct j)
+5
+drop table if exists t;
+create table t(id int(11), j JSON, d DOUBLE);
+insert into t values(0, '2010', 2010);
+insert into t values(1, '2011', 2011);
+insert into t values(2, '2012', 2012);
+insert into t values(3, cast(18446744073709551615 as JSON), 18446744073709551616.000000);
+select /*+inl_hash_join(t2)*/ t1.id, t2.id from t t1 join t t2 on t1.j = t2.d;
+id id
+0 0
+1 1
+2 2
+drop table if exists catalog_sales, store_sales, date_dim;
+create table catalog_sales
+(
+cs_sold_date_sk int ,
+cs_sold_time_sk int ,
+cs_ship_date_sk int ,
+cs_bill_customer_sk int ,
+cs_bill_cdemo_sk int ,
+cs_bill_hdemo_sk int ,
+cs_bill_addr_sk int ,
+cs_ship_customer_sk int ,
+cs_ship_cdemo_sk int ,
+cs_ship_hdemo_sk int ,
+cs_ship_addr_sk int ,
+cs_call_center_sk int , +cs_catalog_page_sk int , +cs_ship_mode_sk int , +cs_warehouse_sk int , +cs_item_sk int not null, +cs_promo_sk int , +cs_order_number int not null, +cs_quantity int , +cs_wholesale_cost decimal(7,2) , +cs_list_price decimal(7,2) , +cs_sales_price decimal(7,2) , +cs_ext_discount_amt decimal(7,2) , +cs_ext_sales_price decimal(7,2) , +cs_ext_wholesale_cost decimal(7,2) , +cs_ext_list_price decimal(7,2) , +cs_ext_tax decimal(7,2) , +cs_coupon_amt decimal(7,2) , +cs_ext_ship_cost decimal(7,2) , +cs_net_paid decimal(7,2) , +cs_net_paid_inc_tax decimal(7,2) , +cs_net_paid_inc_ship decimal(7,2) , +cs_net_paid_inc_ship_tax decimal(7,2) , +cs_net_profit decimal(7,2) , +primary key (cs_item_sk, cs_order_number) +); +create table store_sales +( +ss_sold_date_sk int , +ss_sold_time_sk int , +ss_item_sk int not null, +ss_customer_sk int , +ss_cdemo_sk int , +ss_hdemo_sk int , +ss_addr_sk int , +ss_store_sk int , +ss_promo_sk int , +ss_ticket_number int not null, +ss_quantity int , +ss_wholesale_cost decimal(7,2) , +ss_list_price decimal(7,2) , +ss_sales_price decimal(7,2) , +ss_ext_discount_amt decimal(7,2) , +ss_ext_sales_price decimal(7,2) , +ss_ext_wholesale_cost decimal(7,2) , +ss_ext_list_price decimal(7,2) , +ss_ext_tax decimal(7,2) , +ss_coupon_amt decimal(7,2) , +ss_net_paid decimal(7,2) , +ss_net_paid_inc_tax decimal(7,2) , +ss_net_profit decimal(7,2) , +primary key (ss_item_sk, ss_ticket_number) +); +create table date_dim +( +d_date_sk int not null, +d_date_id char(16) not null, +d_date date , +d_month_seq int , +d_week_seq int , +d_quarter_seq int , +d_year int , +d_dow int , +d_moy int , +d_dom int , +d_qoy int , +d_fy_year int , +d_fy_quarter_seq int , +d_fy_week_seq int , +d_day_name char(9) , +d_quarter_name char(6) , +d_holiday char(1) , +d_weekend char(1) , +d_following_holiday char(1) , +d_first_dom int , +d_last_dom int , +d_same_day_ly int , +d_same_day_lq int , +d_current_day char(1) , +d_current_week char(1) , +d_current_month char(1) , +d_current_quarter char(1) , +d_current_year char(1) , +primary key (d_date_sk) +); +plan replayer dump explain with ssci as ( +select ss_customer_sk customer_sk +,ss_item_sk item_sk +from store_sales,date_dim +where ss_sold_date_sk = d_date_sk +and d_month_seq between 1212 and 1212 + 11 +group by ss_customer_sk +,ss_item_sk), +csci as( +select cs_bill_customer_sk customer_sk +,cs_item_sk item_sk +from catalog_sales,date_dim +where cs_sold_date_sk = d_date_sk +and d_month_seq between 1212 and 1212 + 11 +group by cs_bill_customer_sk +,cs_item_sk) +select sum(case when ssci.customer_sk is not null and csci.customer_sk is null then 1 else 0 end) store_only +,sum(case when ssci.customer_sk is null and csci.customer_sk is not null then 1 else 0 end) catalog_only +,sum(case when ssci.customer_sk is not null and csci.customer_sk is not null then 1 else 0 end) store_and_catalog +from ssci left join csci on (ssci.customer_sk=csci.customer_sk +and ssci.item_sk = csci.item_sk) +UNION +select sum(case when ssci.customer_sk is not null and csci.customer_sk is null then 1 else 0 end) store_only +,sum(case when ssci.customer_sk is null and csci.customer_sk is not null then 1 else 0 end) catalog_only +,sum(case when ssci.customer_sk is not null and csci.customer_sk is not null then 1 else 0 end) store_and_catalog +from ssci right join csci on (ssci.customer_sk=csci.customer_sk +and ssci.item_sk = csci.item_sk) +limit 100; +admin show bdr role; +BDR_ROLE + +admin set bdr role primary; +admin show bdr role; +BDR_ROLE +primary +admin set bdr 
role secondary; +admin show bdr role; +BDR_ROLE +secondary +admin unset bdr role; +admin show bdr role; +BDR_ROLE + +admin set bdr role test_err; +Error 1064 (42000): You have an error in your SQL syntax; check the manual that corresponds to your TiDB version for the right syntax to use line 1 column 27 near "test_err;" +admin show bdr role; +BDR_ROLE + +admin unset bdr role; +set global tidb_mem_oom_action='CANCEL'; +drop table if exists t, t1; +create table t(a int, b int, index idx(a)); +create table t1(a int, c int, index idx(a)); +set tidb_mem_quota_query=10; +select t.a, t1.a from t use index(idx), t1 use index(idx) where t.a = t1.a; +Error 8175 (HY000): Your query has been cancelled due to exceeding the allowed memory limit for a single SQL query. Please try narrowing your query scope or increase the tidb_mem_quota_query limit and try again.[conn=] +set global tidb_mem_oom_action=default; +set tidb_mem_quota_query=default; +drop table if exists t, t1; +create table t (a int primary key, b double); +insert into t values (1,1); +SET GLOBAL tidb_mem_oom_action='CANCEL'; +set @@tidb_mem_quota_query=1; +select sum(b) from t group by a; +Error 8175 (HY000): Your query has been cancelled due to exceeding the allowed memory limit for a single SQL query. Please try narrowing your query scope or increase the tidb_mem_quota_query limit and try again.[conn=] +drop table if exists t,t1; +create table t (a bigint); +create table t1 (a bigint); +set @@tidb_mem_quota_query=200; +insert into t1 values (1),(2),(3),(4),(5); +Error 8175 (HY000): Your query has been cancelled due to exceeding the allowed memory limit for a single SQL query. Please try narrowing your query scope or increase the tidb_mem_quota_query limit and try again.[conn=] +replace into t1 values (1),(2),(3),(4),(5); +Error 8175 (HY000): Your query has been cancelled due to exceeding the allowed memory limit for a single SQL query. Please try narrowing your query scope or increase the tidb_mem_quota_query limit and try again.[conn=] +set @@tidb_mem_quota_query=10000; +insert into t1 values (1),(2),(3),(4),(5); +set @@tidb_mem_quota_query=10; +insert into t select a from t1 order by a desc; +Error 8175 (HY000): Your query has been cancelled due to exceeding the allowed memory limit for a single SQL query. Please try narrowing your query scope or increase the tidb_mem_quota_query limit and try again.[conn=] +replace into t select a from t1 order by a desc; +Error 8175 (HY000): Your query has been cancelled due to exceeding the allowed memory limit for a single SQL query. Please try narrowing your query scope or increase the tidb_mem_quota_query limit and try again.[conn=] +set @@tidb_mem_quota_query=10000; +insert into t values (1),(2),(3),(4),(5); +set @@tidb_mem_quota_query=244; +delete from t; +Error 8175 (HY000): Your query has been cancelled due to exceeding the allowed memory limit for a single SQL query. Please try narrowing your query scope or increase the tidb_mem_quota_query limit and try again.[conn=] +set @@tidb_mem_quota_query=10000; +delete from t1; +insert into t1 values(1); +insert into t values (1),(2),(3),(4),(5); +set @@tidb_mem_quota_query=244; +delete t, t1 from t join t1 on t.a = t1.a; +Error 8175 (HY000): Your query has been cancelled due to exceeding the allowed memory limit for a single SQL query. 
Please try narrowing your query scope or increase the tidb_mem_quota_query limit and try again.[conn=] +set @@tidb_mem_quota_query=100000; +truncate table t; +insert into t values(1),(2),(3); +set @@tidb_mem_quota_query=244; +update t set a = 4; +Error 8175 (HY000): Your query has been cancelled due to exceeding the allowed memory limit for a single SQL query. Please try narrowing your query scope or increase the tidb_mem_quota_query limit and try again.[conn=] +SET GLOBAL tidb_mem_oom_action = DEFAULT; +set @@tidb_mem_quota_query=DEFAULT; +drop table if exists t; +create table t(a int); +insert into t values(1); +set tidb_track_aggregate_memory_usage = off; +explain analyze select /*+ HASH_AGG() */ sum(a) from t; +id estRows actRows task access object execution info operator info memory disk +HashAgg_9 1.00 1 root funcs:sum(Column#4)->Column#3 N/A N/A +└─TableReader_10 1.00 1 root data:HashAgg_5 Bytes N/A + └─HashAgg_5 1.00 1 cop[tikv] funcs:sum(executor__executor.t.a)->Column#4 N/A N/A + └─TableFullScan_8 10000.00 1 cop[tikv] keep order:false, stats:pseudo N/A N/A +explain analyze select /*+ STREAM_AGG() */ sum(a) from t; +id estRows actRows task access object execution info operator info memory disk +StreamAgg_14 1.00 1 root funcs:sum(Column#4)->Column#3 N/A N/A +└─TableReader_15 1.00 1 root data:StreamAgg_8 Bytes N/A + └─StreamAgg_8 1.00 1 cop[tikv] funcs:sum(executor__executor.t.a)->Column#4 N/A N/A + └─TableFullScan_13 10000.00 1 cop[tikv] keep order:false, stats:pseudo N/A N/A +set tidb_track_aggregate_memory_usage = on; +explain analyze select /*+ HASH_AGG() */ sum(a) from t; +id estRows actRows task access object execution info operator info memory disk +HashAgg_9 1.00 1 root funcs:sum(Column#4)->Column#3 KB Bytes +└─TableReader_10 1.00 1 root data:HashAgg_5 Bytes N/A + └─HashAgg_5 1.00 1 cop[tikv] funcs:sum(executor__executor.t.a)->Column#4 N/A N/A + └─TableFullScan_8 10000.00 1 cop[tikv] keep order:false, stats:pseudo N/A N/A +explain analyze select /*+ STREAM_AGG() */ sum(a) from t; +id estRows actRows task access object execution info operator info memory disk +StreamAgg_14 1.00 1 root funcs:sum(Column#4)->Column#3 KB N/A +└─TableReader_15 1.00 1 root data:StreamAgg_8 Bytes N/A + └─StreamAgg_8 1.00 1 cop[tikv] funcs:sum(executor__executor.t.a)->Column#4 N/A N/A + └─TableFullScan_13 10000.00 1 cop[tikv] keep order:false, stats:pseudo N/A N/A +set tidb_track_aggregate_memory_usage = default; +drop table if exists testbind; +create table testbind(i int, s varchar(20)); +create index index_t on testbind(i,s); +create global binding for select * from testbind using select * from testbind use index for join(index_t); +show global bindings where default_db='executor__executor'; +Original_sql Bind_sql Default_db Status Create_time Update_time Charset Collation Source Sql_digest Plan_digest +select * from `executor__executor` . `testbind` SELECT * FROM `executor__executor`.`testbind` USE INDEX FOR JOIN (`index_t`) executor__executor enabled utf8mb4 utf8mb4_general_ci manual a2fa907992be17801e5976df09b5b3a0d205f4c4aff39a14ab3bc8642026f527 +create session binding for select * from testbind using select * from testbind use index for join(index_t); +show session bindings where default_db='executor__executor'; +Original_sql Bind_sql Default_db Status Create_time Update_time Charset Collation Source Sql_digest Plan_digest +select * from `executor__executor` . 
`testbind` SELECT * FROM `executor__executor`.`testbind` USE INDEX FOR JOIN (`index_t`) executor__executor enabled utf8mb4 utf8mb4_general_ci manual a2fa907992be17801e5976df09b5b3a0d205f4c4aff39a14ab3bc8642026f527 +drop session binding for select * from testbind using select * from testbind use index for join(index_t); +drop global binding for select * from testbind using select * from testbind use index for join(index_t); +drop table if EXISTS t1; +create table t1(id int primary key, a int, b int, c int, d int, index t1a(a), index t1b(b)); +insert into t1 values(1,1,1,1,1),(2,2,2,2,2),(3,3,3,3,3),(4,4,4,4,4),(5,5,5,5,5); +explain analyze select /*+ use_index_merge(t1, primary, t1a) */ * from t1 where id < 2 or a > 4; +id estRows actRows task access object execution info operator info memory disk +IndexMerge_8 3334.67 2 root NULL .*time:.*loops:.*index_task:{fetch_handle:.*, merge:.*}.*table_task:{num.*concurrency.*fetch_row.*wait_time.*}.* type: union KB N/A +├─TableRangeScan_5(Build) 3333.33 1 cop[tikv] table:t1 .*time:.*loops:.*cop_task:.* range:[-inf,2), keep order:false, stats:pseudo Bytes N/A +├─IndexRangeScan_6(Build) 3333.33 1 cop[tikv] table:t1, index:t1a(a) .*time:.*loops:.*cop_task:.* range:(4,+inf], keep order:false, stats:pseudo N/A N/A +└─TableRowIDScan_7(Probe) 3334.67 2 cop[tikv] table:t1 .*time:.*loops:.*cop_task:.* keep order:false, stats:pseudo N/A N/A +set @@tidb_enable_collect_execution_info=0; +select /*+ use_index_merge(t1, primary, t1a) */ * from t1 where id < 2 or a > 4 order by a; +id a b c d +1 1 1 1 1 +5 5 5 5 5 +set @@tidb_enable_collect_execution_info=default; +drop table if exists t1; +create table t1 (a int, b int, index(a)); +insert into t1 values (1,2),(2,3),(3,4); +explain analyze select * from t1 use index(a) where a > 1; +id estRows actRows task access object execution info operator info memory disk +IndexLookUp_7 3333.33 2 root NULL .*time:.*loops:.*index_task:.*table_task: {total_time.*num.*concurrency.*}.* NULL KB N/A +├─IndexRangeScan_5(Build) 3333.33 2 cop[tikv] table:t1, index:a(a) .*time:.*loops:.*cop_task:.* range:(1,+inf], keep order:false, stats:pseudo N/A N/A +└─TableRowIDScan_6(Probe) 3333.33 2 cop[tikv] table:t1 .*time:.*loops:.*cop_task:.* keep order:false, stats:pseudo N/A N/A +drop table if exists t1; +create table t1 (a int, b int); +insert into t1 values (1,2),(2,3),(3,4); +explain analyze SELECT /*+ HASH_AGG() */ count(*) FROM t1 WHERE a < 10; +id estRows actRows task access object execution info operator info memory disk +HashAgg_11 1.00 1 root NULL .*time:.*loops:.*partial_worker:{wall_time:.*concurrency:.*task_num:.*tot_wait:.*tot_exec:.*tot_time:.*max:.*p95:.*}.*final_worker:{wall_time:.*concurrency:.*task_num:.*tot_wait:.*tot_exec:.*tot_time:.*max:.*p95:.*}.* funcs:count(Column#5)->Column#4 KB Bytes +└─TableReader_12 1.00 1 root NULL time.*loops.*cop_task.* data:HashAgg_6 Bytes N/A + └─HashAgg_6 1.00 1 cop[tikv] NULL tikv_task:.* funcs:count(1)->Column#5 N/A N/A + └─Selection_10 3323.33 3 cop[tikv] NULL tikv_task:.* lt(executor__executor.t1.a, 10) N/A N/A + └─TableFullScan_9 10000.00 3 cop[tikv] table:t1 tikv_task:.* keep order:false, stats:pseudo N/A N/A +set global tidb_txn_mode=''; +drop table if exists t, t1; +create table t (c1 int, c2 int, c3 int); +insert t values (11, 2, 3); +insert t values (12, 2, 3); +insert t values (13, 2, 3); +create table t1 (c1 int); +insert t1 values (11); +begin; +select * from t where c1=11 for update; +c1 c2 c3 +11 2 3 +begin; +update t set c2=211 where c1=11; +commit; +commit; +Error 9007 
(HY000): Write conflict, reason=Optimistic [try again later] +begin; +select * from t where exists(select null from t1 where t1.c1=t.c1) for update; +c1 c2 c3 +11 211 3 +begin; +update t set c2=211 where c1=12; +commit; +commit; +begin; +select * from t where c1=11 for update; +c1 c2 c3 +11 211 3 +begin; +update t set c2=22 where c1=12; +commit; +commit; +set @@autocommit=1; +select * from t where c1=11 for update; +c1 c2 c3 +11 211 3 +begin; +update t set c2=211 where c1=11; +commit; +commit; +begin; +select * from (select * from t for update) t join t1 for update; +c1 c2 c3 c1 +11 211 3 11 +12 22 3 11 +13 2 3 11 +begin; +update t1 set c1 = 13; +commit; +commit; +Error 9007 (HY000): Write conflict, reason=Optimistic [try again later] +set global tidb_txn_mode=pessimistic; +drop table if exists t, t1; +create table t (i int); +create table t1 (i int); +insert t values (1); +insert t1 values (1); +begin pessimistic; +select * from t, t1 where t.i = t1.i for update of t; +i i +1 1 +begin pessimistic; +select * from t1 for update; +i +1 +select * from t for update nowait; +Error 3572 (HY000): Statement aborted because lock(s) could not be acquired immediately and NOWAIT is set. +rollback; +select * from t for update nowait; +i +1 +rollback; +set session tidb_txn_mode=''; +drop table if exists t; +create table t(a int); +insert into t values (1); +begin; +select 1 as a union select a from t for update; +a +1 +set session tidb_txn_mode=''; +update t set a = a + 1; +commit; +Error 9007 (HY000): Write conflict, reason=Optimistic [try again later] +begin; +select 1 as a union select a from t limit 5 for update; +a +1 +2 +select 1 as a union select a from t order by a for update; +a +1 +2 +update t set a = a + 1; +commit; +Error 9007 (HY000): Write conflict, reason=Optimistic [try again later] +set session tidb_txn_mode=pessimistic; +drop table if exists t; +create table t (id bigint key,b int); +split table t by (10),(20),(30); +TOTAL_SPLIT_REGION SCATTER_FINISH_RATIO +3 1 +insert into t values (0,0),(10,10),(20,20),(30,30); +alter table t add index idx1(b); +admin show ddl jobs 1; +JOB_ID DB_NAME TABLE_NAME JOB_TYPE SCHEMA_STATE SCHEMA_ID TABLE_ID ROW_COUNT CREATE_TIME START_TIME END_TIME STATE + executor__executor t public 4 synced +insert into t values (1,0),(2,10),(3,20),(4,30); +alter table t add index idx2(b); +admin show ddl jobs 1; +JOB_ID DB_NAME TABLE_NAME JOB_TYPE SCHEMA_STATE SCHEMA_ID TABLE_ID ROW_COUNT CREATE_TIME START_TIME END_TIME STATE + executor__executor t public 8 synced +drop table if exists t; +create table t(a int, b int as(-a)); +insert into t(a) values(1), (3), (7); +SET GLOBAL tidb_mem_oom_action='CANCEL'; +set @@tidb_mem_quota_query=1; +update t set t.a = t.a - 1 where t.a in (select a from t where a < 4); +Error 8175 (HY000): Your query has been cancelled due to exceeding the allowed memory limit for a single SQL query. Please try narrowing your query scope or increase the tidb_mem_quota_query limit and try again.[conn=] +set @@tidb_mem_quota_query=1000000000; +select stmt_type from information_schema.statements_summary where digest_text = 'update `t` set `t` . `a` = `t` . `a` - ? where `t` . `a` in ( select `a` from `t` where `a` < ? 
)';
+stmt_type
+Update
+set @@tidb_mem_quota_query=default;
+set global tidb_mem_oom_action=default;
+drop table if exists t;
+drop user if exists 'testuser'@'localhost';
+create table t(a int);
+create user 'testuser'@'localhost';
+LOCK TABLE executor__executor.t WRITE;
+Error 1044 (42000): Access denied for user 'testuser'@'localhost' to database 'executor__executor'
+GRANT LOCK TABLES ON executor__executor.* to 'testuser'@'localhost';
+LOCK TABLE executor__executor.t WRITE;
+Error 1142 (42000): SELECT command denied to user 'testuser'@'localhost' for table 't'
+REVOKE ALL ON executor__executor.* FROM 'testuser'@'localhost';
+GRANT SELECT ON executor__executor.* to 'testuser'@'localhost';
+LOCK TABLE executor__executor.t WRITE;
+Error 1044 (42000): Access denied for user 'testuser'@'localhost' to database 'executor__executor'
+GRANT LOCK TABLES ON executor__executor.* to 'testuser'@'localhost';
+LOCK TABLE executor__executor.t WRITE;
+drop database if exists test2;
+create database test2;
+create table test2.t2(a int);
+LOCK TABLE executor__executor.t WRITE, test2.t2 WRITE;
+Error 1044 (42000): Access denied for user 'testuser'@'localhost' to database 'test2'
+GRANT LOCK TABLES ON test2.* to 'testuser'@'localhost';
+LOCK TABLE executor__executor.t WRITE, test2.t2 WRITE;
+Error 1142 (42000): SELECT command denied to user 'testuser'@'localhost' for table 't2'
+GRANT SELECT ON test2.* to 'testuser'@'localhost';
+LOCK TABLE executor__executor.t WRITE, test2.t2 WRITE;
+LOCK TABLE executor__executor.t WRITE, test2.t2 WRITE;
+Error 8020 (HY000): Table 't' was locked in WRITE by server: session:
+unlock tables;
+unlock tables;
+drop user 'testuser'@'localhost';
>>>>>>> 91beef4bb14 (*: disable insert null to not-null column for single-row insertion in non-strict mode (#55477))
diff --git a/tests/integrationtest/r/executor/insert.result b/tests/integrationtest/r/executor/insert.result
new file mode 100644
index 0000000000000..a2250b63e7478
--- /dev/null
+++ b/tests/integrationtest/r/executor/insert.result
@@ -0,0 +1,2180 @@
+set tidb_enable_clustered_index = on;
+drop table if exists t;
+create table t(a char(20), b int, primary key(a));
+insert into t values('aa', 1), ('bb', 1);
+insert into t values('aa', 2);
+Error 1062 (23000): Duplicate entry 'aa' for key 't.PRIMARY'
+drop table t;
+create table t(a char(20), b varchar(30), c varchar(10), primary key(a, b, c));
+insert into t values ('a', 'b', 'c'), ('b', 'a', 'c');
+insert into t values ('a', 'b', 'c');
+Error 1062 (23000): Duplicate entry 'a-b-c' for key 't.PRIMARY'
+set tidb_enable_clustered_index = default;
+set tidb_enable_clustered_index = on;
+drop table if exists t1;
+create table t1(c1 decimal(6,4), primary key(c1));
+insert into t1 set c1 = 0.1;
+insert into t1 set c1 = 0.1 on duplicate key update c1 = 1;
+select * from t1;
+c1
+1.0000
+set tidb_enable_clustered_index = default;
+drop table if exists t1;
+create table t1(c1 year);
+insert into t1 set c1 = '2004';
+alter table t1 add index idx(c1);
+delete from t1;
+admin check table t1;
+drop table if exists t1;
+create table t1(c1 year);
+insert into t1 set c1 = 2004;
+alter table t1 add index idx(c1);
+delete from t1;
+admin check table t1;
+drop table if exists t1;
+create table t1(c1 bit);
+insert into t1 set c1 = 1;
+alter table t1 add index idx(c1);
+delete from t1;
+admin check table t1;
+drop table if exists t1;
+create table t1(c1 smallint unsigned);
+insert into t1 set c1 = 1;
+alter table t1 add index idx(c1);
+delete from t1;
+admin check table t1;
+drop table if exists
t1; +create table t1(c1 int unsigned); +insert into t1 set c1 = 1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 smallint); +insert into t1 set c1 = -1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 int); +insert into t1 set c1 = -1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 decimal(6,4)); +insert into t1 set c1 = '1.1'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 decimal); +insert into t1 set c1 = 1.1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 numeric); +insert into t1 set c1 = -1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 float); +insert into t1 set c1 = 1.2; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 double); +insert into t1 set c1 = 1.2; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 double); +insert into t1 set c1 = 1.3; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 real); +insert into t1 set c1 = 1.4; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 date); +insert into t1 set c1 = '2020-01-01'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 time); +insert into t1 set c1 = '20:00:00'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 datetime); +insert into t1 set c1 = '2020-01-01 22:22:22'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 timestamp); +insert into t1 set c1 = '2020-01-01 22:22:22'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 year); +insert into t1 set c1 = '2020'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 char(15)); +insert into t1 set c1 = 'test'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 varchar(15)); +insert into t1 set c1 = 'test'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 binary(3)); +insert into t1 set c1 = 'a'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 varbinary(3)); +insert into t1 set c1 = 'b'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 blob); +insert into t1 set c1 = 'test'; +alter table t1 add index idx(c1(3)); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 text); +insert into t1 set c1 = 'test'; +alter table t1 add index idx(c1(3)); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 enum('a', 'b')); +insert into t1 set c1 = 'a'; +alter table t1 add index 
idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 set('a', 'b')); +insert into t1 set c1 = 'a,b'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists c; +create table c(i int,j int,k int,primary key(i,j,k)); +insert into c values(1,2,3); +insert into c values(1,2,4); +update c set i=1,j=2,k=4 where i=1 and j=2 and k=3; +Error 1062 (23000): Duplicate entry '1-2-4' for key 'c.PRIMARY' +drop table if exists t1, t2; +create table t1 (a int(11) ,b varchar(100) ,primary key (a)); +create table t2 (c int(11) ,d varchar(100) ,primary key (c)); +prepare in1 from 'insert into t1 (a,b) select c,null from t2 t on duplicate key update b=t.d'; +execute in1; +drop table if exists t1; +create table t1(a bigint); +insert into t1 values("asfasdfsajhlkhlksdaf"); +Error 1366 (HY000): Incorrect bigint value: 'asfasdfsajhlkhlksdaf' for column 'a' at row 1 +drop table if exists t1; +create table t1(a varchar(10)) charset ascii; +insert into t1 values('我'); +Error 1366 (HY000): Incorrect string value '\xE6\x88\x91' for column 'a' +drop table if exists t1; +create table t1(a char(10) charset utf8); +insert into t1 values('我'); +alter table t1 add column b char(10) charset ascii as ((a)); +select * from t1; +a b +我 ? +drop table if exists t; +create table t (a year); +insert into t values(2156); +Error 1264 (22003): Out of range value for column 'a' at row 1 +DROP TABLE IF EXISTS ts; +CREATE TABLE ts (id int DEFAULT NULL, time1 TIMESTAMP NULL DEFAULT NULL); +SET @@sql_mode=''; +INSERT INTO ts (id, time1) VALUES (1, TIMESTAMP '1018-12-23 00:00:00'); +SHOW WARNINGS; +Level Code Message +Warning 1292 Incorrect timestamp value: '1018-12-23 00:00:00' for column 'time1' at row 1 +SELECT * FROM ts ORDER BY id; +id time1 +1 0000-00-00 00:00:00 +SET @@sql_mode='STRICT_TRANS_TABLES'; +INSERT INTO ts (id, time1) VALUES (2, TIMESTAMP '1018-12-24 00:00:00'); +Error 1292 (22007): Incorrect timestamp value: '1018-12-24 00:00:00' for column 'time1' at row 1 +DROP TABLE ts; +CREATE TABLE t0(c0 SMALLINT AUTO_INCREMENT PRIMARY KEY); +INSERT IGNORE INTO t0(c0) VALUES (194626268); +INSERT IGNORE INTO t0(c0) VALUES ('*'); +SHOW WARNINGS; +Level Code Message +Warning 1366 Incorrect smallint value: '*' for column 'c0' at row 1 +Warning 1690 constant 32768 overflows smallint +Warning 1467 Failed to read auto-increment value from storage engine +SET @@sql_mode=default; +drop table if exists t1; +create table t1(a decimal(15,2)); +insert into t1 values (1111111111111.01); +select * from t1; +a +1111111111111.01 +select cast(a as decimal) from t1; +cast(a as decimal) +9999999999 +drop table if exists t1; +create table t1(a json, b int, unique index idx((cast(a as signed array)))); +insert into t1 values ('[1,11]', 1); +insert into t1 values ('[2, 22]', 2); +select * from t1; +a b +[1, 11] 1 +[2, 22] 2 +insert into t1 values ('[2, 222]', 2); +Error 1062 (23000): Duplicate entry '2' for key 't1.idx' +replace into t1 values ('[1, 10]', 10); +select * from t1; +a b +[2, 22] 2 +[1, 10] 10 +replace into t1 values ('[1, 2]', 1); +select * from t1; +a b +[1, 2] 1 +replace into t1 values ('[1, 11]', 1); +insert into t1 values ('[2, 22]', 2); +select * from t1; +a b +[1, 11] 1 +[2, 22] 2 +insert ignore into t1 values ('[1]', 2); +select * from t1; +a b +[1, 11] 1 +[2, 22] 2 +insert ignore into t1 values ('[1, 2]', 2); +select * from t1; +a b +[1, 11] 1 +[2, 22] 2 +insert into t1 values ('[2]', 2) on duplicate key update b = 10; +select * from t1; +a b +[1, 11] 1 
+[2, 22] 10 +insert into t1 values ('[2, 1]', 2) on duplicate key update a = '[1,2]'; +Error 1062 (23000): Duplicate entry '[1, 2]' for key 't1.idx' +insert into t1 values ('[1,2]', 2) on duplicate key update a = '[1,2]'; +Error 1062 (23000): Duplicate entry '[1, 2]' for key 't1.idx' +insert into t1 values ('[11, 22]', 2) on duplicate key update a = '[1,2]'; +Error 1062 (23000): Duplicate entry '[1, 2]' for key 't1.idx' +set time_zone="+09:00"; +drop table if exists t; +create table t (id int, c1 datetime not null default CURRENT_TIMESTAMP); +set TIMESTAMP = 1234; +insert t (id) values (1); +select * from t; +id c1 +1 1970-01-01 09:20:34 +drop table if exists t; +create table t (dt datetime); +set @@time_zone='+08:00'; +delete from t; +insert into t values ('2020-10-22'); +select * from t; +dt +2020-10-22 00:00:00 +delete from t; +insert into t values ('2020-10-22-16'); +select * from t; +dt +2020-10-22 16:00:00 +delete from t; +insert into t values ('2020-10-22 16-31'); +select * from t; +dt +2020-10-22 16:31:00 +delete from t; +insert into t values ('2020-10-22 16:31-15'); +select * from t; +dt +2020-10-22 16:31:15 +delete from t; +insert into t values ('2020-10-22T16:31:15-10'); +select * from t; +dt +2020-10-23 10:31:15 +delete from t; +insert into t values ('2020.10-22'); +select * from t; +dt +2020-10-22 00:00:00 +delete from t; +insert into t values ('2020-10.22-16'); +select * from t; +dt +2020-10-22 16:00:00 +delete from t; +insert into t values ('2020-10-22.16-31'); +select * from t; +dt +2020-10-22 16:31:00 +delete from t; +insert into t values ('2020-10-22 16.31-15'); +select * from t; +dt +2020-10-22 16:31:15 +delete from t; +insert into t values ('2020-10-22T16.31.15+14'); +select * from t; +dt +2020-10-22 10:31:15 +delete from t; +insert into t values ('2020-10:22'); +select * from t; +dt +2020-10-22 00:00:00 +delete from t; +insert into t values ('2020-10-22:16'); +select * from t; +dt +2020-10-22 16:00:00 +delete from t; +insert into t values ('2020-10-22-16:31'); +select * from t; +dt +2020-10-22 16:31:00 +delete from t; +insert into t values ('2020-10-22 16-31:15'); +select * from t; +dt +2020-10-22 16:31:15 +delete from t; +insert into t values ('2020-10-22T16.31.15+09:30'); +select * from t; +dt +2020-10-22 15:01:15 +delete from t; +insert into t values ('2020.10-22:16'); +select * from t; +dt +2020-10-22 16:00:00 +delete from t; +insert into t values ('2020-10.22-16:31'); +select * from t; +dt +2020-10-22 16:31:00 +delete from t; +insert into t values ('2020-10-22.16-31:15'); +select * from t; +dt +2020-10-22 16:31:15 +delete from t; +insert into t values ('2020-10-22T16:31.15+09:30'); +select * from t; +dt +2020-10-22 15:01:15 +drop table if exists t; +create table t (dt datetime, ts timestamp); +delete from t; +set @@time_zone='+08:00'; +insert into t values ('2020-10-22T16:53:40Z', '2020-10-22T16:53:40Z'); +set @@time_zone='+00:00'; +select * from t; +dt ts +2020-10-23 00:53:40 2020-10-22 16:53:40 +delete from t; +set @@time_zone='-08:00'; +insert into t values ('2020-10-22T16:53:40Z', '2020-10-22T16:53:40Z'); +set @@time_zone='+08:00'; +select * from t; +dt ts +2020-10-22 08:53:40 2020-10-23 00:53:40 +delete from t; +set @@time_zone='-03:00'; +insert into t values ('2020-10-22T16:53:40+03:00', '2020-10-22T16:53:40+03:00'); +set @@time_zone='+08:00'; +select * from t; +dt ts +2020-10-22 10:53:40 2020-10-22 21:53:40 +delete from t; +set @@time_zone='+08:00'; +insert into t values ('2020-10-22T16:53:40+08:00', '2020-10-22T16:53:40+08:00'); +set @@time_zone='+08:00'; 
+select * from t; +dt ts +2020-10-22 16:53:40 2020-10-22 16:53:40 +drop table if exists t; +create table t (ts timestamp); +insert into t values ('2020-10-22T12:00:00Z'), ('2020-10-22T13:00:00Z'), ('2020-10-22T14:00:00Z'); +select count(*) from t where ts > '2020-10-22T12:00:00Z'; +count(*) +2 +set @@time_zone='+08:00'; +drop table if exists t; +create table t (dt datetime(2), ts timestamp(2)); +insert into t values ('2020-10-27T14:39:10.10+00:00', '2020-10-27T14:39:10.10+00:00'); +select * from t; +dt ts +2020-10-27 22:39:10.10 2020-10-27 22:39:10.10 +drop table if exists t; +create table t (dt datetime(1), ts timestamp(1)); +insert into t values ('2020-10-27T14:39:10.3+0200', '2020-10-27T14:39:10.3+0200'); +select * from t; +dt ts +2020-10-27 20:39:10.3 2020-10-27 20:39:10.3 +drop table if exists t; +create table t (dt datetime(6), ts timestamp(6)); +insert into t values ('2020-10-27T14:39:10.3-02', '2020-10-27T14:39:10.3-02'); +select * from t; +dt ts +2020-10-28 00:39:10.300000 2020-10-28 00:39:10.300000 +drop table if exists t; +create table t (dt datetime(2), ts timestamp(2)); +insert into t values ('2020-10-27T14:39:10.10Z', '2020-10-27T14:39:10.10Z'); +select * from t; +dt ts +2020-10-27 22:39:10.10 2020-10-27 22:39:10.10 +set time_zone=default; +set timestamp=default; +drop table if exists t1; +create table t1(a year(4)); +insert into t1 values(0000),(00),("0000"),("000"), ("00"), ("0"), (79), ("79"); +select * from t1; +a +0000 +0000 +0000 +2000 +2000 +2000 +1979 +1979 +drop table if exists t; +create table t(f_year year NOT NULL DEFAULT '0000')ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; +insert into t values(); +select * from t; +f_year +0000 +insert into t values('0000'); +select * from t; +f_year +0000 +0000 +drop table if exists t1, t2, t3, t4; +create table t1(d date); +create table t2(d datetime); +create table t3(d date); +create table t4(d datetime); +set sql_mode='STRICT_TRANS_TABLES,ALLOW_INVALID_DATES'; +insert into t1 values ('0000-00-00'); +insert into t2 values ('0000-00-00'); +insert into t1 values ('2019-00-00'); +insert into t2 values ('2019-00-00'); +insert into t1 values ('2019-01-00'); +insert into t2 values ('2019-01-00'); +insert into t1 values ('2019-00-01'); +insert into t2 values ('2019-00-01'); +insert into t1 values ('2019-02-31'); +insert into t2 values ('2019-02-31'); +select year(d), month(d), day(d) from t1; +year(d) month(d) day(d) +0 0 0 +2019 0 0 +2019 1 0 +2019 0 1 +2019 2 31 +select year(d), month(d), day(d) from t2; +year(d) month(d) day(d) +0 0 0 +2019 0 0 +2019 1 0 +2019 0 1 +2019 2 31 +insert t3 select d from t1; +select year(d), month(d), day(d) from t3; +year(d) month(d) day(d) +0 0 0 +2019 0 0 +2019 1 0 +2019 0 1 +2019 2 31 +insert t4 select d from t2; +select year(d), month(d), day(d) from t4; +year(d) month(d) day(d) +0 0 0 +2019 0 0 +2019 1 0 +2019 0 1 +2019 2 31 +truncate t1;truncate t2;truncate t3;truncate t4; +set sql_mode='ALLOW_INVALID_DATES'; +insert into t1 values ('0000-00-00'); +insert into t2 values ('0000-00-00'); +insert into t1 values ('2019-00-00'); +insert into t2 values ('2019-00-00'); +insert into t1 values ('2019-01-00'); +insert into t2 values ('2019-01-00'); +insert into t1 values ('2019-00-01'); +insert into t2 values ('2019-00-01'); +insert into t1 values ('2019-02-31'); +insert into t2 values ('2019-02-31'); +select year(d), month(d), day(d) from t1; +year(d) month(d) day(d) +0 0 0 +2019 0 0 +2019 1 0 +2019 0 1 +2019 2 31 +select year(d), month(d), day(d) from t2; +year(d) month(d) day(d) +0 0 0 
+2019 0 0 +2019 1 0 +2019 0 1 +2019 2 31 +insert t3 select d from t1; +select year(d), month(d), day(d) from t3; +year(d) month(d) day(d) +0 0 0 +2019 0 0 +2019 1 0 +2019 0 1 +2019 2 31 +insert t4 select d from t2; +select year(d), month(d), day(d) from t4; +year(d) month(d) day(d) +0 0 0 +2019 0 0 +2019 1 0 +2019 0 1 +2019 2 31 +set sql_mode=default; +drop table if exists t1, t2, t3; +create table t1 (a int,b int,primary key(a,b)) partition by range(a) (partition p0 values less than (100),partition p1 values less than (1000)); +insert into t1 set a=1, b=1; +insert into t1 set a=1,b=1 on duplicate key update a=1,b=1; +select * from t1; +a b +1 1 +create table t2 (a int,b int,primary key(a,b)) partition by hash(a) partitions 4; +insert into t2 set a=1,b=1; +insert into t2 set a=1,b=1 on duplicate key update a=1,b=1; +select * from t2; +a b +1 1 +CREATE TABLE t3 (a int, b int, c int, d int, e int, +PRIMARY KEY (a,b), +UNIQUE KEY (b,c,d) +) PARTITION BY RANGE ( b ) ( +PARTITION p0 VALUES LESS THAN (4), +PARTITION p1 VALUES LESS THAN (7), +PARTITION p2 VALUES LESS THAN (11) +); +insert into t3 values (1,2,3,4,5); +insert into t3 values (1,2,3,4,5),(6,2,3,4,6) on duplicate key update e = e + values(e); +select * from t3; +a b c d e +1 2 3 4 16 +drop table if exists t1; +create table t1 (a bit(3)); +insert into t1 values(-1); +Error 1406 (22001): Data too long for column 'a' at row 1 +insert into t1 values(9); +Error 1406 (22001): Data too long for column 'a' at row 1 +create table t64 (a bit(64)); +insert into t64 values(-1); +insert into t64 values(18446744073709551615); +insert into t64 values(18446744073709551616); +Error 1264 (22003): Out of range value for column 'a' at row 1 +drop table if exists bug; +create table bug (a varchar(100)); +insert into bug select ifnull(JSON_UNQUOTE(JSON_EXTRACT('[{"amount":2000,"feeAmount":0,"merchantNo":"20190430140319679394","shareBizCode":"20160311162_SECOND"}]', '$[0].merchantNo')),'') merchant_no union SELECT '20180531557' merchant_no; +select * from bug; +a +20180531557 +20190430140319679394 +drop table if exists t; +create table t (a int, b double); +insert into t values (ifnull('',0)+0, 0); +insert into t values (0, ifnull('',0)+0); +select * from t; +a b +0 0 +0 0 +insert into t values ('', 0); +Error 1366 (HY000): Incorrect int value: '' for column 'a' at row 1 +insert into t values (0, ''); +Error 1366 (HY000): Incorrect double value: '' for column 'b' at row 1 +update t set a = ''; +Error 1292 (22007): Truncated incorrect DOUBLE value: '' +update t set b = ''; +Error 1292 (22007): Truncated incorrect DOUBLE value: '' +update t set a = ifnull('',0)+0; +update t set b = ifnull('',0)+0; +delete from t where a = ''; +select * from t; +a b +drop table if exists t,t1; +create table t(col1 FLOAT, col2 FLOAT(10,2), col3 DOUBLE, col4 DOUBLE(10,2), col5 DECIMAL, col6 DECIMAL(10,2)); +insert into t values (-3.402823466E+68, -34028234.6611, -1.7976931348623157E+308, -17976921.34, -9999999999, -99999999.99); +Error 1264 (22003): Out of range value for column 'col1' at row 1 +insert into t values (-34028234.6611, -3.402823466E+68, -1.7976931348623157E+308, -17976921.34, -9999999999, -99999999.99); +Error 1264 (22003): Out of range value for column 'col2' at row 1 +create table t1(id1 float,id2 float); +insert ignore into t1 values(999999999999999999999999999999999999999,-999999999999999999999999999999999999999); +select @@warning_count; +@@warning_count +2 +select convert(id1,decimal(65)),convert(id2,decimal(65)) from t1; +convert(id1,decimal(65)) 
convert(id2,decimal(65)) +340282346638528860000000000000000000000 -340282346638528860000000000000000000000 +set sql_mode = 'ONLY_FULL_GROUP_BY,STRICT_ALL_TABLES,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION'; +drop table if exists t1; +CREATE TABLE t1(c1 TINYTEXT CHARACTER SET utf8mb4); +INSERT INTO t1 (c1) VALUES(REPEAT(X'C385', 128)); +Error 1406 (22001): Data too long for column 'c1' at row 1 +drop table if exists t1; +CREATE TABLE t1(c1 Text CHARACTER SET utf8mb4); +INSERT INTO t1 (c1) VALUES(REPEAT(X'C385', 32768)); +Error 1406 (22001): Data too long for column 'c1' at row 1 +drop table if exists t1; +CREATE TABLE t1(c1 mediumtext); +INSERT INTO t1 (c1) VALUES(REPEAT(X'C385', 8777215)); +Error 1406 (22001): Data too long for column 'c1' at row 1 +set sql_mode = 'ONLY_FULL_GROUP_BY,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION'; +drop table if exists t1; +CREATE TABLE t1(c1 TINYTEXT CHARACTER SET utf8mb4); +INSERT INTO t1 (c1) VALUES(REPEAT(X'C385', 128)); +select length(c1) from t1; +length(c1) +254 +drop table if exists t1; +CREATE TABLE t1(c1 Text CHARACTER SET utf8mb4); +INSERT INTO t1 (c1) VALUES(REPEAT(X'C385', 32768)); +select length(c1) from t1; +length(c1) +65534 +set sql_mode = default; +set @@allow_auto_random_explicit_insert = true; +drop table if exists ar; +create table ar (id bigint key clustered auto_random, name char(10)); +insert into ar(id) values (1); +select id from ar; +id +1 +select last_insert_id(); +last_insert_id() +0 +delete from ar; +insert into ar(id) values (1), (2); +select id from ar; +id +1 +2 +select last_insert_id(); +last_insert_id() +0 +delete from ar; +drop table ar; +set @@allow_auto_random_explicit_insert = default; +drop table if exists t, t1; +create table t (a int primary key, b datetime, d date); +insert into t values (1, '2019-02-11 30:00:00', '2019-01-31'); +Error 1292 (22007): Incorrect datetime value: '2019-02-11 30:00:00' for column 'b' at row 1 +CREATE TABLE t1 (a BINARY(16) PRIMARY KEY); +INSERT INTO t1 VALUES (AES_ENCRYPT('a','a')); +INSERT INTO t1 VALUES (AES_ENCRYPT('a','a')); +Error 1062 (23000): Duplicate entry '{ W]\xA1\x06u\x9D\xBD\xB1\xA3.\xE2\xD9\xA7t' for key 't1.PRIMARY' +INSERT INTO t1 VALUES (AES_ENCRYPT('b','b')); +INSERT INTO t1 VALUES (AES_ENCRYPT('b','b')); +Error 1062 (23000): Duplicate entry '\x0C\x1E\x8DG`\xEB\x93 F&BC\xF0\xB5\xF4\xB7' for key 't1.PRIMARY' +drop table if exists t1; +create table t1 (a bit primary key) engine=innodb; +insert into t1 values (b'0'); +insert into t1 values (b'0'); +Error 1062 (23000): Duplicate entry '\x00' for key 't1.PRIMARY' +drop table if exists t; +create table t(c numeric primary key); +insert ignore into t values(null); +insert into t values(0); +Error 1062 (23000): Duplicate entry '0' for key 't.PRIMARY' +set tidb_enable_clustered_index = on; +drop table if exists t1pk; +create table t1pk(id varchar(200) primary key, v int); +insert into t1pk(id, v) values('abc', 1); +select * from t1pk; +id v +abc 1 +set @@tidb_constraint_check_in_place=true; +insert into t1pk(id, v) values('abc', 2); +Error 1062 (23000): Duplicate entry 'abc' for key 't1pk.PRIMARY' +set @@tidb_constraint_check_in_place=false; +insert into t1pk(id, v) values('abc', 3); +Error 1062 (23000): Duplicate entry 'abc' for key 't1pk.PRIMARY' +select v, id from t1pk; +v id +1 abc +select id from t1pk where id = 'abc'; +id +abc +select v, id from t1pk where id = 'abc'; +v id +1 abc +drop table if exists t3pk; +create table t3pk(id1 
varchar(200), id2 varchar(200), v int, id3 int, primary key(id1, id2, id3)); +insert into t3pk(id1, id2, id3, v) values('abc', 'xyz', 100, 1); +select * from t3pk; +id1 id2 v id3 +abc xyz 1 100 +set @@tidb_constraint_check_in_place=true; +insert into t3pk(id1, id2, id3, v) values('abc', 'xyz', 100, 2); +Error 1062 (23000): Duplicate entry 'abc-xyz-100' for key 't3pk.PRIMARY' +set @@tidb_constraint_check_in_place=false; +insert into t3pk(id1, id2, id3, v) values('abc', 'xyz', 100, 3); +Error 1062 (23000): Duplicate entry 'abc-xyz-100' for key 't3pk.PRIMARY' +select v, id3, id2, id1 from t3pk; +v id3 id2 id1 +1 100 xyz abc +select id3, id2, id1 from t3pk where id3 = 100 and id2 = 'xyz' and id1 = 'abc'; +id3 id2 id1 +100 xyz abc +select id3, id2, id1, v from t3pk where id3 = 100 and id2 = 'xyz' and id1 = 'abc'; +id3 id2 id1 v +100 xyz abc 1 +insert into t3pk(id1, id2, id3, v) values('abc', 'xyz', 101, 1); +insert into t3pk(id1, id2, id3, v) values('abc', 'zzz', 101, 1); +drop table if exists t1pku; +create table t1pku(id varchar(200) primary key, uk int, v int, unique key ukk(uk)); +insert into t1pku(id, uk, v) values('abc', 1, 2); +select * from t1pku where id = 'abc'; +id uk v +abc 1 2 +insert into t1pku(id, uk, v) values('aaa', 1, 3); +Error 1062 (23000): Duplicate entry '1' for key 't1pku.ukk' +select * from t1pku; +id uk v +abc 1 2 +select * from t3pk where (id1, id2, id3) in (('abc', 'xyz', 100), ('abc', 'xyz', 101), ('abc', 'zzz', 101)); +id1 id2 v id3 +abc xyz 1 100 +abc xyz 1 101 +abc zzz 1 101 +set @@tidb_constraint_check_in_place=default; +set tidb_enable_clustered_index = default; +set tidb_enable_clustered_index = on; +drop table if exists it1pk; +create table it1pk(id varchar(200) primary key, v int); +insert into it1pk(id, v) values('abc', 1); +insert ignore into it1pk(id, v) values('abc', 2); +select * from it1pk where id = 'abc'; +id v +abc 1 +drop table if exists it2pk; +create table it2pk(id1 varchar(200), id2 varchar(200), v int, primary key(id1, id2)); +insert into it2pk(id1, id2, v) values('abc', 'cba', 1); +select * from it2pk where id1 = 'abc' and id2 = 'cba'; +id1 id2 v +abc cba 1 +insert ignore into it2pk(id1, id2, v) values('abc', 'cba', 2); +select * from it2pk where id1 = 'abc' and id2 = 'cba'; +id1 id2 v +abc cba 1 +drop table if exists it1pku; +create table it1pku(id varchar(200) primary key, uk int, v int, unique key ukk(uk)); +insert into it1pku(id, uk, v) values('abc', 1, 2); +select * from it1pku where id = 'abc'; +id uk v +abc 1 2 +insert ignore into it1pku(id, uk, v) values('aaa', 1, 3), ('bbb', 2, 1); +select * from it1pku; +id uk v +abc 1 2 +bbb 2 1 +set tidb_enable_clustered_index = default; +set tidb_enable_clustered_index = on; +drop table if exists dt1pi; +create table dt1pi(id varchar(200) primary key, v int); +insert into dt1pi(id, v) values('abb', 1),('acc', 2); +insert into dt1pi(id, v) values('abb', 2) on duplicate key update v = v + 1; +select * from dt1pi; +id v +abb 2 +acc 2 +insert into dt1pi(id, v) values('abb', 2) on duplicate key update v = v + 1, id = 'xxx'; +select * from dt1pi; +id v +acc 2 +xxx 3 +drop table if exists dt1piu; +create table dt1piu(id varchar(200) primary key, uk int, v int, unique key uuk(uk)); +insert into dt1piu(id, uk, v) values('abb', 1, 10),('acc', 2, 20); +insert into dt1piu(id, uk, v) values('xyz', 1, 100) on duplicate key update v = v + 1; +select * from dt1piu; +id uk v +abb 1 11 +acc 2 20 +insert into dt1piu(id, uk, v) values('abb', 1, 2) on duplicate key update v = v + 1, id = 'xxx'; +select * from dt1piu; 
+id uk v +acc 2 20 +xxx 1 12 +drop table if exists ts1pk; +create table ts1pk(id1 timestamp, id2 timestamp, v int, primary key(id1, id2)); +insert into ts1pk (id1, id2, v) values('2018-01-01 11:11:11', '2018-01-01 11:11:11', 1); +select id1, id2, v from ts1pk; +id1 id2 v +2018-01-01 11:11:11 2018-01-01 11:11:11 1 +insert into ts1pk (id1, id2, v) values('2018-01-01 11:11:11', '2018-01-01 11:11:11', 2) on duplicate key update v = values(v); +select id1, id2, v from ts1pk; +id1 id2 v +2018-01-01 11:11:11 2018-01-01 11:11:11 2 +insert into ts1pk (id1, id2, v) values('2018-01-01 11:11:11', '2018-01-01 11:11:11', 2) on duplicate key update v = values(v), id1 = '2018-01-01 11:11:12'; +select id1, id2, v from ts1pk; +id1 id2 v +2018-01-01 11:11:12 2018-01-01 11:11:11 2 +set tidb_enable_clustered_index = default; +set tidb_enable_clustered_index = on; +drop table if exists pkt1; +CREATE TABLE pkt1 (a varchar(255), b int, index idx(b), primary key(a,b)); +insert into pkt1 values ('aaa',1); +select b from pkt1 where b = 1; +b +1 +drop table if exists pkt2; +CREATE TABLE pkt2 (a varchar(255), b int, unique index idx(b), primary key(a,b)); +insert into pkt2 values ('aaa',1); +select b from pkt2 where b = 1; +b +1 +drop table if exists issue_18232; +create table issue_18232 (a int, b int, c int, d int, primary key (a, b), index idx(c)); +select a from issue_18232 use index (idx); +a +select b from issue_18232 use index (idx); +b +select a,b from issue_18232 use index (idx); +a b +select c from issue_18232 use index (idx); +c +select a,c from issue_18232 use index (idx); +a c +select b,c from issue_18232 use index (idx); +b c +select a,b,c from issue_18232 use index (idx); +a b c +select d from issue_18232 use index (idx); +d +select a,d from issue_18232 use index (idx); +a d +select b,d from issue_18232 use index (idx); +b d +select a,b,d from issue_18232 use index (idx); +a b d +select c,d from issue_18232 use index (idx); +c d +select a,c,d from issue_18232 use index (idx); +a c d +select b,c,d from issue_18232 use index (idx); +b c d +select a,b,c,d from issue_18232 use index (idx); +a b c d +set tidb_enable_clustered_index = default; +drop table if exists t1, t2; +create table t1(a year, primary key(a)); +insert ignore into t1 values(null); +create table t2(a int, key(a)); +insert into t2 values(0); +select /*+ hash_join(t1) */ * from t1 join t2 on t1.a = t2.a; +a a +0000 0 +select /*+ inl_join(t1) */ * from t1 join t2 on t1.a = t2.a; +a a +0000 0 +select /*+ inl_join(t2) */ * from t1 join t2 on t1.a = t2.a; +a a +0000 0 +select /*+ inl_hash_join(t1) */ * from t1 join t2 on t1.a = t2.a; +a a +0000 0 +select /*+ inl_merge_join(t1) */ * from t1 join t2 on t1.a = t2.a; +a a +0000 0 +select /*+ merge_join(t1) */ * from t1 join t2 on t1.a = t2.a; +a a +0000 0 +drop table if exists vctt; +create table vctt (v varchar(4), c char(4)); +insert into vctt values ('ab ', 'ab '); +select * from vctt; +v c +ab ab +delete from vctt; +insert into vctt values ('ab\n\n\n', 'ab\n\n\n'), ('ab\t\t\t', 'ab\t\t\t'), ('ab ', 'ab '), ('ab\r\r\r', 'ab\r\r\r'); +show warnings; +Level Code Message +Warning 1265 Data truncated for column 'v' at row 1 +Warning 1265 Data truncated for column 'v' at row 2 +Warning 1265 Data truncated for column 'v' at row 3 +Warning 1265 Data truncated for column 'v' at row 4 +select * from vctt; +v c +ab + + ab + + +ab ab +ab ab +ab ab +select length(v), length(c) from vctt; +length(v) length(c) +4 4 +4 4 +4 2 +4 4 +drop table if exists t1; +create table t1(a int, b varchar(20), primary 
key(a,b(3)) clustered); +insert into t1 values(1,'aaaaa'); +insert into t1 values(1,'aaaaa'); +Error 1062 (23000): Duplicate entry '1-aaa' for key 't1.PRIMARY' +insert into t1 select 1, 'aaa'; +Error 1062 (23000): Duplicate entry '1-aaa' for key 't1.PRIMARY' +insert into t1 select 1, 'bb'; +insert into t1 select 1, 'bb'; +Error 1062 (23000): Duplicate entry '1-bb' for key 't1.PRIMARY' +drop table if exists bintest; +create table bintest (h enum(0x61, '1', 'b')) character set utf8mb4; +insert into bintest(h) values(0x61); +select * from bintest; +h +a +drop table if exists bintest; +create table bintest (h set(0x61, '1', 'b')) character set utf8mb4; +insert into bintest(h) values(0x61); +select * from bintest; +h +a +drop table if exists temp_test; +create global temporary table temp_test(id int primary key auto_increment) on commit delete rows; +insert into temp_test(id) values(0); +select * from temp_test; +id +begin; +insert into temp_test(id) values(0); +select * from temp_test; +id +1 +commit; +begin; +insert into temp_test(id) values(0); +select * from temp_test; +id +1 +insert into temp_test(id) values(0); +select id from temp_test order by id; +id +1 +2 +commit; +begin; +insert into temp_test(id) values(0), (0); +select id from temp_test order by id; +id +1 +2 +insert into temp_test(id) values(0), (0); +select id from temp_test order by id; +id +1 +2 +3 +4 +commit; +begin; +insert into temp_test(id) values(10); +insert into temp_test(id) values(0); +select id from temp_test order by id; +id +10 +11 +insert into temp_test(id) values(20), (30); +insert into temp_test(id) values(0), (0); +select id from temp_test order by id; +id +10 +11 +20 +30 +31 +32 +commit; +drop table if exists temp_test; +drop table if exists temp_test; +create global temporary table temp_test(id int) on commit delete rows; +insert into temp_test(id) values(0); +select _tidb_rowid from temp_test; +_tidb_rowid +begin; +insert into temp_test(id) values(0); +select _tidb_rowid from temp_test; +_tidb_rowid +1 +commit; +begin; +insert into temp_test(id) values(0); +select _tidb_rowid from temp_test; +_tidb_rowid +1 +insert into temp_test(id) values(0); +select _tidb_rowid from temp_test order by _tidb_rowid; +_tidb_rowid +1 +2 +commit; +begin; +insert into temp_test(id) values(0), (0); +select _tidb_rowid from temp_test order by _tidb_rowid; +_tidb_rowid +1 +2 +insert into temp_test(id) values(0), (0); +select _tidb_rowid from temp_test order by _tidb_rowid; +_tidb_rowid +1 +2 +3 +4 +commit; +drop table if exists temp_test; +drop table if exists t1; +create table t1(c1 date); +insert into t1 values('2020-02-31'); +Error 1292 (22007): Incorrect date value: '2020-02-31' for column 'c1' at row 1 +set @@sql_mode='ALLOW_INVALID_DATES'; +insert into t1 values('2020-02-31'); +select * from t1; +c1 +2020-02-31 +set @@sql_mode='STRICT_TRANS_TABLES'; +insert into t1 values('2020-02-31'); +Error 1292 (22007): Incorrect date value: '2020-02-31' for column 'c1' at row 1 +set sql_mode=default; +drop table if exists t; +create table t (id decimal(10)); +insert into t values('1sdf'); +Error 1366 (HY000): Incorrect decimal value: '1sdf' for column 'id' at row 1 +insert into t values('1edf'); +Error 1366 (HY000): Incorrect decimal value: '1edf' for column 'id' at row 1 +insert into t values('12Ea'); +Error 1366 (HY000): Incorrect decimal value: '12Ea' for column 'id' at row 1 +insert into t values('1E'); +Error 1366 (HY000): Incorrect decimal value: '1E' for column 'id' at row 1 +insert into t values('1e'); +Error 1366 (HY000): 
Incorrect decimal value: '1e' for column 'id' at row 1 +insert into t values('1.2A'); +Error 1366 (HY000): Incorrect decimal value: '1.2A' for column 'id' at row 1 +insert into t values('1.2.3.4.5'); +Error 1366 (HY000): Incorrect decimal value: '1.2.3.4.5' for column 'id' at row 1 +insert into t values('1.2.'); +Error 1366 (HY000): Incorrect decimal value: '1.2.' for column 'id' at row 1 +insert into t values('1,999.00'); +Error 1366 (HY000): Incorrect decimal value: '1,999.00' for column 'id' at row 1 +insert into t values('12e-3'); +show warnings; +Level Code Message +Warning 1366 Incorrect decimal value: '12e-3' for column 'id' at row 1 +select id from t; +id +0 +drop table if exists t; +SET sql_mode='NO_ENGINE_SUBSTITUTION'; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a tinyint not null auto_increment primary key, b char(20)); +INSERT INTO t1 VALUES (127,'maxvalue'); +REPLACE INTO t1 VALUES (0,'newmaxvalue'); +Error 1467 (HY000): Failed to read auto-increment value from storage engine +set sql_mode=default; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1(a INT) ENGINE = InnoDB; +INSERT IGNORE into t1(SELECT SLEEP(NULL)); +SHOW WARNINGS; +Level Code Message +Warning 1210 Incorrect arguments to sleep +INSERT IGNORE into t1(SELECT SLEEP(-1)); +SHOW WARNINGS; +Level Code Message +Warning 1210 Incorrect arguments to sleep +INSERT IGNORE into t1(SELECT SLEEP(1)); +SELECT * FROM t1; +a +0 +0 +0 +DROP TABLE t1; +drop table if exists t1; +create table t1(c1 float); +insert into t1 values(999.99); +select cast(t1.c1 as decimal(4, 1)) from t1; +cast(t1.c1 as decimal(4, 1)) +999.9 +select cast(t1.c1 as decimal(5, 1)) from t1; +cast(t1.c1 as decimal(5, 1)) +1000.0 +drop table if exists t1; +create table t1(c1 decimal(6, 4)); +insert into t1 values(99.9999); +select cast(t1.c1 as decimal(5, 3)) from t1; +cast(t1.c1 as decimal(5, 3)) +99.999 +select cast(t1.c1 as decimal(6, 3)) from t1; +cast(t1.c1 as decimal(6, 3)) +100.000 +drop table if exists t1; +create table t1(id int, a int); +set @@SQL_MODE='STRICT_TRANS_TABLES'; +insert into t1 values(1, '1e100'); +Error 1264 (22003): Out of range value for column 'a' at row 1 +insert into t1 values(2, '-1e100'); +Error 1264 (22003): Out of range value for column 'a' at row 1 +select id, a from t1; +id a +set @@SQL_MODE=''; +insert into t1 values(1, '1e100'); +show warnings; +Level Code Message +Warning 1264 Out of range value for column 'a' at row 1 +insert into t1 values(2, '-1e100'); +show warnings; +Level Code Message +Warning 1264 Out of range value for column 'a' at row 1 +select id, a from t1 order by id asc; +id a +1 2147483647 +2 -2147483648 +set sql_mode=default; +drop table if exists tf; +create table tf(a float(1, 0) unsigned); +insert into tf values('-100'); +Error 1264 (22003): Out of range value for column 'a' at row 1 +set @@sql_mode=''; +insert into tf values('-100'); +select * from tf; +a +0 +set @@sql_mode=default; +drop table if exists tt1; +create table tt1 (c1 decimal(64)); +insert into tt1 values(89000000000000000000000000000000000000000000000000000000000000000000000000000000000000000); +Error 1264 (22003): Out of range value for column 'c1' at row 1 +insert into tt1 values(89123456789012345678901234567890123456789012345678901234567890123456789012345678900000000); +Error 1264 (22003): Out of range value for column 'c1' at row 1 +insert ignore into tt1 values(89123456789012345678901234567890123456789012345678901234567890123456789012345678900000000); +show warnings; +Level Code Message +Warning 1264 Out of range value for column 'c1' at row 
1
+Warning 1292 Truncated incorrect DECIMAL value: '789012345678901234567890123456789012345678901234567890123456789012345678900000000'
+select c1 from tt1;
+c1
+9999999999999999999999999999999999999999999999999999999999999999
+update tt1 set c1 = 89123456789012345678901234567890123456789012345678901234567890123456789012345678900000000;
+Error 1264 (22003): Out of range value for column 'c1' at row 1
+drop table if exists tt1;
+insert into tt1 values(4556414e723532);
+Error 1367 (22007): Illegal double '4556414e723532' value found during parsing
+select 888888888888888888888888888888888888888888888888888888888888888888888888888888888888;
+888888888888888888888888888888888888888888888888888888888888888888888888888888888888
+99999999999999999999999999999999999999999999999999999999999999999
+show warnings;
+Level Code Message
+Warning 1292 Truncated incorrect DECIMAL value: '888888888888888888888888888888888888888888888888888888888888888888888888888888888'
+drop table if exists t;
+create table t (id smallint auto_increment primary key);
+alter table t add column c1 int default 1;
+insert ignore into t(id) values (194626268);
+affected rows: 1
+info:
+select * from t;
+id c1
+32767 1
+insert ignore into t(id) values ('*') on duplicate key update c1 = 2;
+affected rows: 2
+info:
+select * from t;
+id c1
+32767 2
+drop table if exists t;
+create table t (i int not null primary key, j int unique key);
+insert into t values (1, 1), (2, 2);
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+insert ignore into t values(1, 1) on duplicate key update i = 2;
+affected rows: 0
+info:
+select * from t;
+i j
+1 1
+2 2
+insert ignore into t values(1, 1) on duplicate key update j = 2;
+affected rows: 0
+info:
+select * from t;
+i j
+1 1
+2 2
+drop table if exists t2;
+create table t2(`col_25` set('Alice','Bob','Charlie','David') NOT NULL,`col_26` date NOT NULL DEFAULT '2016-04-15', PRIMARY KEY (`col_26`) clustered, UNIQUE KEY `idx_9` (`col_25`,`col_26`),UNIQUE KEY `idx_10` (`col_25`));
+insert into t2(col_25, col_26) values('Bob', '1989-03-23'),('Alice', '2023-11-24'), ('Charlie', '2023-12-05');
+insert ignore into t2 (col_25,col_26) values ( 'Bob','1977-11-23' ) on duplicate key update col_25 = 'Alice', col_26 = '2036-12-13';
+show warnings;
+Level Code Message
+Warning 1062 Duplicate entry 'Alice' for key 't2.idx_10'
+select * from t2;
+col_25 col_26
+Alice 2023-11-24
+Bob 1989-03-23
+Charlie 2023-12-05
+drop table if exists t4;
+create table t4(id int primary key clustered, k int, v int, unique key uk1(k));
+insert into t4 values (1, 10, 100), (3, 30, 300);
+insert ignore into t4 (id, k, v) values(1, 0, 0) on duplicate key update id = 2, k = 30;
+show warnings;
+Level Code Message
+Warning 1062 Duplicate entry '30' for key 't4.uk1'
+select * from t4;
+id k v
+1 10 100
+3 30 300
+drop table if exists t5;
+create table t5(k1 varchar(100), k2 varchar(100), uk1 int, v int, primary key(k1, k2) clustered, unique key ukk1(uk1), unique key ukk2(v));
+insert into t5(k1, k2, uk1, v) values('1', '1', 1, '100'), ('1', '3', 2, '200');
+update ignore t5 set k2 = '2', uk1 = 2 where k1 = '1' and k2 = '1';
+show warnings;
+Level Code Message
+Warning 1062 Duplicate entry '2' for key 't5.ukk1'
+select * from t5;
+k1 k2 uk1 v
+1 1 1 100
+1 3 2 200
+drop table if exists t6;
+create table t6 (a int, b int, c int, primary key(a, b) clustered, unique key idx_14(b), unique key idx_15(b), unique key idx_16(a, b));
+insert into t6 select 10, 10, 20;
+insert ignore into t6 set a = 20, b = 10 on duplicate key update a = 100;
+select * from t6;
+a b c
+100 10 20
+insert ignore into t6 set a = 200, b= 10 on duplicate key update c = 1000;
+select * from t6;
+a b c
+100 10 1000
+drop table if exists insert_autoinc_test;
+create table insert_autoinc_test (id int primary key auto_increment, c1 int);
+insert into insert_autoinc_test(c1) values (1), (2);
+begin;
+select * from insert_autoinc_test;
+id c1
+1 1
+2 2
+commit;
+begin;
+insert into insert_autoinc_test(id, c1) values (5,5);
+insert into insert_autoinc_test(c1) values (6);
+commit;
+begin;
+select * from insert_autoinc_test;
+id c1
+1 1
+2 2
+5 5
+6 6
+commit;
+begin;
+insert into insert_autoinc_test(id, c1) values (3,3);
+commit;
+begin;
+select * from insert_autoinc_test;
+id c1
+1 1
+2 2
+3 3
+5 5
+6 6
+commit;
+begin;
+insert into insert_autoinc_test(c1) values (7);
+commit;
+begin;
+select * from insert_autoinc_test;
+id c1
+1 1
+2 2
+3 3
+5 5
+6 6
+7 7
+commit;
+drop table if exists insert_autoinc_test;
+create table insert_autoinc_test (id int primary key auto_increment, c1 int);
+insert into insert_autoinc_test(id, c1) values (0.3, 1);
+select * from insert_autoinc_test;
+id c1
+1 1
+insert into insert_autoinc_test(id, c1) values (-0.3, 2);
+select * from insert_autoinc_test;
+id c1
+1 1
+2 2
+insert into insert_autoinc_test(id, c1) values (-3.3, 3);
+select * from insert_autoinc_test;
+id c1
+-3 3
+1 1
+2 2
+insert into insert_autoinc_test(id, c1) values (4.3, 4);
+select * from insert_autoinc_test;
+id c1
+-3 3
+1 1
+2 2
+4 4
+insert into insert_autoinc_test(c1) values (5);
+select * from insert_autoinc_test;
+id c1
+-3 3
+1 1
+2 2
+4 4
+5 5
+insert into insert_autoinc_test(id, c1) values (null, 6);
+select * from insert_autoinc_test;
+id c1
+-3 3
+1 1
+2 2
+4 4
+5 5
+6 6
+drop table if exists insert_autoinc_test;
+create table insert_autoinc_test (id int primary key auto_increment, c1 int);
+insert into insert_autoinc_test(id, c1) values (5, 1);
+select * from insert_autoinc_test;
+id c1
+5 1
+insert into insert_autoinc_test(id, c1) values (0, 2);
+select * from insert_autoinc_test;
+id c1
+5 1
+6 2
+insert into insert_autoinc_test(id, c1) values (0, 3);
+select * from insert_autoinc_test;
+id c1
+5 1
+6 2
+7 3
+set SQL_MODE=NO_AUTO_VALUE_ON_ZERO;
+insert into insert_autoinc_test(id, c1) values (0, 4);
+select * from insert_autoinc_test;
+id c1
+0 4
+5 1
+6 2
+7 3
+insert into insert_autoinc_test(id, c1) values (0, 5);
+Error 1062 (23000): Duplicate entry '0' for key 'insert_autoinc_test.PRIMARY'
+insert into insert_autoinc_test(c1) values (6);
+select * from insert_autoinc_test;
+id c1
+0 4
+5 1
+6 2
+7 3
+8 6
+insert into insert_autoinc_test(id, c1) values (null, 7);
+select * from insert_autoinc_test;
+id c1
+0 4
+5 1
+6 2
+7 3
+8 6
+9 7
+set SQL_MODE='';
+insert into insert_autoinc_test(id, c1) values (0, 8);
+select * from insert_autoinc_test;
+id c1
+0 4
+5 1
+6 2
+7 3
+8 6
+9 7
+10 8
+insert into insert_autoinc_test(id, c1) values (null, 9);
+select * from insert_autoinc_test;
+id c1
+0 4
+5 1
+6 2
+7 3
+8 6
+9 7
+10 8
+11 9
+set sql_mode = default;
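+## A hedged recap of the allocation rule the runs above exercise, using a
+## hypothetical throwaway table ai(id int primary key auto_increment):
+##   insert into ai values (0);     -- default modes: 0 asks for a generated id
+##   insert into ai values (null);  -- null always asks for a generated id
+##   set sql_mode = 'NO_AUTO_VALUE_ON_ZERO';
+##   insert into ai values (0);     -- now 0 is stored literally; only null allocates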
+drop table if exists insert_test;
+create table insert_test (id int PRIMARY KEY AUTO_INCREMENT, c1 int, c2 int, c3 int default 1);
+insert insert_test (c1) values (1),(2),(NULL);
+affected rows: 3
+info: Records: 3 Duplicates: 0 Warnings: 0
+begin;
+insert insert_test (c1) values ();
+Error 1136 (21S01): Column count doesn't match value count at row 1
+rollback;
+begin;
+insert insert_test (c1, c2) values (1,2),(1);
+Error 1136 (21S01): Column count doesn't match value count at row 2
+rollback;
+begin;
+insert insert_test (xxx) values (3);
+Error 1054 (42S22): Unknown column 'xxx' in 'field list'
+rollback;
+begin;
+insert insert_test_xxx (c1) values ();
+Error 1146 (42S02): Table 'executor__insert.insert_test_xxx' doesn't exist
+rollback;
+insert insert_test set c1 = 3;
+affected rows: 1
+info:
+begin;
+insert insert_test set c1 = 4, c1 = 5;
+Error 1110 (42000): Column 'c1' specified twice
+rollback;
+begin;
+insert insert_test set xxx = 6;
+Error 1054 (42S22): Unknown column 'xxx' in 'field list'
+rollback;
+drop table if exists insert_test_1, insert_test_2;
+create table insert_test_1 (id int, c1 int);
+insert insert_test_1 select id, c1 from insert_test;
+affected rows: 4
+info: Records: 4 Duplicates: 0 Warnings: 0
+create table insert_test_2 (id int, c1 int);
+insert insert_test_1 select id, c1 from insert_test union select id * 10, c1 * 10 from insert_test;
+affected rows: 8
+info: Records: 8 Duplicates: 0 Warnings: 0
+begin;
+insert insert_test_1 select c1 from insert_test;
+Error 1136 (21S01): Column count doesn't match value count at row 1
+rollback;
+begin;
+insert insert_test_1 values(default, default, default, default, default);
+Error 1136 (21S01): Column count doesn't match value count at row 1
+rollback;
+select * from insert_test where id = 1;
+id c1 c2 c3
+1 1 NULL 1
+insert into insert_test (id, c3) values (1, 2) on duplicate key update id=values(id), c2=10;
+affected rows: 2
+info:
+select * from insert_test where id = 1;
+id c1 c2 c3
+1 1 10 1
+insert into insert_test (id, c2) values (1, 1) on duplicate key update insert_test.c2=10;
+affected rows: 0
+info:
+insert into insert_test (id, c2) values(1, 1) on duplicate key update t.c2 = 10;
+Error 1054 (42S22): Unknown column 't.c2' in 'field list'
+INSERT INTO insert_test (id, c3) VALUES (1, 2) ON DUPLICATE KEY UPDATE c3=values(c3)+c3+3;
+affected rows: 2
+info:
+select * from insert_test where id = 1;
+id c1 c2 c3
+1 1 10 6
+INSERT IGNORE INTO insert_test (id, c3) VALUES (1, 2) ON DUPLICATE KEY UPDATE c3=values(c3)+c3+3;
+affected rows: 2
+info:
+select * from insert_test where id = 1;
+id c1 c2 c3
+1 1 10 11
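+## values(col) in an on duplicate key update clause reads the value the clashing
+## row proposed to insert, not the stored one. A minimal walk-through of the two
+## statements above (c3 starts at 1; sketch only):
+##   first run:  c3 = values(c3) + c3 + 3 = 2 + 1 + 3 = 6
+##   second run: c3 = values(c3) + c3 + 3 = 2 + 6 + 3 = 11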
+drop table if exists insert_err;
+create table insert_err (id int, c1 varchar(8));
+insert insert_err values (1, 'abcdabcdabcd');
+Error 1406 (22001): Data too long for column 'c1' at row 1
+insert insert_err values (1, '你好,世界');
+create table TEST1 (ID INT NOT NULL, VALUE INT DEFAULT NULL, PRIMARY KEY (ID));
+INSERT INTO TEST1(id,value) VALUE(3,3) on DUPLICATE KEY UPDATE VALUE=4;
+affected rows: 1
+info:
+drop table if exists t;
+create table t (id int);
+insert into t values(1);
+update t t1 set id = (select count(*) + 1 from t t2 where t1.id = t2.id);
+select * from t;
+id
+2
+drop table if exists t;
+create table t(c decimal(5, 5));
+insert into t value(0);
+insert into t value(1);
+Error 1264 (22003): Out of range value for column 'c' at row 1
+drop table if exists t;
+create table t(c binary(255));
+insert into t value(1);
+select length(c) from t;
+length(c)
+255
+drop table if exists t;
+create table t(c varbinary(255));
+insert into t value(1);
+select length(c) from t;
+length(c)
+1
+drop table if exists t;
+create table t(c int);
+set @@time_zone = '+08:00';
+insert into t value(Unix_timestamp('2002-10-27 01:00'));
+select * from t;
+c
+1035651600
+set @@time_zone = default;
+drop table if exists t1;
+create table t1 (b char(0));
+insert into t1 values ("");
+DROP TABLE IF EXISTS t;
+CREATE TABLE t(a DECIMAL(4,2));
+INSERT INTO t VALUES (1.000001);
+SHOW WARNINGS;
+Level Code Message
+Warning 1366 Incorrect decimal value: '1.000001' for column 'a' at row 1
+INSERT INTO t VALUES (1.000000);
+SHOW WARNINGS;
+Level Code Message
+DROP TABLE IF EXISTS t;
+CREATE TABLE t(a datetime);
+INSERT INTO t VALUES('2017-00-00');
+Error 1292 (22007): Incorrect datetime value: '2017-00-00' for column 'a' at row 1
+set sql_mode = '';
+INSERT INTO t VALUES('2017-00-00');
+SELECT * FROM t;
+a
+2017-00-00 00:00:00
+set sql_mode = 'strict_all_tables';
+SELECT * FROM t;
+a
+2017-00-00 00:00:00
+set sql_mode = default;
+drop table if exists test;
+CREATE TABLE test(id int(10) UNSIGNED NOT NULL AUTO_INCREMENT, p int(10) UNSIGNED NOT NULL, PRIMARY KEY(p), KEY(id));
+insert into test(p) value(1);
+select * from test;
+id p
+1 1
+select * from test use index (id) where id = 1;
+id p
+1 1
+insert into test values(NULL, 2);
+select * from test use index (id) where id = 2;
+id p
+2 2
+insert into test values(2, 3);
+select * from test use index (id) where id = 2;
+id p
+2 2
+2 3
+drop table if exists t;
+create table t(a bigint unsigned);
+set @@sql_mode = 'strict_all_tables';
+insert into t value (-1);
+Error 1264 (22003): Out of range value for column 'a' at row 1
+set @@sql_mode = '';
+insert into t value (-1);
+show warnings;
+Level Code Message
+Warning 1264 Out of range value for column 'a' at row 1
+insert into t select -1;
+show warnings;
+Level Code Message
+Warning 1690 constant -1 overflows bigint
+insert into t select cast(-1 as unsigned);
+insert into t value (-1.111);
+show warnings;
+Level Code Message
+Warning 1264 Out of range value for column 'a' at row 1
+insert into t value ('-1.111');
+show warnings;
+Level Code Message
+Warning 1264 Out of range value for column 'a' at row 1
+update t set a = -1 limit 1;
+show warnings;
+Level Code Message
+Warning 1690 constant -1 overflows bigint
+select * from t;
+a
+0
+0
+18446744073709551615
+0
+0
+set @@sql_mode = default;
+drop table if exists t;
+create table t(a time(6));
+insert into t value('20070219173709.055870'), ('20070219173709.055'), ('20070219173709.055870123');
+select * from t;
+a
+17:37:09.055870
+17:37:09.055000
+17:37:09.055870
+truncate table t;
+insert into t value(20070219173709.055870), (20070219173709.055), (20070219173709.055870123);
+select * from t;
+a
+17:37:09.055870
+17:37:09.055000
+17:37:09.055870
+insert into t value(-20070219173709.055870);
+Error 1292 (22007): Incorrect time value: '-20070219173709.055870' for column 'a' at row 1
+drop table if exists t;
+set @@sql_mode='';
+create table t(a float unsigned, b double unsigned);
+insert into t value(-1.1, -1.1), (-2.1, -2.1), (0, 0), (1.1, 1.1);
+show warnings;
+Level Code Message
+Warning 1264 Out of range value for column 'a' at row 1
+Warning 1264 Out of range value for column 'b' at row 1
+Warning 1264 Out of range value for column 'a' at row 2
+Warning 1264 Out of range value for column 'b' at row 2
+select * from t;
+a b
+0 0
+0 0
+0 0
+1.1 1.1
+set @@sql_mode=default;
+drop table if exists t;
+create table t(a int default 1, b int default 2);
+insert into t values(default, default);
+select * from t;
+a b
+1 2
+truncate table t;
+insert into t values(default(b), default(a));
+select * from t;
+a b
+2 1
+truncate table t;
+insert into t (b) values(default);
+select * from t;
+a b
+1 2
+truncate table t;
+insert into t (b) values(default(a));
+select * from t;
+a b
+1 1
+drop view if exists v;
+create view v as select * from t;
+insert into v values(1,2);
+Error 1105 (HY000): insert into view v is not supported now
+replace into v values(1,2);
+Error 1105 (HY000): replace into view v is not supported now
+drop view v;
+drop sequence if exists seq;
+create sequence seq;
+insert into seq values();
+Error 1105 (HY000): insert into sequence seq is not supported now
+replace into seq values();
+Error 1105 (HY000): replace into sequence seq is not supported now
+drop sequence seq;
+drop table if exists t;
+create table t(name varchar(255), b int, c int, primary key(name(2)));
+insert into t(name, b) values("cha", 3);
+insert into t(name, b) values("chb", 3);
+Error 1062 (23000): Duplicate entry 'ch' for key 't.PRIMARY'
+insert into t(name, b) values("测试", 3);
+insert into t(name, b) values("测试", 3);
+Error 1062 (23000): Duplicate entry 'æµ' for key 't.PRIMARY'
+drop table if exists t;
+create table t (i int unique key);
+insert into t values (1),(2);
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+select * from t;
+i
+1
+2
+insert into t values (1), (2) on duplicate key update i = values(i);
+affected rows: 0
+info: Records: 2 Duplicates: 0 Warnings: 0
+select * from t;
+i
+1
+2
+insert into t values (2), (3) on duplicate key update i = 3;
+affected rows: 2
+info: Records: 2 Duplicates: 1 Warnings: 0
+select * from t;
+i
+1
+3
+drop table if exists t;
+create table t (i int primary key, j int unique key);
+insert into t values (-1, 1);
+affected rows: 1
+info:
+select * from t;
+i j
+-1 1
+insert into t values (1, 1) on duplicate key update j = values(j);
+affected rows: 0
+info:
+select * from t;
+i j
+-1 1
+drop table if exists test;
+create table test (i int primary key, j int unique);
+begin;
+insert into test values (1,1);
+insert into test values (2,1) on duplicate key update i = -i, j = -j;
+commit;
+select * from test;
+i j
+-1 -1
+delete from test;
+insert into test values (1, 1);
+begin;
+delete from test where i = 1;
+insert into test values (2, 1) on duplicate key update i = -i, j = -j;
+commit;
+select * from test;
+i j
+2 1
+delete from test;
+insert into test values (1, 1);
+begin;
+update test set i = 2, j = 2 where i = 1;
+insert into test values (1, 3) on duplicate key update i = -i, j = -j;
+insert into test values (2, 4) on duplicate key update i = -i, j = -j;
+commit;
+select * from test order by i;
+i j
+-2 -2
+1 3
+delete from test;
+begin;
+insert into test values (1, 3), (1, 3) on duplicate key update i = values(i), j = values(j);
+commit;
+select * from test order by i;
+i j
+1 3
+create table tmp (id int auto_increment, code int, primary key(id, code));
+create table m (id int primary key auto_increment, code int unique);
+insert tmp (code) values (1);
+insert tmp (code) values (1);
+set tidb_init_chunk_size=1;
+insert m (code) select code from tmp on duplicate key update code = values(code);
+select * from m;
+id code
+1 1
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (f1 INT AUTO_INCREMENT PRIMARY KEY,
+f2 VARCHAR(5) NOT NULL UNIQUE);
+INSERT t1 (f2) VALUES ('test') ON DUPLICATE KEY UPDATE f1 = LAST_INSERT_ID(f1);
+affected rows: 1
+info:
+SELECT LAST_INSERT_ID();
+LAST_INSERT_ID()
+1
+INSERT t1 (f2) VALUES ('test') ON DUPLICATE KEY UPDATE f1 = LAST_INSERT_ID(f1);
+affected rows: 0
+info:
+SELECT LAST_INSERT_ID();
+LAST_INSERT_ID()
+1
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (f1 INT AUTO_INCREMENT UNIQUE,
+f2 VARCHAR(5) NOT NULL UNIQUE);
+INSERT t1 (f2) VALUES ('test') ON DUPLICATE KEY UPDATE f1 = LAST_INSERT_ID(f1);
+affected rows: 1
+info:
+SELECT LAST_INSERT_ID();
+LAST_INSERT_ID()
+1
+INSERT t1 (f2) VALUES ('test') ON DUPLICATE KEY UPDATE f1 = LAST_INSERT_ID(f1);
+affected rows: 0
+info:
+SELECT LAST_INSERT_ID();
+LAST_INSERT_ID()
+1
+INSERT t1 (f2) VALUES ('test') ON DUPLICATE KEY UPDATE f1 = 2;
+affected rows: 2
+info:
+SELECT LAST_INSERT_ID();
+LAST_INSERT_ID()
+1
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (f1 INT);
+INSERT t1 VALUES (1) ON DUPLICATE KEY UPDATE f1 = 1;
+affected rows: 1
+info:
+SELECT * FROM t1;
+f1
+1
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (f1 INT PRIMARY KEY, f2 INT NOT NULL UNIQUE);
+INSERT t1 VALUES (1, 1);
+affected rows: 1
+info:
+INSERT t1 VALUES (1, 1), (1, 1) ON DUPLICATE KEY UPDATE f1 = 2, f2 = 2;
+affected rows: 3
+info: Records: 2 Duplicates: 1 Warnings: 0
+SELECT * FROM t1 order by f1;
+f1 f2
+1 1
+2 2
+INSERT t1 VALUES (1, 1) ON DUPLICATE KEY UPDATE f2 = null;
+Error 1048 (23000): Column 'f2' cannot be null
+INSERT IGNORE t1 VALUES (1, 1) ON DUPLICATE KEY UPDATE f2 = null;
+affected rows: 2
+info:
+show warnings;
+Level Code Message
+Warning 1048 Column 'f2' cannot be null
+SELECT * FROM t1 order by f1;
+f1 f2
+1 0
+2 2
+SET sql_mode='';
+INSERT t1 VALUES (1, 1) ON DUPLICATE KEY UPDATE f2 = null;
+Error 1048 (23000): Column 'f2' cannot be null
+SELECT * FROM t1 order by f1;
+f1 f2
+1 0
+2 2
+set sql_mode=default;
+set tidb_init_chunk_size=default;
+drop table if exists t1, t2;
+create table t1(a1 bigint primary key, b1 bigint);
+create table t2(a2 bigint primary key, b2 bigint);
+insert into t1 values(1, 100);
+affected rows: 1
+info:
+insert into t2 values(1, 200);
+affected rows: 1
+info:
+insert into t1 select a2, b2 from t2 on duplicate key update b1 = a2;
+affected rows: 2
+info: Records: 1 Duplicates: 1 Warnings: 0
+select * from t1;
+a1 b1
+1 1
+insert into t1 select a2, b2 from t2 on duplicate key update b1 = b2;
+affected rows: 2
+info: Records: 1 Duplicates: 1 Warnings: 0
+select * from t1;
+a1 b1
+1 200
+insert into t1 select a2, b2 from t2 on duplicate key update a1 = a2;
+affected rows: 0
+info: Records: 1 Duplicates: 0 Warnings: 0
+select * from t1;
+a1 b1
+1 200
+insert into t1 select a2, b2 from t2 on duplicate key update b1 = 300;
+affected rows: 2
+info: Records: 1 Duplicates: 1 Warnings: 0
+select * from t1;
+a1 b1
+1 300
+insert into t1 values(1, 1) on duplicate key update b1 = 400;
+affected rows: 2
+info:
+select * from t1;
+a1 b1
+1 400
+insert into t1 select 1, 500 from t2 on duplicate key update b1 = 400;
+affected rows: 0
+info: Records: 1 Duplicates: 0 Warnings: 0
+select * from t1;
+a1 b1
+1 400
+drop table if exists t1, t2;
+create table t1(a bigint primary key, b bigint);
+create table t2(a bigint primary key, b bigint);
+insert into t1 select * from t2 on duplicate key update c = t2.b;
+Error 1054 (42S22): Unknown column 'c' in 'field list'
+drop table if exists t1, t2;
+create table t1(a bigint primary key, b bigint);
+create table t2(a bigint primary key, b bigint);
+insert into t1 select * from t2 on duplicate key update a = b;
+Error 1052 (23000): Column 'b' in field list is ambiguous
+drop table if exists t1, t2;
+create table t1(a bigint primary key, b bigint);
+create table t2(a bigint primary key, b bigint);
+insert into t1 select * from t2 on duplicate key update c = b;
+Error 1054 (42S22): Unknown column 'c' in 'field list'
+drop table if exists t1, t2;
+create table t1(a1 bigint primary key, b1 bigint);
+create table t2(a2 bigint primary key, b2 bigint);
+insert into t1 select * from t2 on duplicate key update a1 = values(b2);
+Error 1054 (42S22): Unknown column 'b2' in 'field list'
+drop table if exists t1, t2;
+create table t1(a1 bigint primary key, b1 bigint);
+create table t2(a2 bigint primary key, b2 bigint);
+insert into t1 values(1, 100);
+affected rows: 1
+info:
+insert into t2 values(1, 200);
+affected rows: 1
+info:
+insert into t1 select * from t2 on duplicate key update b1 = values(b1) + b2;
+affected rows: 2
+info: Records: 1 Duplicates: 1 Warnings: 0
+select * from t1;
+a1 b1
+1 400
+insert into t1 select * from t2 on duplicate key update b1 = values(b1) + b2;
+affected rows: 0
+info: Records: 1 Duplicates: 0 Warnings: 0
+select * from t1;
+a1 b1
+1 400
+drop table if exists t;
+create table t(k1 bigint, k2 bigint, val bigint, primary key(k1, k2));
+insert into t (val, k1, k2) values (3, 1, 2);
+affected rows: 1
+info:
+select * from t;
+k1 k2 val
+1 2 3
+insert into t (val, k1, k2) select c, a, b from (select 1 as a, 2 as b, 4 as c) tmp on duplicate key update val = tmp.c;
+affected rows: 2
+info: Records: 1 Duplicates: 1 Warnings: 0
+select * from t;
+k1 k2 val
+1 2 4
+drop table if exists t;
+create table t(k1 double, k2 double, v double, primary key(k1, k2));
+insert into t (v, k1, k2) select c, a, b from (select "3" c, "1" a, "2" b) tmp on duplicate key update v=c;
+affected rows: 1
+info: Records: 1 Duplicates: 0 Warnings: 0
+select * from t;
+k1 k2 v
+1 2 3
+insert into t (v, k1, k2) select c, a, b from (select "3" c, "1" a, "2" b) tmp on duplicate key update v=c;
+affected rows: 0
+info: Records: 1 Duplicates: 0 Warnings: 0
+select * from t;
+k1 k2 v
+1 2 3
+drop table if exists t1, t2;
+create table t1(id int, a int, b int);
+insert into t1 values (1, 1, 1);
+affected rows: 1
+info:
+insert into t1 values (2, 2, 1);
+affected rows: 1
+info:
+insert into t1 values (3, 3, 1);
+affected rows: 1
+info:
+create table t2(a int primary key, b int, unique(b));
+insert into t2 select a, b from t1 order by id on duplicate key update a=t1.a, b=t1.b;
+affected rows: 5
+info: Records: 3 Duplicates: 2 Warnings: 0
+select * from t2 order by a;
+a b
+3 1
+drop table if exists t1, t2;
+create table t1(id int, a int, b int);
+insert into t1 values (1, 1, 1);
+affected rows: 1
+info:
+insert into t1 values (2, 1, 2);
+affected rows: 1
+info:
+insert into t1 values (3, 3, 1);
+affected rows: 1
+info:
+create table t2(a int primary key, b int, unique(b));
+insert into t2 select a, b from t1 order by id on duplicate key update a=t1.a, b=t1.b;
+affected rows: 4
+info: Records: 3 Duplicates: 1 Warnings: 0
+select * from t2 order by a;
+a b
+1 2
+3 1
+drop table if exists t1, t2;
+create table t1(id int, a int, b int, c int);
+insert into t1 values (1, 1, 1, 1);
+affected rows: 1
+info:
+insert into t1 values (2, 2, 1, 2);
+affected rows: 1
+info:
+insert into t1 values (3, 3, 2, 2);
+affected rows: 1
+info:
+insert into t1 values (4, 4, 2, 2);
+affected rows: 1
+info:
+create table t2(a int primary key, b int, c int, unique(b), unique(c));
+insert into t2 select a, b, c from t1 order by id on duplicate key update b=t2.b, c=t2.c;
+affected rows: 2
+info: Records: 4 Duplicates: 0 Warnings: 0
+select * from t2 order by a;
+a b c
+1 1 1
+3 2 2
+drop table if exists t1;
+create table t1(a int primary key, b int);
+insert into t1 values(1,1),(2,2),(3,3),(4,4),(5,5);
+affected rows: 5
+info: Records: 5 Duplicates: 0 Warnings: 0
+insert into t1 values(4,14),(5,15),(6,16),(7,17),(8,18) on duplicate key update b=b+10;
+affected rows: 7
+info: Records: 5 Duplicates: 2 Warnings: 0
+drop table if exists a, b;
+create table a(x int primary key);
+create table b(x int, y int);
+insert into a values(1);
+affected rows: 1
+info:
+insert into b values(1, 2);
+affected rows: 1
+info:
+insert into a select x from b ON DUPLICATE KEY UPDATE a.x=b.y;
+affected rows: 2
+info: Records: 1 Duplicates: 1 Warnings: 0
+select * from a;
+x
+2
+## Test issue 28078.
+## Use different types of columns so that an error is likely if the types mismatch.
+drop table if exists a, b;
+create table a(id int, a1 timestamp, a2 varchar(10), a3 float, unique(id));
+create table b(id int, b1 time, b2 varchar(10), b3 int);
+insert into a values (1, '2022-01-04 07:02:04', 'a', 1.1), (2, '2022-01-04 07:02:05', 'b', 2.2);
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+insert into b values (2, '12:34:56', 'c', 10), (3, '01:23:45', 'd', 20);
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+insert into a (id) select id from b on duplicate key update a.a2 = b.b2, a.a3 = 3.3;
+affected rows: 3
+info: Records: 2 Duplicates: 1 Warnings: 0
+select * from a;
+id a1 a2 a3
+1 2022-01-04 07:02:04 a 1.1
+2 2022-01-04 07:02:05 c 3.3
+3 NULL NULL NULL
+insert into a (id) select 4 from b where b3 = 20 on duplicate key update a.a3 = b.b3;
+affected rows: 1
+info: Records: 1 Duplicates: 0 Warnings: 0
+select * from a;
+id a1 a2 a3
+1 2022-01-04 07:02:04 a 1.1
+2 2022-01-04 07:02:05 c 3.3
+3 NULL NULL NULL
+4 NULL NULL NULL
+insert into a (a2, a3) select 'x', 1.2 from b on duplicate key update a.a2 = b.b3;
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+select * from a;
+id a1 a2 a3
+1 2022-01-04 07:02:04 a 1.1
+2 2022-01-04 07:02:05 c 3.3
+3 NULL NULL NULL
+4 NULL NULL NULL
+NULL NULL x 1.2
+NULL NULL x 1.2
+## Reproduce the insert on duplicate key update bug under the new row format.
+drop table if exists t1;
+create table t1(c1 decimal(6,4), primary key(c1));
+insert into t1 set c1 = 0.1;
+insert into t1 set c1 = 0.1 on duplicate key update c1 = 1;
+select * from t1 use index(primary);
+c1
+1.0000
+drop table if exists t;
+create table t (d int);
+insert into t values (cast('18446744073709551616' as unsigned));
+Error 1690 (22003): BIGINT UNSIGNED value is out of range in '18446744073709551616'
+set sql_mode='';
+insert into t values (cast('18446744073709551616' as unsigned));
+Level Code Message
+Warning 1264 Out of range value for column 'd' at row 1
+Warning 1292 Truncated incorrect INTEGER value: '18446744073709551616'
+set sql_mode=DEFAULT;
+drop table if exists parent, child;
+create table parent (id int primary key, ref int, key(ref));
+create table child (id int primary key, ref int, foreign key (ref) references parent(ref));
+insert into parent values (1, 1), (2, 2);
+insert into child values (1, 1);
+insert into child values (1, 2) on duplicate key update ref = 2;
+insert into child values (1, 3) on duplicate key update ref = 3;
+Error 1452 (23000): Cannot add or update a child row: a foreign key constraint fails (`executor__insert`.`child`, CONSTRAINT `fk_1` FOREIGN KEY (`ref`) REFERENCES `parent` (`ref`))
+insert ignore into child values (1, 3) on duplicate key update ref = 3;
+Level Code Message
+Warning 1452 Cannot add or update a child row: a foreign key constraint fails (`executor__insert`.`child`, CONSTRAINT `fk_1` FOREIGN KEY (`ref`) REFERENCES `parent` (`ref`))
+insert into parent values (2, 3) on duplicate key update ref = 3;
+Error 1451 (23000): Cannot delete or update a parent row: a foreign key constraint fails (`executor__insert`.`child`, CONSTRAINT `fk_1` FOREIGN KEY (`ref`) REFERENCES `parent` (`ref`))
+insert ignore into parent values (2, 3) on duplicate key update ref = 3;
+drop table if exists t1, t2;
+create table t1 (id int primary key, col1 varchar(10) not null default '');
+create table t2 (id int primary key, col1 varchar(10));
+insert into t2 values (1, null);
+insert ignore into t1 values(5, null);
+set session sql_mode = '';
+insert into t1 values(1, null);
+Error 1048 (23000): Column 'col1' cannot be null
+insert into t1 set id = 1, col1 = null;
+Error 1048 (23000): Column 'col1' cannot be null
+insert t1 VALUES (5, 5) ON DUPLICATE KEY UPDATE col1 = null;
+Error 1048 (23000): Column 'col1' cannot be null
+insert t1 VALUES (5, 5), (6, null) ON DUPLICATE KEY UPDATE col1 = null;
+select * from t1;
+id col1
+5
+6
+insert into t1 select * from t2;
+show warnings;
+Level Code Message
+Warning 1048 Column 'col1' cannot be null
+insert into t1 values(2, null), (3, 3), (4, 4);
+show warnings;
+Level Code Message
+Warning 1048 Column 'col1' cannot be null
+update t1 set col1 = null where id = 3;
+show warnings;
+Level Code Message
+Warning 1048 Column 'col1' cannot be null
+insert ignore t1 VALUES (4, 4) ON DUPLICATE KEY UPDATE col1 = null;
+select * from t1;
+id col1
+1
+2
+3
+4
+5
+6
diff --git a/tests/integrationtest/t/executor/executor.test b/tests/integrationtest/t/executor/executor.test
index 802ba034b84c5..6cd8bafd9226e 100644
--- a/tests/integrationtest/t/executor/executor.test
+++ b/tests/integrationtest/t/executor/executor.test
@@ -1690,3 +1690,1231 @@ select a-1 from t;
+select -10*a from t;
+select a/-2 from t;
--disable_warnings
+<<<<<<< HEAD
+=======
+
+# TestSetOperationOnDiffColType
+drop table if exists t1, t2, t3;
+create table t1(a int, b int);
+create table t2(a int, b varchar(20));
+create table t3(a int, b decimal(30,10));
+insert into t1 values (1,1),(1,1),(2,2),(3,3),(null,null);
+insert into t2 values (1,'1'),(2,'2'),(null,null),(null,'3');
+insert into t3 values (2,2.1),(3,3);
+explain format = 'brief' select * from t3 union select * from t1;
+--sorted_result
+select * from t3 union select * from t1;
+explain format = 'brief' select * from t2 union all select * from t1;
+--sorted_result
+select * from t2 union all select * from t1;
+explain format = 'brief' select * from t1 except select * from t3;
+--sorted_result
+select * from t1 except select * from t3;
+explain format = 'brief' select * from t1 intersect select * from t2;
+--sorted_result
+select * from t1 intersect select * from t2;
+explain format = 'brief' select * from t1 union all select * from t2 union all select * from t3;
+--sorted_result
+select * from t1 union all select * from t2 union all select * from t3;
+explain format = 'brief' select * from t1 union all select * from t2 except select * from t3;
+--sorted_result
+select * from t1 union all select * from t2 except select * from t3;
+explain format = 'brief' select * from t1 intersect select * from t2 intersect select * from t1;
+--sorted_result
+select * from t1 intersect select * from t2 intersect select * from t1;
+explain format = 'brief' select * from t1 union all select * from t2 intersect select * from t3;
+--sorted_result
+select * from t1 union all select * from t2 intersect select * from t3;
+explain format = 'brief' select * from t1 except select * from t2 intersect select * from t3;
+--sorted_result
+select * from t1 except select * from t2 intersect select * from t3;
+
+# TestIndexScanWithYearCol
+# issue-23038: wrong key range of index scan for year column
+set tidb_cost_model_version=2;
+drop table if exists t;
+create table t (c1 year(4), c2 int, key(c1));
+insert into t values(2001, 1);
+explain format = 'brief' select t1.c1, t2.c1 from t as t1 inner join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL;
+--sorted_result
+select t1.c1, t2.c1 from t as t1 inner join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL;
+explain format = 'brief' select * from t as t1 inner join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL;
+--sorted_result
+select * from t as t1 inner join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL;
+explain format = 'brief' select count(*) from t as t1 inner join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL;
+--sorted_result
+select count(*) from t as t1 inner join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL;
+explain format = 'brief' select t1.c1, t2.c1 from t as t1 left join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL;
+--sorted_result
+select t1.c1, t2.c1 from t as t1 left join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL;
+explain format = 'brief' select * from t as t1 left join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL;
+--sorted_result
+select * from t as t1 left join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL;
+explain format = 'brief' select count(*) from t as t1 left join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL;
+--sorted_result
+select count(*) from t as t1 left join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL;
+explain format = 'brief' select * from t as t1 left join t as t2 on t1.c1 = t2.c1 where t1.c1 is not NULL;
+--sorted_result
+select * from t as t1 left join t as t2 on t1.c1 = t2.c1 where t1.c1 is not NULL;
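+# Note on the queries above: c1 != NULL is an always-false predicate, because a
+# comparison with NULL yields NULL; only the final query, which uses is not NULL,
+# can return rows. A minimal sketch:
+#   select 1 != NULL;      -- NULL, never true
+#   select 1 is not NULL;  -- 1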
+
+# TestSetOperation
+set tidb_cost_model_version=2;
+drop table if exists t1, t2, t3;
+create table t1(a int);
+create table t2 like t1;
+create table t3 like t1;
+insert into t1 values (1),(1),(2),(3),(null);
+insert into t2 values (1),(2),(null),(null);
+insert into t3 values (2),(3);
+explain format='brief' select * from t3 union select * from t1;
+--sorted_result
+select * from t3 union select * from t1;
+explain format='brief' select * from t2 union all select * from t1;
+--sorted_result
+select * from t2 union all select * from t1;
+explain format='brief' select * from t1 except select * from t3;
+--sorted_result
+select * from t1 except select * from t3;
+explain format='brief' select * from t1 intersect select * from t2;
+--sorted_result
+select * from t1 intersect select * from t2;
+explain format='brief' select * from t1 union all select * from t2 union all select * from t3;
+--sorted_result
+select * from t1 union all select * from t2 union all select * from t3;
+explain format='brief' select * from t1 union all select * from t2 except select * from t3;
+--sorted_result
+select * from t1 union all select * from t2 except select * from t3;
+explain format='brief' select * from t1 intersect select * from t2 intersect select * from t1;
+--sorted_result
+select * from t1 intersect select * from t2 intersect select * from t1;
+explain format='brief' select * from t1 union all select * from t2 intersect select * from t3;
+--sorted_result
+select * from t1 union all select * from t2 intersect select * from t3;
+explain format='brief' select * from t1 except select * from t2 intersect select * from t3;
+--sorted_result
+select * from t1 except select * from t2 intersect select * from t3;
+explain format='brief' select * from t1 intersect (select * from t2 except (select * from t3));
+--sorted_result
+select * from t1 intersect (select * from t2 except (select * from t3));
+explain format='brief' select * from t1 union all (select * from t2 except select * from t3);
+--sorted_result
+select * from t1 union all (select * from t2 except select * from t3);
+explain format='brief' select * from t1 union (select * from t2 union all select * from t3);
+--sorted_result
+select * from t1 union (select * from t2 union all select * from t3);
+explain format='brief' (select * from t1 intersect select * from t1) except (select * from t2 union select * from t3);
+--sorted_result
+(select * from t1 intersect select * from t1) except (select * from t2 union select * from t3);
+
+# https://github.com/pingcap/tidb/issues/40279
+drop table if exists issue40279;
+CREATE TABLE `issue40279` (`a` char(155) NOT NULL DEFAULT 'on1unvbxp5sko6mbetn3ku26tuiyju7w3wc0olzto9ew7gsrx',`b` mediumint(9) NOT NULL DEFAULT '2525518',PRIMARY KEY (`b`,`a`) /*T![clustered_index] CLUSTERED */);
+insert into `issue40279` values ();
+( select `issue40279`.`b` as r0 , from_base64( `issue40279`.`a` ) as r1 from `issue40279` ) except ( select `issue40279`.`a` as r0 , elt(2, `issue40279`.`a` , `issue40279`.`a` ) as r1 from `issue40279`);
+drop table if exists t2;
+CREATE TABLE `t2` ( `a` varchar(20) CHARACTER SET gbk COLLATE gbk_chinese_ci DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
+insert into t2 values(0xCED2);
+(select elt(2,t2.a,t2.a) from t2) except (select 0xCED2 from t2);
+
+# TestCompareIssue38361
+drop table if exists t;
+create table t(a datetime, b bigint, c bigint);
+insert into t values(cast('2023-08-09 00:00:00' as datetime), 20230809, 20231310);
+select a > 20230809 from t;
+select a = 20230809 from t;
+select a < 20230810 from t;
+# 20231310 can't be converted to a valid datetime, thus should be compared using the real data type, and the datetime will be
+# converted to something like 'YYYYMMDDHHMMSS', bigger than 20231310
+select a < 20231310 from t;
+select 20230809 < a from t;
+select 20230809 = a from t;
+select 20230810 > a from t;
+select 20231310 > a from t;
+# constant datetime cmp numeric constant should be compared as real data type
+select cast('2023-08-09 00:00:00' as datetime) > 20230809 from t;
+select cast('2023-08-09 00:00:00' as datetime) = 20230809 from t;
+select cast('2023-08-09 00:00:00' as datetime) < 20230810 from t;
+select cast('2023-08-09 00:00:00' as datetime) < 20231310 from t;
+select 20230809 < cast('2023-08-09 00:00:00' as datetime) from t;
+select 20230809 = cast('2023-08-09 00:00:00' as datetime) from t;
+select 20230810 > cast('2023-08-09 00:00:00' as datetime) from t;
+select 20231310 > cast('2023-08-09 00:00:00' as datetime) from t;
+# datetime column cmp numeric column should be compared as real data type
+select a > b from t;
+select a = b from t;
+select a < b + 1 from t;
+select a < c from t;
+select b < a from t;
+select b = a from t;
+select b > a from t;
+select c > a from t;
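+# A hedged summary of the comparison rule probed above: when the integer side
+# can be read as a datetime (20230809 -> 2023-08-09), the compare happens as
+# datetimes; when it cannot (20231310), both sides fall back to numbers and the
+# datetime becomes a huge YYYYMMDDHHMMSS value, e.g.:
+#   select a = 20230809 from t;  -- datetime compare: 1
+#   select a < 20231310 from t;  -- numeric compare: 20230809000000 < 20231310 is 0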
+
+# TestLoadStats
+-- error 1064
+load stats;
+-- error 1064
+load stats ./xxx.json;
+
+# TestShow
+drop database if exists test_show;
+create database test_show;
+use test_show;
+show engines;
+drop table if exists t;
+create table t(a int primary key);
+show index in t;
+show index from t;
+--replace_column 2 0
+show master status;
+show create database test_show;
+show privileges;
+--replace_column 12 0
+show table status;
+
+drop database test_show;
+use executor__executor;
+
+# TestSelectBackslashN
+# Issue 3685.
+select \N;
+select "\N";
+
+drop table if exists test;
+create table test (`\N` int);
+insert into test values (1);
+select * from test;
+select \N from test;
+select (\N) from test;
+select `\N` from test;
+select (`\N`) from test;
+select '\N' from test;
+select ('\N') from test;
+
+# TestSelectNull
+# Issue #4053.
+select nUll;
+select (null);
+select null+NULL;
+
+# TestSelectStringLiteral
+# Issue #3686.
+select 'abc';
+select (('abc'));
+select 'abc'+'def';
+## Below checks whether leading invalid chars are trimmed.
+select '\n';
+## Lowercased letter is a valid char.
+select '\t col';
+## Uppercased letter is a valid char.
+select '\t Col';
+## Chinese char is a valid char.
+select '\n\t 中文 col';
+## Punctuation is a valid char.
+select ' \r\n .col';
+## Emoji is a valid char.
+select ' 😆col';
+## Below checks whether trailing invalid chars are preserved.
+select 'abc ';
+select ' abc 123 ';
+## Issue #4239.
+select 'a' ' ' 'string';
+select 'a' " " "string";
+select 'string' 'string';
+select "ss" "a";
+select "ss" "a" "b";
+select "ss" "a" ' ' "b";
+select "ss" "a" ' ' "b" ' ' "d";
+
+# TestUpdateClustered
+drop table if exists a, b;
+create table a (k1 int, k2 int, v int);
+create table b (a int not null, k1 int, k2 int, v int, primary key(k1, k2) );
+insert into a values (1, 1, 1), (2, 2, 2);
+insert into b values (2, 2, 2, 2);
+update a left join b on a.k1 = b.k1 and a.k2 = b.k2 set a.v = 20, b.v = 100, a.k1 = a.k1 + 1, b.k1 = b.k1 + 1, a.k2 = a.k2 + 2, b.k2 = b.k2 + 2;
+select * from b;
+select * from a;
+admin check table a;
+admin check table b;
+drop table if exists a, b;
+create table a (k1 int, k2 int, v int);
+create table b (a int not null, k1 int, k2 int, v int, primary key(k1, k2) );
+insert into a values (1, 1, 1), (2, 2, 2);
+insert into b values (2, 2, 2, 2);
+update a left join b on a.k1 = b.k1 and a.k2 = b.k2 set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, b.k1 = b.k1 + 1, b.k2 = b.k2 + 2, a.v = 20, b.v = 100;
+select * from b;
+select * from a;
+admin check table a;
+admin check table b;
+drop table if exists a, b;
+create table a (k1 varchar(100), k2 varchar(100), v varchar(100));
+create table b (a varchar(100) not null, k1 varchar(100), k2 varchar(100), v varchar(100), primary key(k1(1), k2(1)) , key kk1(k1(1), v(1)));
+insert into a values ('11', '11', '11'), ('22', '22', '22');
+insert into b values ('22', '22', '22', '22');
+update a left join b on a.k1 = b.k1 and a.k2 = b.k2 set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, b.k1 = b.k1 + 1, b.k2 = b.k2 + 2, a.v = 20, b.v = 100;
+select * from b;
+select * from a;
+admin check table a;
+admin check table b;
+drop table if exists a, b;
+create table a (k1 varchar(100), k2 varchar(100), v varchar(100));
+create table b (a varchar(100) not null, k1 varchar(100), k2 varchar(100), v varchar(100), primary key(k1(1), k2(1)) , key kk1(k1(1), v(1)));
+insert into a values ('11', '11', '11'), ('22', '22', '22');
+insert into b values ('22', '22', '22', '22');
+update b right join a on a.k1 = b.k1 and a.k2 = b.k2 set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, b.k1 = b.k1 + 1, b.k2 = b.k2 + 2, a.v = 20, b.v = 100;
+select * from b;
+select * from a;
+admin check table a;
+admin check table b;
+drop table if exists a, b;
+create table a (k1 varchar(100), k2 varchar(100), v varchar(100));
+create table b (a varchar(100) not null, k1 varchar(100), k2 varchar(100), v varchar(100), primary key(k1(1), k2(1)) , key kk1(k1(1), v(1)));
+insert into a values ('11', '11', '11'), ('22', '22', '22');
+insert into b values ('22', '22', '22', '22');
+update b join a on a.k1 = b.k1 and a.k2 = b.k2 set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, b.k1 = b.k1 + 1, b.k2 = b.k2 + 2, a.v = 20, b.v = 100;
+select * from b;
+select * from a;
+admin check table a;
+admin check table b;
+drop table if exists a, b;
+create table a (k1 varchar(100), k2 varchar(100), v varchar(100));
+create table b (a varchar(100) not null, k1 varchar(100), k2 varchar(100), v varchar(100), primary key(k1(1), k2(1)) , key kk1(k1(1), v(1)));
+insert into a values ('11', '11', '11'), ('22', '22', '22');
+insert into b values ('22', '22', '22', '22');
+update a set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, a.v = 20 where exists (select 1 from b where a.k1 = b.k1 and a.k2 = b.k2);
+select * from b;
+select * from a;
+admin check table a;
+admin check table b;
+drop table if exists a, b;
+create table a (k1 int, k2 int, v int);
+create table b (a int not null, k1 int, k2 int, v int, primary key(k1, k2) clustered);
+insert into a values (1, 1, 1), (2, 2, 2);
+insert into b values (2, 2, 2, 2);
+update a left join b on a.k1 = b.k1 and a.k2 = b.k2 set a.v = 20, b.v = 100, a.k1 = a.k1 + 1, b.k1 = b.k1 + 1, a.k2 = a.k2 + 2, b.k2 = b.k2 + 2;
+select * from b;
+select * from a;
+admin check table a;
+admin check table b;
+drop table if exists a, b;
+create table a (k1 int, k2 int, v int);
+create table b (a int not null, k1 int, k2 int, v int, primary key(k1, k2) clustered);
+insert into a values (1, 1, 1), (2, 2, 2);
+insert into b values (2, 2, 2, 2);
+update a left join b on a.k1 = b.k1 and a.k2 = b.k2 set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, b.k1 = b.k1 + 1, b.k2 = b.k2 + 2, a.v = 20, b.v = 100;
+select * from b;
+select * from a;
+admin check table a;
+admin check table b;
+drop table if exists a, b;
+create table a (k1 varchar(100), k2 varchar(100), v varchar(100));
+create table b (a varchar(100) not null, k1 varchar(100), k2 varchar(100), v varchar(100), primary key(k1(1), k2(1)) clustered, key kk1(k1(1), v(1)));
+insert into a values ('11', '11', '11'), ('22', '22', '22');
+insert into b values ('22', '22', '22', '22');
+update a left join b on a.k1 = b.k1 and a.k2 = b.k2 set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, b.k1 = b.k1 + 1, b.k2 = b.k2 + 2, a.v = 20, b.v = 100;
+select * from b;
+select * from a;
+admin check table a;
+admin check table b;
+drop table if exists a, b;
+create table a (k1 varchar(100), k2 varchar(100), v varchar(100));
+create table b (a varchar(100) not null, k1 varchar(100), k2 varchar(100), v varchar(100), primary key(k1(1), k2(1)) clustered, key kk1(k1(1), v(1)));
+insert into a values ('11', '11', '11'), ('22', '22', '22');
+insert into b values ('22', '22', '22', '22');
+update b right join a on a.k1 = b.k1 and a.k2 = b.k2 set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, b.k1 = b.k1 + 1, b.k2 = b.k2 + 2, a.v = 20, b.v = 100;
+select * from b;
+select * from a;
+admin check table a;
+admin check table b;
+drop table if exists a, b;
+create table a (k1 varchar(100), k2 varchar(100), v varchar(100));
+create table b (a varchar(100) not null, k1 varchar(100), k2 varchar(100), v varchar(100), primary key(k1(1), k2(1)) clustered, key kk1(k1(1), v(1)));
+insert into a values ('11', '11', '11'), ('22', '22', '22');
+insert into b values ('22', '22', '22', '22');
+update b join a on a.k1 = b.k1 and a.k2 = b.k2 set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, b.k1 = b.k1 + 1, b.k2 = b.k2 + 2, a.v = 20, b.v = 100;
+select * from b;
+select * from a;
+admin check table a;
+admin check table b;
+drop table if exists a, b;
+create table a (k1 varchar(100), k2 varchar(100), v varchar(100));
+create table b (a varchar(100) not null, k1 varchar(100), k2 varchar(100), v varchar(100), primary key(k1(1), k2(1)) clustered, key kk1(k1(1), v(1)));
+insert into a values ('11', '11', '11'), ('22', '22', '22');
+insert into b values ('22', '22', '22', '22');
+update a set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, a.v = 20 where exists (select 1 from b where a.k1 = b.k1 and a.k2 = b.k2);
+select * from b;
+select * from a;
+admin check table a;
+admin check table b;
+
+# TestClusterIndexOuterJoinElimination
+set @@tidb_enable_clustered_index=On;
+drop table if exists t;
+create table t (a int, b int, c int, primary key(a,b));
+explain format = 'brief' select t1.a from t t1 left join t t2 on t1.a = t2.a and t1.b = t2.b;
+set @@tidb_enable_clustered_index=default;
+
+# TestExecutorBit
+drop table if exists t;
+create table t (c1 bit(2));
+insert into t values (0), (1), (2), (3);
+-- error 1406
+insert into t values (4);
+-- error 1406
+insert into t values ('a');
+select hex(c1) from t where c1 = 2;
+drop table if exists t;
+create table t (c1 bit(31));
+insert into t values (0x7fffffff);
+-- error 1406
+insert into t values (0x80000000);
+-- error 1406
+insert into t values (0xffffffff);
+insert into t values ('123');
+insert into t values ('1234');
+-- error 1064
+insert into t values ('12345);
+drop table if exists t;
+create table t (c1 bit(62));
+insert into t values ('12345678');
+drop table if exists t;
+create table t (c1 bit(61));
+-- error 1406
+insert into t values ('12345678');
+drop table if exists t;
+create table t (c1 bit(32));
+insert into t values (0x7fffffff);
+insert into t values (0xffffffff);
+-- error 1406
+insert into t values (0x1ffffffff);
+insert into t values ('1234');
+-- error 1406
+insert into t values ('12345');
+drop table if exists t;
+create table t (c1 bit(64));
+insert into t values (0xffffffffffffffff);
+insert into t values ('12345678');
+-- error 1366
+insert into t values ('123456789');
+drop table if exists t;
+create table t (c1 bit(64));
+insert into t values (0xffffffffffffffff);
+insert into t values ('12345678');
+select hex(c1) from t where c1;
+
+# TestTimestampTimeZone
+drop table if exists t, t1;
+create table t (ts timestamp);
+set time_zone = '+00:00';
+insert into t values ('2017-04-27 22:40:42');
+set time_zone = '+10:00';
+select * from t;
+set time_zone = '-6:00';
+select * from t;
+
+## For issue https://github.com/pingcap/tidb/issues/3467
+drop table if exists t1;
+CREATE TABLE t1 (
+ id bigint(20) NOT NULL AUTO_INCREMENT,
+ uid int(11) DEFAULT NULL,
+ datetime timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ ip varchar(128) DEFAULT NULL,
+PRIMARY KEY (id),
+ KEY i_datetime (datetime),
+ KEY i_userid (uid)
+);
+INSERT INTO t1 VALUES (123381351,1734,"2014-03-31 08:57:10","127.0.0.1");
+select datetime from t1;
+select datetime from t1 where datetime='2014-03-31 08:57:10';
+select * from t1 where datetime='2014-03-31 08:57:10';
+
+## For issue https://github.com/pingcap/tidb/issues/3485
+set time_zone = 'Asia/Shanghai';
+drop table if exists t1;
+CREATE TABLE t1 (
+ id bigint(20) NOT NULL AUTO_INCREMENT,
+ datetime timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ PRIMARY KEY (id)
+);
+INSERT INTO t1 VALUES (123381351,"2014-03-31 08:57:10");
+select * from t1 where datetime="2014-03-31 08:57:10";
+alter table t1 add key i_datetime (datetime);
+select * from t1 where datetime="2014-03-31 08:57:10";
+select * from t1;
+select datetime from t1 where datetime='2014-03-31 08:57:10';
+set time_zone=default;
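+## The block above relies on timestamp normalization: a timestamp is stored as a
+## UTC instant and rendered in the session time_zone, so one stored value reads
+## differently per session. A minimal sketch of the first case:
+##   set time_zone = '+00:00';
+##   insert into t values ('2017-04-27 22:40:42');
+##   set time_zone = '+10:00';
+##   select * from t;  -- expected to render as 2017-04-28 08:40:42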
+
+# TestInsertValuesWithSubQuery
+# this is from jira issue #5856
+drop table if exists t2;
+create table t2(a int, b int, c int);
+-- error 1054
+insert into t2 values (11, 8, (select not b));
+-- error 1064
+insert into t2 set a = 11, b = 8, c = (select b));
+insert into t2 values(1, 1, (select b from t2));
+select * from t2;
+insert into t2 set a = 1, b = 1, c = (select b+1 from t2);
+select * from t2;
+delete from t2;
+insert into t2 values(2, 4, a);
+select * from t2;
+insert into t2 set a = 3, b = 5, c = b;
+select * from t2;
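+# In the two inserts just above, a bare column name in the value list refers to
+# the value already assigned to that column in the same new row, so:
+#   insert into t2 values(2, 4, a);         -- c takes this row's a: stores (2, 4, 2)
+#   insert into t2 set a = 3, b = 5, c = b; -- c takes this row's b: stores (3, 5, 5)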
+
+## issue #30626
+drop table if exists t;
+create table t(a int, b int);
+## TODO: the insert should succeed, and (81,1) should be readable from the table
+-- error 1105
+insert into t values ( 81, ( select ( SELECT '1' AS `c0` WHERE '1' >= `subq_0`.`c0` ) as `c1` FROM ( SELECT '1' AS `c0` ) AS `subq_0` ) );
+-- error 1105
+insert into t set a = 81, b = (select ( SELECT '1' AS `c0` WHERE '1' >= `subq_0`.`c0` ) as `c1` FROM ( SELECT '1' AS `c0` ) AS `subq_0` );
+drop table if exists t2;
+
+# TestBitColumnIn
+# fix issue https://github.com/pingcap/tidb/issues/32871
+drop table if exists t;
+create table t (id bit(16), key id(id));
+insert into t values (65);
+select * from t where id not in (-1,2);
+-- error 1582
+select * from t where id in (-1, -2);
+
+# TestProjectionBitType
+drop table if exists t;
+drop table if exists t1;
+create table t(k1 int, v bit(34) DEFAULT b'111010101111001001100111101111111', primary key(k1) clustered);
+create table t1(k1 int, v bit(34) DEFAULT b'111010101111001001100111101111111', primary key(k1) nonclustered);
+insert into t(k1) select 1;
+insert into t1(k1) select 1;
+set @@tidb_enable_vectorized_expression = 0;
+(select k1, hex(v) from t where false) union(select k1, hex(v) from t for update);
+(select k1, hex(v) from t1 where false) union(select k1, hex(v) from t1 for update);
+set @@tidb_enable_vectorized_expression = 1;
+(select k1, hex(v) from t where false) union(select k1, hex(v) from t for update);
+(select k1, hex(v) from t1 where false) union(select k1, hex(v) from t1 for update);
+
+set @@tidb_enable_vectorized_expression = default;
+
+# TestIssue24933
+drop table if exists t;
+drop view if exists v;
+create table t(a int);
+insert into t values(1), (2), (3);
+create definer='root'@'localhost' view v as select count(*) as c1 from t;
+select * from v;
+drop view v;
+create definer='root'@'localhost' view v as select * from (select count(*) from t) s;
+select * from v order by 1;
+drop view v;
+create definer='root'@'localhost' view v as select * from (select avg(a) from t group by a) s;
+select * from v order by 1;
+drop view v;
+create definer='root'@'localhost' view v as select * from (select sum(a) from t group by a) s;
+select * from v order by 1;
+drop view v;
+create definer='root'@'localhost' view v as select * from (select group_concat(a) from t group by a) s;
+select * from v order by 1;
+drop view v;
+create definer='root'@'localhost' view v as select * from (select count(0) as c1 from t) s;
+select * from v order by 1;
+drop view v;
+create definer='root'@'localhost' view v as select * from (select count(*) as c1 from t) s;
+select * from v order by 1;
+drop view v;
+create definer='root'@'localhost' view v as select * from (select group_concat(a) as `concat(a)` from t group by a) s;
+select * from v order by 1;
+drop view v;
+create definer='root'@'localhost' view v as select * from (select a from t group by a) s;
+select * from v order by 1;
+-- error 1054
+SELECT `s`.`count(a)` FROM (SELECT COUNT(`a`) FROM `executor__executor`.`t`) AS `s`;
+drop view v;
+create definer='root'@'localhost' view v as select * from (select count(a) from t) s;
+select * from v;
+drop table if exists t;
+create table t(c1 int);
+insert into t values(111), (222), (333);
+drop view if exists v;
+create definer='root'@'localhost' view v as (select * from (select row_number() over (order by c1) from t) s);
+select * from v;
+drop view if exists v;
+create definer='root'@'localhost' view v as (select * from (select c1, row_number() over (order by c1) from t) s);
+select * from v;
+drop view if exists v;
+create definer='root'@'localhost' view v as (select * from (select c1 or 0 from t) s);
+select * from v;
+select `c1 or 0` from v;
+drop view v;
+
+# TestCTEWithIndexLookupJoinDeadLock
+drop table if exists t, t1, t2;
+create table t (a int(11) default null,b int(11) default null,key b (b),key ba (b));
+create table t1 (a int(11) default null,b int(11) default null,key idx_ab (a,b),key idx_a (a),key idx_b (b));
+create table t2 (a int(11) default null,b int(11) default null,key idx_ab (a,b),key idx_a (a),key idx_b (b));
+
+## This problem is easy to reproduce within 30 executions of IndexLookUpJoin.
+--disable_result_log
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+--enable_result_log
+
+# TestAdminChecksumOfPartitionedTable
+DROP TABLE IF EXISTS admin_checksum_partition_test;
+CREATE TABLE admin_checksum_partition_test (a INT) PARTITION BY HASH(a) PARTITIONS 4;
+INSERT INTO admin_checksum_partition_test VALUES (1), (2);
+## The result differs between TiKV and unistore.
+--disable_result_log
+ADMIN CHECKSUM TABLE admin_checksum_partition_test;
+--enable_result_log
+
+# TestSQLMode
+drop table if exists t;
+create table t (a tinyint not null);
+set sql_mode = 'STRICT_TRANS_TABLES';
+-- error 1364
+insert t values ();
+-- error 1264
+insert t values ('1000');
+create table if not exists tdouble (a double(3,2));
+-- error 1264
+insert tdouble values (10.23);
+set sql_mode = '';
+insert t values ();
+show warnings;
+-- error 1048
+insert t values (null);
+insert ignore t values (null);
+show warnings;
+insert t select null;
+show warnings;
+insert t values (1000);
+select * from t order by a;
+insert tdouble values (10.23);
+select * from tdouble;
+set sql_mode = 'STRICT_TRANS_TABLES';
+set @@global.sql_mode = '';
+
+connect (conn1, localhost, root,, executor__executor);
+drop table if exists t2;
+create table t2 (a varchar(3));
+insert t2 values ('abcd');
+select * from t2;
+connection default;
+disconnect conn1;
+
+-- error 1406
+insert t2 values ('abcd');
+set sql_mode = default;
+set @@global.sql_mode = default;
+
+# TestTableScan
+use information_schema;
+## There must be these tables: information_schema, mysql, performance_schema and test.
+select count(*)>=4 from schemata;
+create database mytest;
+use information_schema;
+select * from schemata where schema_name = 'mysql';
+select * from schemata where schema_name like 'my%';
+select 1 from tables limit 1;
+use executor__executor;
+
+# TestAddDateBuiltinWithWarnings
+set @@sql_mode='NO_ZERO_DATE';
+select date_add('2001-01-00', interval -2 hour);
+show warnings;
+set @@sql_mode=default;
+
+# TestStrToDateBuiltinWithWarnings
+set @@sql_mode='NO_ZERO_DATE';
+drop table if exists t1;
+SELECT STR_TO_DATE('0000-1-01', '%Y-%m-%d');
+show warnings;
+SELECT CAST('4#,8?Q' AS DATE);
+show warnings;
+CREATE TABLE t1 (c1 INT, c2 TEXT);
+INSERT INTO t1 VALUES (1833458842, '0.3503490908550797');
+SELECT CAST(t1.c2 AS DATE) FROM t1;
+show warnings;
+set @@sql_mode=default;
+
+# TestUnsignedDecimalOverflow
+drop table if exists t;
+create table t(a decimal(10,2) unsigned);
+-- error 1264
+insert into t values (-1);
+-- error 1264
+insert into t values ("-1.1e-1");
+-- error 1264
+insert into t values (-1.1);
+insert into t values (-0);
+set sql_mode='';
+delete from t;
+insert into t values (-1);
+select a from t limit 1;
+set sql_mode=default;
+
+# TestDoSubquery
+drop table if exists t;
+create table t(a int);
+do 1 in (select * from t);
+insert into t values(1);
+do 1 in (select * from t);
+
+# TestCountDistinctJSON
+drop table if exists t;
+create table t(j JSON);
+insert into t values('2010');
+insert into t values('2011');
+insert into t values('2012');
+insert into t values('2010.000');
+insert into t values(cast(18446744073709551615 as JSON));
+insert into t values(cast(18446744073709551616.000000 as JSON));
+select count(distinct j) from t;
+
+# TestHashJoinJSON
+drop table if exists t;
+create table t(id int(11), j JSON, d DOUBLE);
+insert into t values(0, '2010', 2010);
+insert into t values(1, '2011', 2011);
+insert into t values(2, '2012', 2012);
+insert into t values(3, cast(18446744073709551615 as JSON), 18446744073709551616.000000);
+select /*+inl_hash_join(t2)*/ t1.id, t2.id from t t1 join t t2 on t1.j = t2.d;
+
+# TestPlanReplayerDumpTPCDS
+drop table if exists catalog_sales, store_sales, date_dim;
+create table catalog_sales
+(
+ cs_sold_date_sk int ,
+ cs_sold_time_sk int ,
+ cs_ship_date_sk int ,
+ cs_bill_customer_sk int ,
+ cs_bill_cdemo_sk int ,
+ cs_bill_hdemo_sk int ,
+ cs_bill_addr_sk int ,
+ cs_ship_customer_sk int ,
+ cs_ship_cdemo_sk int ,
+ cs_ship_hdemo_sk int ,
+ cs_ship_addr_sk int ,
+ cs_call_center_sk int ,
+ cs_catalog_page_sk int ,
+ cs_ship_mode_sk int ,
+ cs_warehouse_sk int ,
+ cs_item_sk int not null,
+ cs_promo_sk int ,
+ cs_order_number int not null,
+ cs_quantity int ,
+ cs_wholesale_cost decimal(7,2) ,
+ cs_list_price decimal(7,2) ,
+ cs_sales_price decimal(7,2) ,
+ cs_ext_discount_amt decimal(7,2) ,
+ cs_ext_sales_price decimal(7,2) ,
+ cs_ext_wholesale_cost decimal(7,2) ,
+ cs_ext_list_price decimal(7,2) ,
+ cs_ext_tax decimal(7,2) ,
+ cs_coupon_amt decimal(7,2) ,
+ cs_ext_ship_cost decimal(7,2) ,
+ cs_net_paid decimal(7,2) ,
+ cs_net_paid_inc_tax decimal(7,2) ,
+ cs_net_paid_inc_ship decimal(7,2) ,
+ cs_net_paid_inc_ship_tax decimal(7,2) ,
+ cs_net_profit decimal(7,2) ,
+ primary key (cs_item_sk, cs_order_number)
+);
+create table store_sales
+(
+ ss_sold_date_sk int ,
+ ss_sold_time_sk int ,
+ ss_item_sk int not null,
+ ss_customer_sk int ,
+ ss_cdemo_sk int ,
+ ss_hdemo_sk int ,
+ ss_addr_sk int ,
+ ss_store_sk int ,
+ ss_promo_sk int ,
+ ss_ticket_number int not null,
+ ss_quantity int ,
+ ss_wholesale_cost decimal(7,2) ,
+ ss_list_price decimal(7,2) ,
+ ss_sales_price decimal(7,2) ,
+ ss_ext_discount_amt decimal(7,2) ,
+ ss_ext_sales_price decimal(7,2) ,
+ ss_ext_wholesale_cost decimal(7,2) ,
+ ss_ext_list_price decimal(7,2) ,
+ ss_ext_tax decimal(7,2) ,
+ ss_coupon_amt decimal(7,2) ,
+ ss_net_paid decimal(7,2) ,
+ ss_net_paid_inc_tax decimal(7,2) ,
+ ss_net_profit decimal(7,2) ,
+ primary key (ss_item_sk, ss_ticket_number)
+);
+create table date_dim
+(
+ d_date_sk int not null,
+ d_date_id char(16) not null,
+ d_date date ,
+ d_month_seq int ,
+ d_week_seq int ,
+ d_quarter_seq int ,
+ d_year int ,
+ d_dow int ,
+ d_moy int ,
+ d_dom int ,
+ d_qoy int ,
+ d_fy_year int ,
+ d_fy_quarter_seq int ,
+ d_fy_week_seq int ,
+ d_day_name char(9) ,
+ d_quarter_name char(6) ,
+ d_holiday char(1) ,
+ d_weekend char(1) ,
+ d_following_holiday char(1) ,
+ d_first_dom int ,
+ d_last_dom int ,
+ d_same_day_ly int ,
+ d_same_day_lq int ,
+ d_current_day char(1) ,
+ d_current_week char(1) ,
+ d_current_month char(1) ,
+ d_current_quarter char(1) ,
+ d_current_year char(1) ,
+ primary key (d_date_sk)
+);
+--disable_result_log
+plan replayer dump explain with ssci as (
+select ss_customer_sk customer_sk
+ ,ss_item_sk item_sk
+from store_sales,date_dim
+where ss_sold_date_sk = d_date_sk
+ and d_month_seq between 1212 and 1212 + 11
+group by ss_customer_sk
+ ,ss_item_sk),
+csci as(
+ select cs_bill_customer_sk customer_sk
+ ,cs_item_sk item_sk
+from catalog_sales,date_dim
+where cs_sold_date_sk = d_date_sk
+ and d_month_seq between 1212 and 1212 + 11
+group by cs_bill_customer_sk
+ ,cs_item_sk)
+ select sum(case when ssci.customer_sk is not null and csci.customer_sk is null then 1 else 0 end) store_only
+ ,sum(case when ssci.customer_sk is null and csci.customer_sk is not null then 1 else 0 end) catalog_only
+ ,sum(case when ssci.customer_sk is not null and csci.customer_sk is not null then 1 else 0 end) store_and_catalog
+from ssci left join csci on (ssci.customer_sk=csci.customer_sk
+ and ssci.item_sk = csci.item_sk)
+UNION
+ select sum(case when ssci.customer_sk is not null and csci.customer_sk is null then 1 else 0 end) store_only
+ ,sum(case when ssci.customer_sk is null and csci.customer_sk is not null then 1 else 0 end) catalog_only
+ ,sum(case when ssci.customer_sk is not null and csci.customer_sk is not null then 1 else 0 end) store_and_catalog
+from ssci right join csci on (ssci.customer_sk=csci.customer_sk
+ and ssci.item_sk = csci.item_sk)
+limit 100;
+--enable_result_log
+
+# TestBDRRole
+admin show bdr role;
+admin set bdr role primary;
+admin show bdr role;
+admin set bdr role secondary;
+admin show bdr role;
+admin unset bdr role;
+admin show bdr role;
+--error 1064
+admin set bdr role test_err;
+admin show bdr role;
+admin unset bdr role;
+
+# TestCompileOutOfMemoryQuota
+# Test for issue: https://github.com/pingcap/tidb/issues/38322
+set global tidb_mem_oom_action='CANCEL';
+drop table if exists t, t1;
+create table t(a int, b int, index idx(a));
+create table t1(a int, c int, index idx(a));
+set tidb_mem_quota_query=10;
+-- replace_regex /conn=[-0-9]+/conn=/
+-- error 8175
+select t.a, t1.a from t use index(idx), t1 use index(idx) where t.a = t1.a;
+set global tidb_mem_oom_action=default;
+set tidb_mem_quota_query=default;
index(idx) where t.a = t1.a;
+set global tidb_mem_oom_action=default;
+set tidb_mem_quota_query=default;
+
+# TestOOMPanicAction
+drop table if exists t, t1;
+create table t (a int primary key, b double);
+insert into t values (1,1);
+SET GLOBAL tidb_mem_oom_action='CANCEL';
+set @@tidb_mem_quota_query=1;
+-- replace_regex /conn=[-0-9]+/conn=/
+-- error 8175
+select sum(b) from t group by a;
+
+## Test the OOM panic for insert-from-select.
+drop table if exists t,t1;
+create table t (a bigint);
+create table t1 (a bigint);
+set @@tidb_mem_quota_query=200;
+-- replace_regex /conn=[-0-9]+/conn=/
+-- error 8175
+insert into t1 values (1),(2),(3),(4),(5);
+-- replace_regex /conn=[-0-9]+/conn=/
+-- error 8175
+replace into t1 values (1),(2),(3),(4),(5);
+set @@tidb_mem_quota_query=10000;
+insert into t1 values (1),(2),(3),(4),(5);
+set @@tidb_mem_quota_query=10;
+-- replace_regex /conn=[-0-9]+/conn=/
+-- error 8175
+insert into t select a from t1 order by a desc;
+-- replace_regex /conn=[-0-9]+/conn=/
+-- error 8175
+replace into t select a from t1 order by a desc;
+set @@tidb_mem_quota_query=10000;
+insert into t values (1),(2),(3),(4),(5);
+## Set the memory quota to 244 to make this SQL panic during the DeleteExec
+## instead of the TableReaderExec.
+set @@tidb_mem_quota_query=244;
+-- replace_regex /conn=[-0-9]+/conn=/
+-- error 8175
+delete from t;
+set @@tidb_mem_quota_query=10000;
+delete from t1;
+insert into t1 values(1);
+insert into t values (1),(2),(3),(4),(5);
+set @@tidb_mem_quota_query=244;
+-- replace_regex /conn=[-0-9]+/conn=/
+-- error 8175
+delete t, t1 from t join t1 on t.a = t1.a;
+set @@tidb_mem_quota_query=100000;
+truncate table t;
+insert into t values(1),(2),(3);
+## Set the memory quota to make the SQL panic during UpdateExec instead
+## of TableReader.
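+## (A rough sketch of the mechanism rather than an exact accounting: with the
+## global tidb_mem_oom_action='CANCEL' set above, any statement whose memory
+## tracker exceeds @@tidb_mem_quota_query bytes is cancelled with error 8175,
+## so the byte thresholds in this test are tuned empirically to let the child
+## TableReader finish while the parent executor overflows the budget.)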
+set @@tidb_mem_quota_query=244; +-- replace_regex /conn=[-0-9]+/conn=/ +-- error 8175 +update t set a = 4; + +SET GLOBAL tidb_mem_oom_action = DEFAULT; +set @@tidb_mem_quota_query=DEFAULT; + +# TestTrackAggMemoryUsage +drop table if exists t; +create table t(a int); +insert into t values(1); +set tidb_track_aggregate_memory_usage = off; + +--replace_column 5 6 +--replace_regex /[0-9]+ Bytes/ Bytes/ /[.0-9]+ KB/ KB/ +explain analyze select /*+ HASH_AGG() */ sum(a) from t; + +--replace_column 5 6 +--replace_regex /[0-9]+ Bytes/ Bytes/ /[.0-9]+ KB/ KB/ +explain analyze select /*+ STREAM_AGG() */ sum(a) from t; + +set tidb_track_aggregate_memory_usage = on; + +--replace_column 5 6 +--replace_regex /[0-9]+ Bytes/ Bytes/ /[.0-9]+ KB/ KB/ +explain analyze select /*+ HASH_AGG() */ sum(a) from t; + +--replace_column 5 6 +--replace_regex /[0-9]+ Bytes/ Bytes/ /[.0-9]+ KB/ KB/ +explain analyze select /*+ STREAM_AGG() */ sum(a) from t; + +set tidb_track_aggregate_memory_usage = default; + +# TestBind +drop table if exists testbind; +create table testbind(i int, s varchar(20)); +create index index_t on testbind(i,s); +create global binding for select * from testbind using select * from testbind use index for join(index_t); +--replace_column 5 6 +show global bindings where default_db='executor__executor'; +create session binding for select * from testbind using select * from testbind use index for join(index_t); +--replace_column 5 6 +show session bindings where default_db='executor__executor'; + +drop session binding for select * from testbind using select * from testbind use index for join(index_t); +drop global binding for select * from testbind using select * from testbind use index for join(index_t); + +# TestIndexMergeRuntimeStats +drop table if EXISTS t1; +create table t1(id int primary key, a int, b int, c int, d int, index t1a(a), index t1b(b)); +insert into t1 values(1,1,1,1,1),(2,2,2,2,2),(3,3,3,3,3),(4,4,4,4,4),(5,5,5,5,5); +--replace_regex /.*time:.*loops:.*cop_task:.*/.*time:.*loops:.*cop_task:.*/ /.*time:.*loops:.*index_task:{fetch_handle:.*, merge:.*}.*table_task:{num.*concurrency.*fetch_row.*wait_time.*}.*/.*time:.*loops:.*index_task:{fetch_handle:.*, merge:.*}.*table_task:{num.*concurrency.*fetch_row.*wait_time.*}.*/ /[0-9]+ Bytes/ Bytes/ /[.0-9]+ KB/ KB/ +explain analyze select /*+ use_index_merge(t1, primary, t1a) */ * from t1 where id < 2 or a > 4; +set @@tidb_enable_collect_execution_info=0; +select /*+ use_index_merge(t1, primary, t1a) */ * from t1 where id < 2 or a > 4 order by a; +set @@tidb_enable_collect_execution_info=default; + +# TestIndexLookupRuntimeStats +drop table if exists t1; +create table t1 (a int, b int, index(a)); +insert into t1 values (1,2),(2,3),(3,4); +--replace_regex /.*time:.*loops:.*index_task:.*table_task: {total_time.*num.*concurrency.*}.*/.*time:.*loops:.*index_task:.*table_task: {total_time.*num.*concurrency.*}.*/ /.*time:.*loops:.*cop_task:.*/.*time:.*loops:.*cop_task:.*/ /[.0-9]+ KB/ KB/ /[0-9]+ Bytes/ Bytes/ +explain analyze select * from t1 use index(a) where a > 1; + +# TestHashAggRuntimeStats +drop table if exists t1; +create table t1 (a int, b int); +insert into t1 values (1,2),(2,3),(3,4); +--replace_regex 
/.*time:.*loops:.*partial_worker:{wall_time:.*concurrency:.*task_num:.*tot_wait:.*tot_exec:.*tot_time:.*max:.*p95:.*}.*final_worker:{wall_time:.*concurrency:.*task_num:.*tot_wait:.*tot_exec:.*tot_time:.*max:.*p95:.*}.*/.*time:.*loops:.*partial_worker:{wall_time:.*concurrency:.*task_num:.*tot_wait:.*tot_exec:.*tot_time:.*max:.*p95:.*}.*final_worker:{wall_time:.*concurrency:.*task_num:.*tot_wait:.*tot_exec:.*tot_time:.*max:.*p95:.*}.*/ /time:.*loops:.*cop_task.*/time.*loops.*cop_task.*/ /tikv_task:.*/tikv_task:.*/ /[.0-9]+ KB/ KB/ /[.0-9]+ Bytes/ Bytes/
+explain analyze SELECT /*+ HASH_AGG() */ count(*) FROM t1 WHERE a < 10;
+
+# TestSelectForUpdate
+set global tidb_txn_mode='';
+drop table if exists t, t1;
+create table t (c1 int, c2 int, c3 int);
+insert t values (11, 2, 3);
+insert t values (12, 2, 3);
+insert t values (13, 2, 3);
+create table t1 (c1 int);
+insert t1 values (11);
+
+connect (conn1, localhost, root,, executor__executor);
+begin;
+select * from t where c1=11 for update;
+
+connect (conn2, localhost, root,, executor__executor);
+begin;
+update t set c2=211 where c1=11;
+commit;
+
+connection conn1;
+--replace_regex /txnStartTS.*reason/ reason/
+--error 9007
+commit;
+
+begin;
+select * from t where exists(select null from t1 where t1.c1=t.c1) for update;
+
+connection conn2;
+begin;
+update t set c2=211 where c1=12;
+commit;
+
+connection conn1;
+commit;
+
+begin;
+select * from t where c1=11 for update;
+
+connection conn2;
+begin;
+update t set c2=22 where c1=12;
+commit;
+
+connection conn1;
+commit;
+
+set @@autocommit=1;
+select * from t where c1=11 for update;
+
+connection conn2;
+begin;
+update t set c2=211 where c1=11;
+commit;
+
+connection conn1;
+commit;
+
+begin;
+--sorted_result
+select * from (select * from t for update) t join t1 for update;
+
+connection conn2;
+begin;
+update t1 set c1 = 13;
+commit;
+
+connection conn1;
+--replace_regex /txnStartTS.*reason/ reason/
+--error 9007
+commit;
+
+disconnect conn1;
+disconnect conn2;
+set global tidb_txn_mode=pessimistic;
+
+# TestSelectForUpdateOf
+drop table if exists t, t1;
+create table t (i int);
+create table t1 (i int);
+insert t values (1);
+insert t1 values (1);
+begin pessimistic;
+select * from t, t1 where t.i = t1.i for update of t;
+
+connect (conn1, localhost, root,, executor__executor);
+begin pessimistic;
+select * from t1 for update;
+--error 3572
+select * from t for update nowait;
+
+connection default;
+rollback;
+
+connection conn1;
+select * from t for update nowait;
+rollback;
+disconnect conn1;
+
+# TestForSelectScopeInUnion
+set session tidb_txn_mode='';
+# In "A union B for update", the "for update" option belongs to the union
+# statement, so it should work on both A and B.
+drop table if exists t;
+create table t(a int);
+insert into t values (1);
+begin;
+select 1 as a union select a from t for update;
+
+connect (conn1, localhost, root,, executor__executor);
+set session tidb_txn_mode='';
+update t set a = a + 1;
+
+connection default;
+## As this session read the row with select ... 'for update', the commit
+## should detect the conflict and fail.
+--replace_regex /txnStartTS.*reason/ reason/ +--error 9007 +commit; + +begin; +--sorted_result +select 1 as a union select a from t limit 5 for update; +select 1 as a union select a from t order by a for update; + +connection conn1; +update t set a = a + 1; + +connection default; +--replace_regex /txnStartTS.*reason/ reason/ +--error 9007 +commit; + +disconnect conn1; +set session tidb_txn_mode=pessimistic; + +# TestAdminShowDDLJobsRowCount +# https://github.com/pingcap/tidb/issues/25968 +drop table if exists t; +create table t (id bigint key,b int); +split table t by (10),(20),(30); +insert into t values (0,0),(10,10),(20,20),(30,30); +alter table t add index idx1(b); +--replace_column 1 4 6 7 9 10 11 +admin show ddl jobs 1; + +insert into t values (1,0),(2,10),(3,20),(4,30); +alter table t add index idx2(b); +--replace_column 1 4 6 7 9 10 11 +admin show ddl jobs 1; + +# TestSummaryFailedUpdate +drop table if exists t; +create table t(a int, b int as(-a)); +insert into t(a) values(1), (3), (7); +SET GLOBAL tidb_mem_oom_action='CANCEL'; +set @@tidb_mem_quota_query=1; +--replace_regex /conn=[-0-9]+/conn=/ +--error 8175 +update t set t.a = t.a - 1 where t.a in (select a from t where a < 4); +set @@tidb_mem_quota_query=1000000000; +select stmt_type from information_schema.statements_summary where digest_text = 'update `t` set `t` . `a` = `t` . `a` - ? where `t` . `a` in ( select `a` from `t` where `a` < ? )'; + +set @@tidb_mem_quota_query=default; +set global tidb_mem_oom_action=default; + + +# TestTableLockPrivilege +drop table if exists t; +drop user if exists 'testuser'@'localhost'; +create table t(a int); +create user 'testuser'@'localhost'; + +connect (conn1, localhost, testuser,,); +--error 1044 +LOCK TABLE executor__executor.t WRITE; + +connection default; +GRANT LOCK TABLES ON executor__executor.* to 'testuser'@'localhost'; + +connection conn1; +--error 1142 +LOCK TABLE executor__executor.t WRITE; + +connection default; +REVOKE ALL ON executor__executor.* FROM 'testuser'@'localhost'; +GRANT SELECT ON executor__executor.* to 'testuser'@'localhost'; + +connection conn1; +--error 1044 +LOCK TABLE executor__executor.t WRITE; + +connection default; +GRANT LOCK TABLES ON executor__executor.* to 'testuser'@'localhost'; + +connection conn1; +LOCK TABLE executor__executor.t WRITE; + +connection default; +drop database if exists test2; +create database test2; +create table test2.t2(a int); + +connection conn1; +--error 1044 +LOCK TABLE executor__executor.t WRITE, test2.t2 WRITE; + +connection default; +GRANT LOCK TABLES ON test2.* to 'testuser'@'localhost'; + +connection conn1; +--error 1142 +LOCK TABLE executor__executor.t WRITE, test2.t2 WRITE; + +connection default; +GRANT SELECT ON test2.* to 'testuser'@'localhost'; + +connection conn1; +LOCK TABLE executor__executor.t WRITE, test2.t2 WRITE; + +connection default; +--replace_regex /server: .*session: .*/server: session: / +--error 8020 +LOCK TABLE executor__executor.t WRITE, test2.t2 WRITE; + +connection conn1; +unlock tables; + +disconnect conn1; +unlock tables; +drop user 'testuser'@'localhost'; +>>>>>>> 91beef4bb14 (*: disable insert null to not-null column for single-row insertion in non-strict mode (#55477)) diff --git a/tests/integrationtest/t/executor/insert.test b/tests/integrationtest/t/executor/insert.test new file mode 100644 index 0000000000000..07cf8bcd82c8a --- /dev/null +++ b/tests/integrationtest/t/executor/insert.test @@ -0,0 +1,1646 @@ +# TestClusterIndexInsertOnDuplicateKey +set tidb_enable_clustered_index = on; +drop 
table if exists t; +create table t(a char(20), b int, primary key(a)); +insert into t values('aa', 1), ('bb', 1); +-- error 1062 +insert into t values('aa', 2); +drop table t; +create table t(a char(20), b varchar(30), c varchar(10), primary key(a, b, c)); +insert into t values ('a', 'b', 'c'), ('b', 'a', 'c'); +-- error 1062 +insert into t values ('a', 'b', 'c'); +set tidb_enable_clustered_index = default; + +# TestPaddingCommonHandle +set tidb_enable_clustered_index = on; +drop table if exists t1; +create table t1(c1 decimal(6,4), primary key(c1)); +insert into t1 set c1 = 0.1; +insert into t1 set c1 = 0.1 on duplicate key update c1 = 1; +select * from t1; +set tidb_enable_clustered_index = default; + +# TestInsertReorgDelete +drop table if exists t1; +create table t1(c1 year); +insert into t1 set c1 = '2004'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 year); +insert into t1 set c1 = 2004; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 bit); +insert into t1 set c1 = 1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 smallint unsigned); +insert into t1 set c1 = 1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 int unsigned); +insert into t1 set c1 = 1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 smallint); +insert into t1 set c1 = -1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 int); +insert into t1 set c1 = -1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 decimal(6,4)); +insert into t1 set c1 = '1.1'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 decimal); +insert into t1 set c1 = 1.1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 numeric); +insert into t1 set c1 = -1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 float); +insert into t1 set c1 = 1.2; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 double); +insert into t1 set c1 = 1.2; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 double); +insert into t1 set c1 = 1.3; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 real); +insert into t1 set c1 = 1.4; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 date); +insert into t1 set c1 = '2020-01-01'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 time); +insert into t1 set c1 = '20:00:00'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 datetime); +insert into t1 set c1 = '2020-01-01 22:22:22'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 
timestamp); +insert into t1 set c1 = '2020-01-01 22:22:22'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 year); +insert into t1 set c1 = '2020'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 char(15)); +insert into t1 set c1 = 'test'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 varchar(15)); +insert into t1 set c1 = 'test'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 binary(3)); +insert into t1 set c1 = 'a'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 varbinary(3)); +insert into t1 set c1 = 'b'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 blob); +insert into t1 set c1 = 'test'; +alter table t1 add index idx(c1(3)); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 text); +insert into t1 set c1 = 'test'; +alter table t1 add index idx(c1(3)); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 enum('a', 'b')); +insert into t1 set c1 = 'a'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 set('a', 'b')); +insert into t1 set c1 = 'a,b'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +# TestUpdateDuplicateKey +drop table if exists c; +create table c(i int,j int,k int,primary key(i,j,k)); +insert into c values(1,2,3); +insert into c values(1,2,4); +-- error 1062 +update c set i=1,j=2,k=4 where i=1 and j=2 and k=3; + +# TestIssue37187 +drop table if exists t1, t2; +create table t1 (a int(11) ,b varchar(100) ,primary key (a)); +create table t2 (c int(11) ,d varchar(100) ,primary key (c)); +prepare in1 from 'insert into t1 (a,b) select c,null from t2 t on duplicate key update b=t.d'; +execute in1; + +# TestInsertWrongValueForField +drop table if exists t1; +create table t1(a bigint); +-- error 1366 +insert into t1 values("asfasdfsajhlkhlksdaf"); +drop table if exists t1; +create table t1(a varchar(10)) charset ascii; +-- error 1366 +insert into t1 values('我'); +drop table if exists t1; +create table t1(a char(10) charset utf8); +insert into t1 values('我'); +alter table t1 add column b char(10) charset ascii as ((a)); +select * from t1; +drop table if exists t; +create table t (a year); +-- error 1264 +insert into t values(2156); +DROP TABLE IF EXISTS ts; +CREATE TABLE ts (id int DEFAULT NULL, time1 TIMESTAMP NULL DEFAULT NULL); +SET @@sql_mode=''; +INSERT INTO ts (id, time1) VALUES (1, TIMESTAMP '1018-12-23 00:00:00'); +SHOW WARNINGS; +SELECT * FROM ts ORDER BY id; +SET @@sql_mode='STRICT_TRANS_TABLES'; +-- error 1292 +INSERT INTO ts (id, time1) VALUES (2, TIMESTAMP '1018-12-24 00:00:00'); +DROP TABLE ts; +CREATE TABLE t0(c0 SMALLINT AUTO_INCREMENT PRIMARY KEY); +INSERT IGNORE INTO t0(c0) VALUES (194626268); +INSERT IGNORE INTO t0(c0) VALUES ('*'); +SHOW WARNINGS; +SET @@sql_mode=default; + +# TestInsertValueForCastDecimalField +drop table if exists t1; +create table t1(a decimal(15,2)); +insert into t1 values (1111111111111.01); +select * from t1; +select cast(a as decimal) from t1; + +# TestInsertForMultiValuedIndex +drop table if exists t1; +create table t1(a json, b int, 
unique index idx((cast(a as signed array)))); +insert into t1 values ('[1,11]', 1); +insert into t1 values ('[2, 22]', 2); +select * from t1; +-- error 1062 +insert into t1 values ('[2, 222]', 2); +replace into t1 values ('[1, 10]', 10); +select * from t1; +replace into t1 values ('[1, 2]', 1); +select * from t1; +replace into t1 values ('[1, 11]', 1); +insert into t1 values ('[2, 22]', 2); +select * from t1; +insert ignore into t1 values ('[1]', 2); +select * from t1; +insert ignore into t1 values ('[1, 2]', 2); +select * from t1; +insert into t1 values ('[2]', 2) on duplicate key update b = 10; +select * from t1; +-- error 1062 +insert into t1 values ('[2, 1]', 2) on duplicate key update a = '[1,2]'; +-- error 1062 +insert into t1 values ('[1,2]', 2) on duplicate key update a = '[1,2]'; +-- error 1062 +insert into t1 values ('[11, 22]', 2) on duplicate key update a = '[1,2]'; + +# TestInsertDateTimeWithTimeZone +set time_zone="+09:00"; +drop table if exists t; +create table t (id int, c1 datetime not null default CURRENT_TIMESTAMP); +set TIMESTAMP = 1234; +insert t (id) values (1); +select * from t; +drop table if exists t; +create table t (dt datetime); +set @@time_zone='+08:00'; +delete from t; +insert into t values ('2020-10-22'); +select * from t; +delete from t; +insert into t values ('2020-10-22-16'); +select * from t; +delete from t; +insert into t values ('2020-10-22 16-31'); +select * from t; +delete from t; +insert into t values ('2020-10-22 16:31-15'); +select * from t; +delete from t; +insert into t values ('2020-10-22T16:31:15-10'); +select * from t; +delete from t; +insert into t values ('2020.10-22'); +select * from t; +delete from t; +insert into t values ('2020-10.22-16'); +select * from t; +delete from t; +insert into t values ('2020-10-22.16-31'); +select * from t; +delete from t; +insert into t values ('2020-10-22 16.31-15'); +select * from t; +delete from t; +insert into t values ('2020-10-22T16.31.15+14'); +select * from t; +delete from t; +insert into t values ('2020-10:22'); +select * from t; +delete from t; +insert into t values ('2020-10-22:16'); +select * from t; +delete from t; +insert into t values ('2020-10-22-16:31'); +select * from t; +delete from t; +insert into t values ('2020-10-22 16-31:15'); +select * from t; +delete from t; +insert into t values ('2020-10-22T16.31.15+09:30'); +select * from t; +delete from t; +insert into t values ('2020.10-22:16'); +select * from t; +delete from t; +insert into t values ('2020-10.22-16:31'); +select * from t; +delete from t; +insert into t values ('2020-10-22.16-31:15'); +select * from t; +delete from t; +insert into t values ('2020-10-22T16:31.15+09:30'); +select * from t; +drop table if exists t; +create table t (dt datetime, ts timestamp); +delete from t; +set @@time_zone='+08:00'; +insert into t values ('2020-10-22T16:53:40Z', '2020-10-22T16:53:40Z'); +set @@time_zone='+00:00'; +select * from t; +delete from t; +set @@time_zone='-08:00'; +insert into t values ('2020-10-22T16:53:40Z', '2020-10-22T16:53:40Z'); +set @@time_zone='+08:00'; +select * from t; +delete from t; +set @@time_zone='-03:00'; +insert into t values ('2020-10-22T16:53:40+03:00', '2020-10-22T16:53:40+03:00'); +set @@time_zone='+08:00'; +select * from t; +delete from t; +set @@time_zone='+08:00'; +insert into t values ('2020-10-22T16:53:40+08:00', '2020-10-22T16:53:40+08:00'); +set @@time_zone='+08:00'; +select * from t; +drop table if exists t; +create table t (ts timestamp); +insert into t values ('2020-10-22T12:00:00Z'), 
('2020-10-22T13:00:00Z'), ('2020-10-22T14:00:00Z'); +select count(*) from t where ts > '2020-10-22T12:00:00Z'; +set @@time_zone='+08:00'; +drop table if exists t; +create table t (dt datetime(2), ts timestamp(2)); +insert into t values ('2020-10-27T14:39:10.10+00:00', '2020-10-27T14:39:10.10+00:00'); +select * from t; +drop table if exists t; +create table t (dt datetime(1), ts timestamp(1)); +insert into t values ('2020-10-27T14:39:10.3+0200', '2020-10-27T14:39:10.3+0200'); +select * from t; +drop table if exists t; +create table t (dt datetime(6), ts timestamp(6)); +insert into t values ('2020-10-27T14:39:10.3-02', '2020-10-27T14:39:10.3-02'); +select * from t; +drop table if exists t; +create table t (dt datetime(2), ts timestamp(2)); +insert into t values ('2020-10-27T14:39:10.10Z', '2020-10-27T14:39:10.10Z'); +select * from t; +set time_zone=default; +set timestamp=default; + +# TestInsertZeroYear +drop table if exists t1; +create table t1(a year(4)); +insert into t1 values(0000),(00),("0000"),("000"), ("00"), ("0"), (79), ("79"); +select * from t1; +drop table if exists t; +create table t(f_year year NOT NULL DEFAULT '0000')ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; +insert into t values(); +select * from t; +insert into t values('0000'); +select * from t; + +# TestAllowInvalidDates +drop table if exists t1, t2, t3, t4; +create table t1(d date); +create table t2(d datetime); +create table t3(d date); +create table t4(d datetime); +set sql_mode='STRICT_TRANS_TABLES,ALLOW_INVALID_DATES'; +insert into t1 values ('0000-00-00'); +insert into t2 values ('0000-00-00'); +insert into t1 values ('2019-00-00'); +insert into t2 values ('2019-00-00'); +insert into t1 values ('2019-01-00'); +insert into t2 values ('2019-01-00'); +insert into t1 values ('2019-00-01'); +insert into t2 values ('2019-00-01'); +insert into t1 values ('2019-02-31'); +insert into t2 values ('2019-02-31'); +select year(d), month(d), day(d) from t1; +select year(d), month(d), day(d) from t2; +insert t3 select d from t1; +select year(d), month(d), day(d) from t3; +insert t4 select d from t2; +select year(d), month(d), day(d) from t4; + +truncate t1;truncate t2;truncate t3;truncate t4; +set sql_mode='ALLOW_INVALID_DATES'; +insert into t1 values ('0000-00-00'); +insert into t2 values ('0000-00-00'); +insert into t1 values ('2019-00-00'); +insert into t2 values ('2019-00-00'); +insert into t1 values ('2019-01-00'); +insert into t2 values ('2019-01-00'); +insert into t1 values ('2019-00-01'); +insert into t2 values ('2019-00-01'); +insert into t1 values ('2019-02-31'); +insert into t2 values ('2019-02-31'); +select year(d), month(d), day(d) from t1; +select year(d), month(d), day(d) from t2; +insert t3 select d from t1; +select year(d), month(d), day(d) from t3; +insert t4 select d from t2; +select year(d), month(d), day(d) from t4; +set sql_mode=default; + +# TestPartitionInsertOnDuplicate +drop table if exists t1, t2, t3; +create table t1 (a int,b int,primary key(a,b)) partition by range(a) (partition p0 values less than (100),partition p1 values less than (1000)); +insert into t1 set a=1, b=1; +insert into t1 set a=1,b=1 on duplicate key update a=1,b=1; +select * from t1; +create table t2 (a int,b int,primary key(a,b)) partition by hash(a) partitions 4; +insert into t2 set a=1,b=1; +insert into t2 set a=1,b=1 on duplicate key update a=1,b=1; +select * from t2; +CREATE TABLE t3 (a int, b int, c int, d int, e int, + PRIMARY KEY (a,b), + UNIQUE KEY (b,c,d) +) PARTITION BY RANGE ( b ) ( + PARTITION p0 VALUES LESS 
THAN (4),
+ PARTITION p1 VALUES LESS THAN (7),
+ PARTITION p2 VALUES LESS THAN (11)
+);
+insert into t3 values (1,2,3,4,5);
+insert into t3 values (1,2,3,4,5),(6,2,3,4,6) on duplicate key update e = e + values(e);
+select * from t3;
+
+# TestBit
+drop table if exists t1;
+create table t1 (a bit(3));
+-- error 1406
+insert into t1 values(-1);
+-- error 1406
+insert into t1 values(9);
+create table t64 (a bit(64));
+insert into t64 values(-1);
+insert into t64 values(18446744073709551615);
+-- error 1264
+insert into t64 values(18446744073709551616);
+
+# TestJiraIssue5366
+drop table if exists bug;
+create table bug (a varchar(100));
+insert into bug select ifnull(JSON_UNQUOTE(JSON_EXTRACT('[{"amount":2000,"feeAmount":0,"merchantNo":"20190430140319679394","shareBizCode":"20160311162_SECOND"}]', '$[0].merchantNo')),'') merchant_no union SELECT '20180531557' merchant_no;
+--sorted_result
+select * from bug;
+
+# TestDMLCast
+drop table if exists t;
+create table t (a int, b double);
+insert into t values (ifnull('',0)+0, 0);
+insert into t values (0, ifnull('',0)+0);
+select * from t;
+-- error 1366
+insert into t values ('', 0);
+-- error 1366
+insert into t values (0, '');
+-- error 1292
+update t set a = '';
+-- error 1292
+update t set b = '';
+update t set a = ifnull('',0)+0;
+update t set b = ifnull('',0)+0;
+delete from t where a = '';
+select * from t;
+
+# TestInsertFloatOverflow
+drop table if exists t,t1;
+create table t(col1 FLOAT, col2 FLOAT(10,2), col3 DOUBLE, col4 DOUBLE(10,2), col5 DECIMAL, col6 DECIMAL(10,2));
+-- error 1264
+insert into t values (-3.402823466E+68, -34028234.6611, -1.7976931348623157E+308, -17976921.34, -9999999999, -99999999.99);
+-- error 1264
+insert into t values (-34028234.6611, -3.402823466E+68, -1.7976931348623157E+308, -17976921.34, -9999999999, -99999999.99);
+create table t1(id1 float,id2 float);
+insert ignore into t1 values(999999999999999999999999999999999999999,-999999999999999999999999999999999999999);
+select @@warning_count;
+select convert(id1,decimal(65)),convert(id2,decimal(65)) from t1;
+
+# TestTextTooLongError
+# Fix https://github.com/pingcap/tidb/issues/32601
+set sql_mode = 'ONLY_FULL_GROUP_BY,STRICT_ALL_TABLES,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION';
+# The default value of max_allowed_packet is big enough for the tinytext and text cases below to be tested correctly.
+drop table if exists t1;
+CREATE TABLE t1(c1 TINYTEXT CHARACTER SET utf8mb4);
+-- error 1406
+INSERT INTO t1 (c1) VALUES(REPEAT(X'C385', 128));
+drop table if exists t1;
+CREATE TABLE t1(c1 Text CHARACTER SET utf8mb4);
+-- error 1406
+INSERT INTO t1 (c1) VALUES(REPEAT(X'C385', 32768));
+drop table if exists t1;
+CREATE TABLE t1(c1 mediumtext);
+-- error 1406
+INSERT INTO t1 (c1) VALUES(REPEAT(X'C385', 8777215));
+# For longtext, the default max_allowed_packet cannot allow a 4GB packet, so that case is skipped.
+# With a non-strict sql_mode set, we are not supposed to raise an error but to truncate the value.
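+# (Illustrative expectation for the tinytext case below, assuming truncation
+# keeps whole characters: REPEAT(X'C385', 128) is 256 bytes of two-byte UTF-8
+# characters, and TINYTEXT caps at 255 bytes, so length(c1) should come back
+# as 254.)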
+set sql_mode = 'ONLY_FULL_GROUP_BY,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION';
+drop table if exists t1;
+CREATE TABLE t1(c1 TINYTEXT CHARACTER SET utf8mb4);
+INSERT INTO t1 (c1) VALUES(REPEAT(X'C385', 128));
+select length(c1) from t1;
+drop table if exists t1;
+CREATE TABLE t1(c1 Text CHARACTER SET utf8mb4);
+INSERT INTO t1 (c1) VALUES(REPEAT(X'C385', 32768));
+select length(c1) from t1;
+# For mediumtext or bigger, the TiKV entry-size limit applies and we would get: ERROR 8025 (HY000): entry too large, the max entry size is 6291456, the size of data is 16777247. No need to test that here.
+set sql_mode = default;
+
+# TestAutoRandomIDExplicit
+set @@allow_auto_random_explicit_insert = true;
+drop table if exists ar;
+create table ar (id bigint key clustered auto_random, name char(10));
+insert into ar(id) values (1);
+select id from ar;
+select last_insert_id();
+delete from ar;
+insert into ar(id) values (1), (2);
+select id from ar;
+select last_insert_id();
+delete from ar;
+drop table ar;
+set @@allow_auto_random_explicit_insert = default;
+
+# TestInsertErrorMsg
+drop table if exists t, t1;
+create table t (a int primary key, b datetime, d date);
+-- error 1292
+insert into t values (1, '2019-02-11 30:00:00', '2019-01-31');
+CREATE TABLE t1 (a BINARY(16) PRIMARY KEY);
+INSERT INTO t1 VALUES (AES_ENCRYPT('a','a'));
+-- error 1062
+INSERT INTO t1 VALUES (AES_ENCRYPT('a','a'));
+INSERT INTO t1 VALUES (AES_ENCRYPT('b','b'));
+-- error 1062
+INSERT INTO t1 VALUES (AES_ENCRYPT('b','b'));
+drop table if exists t1;
+create table t1 (a bit primary key) engine=innodb;
+insert into t1 values (b'0');
+-- error 1062
+insert into t1 values (b'0');
+
+# TestIssue16366
+drop table if exists t;
+create table t(c numeric primary key);
+insert ignore into t values(null);
+-- error 1062
+insert into t values(0);
+
+# TestClusterPrimaryTablePlainInsert
+set tidb_enable_clustered_index = on;
+drop table if exists t1pk;
+create table t1pk(id varchar(200) primary key, v int);
+insert into t1pk(id, v) values('abc', 1);
+select * from t1pk;
+set @@tidb_constraint_check_in_place=true;
+-- error 1062
+insert into t1pk(id, v) values('abc', 2);
+set @@tidb_constraint_check_in_place=false;
+-- error 1062
+insert into t1pk(id, v) values('abc', 3);
+select v, id from t1pk;
+select id from t1pk where id = 'abc';
+select v, id from t1pk where id = 'abc';
+drop table if exists t3pk;
+create table t3pk(id1 varchar(200), id2 varchar(200), v int, id3 int, primary key(id1, id2, id3));
+insert into t3pk(id1, id2, id3, v) values('abc', 'xyz', 100, 1);
+select * from t3pk;
+set @@tidb_constraint_check_in_place=true;
+-- error 1062
+insert into t3pk(id1, id2, id3, v) values('abc', 'xyz', 100, 2);
+set @@tidb_constraint_check_in_place=false;
+-- error 1062
+insert into t3pk(id1, id2, id3, v) values('abc', 'xyz', 100, 3);
+select v, id3, id2, id1 from t3pk;
+select id3, id2, id1 from t3pk where id3 = 100 and id2 = 'xyz' and id1 = 'abc';
+select id3, id2, id1, v from t3pk where id3 = 100 and id2 = 'xyz' and id1 = 'abc';
+insert into t3pk(id1, id2, id3, v) values('abc', 'xyz', 101, 1);
+insert into t3pk(id1, id2, id3, v) values('abc', 'zzz', 101, 1);
+drop table if exists t1pku;
+create table t1pku(id varchar(200) primary key, uk int, v int, unique key ukk(uk));
+insert into t1pku(id, uk, v) values('abc', 1, 2);
+select * from t1pku where id = 'abc';
+-- error 1062
+insert into t1pku(id, uk, v) values('aaa', 1, 3);
+select * from t1pku;
+select * from t3pk where (id1, id2, id3) in (('abc', 'xyz', 100), ('abc', 'xyz',
101), ('abc', 'zzz', 101)); +set @@tidb_constraint_check_in_place=default; +set tidb_enable_clustered_index = default; + +# TestClusterPrimaryTableInsertIgnore +set tidb_enable_clustered_index = on; +drop table if exists it1pk; +create table it1pk(id varchar(200) primary key, v int); +insert into it1pk(id, v) values('abc', 1); +insert ignore into it1pk(id, v) values('abc', 2); +select * from it1pk where id = 'abc'; +drop table if exists it2pk; +create table it2pk(id1 varchar(200), id2 varchar(200), v int, primary key(id1, id2)); +insert into it2pk(id1, id2, v) values('abc', 'cba', 1); +select * from it2pk where id1 = 'abc' and id2 = 'cba'; +insert ignore into it2pk(id1, id2, v) values('abc', 'cba', 2); +select * from it2pk where id1 = 'abc' and id2 = 'cba'; +drop table if exists it1pku; +create table it1pku(id varchar(200) primary key, uk int, v int, unique key ukk(uk)); +insert into it1pku(id, uk, v) values('abc', 1, 2); +select * from it1pku where id = 'abc'; +insert ignore into it1pku(id, uk, v) values('aaa', 1, 3), ('bbb', 2, 1); +select * from it1pku; +set tidb_enable_clustered_index = default; + +# TestClusterPrimaryTableInsertDuplicate +set tidb_enable_clustered_index = on; +drop table if exists dt1pi; +create table dt1pi(id varchar(200) primary key, v int); +insert into dt1pi(id, v) values('abb', 1),('acc', 2); +insert into dt1pi(id, v) values('abb', 2) on duplicate key update v = v + 1; +select * from dt1pi; +insert into dt1pi(id, v) values('abb', 2) on duplicate key update v = v + 1, id = 'xxx'; +select * from dt1pi; +drop table if exists dt1piu; +create table dt1piu(id varchar(200) primary key, uk int, v int, unique key uuk(uk)); +insert into dt1piu(id, uk, v) values('abb', 1, 10),('acc', 2, 20); +insert into dt1piu(id, uk, v) values('xyz', 1, 100) on duplicate key update v = v + 1; +select * from dt1piu; +insert into dt1piu(id, uk, v) values('abb', 1, 2) on duplicate key update v = v + 1, id = 'xxx'; +select * from dt1piu; +drop table if exists ts1pk; +create table ts1pk(id1 timestamp, id2 timestamp, v int, primary key(id1, id2)); +insert into ts1pk (id1, id2, v) values('2018-01-01 11:11:11', '2018-01-01 11:11:11', 1); +select id1, id2, v from ts1pk; +insert into ts1pk (id1, id2, v) values('2018-01-01 11:11:11', '2018-01-01 11:11:11', 2) on duplicate key update v = values(v); +select id1, id2, v from ts1pk; +insert into ts1pk (id1, id2, v) values('2018-01-01 11:11:11', '2018-01-01 11:11:11', 2) on duplicate key update v = values(v), id1 = '2018-01-01 11:11:12'; +select id1, id2, v from ts1pk; +set tidb_enable_clustered_index = default; + +# TestClusterPrimaryKeyForIndexScan +set tidb_enable_clustered_index = on; +drop table if exists pkt1; +CREATE TABLE pkt1 (a varchar(255), b int, index idx(b), primary key(a,b)); +insert into pkt1 values ('aaa',1); +select b from pkt1 where b = 1; +drop table if exists pkt2; +CREATE TABLE pkt2 (a varchar(255), b int, unique index idx(b), primary key(a,b)); +insert into pkt2 values ('aaa',1); +select b from pkt2 where b = 1; +drop table if exists issue_18232; +create table issue_18232 (a int, b int, c int, d int, primary key (a, b), index idx(c)); +select a from issue_18232 use index (idx); +select b from issue_18232 use index (idx); +select a,b from issue_18232 use index (idx); +select c from issue_18232 use index (idx); +select a,c from issue_18232 use index (idx); +select b,c from issue_18232 use index (idx); +select a,b,c from issue_18232 use index (idx); +select d from issue_18232 use index (idx); +select a,d from issue_18232 use index 
(idx); +select b,d from issue_18232 use index (idx); +select a,b,d from issue_18232 use index (idx); +select c,d from issue_18232 use index (idx); +select a,c,d from issue_18232 use index (idx); +select b,c,d from issue_18232 use index (idx); +select a,b,c,d from issue_18232 use index (idx); +set tidb_enable_clustered_index = default; + +# TestIssue20768 +drop table if exists t1, t2; +create table t1(a year, primary key(a)); +insert ignore into t1 values(null); +create table t2(a int, key(a)); +insert into t2 values(0); +select /*+ hash_join(t1) */ * from t1 join t2 on t1.a = t2.a; +select /*+ inl_join(t1) */ * from t1 join t2 on t1.a = t2.a; +select /*+ inl_join(t2) */ * from t1 join t2 on t1.a = t2.a; +select /*+ inl_hash_join(t1) */ * from t1 join t2 on t1.a = t2.a; +select /*+ inl_merge_join(t1) */ * from t1 join t2 on t1.a = t2.a; +select /*+ merge_join(t1) */ * from t1 join t2 on t1.a = t2.a; + +# TestIssue10402 +drop table if exists vctt; +create table vctt (v varchar(4), c char(4)); +insert into vctt values ('ab ', 'ab '); +select * from vctt; +delete from vctt; +insert into vctt values ('ab\n\n\n', 'ab\n\n\n'), ('ab\t\t\t', 'ab\t\t\t'), ('ab ', 'ab '), ('ab\r\r\r', 'ab\r\r\r'); +show warnings; +select * from vctt; +select length(v), length(c) from vctt; + +# TestDuplicatedEntryErr +# See https://github.com/pingcap/tidb/issues/24582 +drop table if exists t1; +create table t1(a int, b varchar(20), primary key(a,b(3)) clustered); +insert into t1 values(1,'aaaaa'); +-- error 1062 +insert into t1 values(1,'aaaaa'); +-- error 1062 +insert into t1 select 1, 'aaa'; +insert into t1 select 1, 'bb'; +-- error 1062 +insert into t1 select 1, 'bb'; + +# TestBinaryLiteralInsertToEnum +drop table if exists bintest; +create table bintest (h enum(0x61, '1', 'b')) character set utf8mb4; +insert into bintest(h) values(0x61); +select * from bintest; + +# TestBinaryLiteralInsertToSet +drop table if exists bintest; +create table bintest (h set(0x61, '1', 'b')) character set utf8mb4; +insert into bintest(h) values(0x61); +select * from bintest; + +# TestGlobalTempTableAutoInc +drop table if exists temp_test; +create global temporary table temp_test(id int primary key auto_increment) on commit delete rows; + +## Data is cleared after transaction auto commits. +insert into temp_test(id) values(0); +select * from temp_test; + +## Data is not cleared inside a transaction. +begin; +insert into temp_test(id) values(0); +select * from temp_test; +commit; + +## AutoID allocator is cleared. +begin; +insert into temp_test(id) values(0); +select * from temp_test; +## Test whether auto-inc is incremental +insert into temp_test(id) values(0); +select id from temp_test order by id; +commit; + +## multi-value insert +begin; +insert into temp_test(id) values(0), (0); +select id from temp_test order by id; +insert into temp_test(id) values(0), (0); +select id from temp_test order by id; +commit; + +## rebase +begin; +insert into temp_test(id) values(10); +insert into temp_test(id) values(0); +select id from temp_test order by id; +insert into temp_test(id) values(20), (30); +insert into temp_test(id) values(0), (0); +select id from temp_test order by id; +commit; +drop table if exists temp_test; + +# TestGlobalTempTableRowID +drop table if exists temp_test; +create global temporary table temp_test(id int) on commit delete rows; + +## Data is cleared after transaction auto commits. +insert into temp_test(id) values(0); +select _tidb_rowid from temp_test; + +## Data is not cleared inside a transaction. 
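+## (_tidb_rowid is the hidden integer handle TiDB adds to tables without an
+## integer primary key; selecting it explicitly is how this test observes
+## row-id allocation.)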
+begin; +insert into temp_test(id) values(0); +select _tidb_rowid from temp_test; +commit; + +## AutoID allocator is cleared. +begin; +insert into temp_test(id) values(0); +select _tidb_rowid from temp_test; +## Test whether row id is incremental +insert into temp_test(id) values(0); +select _tidb_rowid from temp_test order by _tidb_rowid; +commit; + +## multi-value insert +begin; +insert into temp_test(id) values(0), (0); +select _tidb_rowid from temp_test order by _tidb_rowid; +insert into temp_test(id) values(0), (0); +select _tidb_rowid from temp_test order by _tidb_rowid; +commit; +drop table if exists temp_test; + +# TestIssue26762 +drop table if exists t1; +create table t1(c1 date); +-- error 1292 +insert into t1 values('2020-02-31'); +set @@sql_mode='ALLOW_INVALID_DATES'; +insert into t1 values('2020-02-31'); +select * from t1; +set @@sql_mode='STRICT_TRANS_TABLES'; +-- error 1292 +insert into t1 values('2020-02-31'); +set sql_mode=default; + +# TestStringtoDecimal +drop table if exists t; +create table t (id decimal(10)); +-- error 1366 +insert into t values('1sdf'); +-- error 1366 +insert into t values('1edf'); +-- error 1366 +insert into t values('12Ea'); +-- error 1366 +insert into t values('1E'); +-- error 1366 +insert into t values('1e'); +-- error 1366 +insert into t values('1.2A'); +-- error 1366 +insert into t values('1.2.3.4.5'); +-- error 1366 +insert into t values('1.2.'); +-- error 1366 +insert into t values('1,999.00'); +## TODO: MySQL8.0 reports Note 1265 Data truncated for column 'id' at row 1 +insert into t values('12e-3'); +show warnings; +select id from t; +drop table if exists t; + +# TestReplaceAllocatingAutoID +# https://github.com/pingcap/tidb/issues/29483 +SET sql_mode='NO_ENGINE_SUBSTITUTION'; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a tinyint not null auto_increment primary key, b char(20)); +INSERT INTO t1 VALUES (127,'maxvalue'); +## Note that this error is different from MySQL's duplicated primary key error +-- error 1467 +REPLACE INTO t1 VALUES (0,'newmaxvalue'); +set sql_mode=default; + +# TestInsertIntoSelectError +DROP TABLE IF EXISTS t1; +CREATE TABLE t1(a INT) ENGINE = InnoDB; +INSERT IGNORE into t1(SELECT SLEEP(NULL)); +SHOW WARNINGS; +INSERT IGNORE into t1(SELECT SLEEP(-1)); +SHOW WARNINGS; +INSERT IGNORE into t1(SELECT SLEEP(1)); +SELECT * FROM t1; +DROP TABLE t1; + +# TestIssue32213 +drop table if exists t1; +create table t1(c1 float); +insert into t1 values(999.99); +select cast(t1.c1 as decimal(4, 1)) from t1; +select cast(t1.c1 as decimal(5, 1)) from t1; +drop table if exists t1; +create table t1(c1 decimal(6, 4)); +insert into t1 values(99.9999); +select cast(t1.c1 as decimal(5, 3)) from t1; +select cast(t1.c1 as decimal(6, 3)) from t1; + +# TestInsertBigScientificNotation +# https://github.com/pingcap/tidb/issues/47787 +drop table if exists t1; +create table t1(id int, a int); +set @@SQL_MODE='STRICT_TRANS_TABLES'; +-- error 1264 +insert into t1 values(1, '1e100'); +-- error 1264 +insert into t1 values(2, '-1e100'); +select id, a from t1; +set @@SQL_MODE=''; +insert into t1 values(1, '1e100'); +show warnings; +insert into t1 values(2, '-1e100'); +show warnings; +select id, a from t1 order by id asc; +set sql_mode=default; + +# TestUnsignedDecimalFloatInsertNegative +# https://github.com/pingcap/tidb/issues/47945 +drop table if exists tf; +create table tf(a float(1, 0) unsigned); +-- error 1264 +insert into tf values('-100'); +set @@sql_mode=''; +insert into tf values('-100'); +select * from tf; +set @@sql_mode=default; + +# 
TestIssue17745 +drop table if exists tt1; +create table tt1 (c1 decimal(64)); +-- error 1264 +insert into tt1 values(89000000000000000000000000000000000000000000000000000000000000000000000000000000000000000); +-- error 1264 +insert into tt1 values(89123456789012345678901234567890123456789012345678901234567890123456789012345678900000000); +insert ignore into tt1 values(89123456789012345678901234567890123456789012345678901234567890123456789012345678900000000); +show warnings; +select c1 from tt1; +-- error 1264 +update tt1 set c1 = 89123456789012345678901234567890123456789012345678901234567890123456789012345678900000000; +drop table if exists tt1; +-- error 1367 +insert into tt1 values(4556414e723532); +select 888888888888888888888888888888888888888888888888888888888888888888888888888888888888; +show warnings; + +# TestIssue38950 +drop table if exists t; +create table t (id smallint auto_increment primary key); +alter table t add column c1 int default 1; +--enable_info +insert ignore into t(id) values (194626268); +--disable_info +select * from t; +--enable_info +insert ignore into t(id) values ('*') on duplicate key update c1 = 2; +--disable_info +select * from t; + +# TestInsertIgnoreOnDup +drop table if exists t; +create table t (i int not null primary key, j int unique key); +--enable_info +insert into t values (1, 1), (2, 2); +insert ignore into t values(1, 1) on duplicate key update i = 2; +--disable_info +select * from t; +--enable_info +insert ignore into t values(1, 1) on duplicate key update j = 2; +--disable_info +select * from t; + +drop table if exists t2; +create table t2(`col_25` set('Alice','Bob','Charlie','David') NOT NULL,`col_26` date NOT NULL DEFAULT '2016-04-15', PRIMARY KEY (`col_26`) clustered, UNIQUE KEY `idx_9` (`col_25`,`col_26`),UNIQUE KEY `idx_10` (`col_25`)); +insert into t2(col_25, col_26) values('Bob', '1989-03-23'),('Alice', '2023-11-24'), ('Charlie', '2023-12-05'); +insert ignore into t2 (col_25,col_26) values ( 'Bob','1977-11-23' ) on duplicate key update col_25 = 'Alice', col_26 = '2036-12-13'; +show warnings; +select * from t2; + +drop table if exists t4; +create table t4(id int primary key clustered, k int, v int, unique key uk1(k)); +insert into t4 values (1, 10, 100), (3, 30, 300); +insert ignore into t4 (id, k, v) values(1, 0, 0) on duplicate key update id = 2, k = 30; +show warnings; +select * from t4; + +drop table if exists t5; +create table t5(k1 varchar(100), k2 varchar(100), uk1 int, v int, primary key(k1, k2) clustered, unique key ukk1(uk1), unique key ukk2(v)); +insert into t5(k1, k2, uk1, v) values('1', '1', 1, '100'), ('1', '3', 2, '200'); +update ignore t5 set k2 = '2', uk1 = 2 where k1 = '1' and k2 = '1'; +show warnings; +select * from t5; + +drop table if exists t6; +create table t6 (a int, b int, c int, primary key(a, b) clustered, unique key idx_14(b), unique key idx_15(b), unique key idx_16(a, b)); +insert into t6 select 10, 10, 20; +insert ignore into t6 set a = 20, b = 10 on duplicate key update a = 100; +select * from t6; +insert ignore into t6 set a = 200, b= 10 on duplicate key update c = 1000; +select * from t6; + +# TestInsertAutoInc +drop table if exists insert_autoinc_test; +create table insert_autoinc_test (id int primary key auto_increment, c1 int); +insert into insert_autoinc_test(c1) values (1), (2); +begin; +select * from insert_autoinc_test; +commit; +begin; +insert into insert_autoinc_test(id, c1) values (5,5); +insert into insert_autoinc_test(c1) values (6); +commit; +begin; +select * from insert_autoinc_test; +commit; 
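+## (Sketch of the expected allocation, assuming the usual rebase behaviour:
+## the explicit insert of id 5 above pushed the auto_increment allocator past
+## 5, so the implicit insert received 6; the smaller explicit id 3 below does
+## not move the allocator backwards, and the next implicit insert gets 7.)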
+begin; +insert into insert_autoinc_test(id, c1) values (3,3); +commit; +begin; +select * from insert_autoinc_test; +commit; +begin; +insert into insert_autoinc_test(c1) values (7); +commit; +begin; +select * from insert_autoinc_test; +commit; +drop table if exists insert_autoinc_test; + +## issue-962 +create table insert_autoinc_test (id int primary key auto_increment, c1 int); +insert into insert_autoinc_test(id, c1) values (0.3, 1); +select * from insert_autoinc_test; +insert into insert_autoinc_test(id, c1) values (-0.3, 2); +select * from insert_autoinc_test; +insert into insert_autoinc_test(id, c1) values (-3.3, 3); +select * from insert_autoinc_test; +insert into insert_autoinc_test(id, c1) values (4.3, 4); +select * from insert_autoinc_test; +insert into insert_autoinc_test(c1) values (5); +select * from insert_autoinc_test; +insert into insert_autoinc_test(id, c1) values (null, 6); +select * from insert_autoinc_test; +drop table if exists insert_autoinc_test; + +## SQL_MODE=NO_AUTO_VALUE_ON_ZERO +create table insert_autoinc_test (id int primary key auto_increment, c1 int); +insert into insert_autoinc_test(id, c1) values (5, 1); +select * from insert_autoinc_test; +insert into insert_autoinc_test(id, c1) values (0, 2); +select * from insert_autoinc_test; +insert into insert_autoinc_test(id, c1) values (0, 3); +select * from insert_autoinc_test; +set SQL_MODE=NO_AUTO_VALUE_ON_ZERO; +insert into insert_autoinc_test(id, c1) values (0, 4); +select * from insert_autoinc_test; +-- error 1062 +insert into insert_autoinc_test(id, c1) values (0, 5); +insert into insert_autoinc_test(c1) values (6); +select * from insert_autoinc_test; +insert into insert_autoinc_test(id, c1) values (null, 7); +select * from insert_autoinc_test; +set SQL_MODE=''; +insert into insert_autoinc_test(id, c1) values (0, 8); +select * from insert_autoinc_test; +insert into insert_autoinc_test(id, c1) values (null, 9); +select * from insert_autoinc_test; +set sql_mode = default; + +# TestInsert +drop table if exists insert_test; +create table insert_test (id int PRIMARY KEY AUTO_INCREMENT, c1 int, c2 int, c3 int default 1); +--enable_info +insert insert_test (c1) values (1),(2),(NULL); +--disable_info +begin; +-- error 1136 +insert insert_test (c1) values (); +rollback; +begin; +-- error 1136 +insert insert_test (c1, c2) values (1,2),(1); +rollback; +begin; +-- error 1054 +insert insert_test (xxx) values (3); +rollback; +begin; +-- error 1146 +insert insert_test_xxx (c1) values (); +rollback; +--enable_info +insert insert_test set c1 = 3; +--disable_info +begin; +-- error 1110 +insert insert_test set c1 = 4, c1 = 5; +rollback; +begin; +-- error 1054 +insert insert_test set xxx = 6; +rollback; + +drop table if exists insert_test_1, insert_test_2; +create table insert_test_1 (id int, c1 int); +--enable_info +insert insert_test_1 select id, c1 from insert_test; +--disable_info +create table insert_test_2 (id int, c1 int); +--enable_info +insert insert_test_1 select id, c1 from insert_test union select id * 10, c1 * 10 from insert_test; +--disable_info +begin; +-- error 1136 +insert insert_test_1 select c1 from insert_test; +rollback; +begin; +-- error 1136 +insert insert_test_1 values(default, default, default, default, default); +rollback; +select * from insert_test where id = 1; +--enable_info +insert into insert_test (id, c3) values (1, 2) on duplicate key update id=values(id), c2=10; +--disable_info +select * from insert_test where id = 1; +--enable_info +insert into insert_test (id, c2) values (1, 1) on duplicate 
key update insert_test.c2=10; +--disable_info +-- error 1054 +insert into insert_test (id, c2) values(1, 1) on duplicate key update t.c2 = 10; +--enable_info +INSERT INTO insert_test (id, c3) VALUES (1, 2) ON DUPLICATE KEY UPDATE c3=values(c3)+c3+3; +--disable_info +select * from insert_test where id = 1; +--enable_info +INSERT IGNORE INTO insert_test (id, c3) VALUES (1, 2) ON DUPLICATE KEY UPDATE c3=values(c3)+c3+3; +--disable_info +select * from insert_test where id = 1; + +drop table if exists insert_err; +create table insert_err (id int, c1 varchar(8)); +-- error 1406 +insert insert_err values (1, 'abcdabcdabcd'); +insert insert_err values (1, '你好,世界'); +create table TEST1 (ID INT NOT NULL, VALUE INT DEFAULT NULL, PRIMARY KEY (ID)); +--enable_info +INSERT INTO TEST1(id,value) VALUE(3,3) on DUPLICATE KEY UPDATE VALUE=4; +--disable_info + +drop table if exists t; +create table t (id int); +insert into t values(1); +update t t1 set id = (select count(*) + 1 from t t2 where t1.id = t2.id); +select * from t; + +## issue 3235 +drop table if exists t; +create table t(c decimal(5, 5)); +insert into t value(0); +-- error 1264 +insert into t value(1); + +drop table if exists t; +create table t(c binary(255)); +insert into t value(1); +select length(c) from t; + +drop table if exists t; +create table t(c varbinary(255)); +insert into t value(1); +select length(c) from t; + +## issue 3509 +drop table if exists t; +create table t(c int); +set @@time_zone = '+08:00'; +insert into t value(Unix_timestamp('2002-10-27 01:00')); +select * from t; +set @@time_zone = default; + +## issue 3832 +drop table if exists t1; +create table t1 (b char(0)); +insert into t1 values (""); + +## issue 3895 +DROP TABLE IF EXISTS t; +CREATE TABLE t(a DECIMAL(4,2)); +INSERT INTO t VALUES (1.000001); +SHOW WARNINGS; +INSERT INTO t VALUES (1.000000); +SHOW WARNINGS; + +## issue 4653 +DROP TABLE IF EXISTS t; +CREATE TABLE t(a datetime); +-- error 1292 +INSERT INTO t VALUES('2017-00-00'); +set sql_mode = ''; +INSERT INTO t VALUES('2017-00-00'); +SELECT * FROM t; +set sql_mode = 'strict_all_tables'; +SELECT * FROM t; +set sql_mode = default; + +drop table if exists test; +CREATE TABLE test(id int(10) UNSIGNED NOT NULL AUTO_INCREMENT, p int(10) UNSIGNED NOT NULL, PRIMARY KEY(p), KEY(id)); +insert into test(p) value(1); +select * from test; +select * from test use index (id) where id = 1; +insert into test values(NULL, 2); +select * from test use index (id) where id = 2; +insert into test values(2, 3); +select * from test use index (id) where id = 2; + +## issue 6360 +drop table if exists t; +create table t(a bigint unsigned); +set @@sql_mode = 'strict_all_tables'; +-- error 1264 +insert into t value (-1); +set @@sql_mode = ''; +insert into t value (-1); +show warnings; +insert into t select -1; +show warnings; +insert into t select cast(-1 as unsigned); +insert into t value (-1.111); +show warnings; +insert into t value ('-1.111'); +show warnings; +update t set a = -1 limit 1; +show warnings; +select * from t; +set @@sql_mode = default; + +# issue 6424 & issue 20207 +drop table if exists t; +create table t(a time(6)); +insert into t value('20070219173709.055870'), ('20070219173709.055'), ('20070219173709.055870123'); +select * from t; +truncate table t; +insert into t value(20070219173709.055870), (20070219173709.055), (20070219173709.055870123); +select * from t; +-- error 1292 +insert into t value(-20070219173709.055870); + +drop table if exists t; +set @@sql_mode=''; +create table t(a float unsigned, b double unsigned); 
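+## (With the empty sql_mode set above, the negative values in the next insert
+## should be clipped to 0 in the unsigned columns, each producing an
+## "out of range" warning instead of error 1264.)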
+insert into t value(-1.1, -1.1), (-2.1, -2.1), (0, 0), (1.1, 1.1);
+show warnings;
+select * from t;
+set @@sql_mode=default;
+
+## issue 7061
+drop table if exists t;
+create table t(a int default 1, b int default 2);
+insert into t values(default, default);
+select * from t;
+truncate table t;
+insert into t values(default(b), default(a));
+select * from t;
+truncate table t;
+insert into t (b) values(default);
+select * from t;
+truncate table t;
+insert into t (b) values(default(a));
+select * from t;
+
+drop view if exists v;
+create view v as select * from t;
+-- error 1105
+insert into v values(1,2);
+-- error 1105
+replace into v values(1,2);
+drop view v;
+
+drop sequence if exists seq;
+create sequence seq;
+-- error 1105
+insert into seq values();
+-- error 1105
+replace into seq values();
+drop sequence seq;
+
+## issue 22851
+drop table if exists t;
+create table t(name varchar(255), b int, c int, primary key(name(2)));
+insert into t(name, b) values("cha", 3);
+-- error 1062
+insert into t(name, b) values("chb", 3);
+insert into t(name, b) values("测试", 3);
+-- error 1062
+insert into t(name, b) values("测试", 3);
+
+# TestInsertOnDup
+drop table if exists t;
+create table t (i int unique key);
+--enable_info
+insert into t values (1),(2);
+--disable_info
+select * from t;
+--enable_info
+insert into t values (1), (2) on duplicate key update i = values(i);
+--disable_info
+select * from t;
+--enable_info
+insert into t values (2), (3) on duplicate key update i = 3;
+--disable_info
+select * from t;
+
+drop table if exists t;
+create table t (i int primary key, j int unique key);
+--enable_info
+insert into t values (-1, 1);
+--disable_info
+select * from t;
+--enable_info
+insert into t values (1, 1) on duplicate key update j = values(j);
+--disable_info
+select * from t;
+
+drop table if exists test;
+create table test (i int primary key, j int unique);
+begin;
+insert into test values (1,1);
+insert into test values (2,1) on duplicate key update i = -i, j = -j;
+commit;
+select * from test;
+delete from test;
+insert into test values (1, 1);
+begin;
+delete from test where i = 1;
+insert into test values (2, 1) on duplicate key update i = -i, j = -j;
+commit;
+select * from test;
+delete from test;
+insert into test values (1, 1);
+begin;
+update test set i = 2, j = 2 where i = 1;
+insert into test values (1, 3) on duplicate key update i = -i, j = -j;
+insert into test values (2, 4) on duplicate key update i = -i, j = -j;
+commit;
+select * from test order by i;
+delete from test;
+begin;
+insert into test values (1, 3), (1, 3) on duplicate key update i = values(i), j = values(j);
+commit;
+select * from test order by i;
+create table tmp (id int auto_increment, code int, primary key(id, code));
+create table m (id int primary key auto_increment, code int unique);
+insert tmp (code) values (1);
+insert tmp (code) values (1);
+set tidb_init_chunk_size=1;
+insert m (code) select code from tmp on duplicate key update code = values(code);
+select * from m;
+
+## The following two cases guarantee that last_insert_id is set to the value
+## assigned by the on-duplicate-key-update clause (see the editorial sketch
+## just below).
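+## (Editorial sketch; sketch_lii is a hypothetical table.) LAST_INSERT_ID(expr)
+## both returns expr and makes the next LAST_INSERT_ID() call return it, so the
+## update branch can report which existing row the duplicate landed on.
+drop table if exists sketch_lii;
+create table sketch_lii (id int auto_increment primary key, v varchar(5) unique);
+insert into sketch_lii (v) values ('a');
+## duplicate on v: the update fires and records the existing id (1)
+insert into sketch_lii (v) values ('a') on duplicate key update id = last_insert_id(id);
+select last_insert_id();
+drop table sketch_lii;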
+DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (f1 INT AUTO_INCREMENT PRIMARY KEY, +f2 VARCHAR(5) NOT NULL UNIQUE); +--enable_info +INSERT t1 (f2) VALUES ('test') ON DUPLICATE KEY UPDATE f1 = LAST_INSERT_ID(f1); +--disable_info +SELECT LAST_INSERT_ID(); +--enable_info +INSERT t1 (f2) VALUES ('test') ON DUPLICATE KEY UPDATE f1 = LAST_INSERT_ID(f1); +--disable_info +SELECT LAST_INSERT_ID(); + +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (f1 INT AUTO_INCREMENT UNIQUE, +f2 VARCHAR(5) NOT NULL UNIQUE); +--enable_info +INSERT t1 (f2) VALUES ('test') ON DUPLICATE KEY UPDATE f1 = LAST_INSERT_ID(f1); +--disable_info +SELECT LAST_INSERT_ID(); +--enable_info +INSERT t1 (f2) VALUES ('test') ON DUPLICATE KEY UPDATE f1 = LAST_INSERT_ID(f1); +--disable_info +SELECT LAST_INSERT_ID(); +--enable_info +INSERT t1 (f2) VALUES ('test') ON DUPLICATE KEY UPDATE f1 = 2; +--disable_info +SELECT LAST_INSERT_ID(); + +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (f1 INT); +--enable_info +INSERT t1 VALUES (1) ON DUPLICATE KEY UPDATE f1 = 1; +--disable_info +SELECT * FROM t1; + +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (f1 INT PRIMARY KEY, f2 INT NOT NULL UNIQUE); +--enable_info +INSERT t1 VALUES (1, 1); +INSERT t1 VALUES (1, 1), (1, 1) ON DUPLICATE KEY UPDATE f1 = 2, f2 = 2; +--disable_info +SELECT * FROM t1 order by f1; +-- error 1048 +INSERT t1 VALUES (1, 1) ON DUPLICATE KEY UPDATE f2 = null; +--enable_info +INSERT IGNORE t1 VALUES (1, 1) ON DUPLICATE KEY UPDATE f2 = null; +--disable_info +show warnings; +SELECT * FROM t1 order by f1; + +SET sql_mode=''; +-- error 1048 +INSERT t1 VALUES (1, 1) ON DUPLICATE KEY UPDATE f2 = null; +SELECT * FROM t1 order by f1; +set sql_mode=default; + +set tidb_init_chunk_size=default; + + +# TestInsertOnDuplicateKey +drop table if exists t1, t2; +create table t1(a1 bigint primary key, b1 bigint); +create table t2(a2 bigint primary key, b2 bigint); +--enable_info +insert into t1 values(1, 100); +insert into t2 values(1, 200); +insert into t1 select a2, b2 from t2 on duplicate key update b1 = a2; +--disable_info +select * from t1; +--enable_info +insert into t1 select a2, b2 from t2 on duplicate key update b1 = b2; +--disable_info +select * from t1; +--enable_info +insert into t1 select a2, b2 from t2 on duplicate key update a1 = a2; +--disable_info +select * from t1; +--enable_info +insert into t1 select a2, b2 from t2 on duplicate key update b1 = 300; +--disable_info +select * from t1; +--enable_info +insert into t1 values(1, 1) on duplicate key update b1 = 400; +--disable_info +select * from t1; +--enable_info +insert into t1 select 1, 500 from t2 on duplicate key update b1 = 400; +--disable_info +select * from t1; + +drop table if exists t1, t2; +create table t1(a bigint primary key, b bigint); +create table t2(a bigint primary key, b bigint); +-- error 1054 +insert into t1 select * from t2 on duplicate key update c = t2.b; + +drop table if exists t1, t2; +create table t1(a bigint primary key, b bigint); +create table t2(a bigint primary key, b bigint); +-- error 1052 +insert into t1 select * from t2 on duplicate key update a = b; + +drop table if exists t1, t2; +create table t1(a bigint primary key, b bigint); +create table t2(a bigint primary key, b bigint); +-- error 1054 +insert into t1 select * from t2 on duplicate key update c = b; + +drop table if exists t1, t2; +create table t1(a1 bigint primary key, b1 bigint); +create table t2(a2 bigint primary key, b2 bigint); +-- error 1054 +insert into t1 select * from t2 on duplicate key update a1 = values(b2); + +drop table if exists t1, t2; 
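+## (Editorial sketch; sketch_vals is a made-up name.) In an on-duplicate-key
+## clause, values(col) is the value the failed INSERT attempted, not the stored
+## one; the next case combines it with the stored column via values(b1) + b2.
+drop table if exists sketch_vals;
+create table sketch_vals (k int primary key, v int);
+insert into sketch_vals values (1, 10);
+## values(v) is 99, so the stored v becomes 10 + 99 = 109
+insert into sketch_vals values (1, 99) on duplicate key update v = v + values(v);
+select * from sketch_vals;
+drop table sketch_vals;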
+create table t1(a1 bigint primary key, b1 bigint);
+create table t2(a2 bigint primary key, b2 bigint);
+--enable_info
+insert into t1 values(1, 100);
+insert into t2 values(1, 200);
+insert into t1 select * from t2 on duplicate key update b1 = values(b1) + b2;
+--disable_info
+select * from t1;
+--enable_info
+insert into t1 select * from t2 on duplicate key update b1 = values(b1) + b2;
+--disable_info
+select * from t1;
+
+drop table if exists t;
+create table t(k1 bigint, k2 bigint, val bigint, primary key(k1, k2));
+--enable_info
+insert into t (val, k1, k2) values (3, 1, 2);
+--disable_info
+select * from t;
+--enable_info
+insert into t (val, k1, k2) select c, a, b from (select 1 as a, 2 as b, 4 as c) tmp on duplicate key update val = tmp.c;
+--disable_info
+select * from t;
+
+drop table if exists t;
+create table t(k1 double, k2 double, v double, primary key(k1, k2));
+--enable_info
+insert into t (v, k1, k2) select c, a, b from (select "3" c, "1" a, "2" b) tmp on duplicate key update v=c;
+--disable_info
+select * from t;
+--enable_info
+insert into t (v, k1, k2) select c, a, b from (select "3" c, "1" a, "2" b) tmp on duplicate key update v=c;
+--disable_info
+select * from t;
+
+drop table if exists t1, t2;
+create table t1(id int, a int, b int);
+--enable_info
+insert into t1 values (1, 1, 1);
+insert into t1 values (2, 2, 1);
+insert into t1 values (3, 3, 1);
+--disable_info
+create table t2(a int primary key, b int, unique(b));
+--enable_info
+insert into t2 select a, b from t1 order by id on duplicate key update a=t1.a, b=t1.b;
+--disable_info
+select * from t2 order by a;
+
+drop table if exists t1, t2;
+create table t1(id int, a int, b int);
+--enable_info
+insert into t1 values (1, 1, 1);
+insert into t1 values (2, 1, 2);
+insert into t1 values (3, 3, 1);
+--disable_info
+create table t2(a int primary key, b int, unique(b));
+--enable_info
+insert into t2 select a, b from t1 order by id on duplicate key update a=t1.a, b=t1.b;
+--disable_info
+select * from t2 order by a;
+
+drop table if exists t1, t2;
+create table t1(id int, a int, b int, c int);
+--enable_info
+insert into t1 values (1, 1, 1, 1);
+insert into t1 values (2, 2, 1, 2);
+insert into t1 values (3, 3, 2, 2);
+insert into t1 values (4, 4, 2, 2);
+--disable_info
+create table t2(a int primary key, b int, c int, unique(b), unique(c));
+--enable_info
+insert into t2 select a, b, c from t1 order by id on duplicate key update b=t2.b, c=t2.c;
+--disable_info
+select * from t2 order by a;
+
+drop table if exists t1;
+create table t1(a int primary key, b int);
+--enable_info
+insert into t1 values(1,1),(2,2),(3,3),(4,4),(5,5);
+insert into t1 values(4,14),(5,15),(6,16),(7,17),(8,18) on duplicate key update b=b+10;
+--disable_info
+
+drop table if exists a, b;
+create table a(x int primary key);
+create table b(x int, y int);
+--enable_info
+insert into a values(1);
+insert into b values(1, 2);
+insert into a select x from b ON DUPLICATE KEY UPDATE a.x=b.y;
+--disable_info
+select * from a;
+
+--echo ## Test issue 28078.
+--echo ## Use different types of columns so that an error is likely if the types mismatch.
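+## (Editorial sketch; sketch_s/sketch_d are invented names.) A reduced version
+## of the pattern tested below: the update clause of INSERT ... SELECT ... ON
+## DUPLICATE KEY UPDATE may read columns of the SELECT's source table, and the
+## assigned value is cast to the target column's type.
+drop table if exists sketch_s, sketch_d;
+create table sketch_d (id int unique, txt varchar(10));
+create table sketch_s (id int, n int);
+insert into sketch_d values (1, 'old');
+insert into sketch_s values (1, 42);
+## duplicate on id = 1: txt is set from the int column n, cast to '42'
+insert into sketch_d (id) select id from sketch_s on duplicate key update sketch_d.txt = sketch_s.n;
+select * from sketch_d;
+drop table sketch_s, sketch_d;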
+drop table if exists a, b;
+create table a(id int, a1 timestamp, a2 varchar(10), a3 float, unique(id));
+create table b(id int, b1 time, b2 varchar(10), b3 int);
+--enable_info
+insert into a values (1, '2022-01-04 07:02:04', 'a', 1.1), (2, '2022-01-04 07:02:05', 'b', 2.2);
+insert into b values (2, '12:34:56', 'c', 10), (3, '01:23:45', 'd', 20);
+insert into a (id) select id from b on duplicate key update a.a2 = b.b2, a.a3 = 3.3;
+--disable_info
+select * from a;
+--enable_info
+insert into a (id) select 4 from b where b3 = 20 on duplicate key update a.a3 = b.b3;
+--disable_info
+select * from a;
+--enable_info
+insert into a (a2, a3) select 'x', 1.2 from b on duplicate key update a.a2 = b.b3;
+--disable_info
+select * from a;
+
+--echo ## Reproduce an insert on duplicate key update bug under the new row format.
+drop table if exists t1;
+create table t1(c1 decimal(6,4), primary key(c1));
+insert into t1 set c1 = 0.1;
+insert into t1 set c1 = 0.1 on duplicate key update c1 = 1;
+select * from t1 use index(primary);
+
+# TestNonStrictInsertOverflowValue
+drop table if exists t;
+create table t (d int);
+-- error 1690
+insert into t values (cast('18446744073709551616' as unsigned));
+set sql_mode='';
+--enable_warnings
+insert into t values (cast('18446744073709551616' as unsigned));
+--disable_warnings
+set sql_mode=DEFAULT;
+
+# TestInsertIgnoreOnDupWithFK
+drop table if exists parent, child;
+create table parent (id int primary key, ref int, key(ref));
+create table child (id int primary key, ref int, foreign key (ref) references parent(ref));
+insert into parent values (1, 1), (2, 2);
+insert into child values (1, 1);
+
+insert into child values (1, 2) on duplicate key update ref = 2;
+-- error 1452
+insert into child values (1, 3) on duplicate key update ref = 3;
+--enable_warnings
+insert ignore into child values (1, 3) on duplicate key update ref = 3;
+--disable_warnings
+
+-- error 1451
+insert into parent values (2, 3) on duplicate key update ref = 3;
+--enable_warnings
+insert ignore into parent values (2, 3) on duplicate key update ref = 3;
+--disable_warnings
+
+# TestIssue55457
+drop table if exists t1, t2;
+create table t1 (id int primary key, col1 varchar(10) not null default '');
+create table t2 (id int primary key, col1 varchar(10));
+insert into t2 values (1, null);
+insert ignore into t1 values(5, null);
+set session sql_mode = '';
+-- error 1048
+insert into t1 values(1, null);
+-- error 1048
+insert into t1 set id = 1, col1 = null;
+-- error 1048
+insert t1 VALUES (5, 5) ON DUPLICATE KEY UPDATE col1 = null;
+insert t1 VALUES (5, 5), (6, null) ON DUPLICATE KEY UPDATE col1 = null;
+select * from t1;
+insert into t1 select * from t2;
+show warnings;
+insert into t1 values(2, null), (3, 3), (4, 4);
+show warnings;
+update t1 set col1 = null where id = 3;
+show warnings;
+insert ignore t1 VALUES (4, 4) ON DUPLICATE KEY UPDATE col1 = null;
+select * from t1;
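+## (Editorial sketch condensing the rule TestIssue55457 pins down; sketch_nn
+## is a hypothetical table. The surrounding test already runs with
+## sql_mode = '' and leaves it that way, so the sketch does not restore it.)
+## With sql_mode = '', a single-row insert of NULL into a NOT NULL column is
+## still a hard error (1048), while a multi-row insert downgrades it to a
+## warning and stores the column's implicit default ('' here).
+drop table if exists sketch_nn;
+create table sketch_nn (id int primary key, c varchar(10) not null default '');
+set session sql_mode = '';
+-- error 1048
+insert into sketch_nn values (1, null);
+## multi-row: NULL becomes '' with warning 1048 instead of failing
+insert into sketch_nn values (1, null), (2, 'x');
+show warnings;
+select * from sketch_nn;
+drop table sketch_nn;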