diff --git a/ddl/reorg.go b/ddl/reorg.go
index d42025913f3ae..757a19e6bea31 100644
--- a/ddl/reorg.go
+++ b/ddl/reorg.go
@@ -99,8 +99,169 @@ func newContext(store kv.Storage) sessionctx.Context {
 
 const defaultWaitReorgTimeout = 10 * time.Second
 
+<<<<<<< HEAD:ddl/reorg.go
 // ReorgWaitTimeout is the timeout that wait ddl in write reorganization stage.
 var ReorgWaitTimeout = 5 * time.Second
+=======
+	ctx := newReorgExprCtx()
+	evalCtx := ctx.GetStaticEvalCtx().Apply(
+		exprstatic.WithSQLMode(reorgMeta.SQLMode),
+		exprstatic.WithLocation(loc),
+		exprstatic.WithTypeFlags(reorgTypeFlagsWithSQLMode(reorgMeta.SQLMode)),
+		exprstatic.WithErrLevelMap(reorgErrLevelsWithSQLMode(reorgMeta.SQLMode)),
+		exprstatic.WithWarnHandler(warnHandler),
+	)
+	return ctx.Apply(exprstatic.WithEvalCtx(evalCtx)), nil
+}
+
+// reorgTableMutateContext implements table.MutateContext for reorganization.
+type reorgTableMutateContext struct {
+	exprCtx            exprctx.ExprContext
+	encodingConfig     tblctx.RowEncodingConfig
+	mutateBuffers      *tblctx.MutateBuffers
+	shardID            *variable.RowIDShardGenerator
+	reservedRowIDAlloc stmtctx.ReservedRowIDAlloc
+}
+
+// AlternativeAllocators implements table.MutateContext.AlternativeAllocators.
+func (*reorgTableMutateContext) AlternativeAllocators(*model.TableInfo) (autoid.Allocators, bool) {
+	// There are no alternative allocators for any table, because temporary tables
+	// (which do not have any data in TiKV) are not supported in reorganization.
+	return autoid.Allocators{}, false
+}
+
+// GetExprCtx implements table.MutateContext.GetExprCtx.
+func (ctx *reorgTableMutateContext) GetExprCtx() exprctx.ExprContext {
+	return ctx.exprCtx
+}
+
+// ConnectionID implements table.MutateContext.ConnectionID.
+func (*reorgTableMutateContext) ConnectionID() uint64 {
+	return 0
+}
+
+// InRestrictedSQL implements table.MutateContext.InRestrictedSQL.
+func (*reorgTableMutateContext) InRestrictedSQL() bool {
+	return false
+}
+
+// TxnAssertionLevel implements table.MutateContext.TxnAssertionLevel.
+func (*reorgTableMutateContext) TxnAssertionLevel() variable.AssertionLevel {
+	// Because only `index.Create` and `index.Delete` are invoked in reorganization, and neither uses this method,
+	// we can just return `AssertionLevelOff`.
+	return variable.AssertionLevelOff
+}
+
+// EnableMutationChecker implements table.MutateContext.EnableMutationChecker.
+func (*reorgTableMutateContext) EnableMutationChecker() bool {
+	// Because only `index.Create` and `index.Delete` are invoked in reorganization, and neither uses this method,
+	// we can just return false.
+	return false
+}
+
+// GetRowEncodingConfig implements table.MutateContext.GetRowEncodingConfig.
+func (ctx *reorgTableMutateContext) GetRowEncodingConfig() tblctx.RowEncodingConfig {
+	return ctx.encodingConfig
+}
+
+// GetMutateBuffers implements table.MutateContext.GetMutateBuffers.
+func (ctx *reorgTableMutateContext) GetMutateBuffers() *tblctx.MutateBuffers {
+	return ctx.mutateBuffers
+}
+
+// GetRowIDShardGenerator implements table.MutateContext.GetRowIDShardGenerator.
+func (ctx *reorgTableMutateContext) GetRowIDShardGenerator() *variable.RowIDShardGenerator {
+	return ctx.shardID
+}
+
+// GetReservedRowIDAlloc implements table.MutateContext.GetReservedRowIDAlloc.
+func (ctx *reorgTableMutateContext) GetReservedRowIDAlloc() (*stmtctx.ReservedRowIDAlloc, bool) {
+	return &ctx.reservedRowIDAlloc, true
+}
+
+// GetStatisticsSupport implements table.MutateContext.GetStatisticsSupport.
+func (*reorgTableMutateContext) GetStatisticsSupport() (tblctx.StatisticsSupport, bool) {
+	// We can just return `(nil, false)` because:
+	// - Only `index.Create` and `index.Delete` are invoked in reorganization, and neither uses this method.
+	// - DDL reorg does not need to collect statistics in this way.
+	return nil, false
+}
+
+// GetCachedTableSupport implements table.MutateContext.GetCachedTableSupport.
+func (*reorgTableMutateContext) GetCachedTableSupport() (tblctx.CachedTableSupport, bool) {
+	// We can just return `(nil, false)` because:
+	// - Only `index.Create` and `index.Delete` are invoked in reorganization, and neither uses this method.
+	// - It is not allowed to execute DDL on a cached table.
+	return nil, false
+}
+
+// GetTemporaryTableSupport implements table.MutateContext.GetTemporaryTableSupport.
+func (*reorgTableMutateContext) GetTemporaryTableSupport() (tblctx.TemporaryTableSupport, bool) {
+	// We can just return `(nil, false)` because:
+	// - Only `index.Create` and `index.Delete` are invoked in reorganization, and neither uses this method.
+	// - Temporary tables do not have any data in TiKV.
+	return nil, false
+}
+
+// GetExchangePartitionDMLSupport implements table.MutateContext.GetExchangePartitionDMLSupport.
+func (*reorgTableMutateContext) GetExchangePartitionDMLSupport() (tblctx.ExchangePartitionDMLSupport, bool) {
+	// We can just return `(nil, false)` because only `index.Create` and `index.Delete`
+	// are invoked in reorganization, and neither uses this method.
+	return nil, false
+}
+
+// newReorgTableMutateContext creates a new table.MutateContext for reorganization.
+func newReorgTableMutateContext(exprCtx exprctx.ExprContext) table.MutateContext {
+	rowEncoder := &rowcodec.Encoder{
+		Enable: variable.GetDDLReorgRowFormat() != variable.DefTiDBRowFormatV1,
+	}
+
+	encodingConfig := tblctx.RowEncodingConfig{
+		IsRowLevelChecksumEnabled: rowEncoder.Enable,
+		RowEncoder:                rowEncoder,
+	}
+
+	return &reorgTableMutateContext{
+		exprCtx:        exprCtx,
+		encodingConfig: encodingConfig,
+		mutateBuffers:  tblctx.NewMutateBuffers(&variable.WriteStmtBufs{}),
+		// Though `RowIDShardGenerator` is not currently required in DDL reorg,
+		// we still provide a valid one to keep the context complete and to avoid a panic if it is used in the future.
+		shardID: variable.NewRowIDShardGenerator(
+			rand.New(rand.NewSource(time.Now().UnixNano())), // #nosec G404
+			variable.DefTiDBShardAllocateStep,
+		),
+	}
+}
+
+func reorgTypeFlagsWithSQLMode(mode mysql.SQLMode) types.Flags {
+	return types.StrictFlags.
+		WithTruncateAsWarning(!mode.HasStrictMode()).
+		WithIgnoreInvalidDateErr(mode.HasAllowInvalidDatesMode()).
+		WithIgnoreZeroInDate(!mode.HasStrictMode() || mode.HasAllowInvalidDatesMode()).
+		WithCastTimeToYearThroughConcat(true)
+}
+
+func reorgErrLevelsWithSQLMode(mode mysql.SQLMode) errctx.LevelMap {
+	return errctx.LevelMap{
+		errctx.ErrGroupTruncate:  errctx.ResolveErrLevel(false, !mode.HasStrictMode()),
+		errctx.ErrGroupBadNull:   errctx.ResolveErrLevel(false, !mode.HasStrictMode()),
+		errctx.ErrGroupNoDefault: errctx.ResolveErrLevel(false, !mode.HasStrictMode()),
+		errctx.ErrGroupDividedByZero: errctx.ResolveErrLevel(
+			!mode.HasErrorForDivisionByZeroMode(),
+			!mode.HasStrictMode(),
+		),
+	}
+}
+
+func reorgTimeZoneWithTzLoc(tzLoc *model.TimeZoneLocation) (*time.Location, error) {
+	if tzLoc == nil {
+		// It is set to SystemLocation to be compatible with nil LocationInfo.
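+		// A nil tzLoc means no explicit time zone was recorded for the job, so the
+		// server's system location is assumed to be the right fallback here.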
+ return timeutil.SystemLocation(), nil + } + return tzLoc.GetLocation() +} +>>>>>>> 91beef4bb14 (*: disable insert null to not-null column for single-row insertion in non-strict mode (#55477)):pkg/ddl/reorg.go func (rc *reorgCtx) notifyJobState(state model.JobState) { atomic.StoreInt32((*int32)(&rc.jobState), int32(state)) diff --git a/expression/builtin_miscellaneous_vec_test.go b/expression/builtin_miscellaneous_vec_test.go index 6802fe41c83c1..c7c78ae6deee0 100644 --- a/expression/builtin_miscellaneous_vec_test.go +++ b/expression/builtin_miscellaneous_vec_test.go @@ -151,7 +151,14 @@ func TestSleepVectorized(t *testing.T) { warnCnt := counter{} // non-strict model +<<<<<<< HEAD:expression/builtin_miscellaneous_vec_test.go sessVars.StmtCtx.BadNullAsWarning = true +======= + var levels errctx.LevelMap + levels[errctx.ErrGroupBadNull] = errctx.LevelWarn + levels[errctx.ErrGroupNoDefault] = errctx.LevelWarn + sessVars.StmtCtx.SetErrLevels(levels) +>>>>>>> 91beef4bb14 (*: disable insert null to not-null column for single-row insertion in non-strict mode (#55477)):pkg/expression/builtin_miscellaneous_vec_test.go input.AppendFloat64(0, 1) err = f.vecEvalInt(input, result) require.NoError(t, err) @@ -184,7 +191,13 @@ func TestSleepVectorized(t *testing.T) { require.Equal(t, uint16(warnCnt.add(2)), sessVars.StmtCtx.WarningCount()) // for error case under the strict model +<<<<<<< HEAD:expression/builtin_miscellaneous_vec_test.go sessVars.StmtCtx.BadNullAsWarning = false +======= + levels[errctx.ErrGroupBadNull] = errctx.LevelError + levels[errctx.ErrGroupNoDefault] = errctx.LevelError + sessVars.StmtCtx.SetErrLevels(levels) +>>>>>>> 91beef4bb14 (*: disable insert null to not-null column for single-row insertion in non-strict mode (#55477)):pkg/expression/builtin_miscellaneous_vec_test.go input.Reset() input.AppendNull(0) err = f.vecEvalInt(input, result) diff --git a/expression/evaluator_test.go b/expression/evaluator_test.go index b0d1ded2d70b6..1871bede84507 100644 --- a/expression/evaluator_test.go +++ b/expression/evaluator_test.go @@ -104,7 +104,14 @@ func TestSleep(t *testing.T) { fc := funcs[ast.Sleep] // non-strict model +<<<<<<< HEAD:expression/evaluator_test.go sessVars.StmtCtx.BadNullAsWarning = true +======= + var levels errctx.LevelMap + levels[errctx.ErrGroupBadNull] = errctx.LevelWarn + levels[errctx.ErrGroupNoDefault] = errctx.LevelWarn + sessVars.StmtCtx.SetErrLevels(levels) +>>>>>>> 91beef4bb14 (*: disable insert null to not-null column for single-row insertion in non-strict mode (#55477)):pkg/expression/evaluator_test.go d := make([]types.Datum, 1) f, err := fc.getFunction(ctx, datumsToConstants(d)) require.NoError(t, err) @@ -121,7 +128,13 @@ func TestSleep(t *testing.T) { require.Equal(t, int64(0), ret) // for error case under the strict model +<<<<<<< HEAD:expression/evaluator_test.go sessVars.StmtCtx.BadNullAsWarning = false +======= + levels[errctx.ErrGroupBadNull] = errctx.LevelError + levels[errctx.ErrGroupNoDefault] = errctx.LevelError + sessVars.StmtCtx.SetErrLevels(levels) +>>>>>>> 91beef4bb14 (*: disable insert null to not-null column for single-row insertion in non-strict mode (#55477)):pkg/expression/evaluator_test.go d[0].SetNull() _, err = fc.getFunction(ctx, datumsToConstants(d)) require.NoError(t, err) diff --git a/pkg/errctx/context.go b/pkg/errctx/context.go new file mode 100644 index 0000000000000..34fae1304463c --- /dev/null +++ b/pkg/errctx/context.go @@ -0,0 +1,266 @@ +// Copyright 2023 PingCAP, Inc. 
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package errctx
+
+import (
+	"github.com/pingcap/errors"
+	"github.com/pingcap/tidb/pkg/errno"
+	contextutil "github.com/pingcap/tidb/pkg/util/context"
+	"github.com/pingcap/tidb/pkg/util/intest"
+)
+
+// Level defines the behavior for each error.
+type Level uint8
+
+const (
+	// LevelError means the error will be returned
+	LevelError Level = iota
+	// LevelWarn means it will be regarded as a warning
+	LevelWarn
+	// LevelIgnore means the error will be ignored
+	LevelIgnore
+)
+
+// LevelMap indicates the map from `ErrGroup` to `Level`
+type LevelMap [errGroupCount]Level
+
+// Context defines how to handle an error
+type Context struct {
+	levelMap    LevelMap
+	warnHandler contextutil.WarnAppender
+}
+
+// LevelMap returns the `levelMap` of the context.
+func (ctx *Context) LevelMap() LevelMap {
+	return ctx.levelMap
+}
+
+// LevelForGroup returns the level for a specified group.
+func (ctx *Context) LevelForGroup(errGroup ErrGroup) Level {
+	return ctx.levelMap[errGroup]
+}
+
+// WithStrictErrGroupLevel makes the context return errors directly for all kinds of errors.
+func (ctx *Context) WithStrictErrGroupLevel() Context {
+	newCtx := Context{
+		warnHandler: ctx.warnHandler,
+	}
+
+	return newCtx
+}
+
+// WithErrGroupLevel sets a `Level` for an `ErrGroup`
+func (ctx *Context) WithErrGroupLevel(eg ErrGroup, l Level) Context {
+	newCtx := Context{
+		levelMap:    ctx.levelMap,
+		warnHandler: ctx.warnHandler,
+	}
+	newCtx.levelMap[eg] = l
+
+	return newCtx
+}
+
+// WithErrGroupLevels replaces the whole `LevelMap` of the context.
+func (ctx *Context) WithErrGroupLevels(levels LevelMap) Context {
+	return Context{
+		levelMap:    levels,
+		warnHandler: ctx.warnHandler,
+	}
+}
+
+// AppendWarning appends the error to warning. If the inner `warnHandler` is nil, do nothing.
+func (ctx *Context) AppendWarning(err error) {
+	intest.Assert(ctx.warnHandler != nil)
+	if w := ctx.warnHandler; w != nil {
+		// warnHandler should never be nil; the `w != nil` check just keeps the code safe.
+		w.AppendWarning(err)
+	}
+}
+
+// AppendNote appends the error to warning with level 'Note'. If the inner `warnHandler` is nil, do nothing.
+func (ctx *Context) AppendNote(err error) {
+	intest.Assert(ctx.warnHandler != nil)
+	if w := ctx.warnHandler; w != nil {
+		// warnHandler should never be nil; the `w != nil` check just keeps the code safe.
+		w.AppendNote(err)
+	}
+}
+
+// HandleError handles the error according to the context. See the comment of `HandleErrorWithAlias` for the detailed logic.
+//
+// It also accepts an `errors.ErrorGroup`; in that case, it handles each wrapped error in order and returns the first error
+// it finds.
+func (ctx *Context) HandleError(err error) error {
+	if err == nil {
+		return nil
+	}
+	// The handling of `errors.ErrorGroup` is placed in `HandleError` rather than in `HandleErrorWithAlias`, because
+	// it's hard to give a proper error and warn alias for an error group.
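+	// For example, a multi-row write may produce several bad-NULL errors wrapped into one
+	// group; each wrapped error below is resolved against the level map in turn.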
+	if errs, ok := err.(errors.ErrorGroup); ok {
+		for _, singleErr := range errs.Errors() {
+			singleErr = ctx.HandleError(singleErr)
+			// If an error remains after handling, just return it.
+			// TODO: consider whether it's more appropriate to continue to handle the other errors. For example, the other
+			// errors may need to append warnings. The current behavior is the same as TiDB's original behavior before
+			// `errctx` was used to handle multiple errors.
+			if singleErr != nil {
+				return singleErr
+			}
+		}
+
+		return nil
+	}
+
+	return ctx.HandleErrorWithAlias(err, err, err)
+}
+
+// HandleErrorWithAlias handles the error according to the context.
+// 1. If the `internalErr` is not a `"pingcap/errors".Error`, or its error code is not defined in the `errGroupMap`, or the error
+// level is set to `LevelError`(0), the `err` will be returned directly.
+// 2. If the error level is set to `LevelWarn`, the `warnErr` will be appended as a warning.
+// 3. If the error level is set to `LevelIgnore`, this function will return `nil`.
+//
+// In most cases, these three arguments should be the same. If there are many different kinds of errors internally, but they
+// are expected to give the same error to users, the `err` can be different from `internalErr`. Also, if the warning is expected
+// to be different from the initial error, you can use the `warnErr` argument.
+//
+// TODO: is it good to give an error code for internal-only errors? Or should we use another way to distinguish different
+// groups of errors?
+// TODO: both `types.Context` and `errctx.Context` can handle truncate errors now. Refactor them.
+func (ctx *Context) HandleErrorWithAlias(internalErr error, err error, warnErr error) error {
+	if internalErr == nil {
+		return nil
+	}
+
+	internalErr = errors.Cause(internalErr)
+
+	e, ok := internalErr.(*errors.Error)
+	if !ok {
+		return err
+	}
+
+	eg, ok := errGroupMap[e.Code()]
+	if !ok {
+		return err
+	}
+
+	switch ctx.levelMap[eg] {
+	case LevelError:
+		return err
+	case LevelWarn:
+		ctx.AppendWarning(warnErr)
+	case LevelIgnore:
+	}
+
+	return nil
+}
+
+// NewContext creates an error context to handle the errors and warnings
+func NewContext(handler contextutil.WarnAppender) Context {
+	return NewContextWithLevels(LevelMap{}, handler)
+}
+
+// NewContextWithLevels creates an error context to handle the errors and warnings
+func NewContextWithLevels(levels LevelMap, handler contextutil.WarnAppender) Context {
+	intest.Assert(handler != nil)
+	return Context{
+		warnHandler: handler,
+		levelMap:    levels,
+	}
+}
+
+// StrictNoWarningContext returns all errors directly and ignores all warnings
+var StrictNoWarningContext = NewContext(contextutil.IgnoreWarn)
+
+var errGroupMap = make(map[errors.ErrCode]ErrGroup)
+
+// ErrGroup groups the errors according to how they are handled
+type ErrGroup int
+
+const (
+	// ErrGroupTruncate is the group of truncated errors
+	ErrGroupTruncate ErrGroup = iota
+	// ErrGroupDupKey is the group of duplicate key errors
+	ErrGroupDupKey
+	// ErrGroupBadNull is the group of bad null errors
+	ErrGroupBadNull
+	// ErrGroupNoDefault is the group of no default value errors
+	ErrGroupNoDefault
+	// ErrGroupDividedByZero is the group of divided by zero errors
+	ErrGroupDividedByZero
+	// ErrGroupAutoIncReadFailed is the group of auto increment read failed errors
+	ErrGroupAutoIncReadFailed
+	// ErrGroupNoMatchedPartition is the group of no partition is matched errors.
+	ErrGroupNoMatchedPartition
+	// errGroupCount is the count of all `ErrGroup`.
Please leave it at the end of the list. + errGroupCount +) + +func init() { + group2Errors := map[ErrGroup][]errors.ErrCode{ + ErrGroupTruncate: { + errno.ErrTruncatedWrongValue, + errno.ErrDataTooLong, + errno.ErrTruncatedWrongValueForField, + errno.ErrWarnDataOutOfRange, + errno.ErrDataOutOfRange, + errno.ErrBadNumber, + errno.ErrWrongValueForType, + errno.ErrDatetimeFunctionOverflow, + errno.WarnDataTruncated, + errno.ErrIncorrectDatetimeValue, + }, + ErrGroupBadNull: { + errno.ErrBadNull, + errno.ErrWarnNullToNotnull, + }, + ErrGroupNoDefault: { + errno.ErrNoDefaultForField, + }, + ErrGroupDividedByZero: { + errno.ErrDivisionByZero, + }, + ErrGroupAutoIncReadFailed: { + errno.ErrAutoincReadFailed, + }, + ErrGroupNoMatchedPartition: { + errno.ErrNoPartitionForGivenValue, + errno.ErrRowDoesNotMatchGivenPartitionSet, + }, + ErrGroupDupKey: { + errno.ErrDupEntry, + }, + } + + for group, codes := range group2Errors { + for _, errCode := range codes { + errGroupMap[errCode] = group + } + } +} + +// ResolveErrLevel resolves the error level according to the `ignore` and `warn` flags +// if ignore is true, it will return `LevelIgnore` to ignore the error, +// otherwise, it will return `LevelWarn` or `LevelError` according to the `warn` flag +// Only one of `ignore` and `warn` can be true. +func ResolveErrLevel(ignore bool, warn bool) Level { + if ignore { + return LevelIgnore + } + if warn { + return LevelWarn + } + return LevelError +} diff --git a/pkg/executor/BUILD.bazel b/pkg/executor/BUILD.bazel new file mode 100644 index 0000000000000..72d59a63e7604 --- /dev/null +++ b/pkg/executor/BUILD.bazel @@ -0,0 +1,512 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "executor", + srcs = [ + "adapter.go", + "admin.go", + "admin_plugins.go", + "analyze.go", + "analyze_col.go", + "analyze_col_v2.go", + "analyze_global_stats.go", + "analyze_idx.go", + "analyze_utils.go", + "analyze_worker.go", + "batch_checker.go", + "batch_point_get.go", + "bind.go", + "brie.go", + "brie_utils.go", + "builder.go", + "check_table_index.go", + "checksum.go", + "compact_table.go", + "compiler.go", + "coprocessor.go", + "cte.go", + "cte_table_reader.go", + "ddl.go", + "delete.go", + "detach.go", + "distsql.go", + "expand.go", + "explain.go", + "foreign_key.go", + "grant.go", + "import_into.go", + "index_merge_reader.go", + "infoschema_reader.go", + "insert.go", + "insert_common.go", + "inspection_common.go", + "inspection_profile.go", + "inspection_result.go", + "inspection_summary.go", + "load_data.go", + "load_stats.go", + "mem_reader.go", + "memtable_reader.go", + "metrics_reader.go", + "mpp_gather.go", + "operate_ddl_jobs.go", + "opt_rule_blacklist.go", + "parallel_apply.go", + "pipelined_window.go", + "plan_replayer.go", + "point_get.go", + "prepared.go", + "projection.go", + "recommend_index.go", + "reload_expr_pushdown_blacklist.go", + "replace.go", + "revoke.go", + "sample.go", + "select.go", + "select_into.go", + "set.go", + "set_config.go", + "show.go", + "show_bdr_role.go", + "show_ddl.go", + "show_ddl_job_queries.go", + "show_ddl_jobs.go", + "show_next_row_id.go", + "show_placement.go", + "show_slow_queries.go", + "show_stats.go", + "shuffle.go", + "simple.go", + "slow_query.go", + "split.go", + "stmtsummary.go", + "table_reader.go", + "trace.go", + "union_scan.go", + "update.go", + "utils.go", + "window.go", + "write.go", + ], + importpath = "github.com/pingcap/tidb/pkg/executor", + visibility = ["//visibility:public"], + deps = [ + "//br/pkg/glue", + 
"//br/pkg/storage", + "//br/pkg/task", + "//br/pkg/task/show", + "//br/pkg/utils", + "//pkg/bindinfo", + "//pkg/config", + "//pkg/ddl", + "//pkg/ddl/label", + "//pkg/ddl/placement", + "//pkg/ddl/schematracker", + "//pkg/ddl/session", + "//pkg/ddl/util", + "//pkg/distsql", + "//pkg/distsql/context", + "//pkg/disttask/framework/handle", + "//pkg/disttask/framework/proto", + "//pkg/disttask/framework/storage", + "//pkg/disttask/importinto", + "//pkg/domain", + "//pkg/domain/infosync", + "//pkg/errctx", + "//pkg/errno", + "//pkg/executor/aggfuncs", + "//pkg/executor/aggregate", + "//pkg/executor/importer", + "//pkg/executor/internal/applycache", + "//pkg/executor/internal/builder", + "//pkg/executor/internal/calibrateresource", + "//pkg/executor/internal/exec", + "//pkg/executor/internal/mpp", + "//pkg/executor/internal/pdhelper", + "//pkg/executor/internal/querywatch", + "//pkg/executor/internal/testutil", + "//pkg/executor/internal/util", + "//pkg/executor/internal/vecgroupchecker", + "//pkg/executor/join", + "//pkg/executor/lockstats", + "//pkg/executor/metrics", + "//pkg/executor/sortexec", + "//pkg/executor/staticrecordset", + "//pkg/executor/unionexec", + "//pkg/expression", + "//pkg/expression/aggregation", + "//pkg/expression/exprctx", + "//pkg/expression/sessionexpr", + "//pkg/extension", + "//pkg/infoschema", + "//pkg/infoschema/context", + "//pkg/keyspace", + "//pkg/kv", + "//pkg/lightning/backend/encode", + "//pkg/lightning/backend/kv", + "//pkg/lightning/log", + "//pkg/lightning/mydump", + "//pkg/meta", + "//pkg/meta/autoid", + "//pkg/meta/model", + "//pkg/metrics", + "//pkg/parser", + "//pkg/parser/ast", + "//pkg/parser/auth", + "//pkg/parser/charset", + "//pkg/parser/format", + "//pkg/parser/model", + "//pkg/parser/mysql", + "//pkg/parser/terror", + "//pkg/parser/tidb", + "//pkg/parser/types", + "//pkg/planner", + "//pkg/planner/cardinality", + "//pkg/planner/core", + "//pkg/planner/core/base", + "//pkg/planner/core/operator/logicalop", + "//pkg/planner/core/resolve", + "//pkg/planner/indexadvisor", + "//pkg/planner/planctx", + "//pkg/planner/util", + "//pkg/planner/util/coreusage", + "//pkg/planner/util/fixcontrol", + "//pkg/plugin", + "//pkg/privilege", + "//pkg/privilege/privileges", + "//pkg/resourcegroup", + "//pkg/resourcegroup/runaway", + "//pkg/resourcemanager/pool/workerpool", + "//pkg/resourcemanager/util", + "//pkg/session/txninfo", + "//pkg/sessionctx", + "//pkg/sessionctx/sessionstates", + "//pkg/sessionctx/stmtctx", + "//pkg/sessionctx/variable", + "//pkg/sessiontxn", + "//pkg/sessiontxn/staleread", + "//pkg/statistics", + "//pkg/statistics/handle", + "//pkg/statistics/handle/cache", + "//pkg/statistics/handle/storage", + "//pkg/statistics/handle/types", + "//pkg/statistics/handle/util", + "//pkg/store/driver/backoff", + "//pkg/store/driver/txn", + "//pkg/store/helper", + "//pkg/table", + "//pkg/table/tables", + "//pkg/table/temptable", + "//pkg/tablecodec", + "//pkg/types", + "//pkg/types/parser_driver", + "//pkg/util", + "//pkg/util/admin", + "//pkg/util/breakpoint", + "//pkg/util/channel", + "//pkg/util/chunk", + "//pkg/util/codec", + "//pkg/util/collate", + "//pkg/util/context", + "//pkg/util/cteutil", + "//pkg/util/dbterror", + "//pkg/util/dbterror/exeerrors", + "//pkg/util/dbterror/plannererrors", + "//pkg/util/deadlockhistory", + "//pkg/util/disk", + "//pkg/util/disttask", + "//pkg/util/execdetails", + "//pkg/util/filter", + "//pkg/util/format", + "//pkg/util/gcutil", + "//pkg/util/globalconn", + "//pkg/util/hack", + "//pkg/util/hint", + "//pkg/util/intest", 
+ "//pkg/util/keydecoder", + "//pkg/util/logutil", + "//pkg/util/logutil/consistency", + "//pkg/util/mathutil", + "//pkg/util/memory", + "//pkg/util/password-validation", + "//pkg/util/plancodec", + "//pkg/util/printer", + "//pkg/util/ranger", + "//pkg/util/ranger/context", + "//pkg/util/redact", + "//pkg/util/replayer", + "//pkg/util/resourcegrouptag", + "//pkg/util/rowDecoder", + "//pkg/util/rowcodec", + "//pkg/util/sem", + "//pkg/util/servermemorylimit", + "//pkg/util/set", + "//pkg/util/size", + "//pkg/util/sqlescape", + "//pkg/util/sqlexec", + "//pkg/util/sqlkiller", + "//pkg/util/stmtsummary", + "//pkg/util/stmtsummary/v2:stmtsummary", + "//pkg/util/stringutil", + "//pkg/util/syncutil", + "//pkg/util/table-filter", + "//pkg/util/tiflash", + "//pkg/util/timeutil", + "//pkg/util/tls", + "//pkg/util/topsql", + "//pkg/util/topsql/state", + "//pkg/util/tracing", + "@com_github_burntsushi_toml//:toml", + "@com_github_docker_go_units//:go-units", + "@com_github_gogo_protobuf//proto", + "@com_github_google_uuid//:uuid", + "@com_github_opentracing_basictracer_go//:basictracer-go", + "@com_github_opentracing_opentracing_go//:opentracing-go", + "@com_github_pingcap_errors//:errors", + "@com_github_pingcap_failpoint//:failpoint", + "@com_github_pingcap_kvproto//pkg/brpb", + "@com_github_pingcap_kvproto//pkg/coprocessor", + "@com_github_pingcap_kvproto//pkg/deadlock", + "@com_github_pingcap_kvproto//pkg/diagnosticspb", + "@com_github_pingcap_kvproto//pkg/encryptionpb", + "@com_github_pingcap_kvproto//pkg/kvrpcpb", + "@com_github_pingcap_kvproto//pkg/metapb", + "@com_github_pingcap_kvproto//pkg/resource_manager", + "@com_github_pingcap_kvproto//pkg/tikvpb", + "@com_github_pingcap_log//:log", + "@com_github_pingcap_sysutil//:sysutil", + "@com_github_pingcap_tipb//go-tipb", + "@com_github_prometheus_client_golang//api", + "@com_github_prometheus_client_golang//api/prometheus/v1:prometheus", + "@com_github_prometheus_client_golang//prometheus", + "@com_github_prometheus_common//model", + "@com_github_tiancaiamao_gp//:gp", + "@com_github_tikv_client_go_v2//error", + "@com_github_tikv_client_go_v2//kv", + "@com_github_tikv_client_go_v2//oracle", + "@com_github_tikv_client_go_v2//tikv", + "@com_github_tikv_client_go_v2//tikvrpc", + "@com_github_tikv_client_go_v2//txnkv", + "@com_github_tikv_client_go_v2//txnkv/txnlock", + "@com_github_tikv_client_go_v2//txnkv/txnsnapshot", + "@com_github_tikv_client_go_v2//util", + "@com_github_tikv_pd_client//:client", + "@com_github_tikv_pd_client//http", + "@com_github_twmb_murmur3//:murmur3", + "@com_sourcegraph_sourcegraph_appdash//:appdash", + "@com_sourcegraph_sourcegraph_appdash//opentracing", + "@org_golang_google_grpc//:grpc", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//credentials", + "@org_golang_google_grpc//credentials/insecure", + "@org_golang_google_grpc//status", + "@org_golang_x_sync//errgroup", + "@org_uber_go_atomic//:atomic", + "@org_uber_go_zap//:zap", + "@org_uber_go_zap//zapcore", + ], +) + +go_test( + name = "executor_test", + timeout = "moderate", + srcs = [ + "adapter_test.go", + "analyze_test.go", + "analyze_utils_test.go", + "batch_point_get_test.go", + "benchmark_test.go", + "brie_test.go", + "brie_utils_test.go", + "checksum_test.go", + "chunk_size_control_test.go", + "cluster_table_test.go", + "compact_table_test.go", + "copr_cache_test.go", + "delete_test.go", + "detach_integration_test.go", + "detach_test.go", + "distsql_test.go", + "executor_failpoint_test.go", + "executor_pkg_test.go", + 
"executor_required_rows_test.go", + "explain_test.go", + "explain_unit_test.go", + "explainfor_test.go", + "grant_test.go", + "historical_stats_test.go", + "hot_regions_history_table_test.go", + "import_into_test.go", + "infoschema_cluster_table_test.go", + "infoschema_reader_internal_test.go", + "infoschema_reader_test.go", + "insert_test.go", + "inspection_result_test.go", + "inspection_summary_test.go", + "join_pkg_test.go", + "main_test.go", + "memtable_reader_test.go", + "metrics_reader_test.go", + "parallel_apply_test.go", + "partition_table_test.go", + "pkg_test.go", + "point_get_test.go", + "prepared_test.go", + "recover_test.go", + "resource_tag_test.go", + "revoke_test.go", + "sample_test.go", + "select_into_test.go", + "select_test.go", + "set_test.go", + "show_placement_labels_test.go", + "show_placement_test.go", + "show_stats_test.go", + "show_test.go", + "shuffle_test.go", + "slow_query_sql_test.go", + "slow_query_test.go", + "split_test.go", + "stale_txn_test.go", + "stmtsummary_test.go", + "table_readers_required_rows_test.go", + "temporary_table_test.go", + "tikv_regions_peers_table_test.go", + "trace_test.go", + "union_scan_test.go", + "update_test.go", + "utils_test.go", + "window_test.go", + "write_concurrent_test.go", + ], + data = glob(["testdata/**"]), + embed = [":executor"], + flaky = True, + shard_count = 50, + deps = [ + "//pkg/config", + "//pkg/ddl", + "//pkg/ddl/placement", + "//pkg/ddl/util", + "//pkg/distsql", + "//pkg/distsql/context", + "//pkg/domain", + "//pkg/domain/infosync", + "//pkg/errctx", + "//pkg/errno", + "//pkg/executor/aggfuncs", + "//pkg/executor/aggregate", + "//pkg/executor/importer", + "//pkg/executor/internal/builder", + "//pkg/executor/internal/exec", + "//pkg/executor/internal/testutil", + "//pkg/executor/join", + "//pkg/executor/sortexec", + "//pkg/expression", + "//pkg/expression/aggregation", + "//pkg/expression/exprstatic", + "//pkg/extension", + "//pkg/infoschema", + "//pkg/kv", + "//pkg/meta", + "//pkg/meta/autoid", + "//pkg/meta/model", + "//pkg/metrics", + "//pkg/parser", + "//pkg/parser/ast", + "//pkg/parser/auth", + "//pkg/parser/model", + "//pkg/parser/mysql", + "//pkg/parser/terror", + "//pkg/planner", + "//pkg/planner/core", + "//pkg/planner/core/base", + "//pkg/planner/core/operator/logicalop", + "//pkg/planner/core/resolve", + "//pkg/planner/property", + "//pkg/planner/util", + "//pkg/server", + "//pkg/session", + "//pkg/session/types", + "//pkg/sessionctx", + "//pkg/sessionctx/stmtctx", + "//pkg/sessionctx/variable", + "//pkg/sessiontxn", + "//pkg/sessiontxn/staleread", + "//pkg/statistics", + "//pkg/statistics/handle/storage", + "//pkg/statistics/handle/util", + "//pkg/store/copr", + "//pkg/store/driver/error", + "//pkg/store/helper", + "//pkg/store/mockstore", + "//pkg/store/mockstore/unistore", + "//pkg/table", + "//pkg/table/tables", + "//pkg/tablecodec", + "//pkg/testkit", + "//pkg/testkit/external", + "//pkg/testkit/testdata", + "//pkg/testkit/testfailpoint", + "//pkg/testkit/testmain", + "//pkg/testkit/testsetup", + "//pkg/types", + "//pkg/util", + "//pkg/util/benchdaily", + "//pkg/util/chunk", + "//pkg/util/codec", + "//pkg/util/collate", + "//pkg/util/dbterror", + "//pkg/util/dbterror/exeerrors", + "//pkg/util/dbterror/plannererrors", + "//pkg/util/deadlockhistory", + "//pkg/util/disk", + "//pkg/util/execdetails", + "//pkg/util/gcutil", + "//pkg/util/logutil", + "//pkg/util/memory", + "//pkg/util/mock", + "//pkg/util/paging", + "//pkg/util/ranger", + "//pkg/util/sem", + "//pkg/util/set", + "//pkg/util/sqlexec", + 
"//pkg/util/sqlkiller", + "//pkg/util/stmtsummary/v2:stmtsummary", + "//pkg/util/stringutil", + "//pkg/util/syncutil", + "//pkg/util/tableutil", + "//pkg/util/topsql/state", + "@com_github_docker_go_units//:go-units", + "@com_github_gorilla_mux//:mux", + "@com_github_hashicorp_go_version//:go-version", + "@com_github_pingcap_errors//:errors", + "@com_github_pingcap_failpoint//:failpoint", + "@com_github_pingcap_fn//:fn", + "@com_github_pingcap_kvproto//pkg/brpb", + "@com_github_pingcap_kvproto//pkg/diagnosticspb", + "@com_github_pingcap_kvproto//pkg/encryptionpb", + "@com_github_pingcap_kvproto//pkg/errorpb", + "@com_github_pingcap_kvproto//pkg/kvrpcpb", + "@com_github_pingcap_kvproto//pkg/metapb", + "@com_github_pingcap_log//:log", + "@com_github_pingcap_sysutil//:sysutil", + "@com_github_pingcap_tipb//go-tipb", + "@com_github_prometheus_client_golang//prometheus", + "@com_github_prometheus_client_model//go", + "@com_github_prometheus_common//model", + "@com_github_stretchr_testify//mock", + "@com_github_stretchr_testify//require", + "@com_github_tikv_client_go_v2//oracle", + "@com_github_tikv_client_go_v2//testutils", + "@com_github_tikv_client_go_v2//tikv", + "@com_github_tikv_client_go_v2//tikvrpc", + "@com_github_tikv_client_go_v2//tikvrpc/interceptor", + "@com_github_tikv_client_go_v2//util", + "@com_github_tikv_pd_client//http", + "@org_golang_google_grpc//:grpc", + "@org_uber_go_atomic//:atomic", + "@org_uber_go_goleak//:goleak", + "@org_uber_go_zap//zapcore", + ], +) diff --git a/pkg/executor/executor_pkg_test.go b/pkg/executor/executor_pkg_test.go new file mode 100644 index 0000000000000..1aba8c6bbde22 --- /dev/null +++ b/pkg/executor/executor_pkg_test.go @@ -0,0 +1,505 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package executor
+
+import (
+	"fmt"
+	"runtime"
+	"strconv"
+	"strings"
+	"testing"
+	"time"
+	"unsafe"
+
+	"github.com/hashicorp/go-version"
+	"github.com/pingcap/tidb/pkg/domain"
+	"github.com/pingcap/tidb/pkg/errctx"
+	"github.com/pingcap/tidb/pkg/executor/aggfuncs"
+	"github.com/pingcap/tidb/pkg/executor/join"
+	"github.com/pingcap/tidb/pkg/kv"
+	"github.com/pingcap/tidb/pkg/parser/ast"
+	"github.com/pingcap/tidb/pkg/parser/mysql"
+	"github.com/pingcap/tidb/pkg/sessionctx/variable"
+	"github.com/pingcap/tidb/pkg/tablecodec"
+	"github.com/pingcap/tidb/pkg/types"
+	"github.com/pingcap/tidb/pkg/util/collate"
+	"github.com/pingcap/tidb/pkg/util/memory"
+	"github.com/pingcap/tidb/pkg/util/mock"
+	"github.com/pingcap/tidb/pkg/util/ranger"
+	"github.com/pingcap/tidb/pkg/util/tableutil"
+	"github.com/stretchr/testify/require"
+)
+
+// Note: this is a tricky way to export `inspectionSummaryRules` and `inspectionRules` for unit tests
+// while keeping them invisible to normal code.
+var (
+	InspectionSummaryRules = inspectionSummaryRules
+	InspectionRules        = inspectionRules
+)
+
+func TestBuildKvRangesForIndexJoinWithoutCwc(t *testing.T) {
+	indexRanges := make([]*ranger.Range, 0, 6)
+	indexRanges = append(indexRanges, generateIndexRange(1, 1, 1, 1, 1))
+	indexRanges = append(indexRanges, generateIndexRange(1, 1, 2, 1, 1))
+	indexRanges = append(indexRanges, generateIndexRange(1, 1, 2, 1, 2))
+	indexRanges = append(indexRanges, generateIndexRange(1, 1, 3, 1, 1))
+	indexRanges = append(indexRanges, generateIndexRange(2, 1, 1, 1, 1))
+	indexRanges = append(indexRanges, generateIndexRange(2, 1, 2, 1, 1))
+
+	joinKeyRows := make([]*join.IndexJoinLookUpContent, 0, 5)
+	joinKeyRows = append(joinKeyRows, &join.IndexJoinLookUpContent{Keys: generateDatumSlice(1, 1)})
+	joinKeyRows = append(joinKeyRows, &join.IndexJoinLookUpContent{Keys: generateDatumSlice(1, 2)})
+	joinKeyRows = append(joinKeyRows, &join.IndexJoinLookUpContent{Keys: generateDatumSlice(2, 1)})
+	joinKeyRows = append(joinKeyRows, &join.IndexJoinLookUpContent{Keys: generateDatumSlice(2, 2)})
+	joinKeyRows = append(joinKeyRows, &join.IndexJoinLookUpContent{Keys: generateDatumSlice(2, 3)})
+
+	keyOff2IdxOff := []int{1, 3}
+	ctx := mock.NewContext()
+	kvRanges, err := buildKvRangesForIndexJoin(ctx.GetDistSQLCtx(), ctx.GetRangerCtx(), 0, 0, joinKeyRows, indexRanges, keyOff2IdxOff, nil, nil, nil)
+	require.NoError(t, err)
+	// Check that the kvRanges are in order.
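+	// Each range must be non-empty (StartKey < EndKey) and must start at or after
+	// the end of the previous range, i.e. the ranges are sorted and non-overlapping.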
+	for i, kvRange := range kvRanges {
+		require.True(t, kvRange.StartKey.Cmp(kvRange.EndKey) < 0)
+		if i > 0 {
+			require.True(t, kvRange.StartKey.Cmp(kvRanges[i-1].EndKey) >= 0)
+		}
+	}
+}
+
+func TestBuildKvRangesForIndexJoinWithoutCwcAndWithMemoryTracker(t *testing.T) {
+	indexRanges := make([]*ranger.Range, 0, 6)
+	indexRanges = append(indexRanges, generateIndexRange(1, 1, 1, 1, 1))
+	indexRanges = append(indexRanges, generateIndexRange(1, 1, 2, 1, 1))
+	indexRanges = append(indexRanges, generateIndexRange(1, 1, 2, 1, 2))
+	indexRanges = append(indexRanges, generateIndexRange(1, 1, 3, 1, 1))
+	indexRanges = append(indexRanges, generateIndexRange(2, 1, 1, 1, 1))
+	indexRanges = append(indexRanges, generateIndexRange(2, 1, 2, 1, 1))
+
+	bytesConsumed1 := int64(0)
+	{
+		joinKeyRows := make([]*join.IndexJoinLookUpContent, 0, 10)
+		for i := int64(0); i < 10; i++ {
+			joinKeyRows = append(joinKeyRows, &join.IndexJoinLookUpContent{Keys: generateDatumSlice(1, i)})
+		}
+
+		keyOff2IdxOff := []int{1, 3}
+		ctx := mock.NewContext()
+		memTracker := memory.NewTracker(memory.LabelForIndexWorker, -1)
+		kvRanges, err := buildKvRangesForIndexJoin(ctx.GetDistSQLCtx(), ctx.GetRangerCtx(), 0, 0, joinKeyRows, indexRanges, keyOff2IdxOff, nil, memTracker, nil)
+		require.NoError(t, err)
+		// Check that the kvRanges are in order.
+		for i, kvRange := range kvRanges {
+			require.True(t, kvRange.StartKey.Cmp(kvRange.EndKey) < 0)
+			if i > 0 {
+				require.True(t, kvRange.StartKey.Cmp(kvRanges[i-1].EndKey) >= 0)
+			}
+		}
+		bytesConsumed1 = memTracker.BytesConsumed()
+	}
+
+	bytesConsumed2 := int64(0)
+	{
+		joinKeyRows := make([]*join.IndexJoinLookUpContent, 0, 20)
+		for i := int64(0); i < 20; i++ {
+			joinKeyRows = append(joinKeyRows, &join.IndexJoinLookUpContent{Keys: generateDatumSlice(1, i)})
+		}
+
+		keyOff2IdxOff := []int{1, 3}
+		ctx := mock.NewContext()
+		memTracker := memory.NewTracker(memory.LabelForIndexWorker, -1)
+		kvRanges, err := buildKvRangesForIndexJoin(ctx.GetDistSQLCtx(), ctx.GetRangerCtx(), 0, 0, joinKeyRows, indexRanges, keyOff2IdxOff, nil, memTracker, nil)
+		require.NoError(t, err)
+		// Check that the kvRanges are in order.
+		for i, kvRange := range kvRanges {
+			require.True(t, kvRange.StartKey.Cmp(kvRange.EndKey) < 0)
+			if i > 0 {
+				require.True(t, kvRange.StartKey.Cmp(kvRanges[i-1].EndKey) >= 0)
+			}
+		}
+		bytesConsumed2 = memTracker.BytesConsumed()
+	}
+
+	require.Equal(t, 2*bytesConsumed1, bytesConsumed2)
+	require.Equal(t, int64(25560), bytesConsumed1)
+}
+
+func generateIndexRange(vals ...int64) *ranger.Range {
+	lowDatums := generateDatumSlice(vals...)
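+	// The high bounds copy the low bounds, so each generated range is a point range
+	// over the given key values.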
+	highDatums := make([]types.Datum, len(vals))
+	copy(highDatums, lowDatums)
+	return &ranger.Range{LowVal: lowDatums, HighVal: highDatums, Collators: collate.GetBinaryCollatorSlice(len(lowDatums))}
+}
+
+func generateDatumSlice(vals ...int64) []types.Datum {
+	datums := make([]types.Datum, len(vals))
+	for i, val := range vals {
+		datums[i].SetInt64(val)
+	}
+	return datums
+}
+
+func TestSlowQueryRuntimeStats(t *testing.T) {
+	stats := &slowQueryRuntimeStats{
+		totalFileNum: 2,
+		readFileNum:  2,
+		readFile:     time.Second,
+		initialize:   time.Millisecond,
+		readFileSize: 1024 * 1024 * 1024,
+		parseLog:     int64(time.Millisecond * 100),
+		concurrent:   15,
+	}
+	require.Equal(t, "initialize: 1ms, read_file: 1s, parse_log: {time:100ms, concurrency:15}, total_file: 2, read_file: 2, read_size: 1024 MB", stats.String())
+	require.Equal(t, stats.Clone().String(), stats.String())
+	stats.Merge(stats.Clone())
+	require.Equal(t, "initialize: 2ms, read_file: 2s, parse_log: {time:200ms, concurrency:15}, total_file: 4, read_file: 4, read_size: 2 GB", stats.String())
+}
+
+// Test whether the actual number of buckets in a Go map is the same as the estimated number.
+// The test relies on the implementation of the Go map. ref https://github.com/golang/go/blob/go1.13/src/runtime/map.go#L114
+func TestAggPartialResultMapperB(t *testing.T) {
+	// skip err, since we guarantee the success of execution
+	go113, _ := version.NewVersion(`1.13`)
+	// The go version format is `gox.y.z foobar`; we only need the x.y.z part.
+	// The following is pretty hacky, but it is only used in tests, which is acceptable.
+	actualVer, err := version.NewVersion(runtime.Version()[2:6])
+	if err != nil {
+		t.Fatalf("Cannot get actual go version with error %v\n", err)
+	}
+	if actualVer.LessThan(go113) {
+		t.Fatalf("Unsupported version and should never use any version less than go1.13\n")
+	}
+	type testCase struct {
+		rowNum          int
+		expectedB       int
+		expectedGrowing bool
+	}
+	var cases []testCase
+	// https://github.com/golang/go/issues/63438
+	// in 1.21, the load factor of the map is 6 rather than 6.5, and the Go team refused to backport the fix to 1.21.
+	// https://github.com/golang/go/issues/65706
+	// in 1.23, this approach is also problematic.
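+	// A Go map grows once its element count exceeds loadFactor * 2^B, so with the
+	// go1.21 load factor of 6, e.g. 786432 (= 6 * 2^17) rows still fit in B=17 while
+	// 786433 rows trigger growing to B=18; the cases below encode exactly that boundary.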
+ if strings.Contains(runtime.Version(), `go1.21`) { + cases = []testCase{ + { + rowNum: 0, + expectedB: 0, + expectedGrowing: false, + }, + { + rowNum: 95, + expectedB: 4, + expectedGrowing: false, + }, + { + rowNum: 10000, // 6 * (1 << 11) is 12288 + expectedB: 11, + expectedGrowing: false, + }, + { + rowNum: 1000000, // 6 * (1 << 18) is 1572864 + expectedB: 18, + expectedGrowing: false, + }, + { + rowNum: 786432, // 6 * (1 << 17) + expectedB: 17, + expectedGrowing: false, + }, + { + rowNum: 786433, // 6 * (1 << 17) + 1 + expectedB: 18, + expectedGrowing: true, + }, + { + rowNum: 393216, // 6 * (1 << 16) + expectedB: 16, + expectedGrowing: false, + }, + { + rowNum: 393217, // 6 * (1 << 16) + 1 + expectedB: 17, + expectedGrowing: true, + }, + } + } else { + cases = []testCase{ + { + rowNum: 0, + expectedB: 0, + expectedGrowing: false, + }, + { + rowNum: 100, + expectedB: 4, + expectedGrowing: false, + }, + { + rowNum: 10000, + expectedB: 11, + expectedGrowing: false, + }, + { + rowNum: 1000000, + expectedB: 18, + expectedGrowing: false, + }, + { + rowNum: 851968, // 6.5 * (1 << 17) + expectedB: 17, + expectedGrowing: false, + }, + { + rowNum: 851969, // 6.5 * (1 << 17) + 1 + expectedB: 18, + expectedGrowing: true, + }, + { + rowNum: 425984, // 6.5 * (1 << 16) + expectedB: 16, + expectedGrowing: false, + }, + { + rowNum: 425985, // 6.5 * (1 << 16) + 1 + expectedB: 17, + expectedGrowing: true, + }, + } + } + + for _, tc := range cases { + aggMap := make(aggfuncs.AggPartialResultMapper) + tempSlice := make([]aggfuncs.PartialResult, 10) + for num := 0; num < tc.rowNum; num++ { + aggMap[strconv.Itoa(num)] = tempSlice + } + + require.Equal(t, tc.expectedB, getB(aggMap)) + require.Equal(t, tc.expectedGrowing, getGrowing(aggMap)) + } +} + +// A header for a Go map. +// nolint:structcheck +type hmap struct { + // Note: the format of the hmap is also encoded in cmd/compile/internal/gc/reflect.go. + // Make sure this stays in sync with the compiler's definition. + count int // nolint:unused // # live cells == size of map. Must be first (used by len() builtin) + flags uint8 // nolint:unused + B uint8 // nolint:unused // log_2 of # of buckets (can hold up to loadFactor * 2^B items) + noverflow uint16 // nolint:unused // approximate number of overflow buckets; see incrnoverflow for details + hash0 uint32 // nolint:unused // hash seed + + buckets unsafe.Pointer // nolint:unused // array of 2^B Buckets. may be nil if count==0. 
+ oldbuckets unsafe.Pointer // nolint:unused // previous bucket array of half the size, non-nil only when growing + nevacuate uintptr // nolint:unused // progress counter for evacuation (buckets less than this have been evacuated) +} + +func getB(m aggfuncs.AggPartialResultMapper) int { + point := (**hmap)(unsafe.Pointer(&m)) + value := *point + return int(value.B) +} + +func getGrowing(m aggfuncs.AggPartialResultMapper) bool { + point := (**hmap)(unsafe.Pointer(&m)) + value := *point + return value.oldbuckets != nil +} + +func TestFilterTemporaryTableKeys(t *testing.T) { + vars := variable.NewSessionVars(nil) + const tableID int64 = 3 + vars.TxnCtx = &variable.TransactionContext{ + TxnCtxNoNeedToRestore: variable.TxnCtxNoNeedToRestore{ + TemporaryTables: map[int64]tableutil.TempTable{tableID: nil}, + }, + } + + res := filterTemporaryTableKeys(vars, []kv.Key{tablecodec.EncodeTablePrefix(tableID), tablecodec.EncodeTablePrefix(42)}) + require.Len(t, res, 1) +} + +func TestErrLevelsForResetStmtContext(t *testing.T) { + ctx := mock.NewContext() + domain.BindDomain(ctx, &domain.Domain{}) + + cases := []struct { + name string + sqlMode mysql.SQLMode + stmt []ast.StmtNode + levels errctx.LevelMap + }{ + { + name: "strict,write", + sqlMode: mysql.ModeStrictAllTables | mysql.ModeErrorForDivisionByZero, + stmt: []ast.StmtNode{&ast.InsertStmt{}, &ast.UpdateStmt{}, &ast.DeleteStmt{}}, + levels: func() (l errctx.LevelMap) { + l[errctx.ErrGroupTruncate] = errctx.LevelError + l[errctx.ErrGroupDupKey] = errctx.LevelError + l[errctx.ErrGroupBadNull] = errctx.LevelError + l[errctx.ErrGroupNoDefault] = errctx.LevelError + l[errctx.ErrGroupDividedByZero] = errctx.LevelError + l[errctx.ErrGroupAutoIncReadFailed] = errctx.LevelError + l[errctx.ErrGroupNoMatchedPartition] = errctx.LevelError + return + }(), + }, + { + name: "non-strict,write", + sqlMode: mysql.ModeErrorForDivisionByZero, + stmt: []ast.StmtNode{&ast.InsertStmt{}, &ast.UpdateStmt{}, &ast.DeleteStmt{}}, + levels: func() (l errctx.LevelMap) { + l[errctx.ErrGroupTruncate] = errctx.LevelWarn + l[errctx.ErrGroupDupKey] = errctx.LevelError + l[errctx.ErrGroupBadNull] = errctx.LevelWarn + l[errctx.ErrGroupNoDefault] = errctx.LevelWarn + l[errctx.ErrGroupDividedByZero] = errctx.LevelWarn + l[errctx.ErrGroupAutoIncReadFailed] = errctx.LevelError + l[errctx.ErrGroupNoMatchedPartition] = errctx.LevelError + return + }(), + }, + { + name: "strict,insert ignore", + sqlMode: mysql.ModeStrictAllTables | mysql.ModeErrorForDivisionByZero, + stmt: []ast.StmtNode{&ast.InsertStmt{IgnoreErr: true}}, + levels: func() (l errctx.LevelMap) { + l[errctx.ErrGroupTruncate] = errctx.LevelWarn + l[errctx.ErrGroupDupKey] = errctx.LevelWarn + l[errctx.ErrGroupBadNull] = errctx.LevelWarn + l[errctx.ErrGroupNoDefault] = errctx.LevelWarn + l[errctx.ErrGroupDividedByZero] = errctx.LevelWarn + l[errctx.ErrGroupAutoIncReadFailed] = errctx.LevelWarn + l[errctx.ErrGroupNoMatchedPartition] = errctx.LevelWarn + return + }(), + }, + { + name: "strict,update ignore", + sqlMode: mysql.ModeStrictAllTables | mysql.ModeErrorForDivisionByZero, + stmt: []ast.StmtNode{&ast.UpdateStmt{IgnoreErr: true}}, + levels: func() (l errctx.LevelMap) { + l[errctx.ErrGroupTruncate] = errctx.LevelWarn + l[errctx.ErrGroupDupKey] = errctx.LevelWarn + l[errctx.ErrGroupBadNull] = errctx.LevelWarn + l[errctx.ErrGroupNoDefault] = errctx.LevelWarn + l[errctx.ErrGroupDividedByZero] = errctx.LevelWarn + l[errctx.ErrGroupAutoIncReadFailed] = errctx.LevelError + l[errctx.ErrGroupNoMatchedPartition] = errctx.LevelWarn + 
return
+			}(),
+		},
+		{
+			name:    "strict,delete ignore",
+			sqlMode: mysql.ModeStrictAllTables | mysql.ModeErrorForDivisionByZero,
+			stmt:    []ast.StmtNode{&ast.DeleteStmt{IgnoreErr: true}},
+			levels: func() (l errctx.LevelMap) {
+				l[errctx.ErrGroupTruncate] = errctx.LevelWarn
+				l[errctx.ErrGroupDupKey] = errctx.LevelWarn
+				l[errctx.ErrGroupBadNull] = errctx.LevelWarn
+				l[errctx.ErrGroupNoDefault] = errctx.LevelWarn
+				l[errctx.ErrGroupDividedByZero] = errctx.LevelWarn
+				l[errctx.ErrGroupAutoIncReadFailed] = errctx.LevelError
+				l[errctx.ErrGroupNoMatchedPartition] = errctx.LevelError
+				return
+			}(),
+		},
+		{
+			name:    "strict without error_for_division_by_zero,write",
+			sqlMode: mysql.ModeStrictAllTables,
+			stmt:    []ast.StmtNode{&ast.InsertStmt{}, &ast.UpdateStmt{}, &ast.DeleteStmt{}},
+			levels: func() (l errctx.LevelMap) {
+				l[errctx.ErrGroupTruncate] = errctx.LevelError
+				l[errctx.ErrGroupDupKey] = errctx.LevelError
+				l[errctx.ErrGroupBadNull] = errctx.LevelError
+				l[errctx.ErrGroupNoDefault] = errctx.LevelError
+				l[errctx.ErrGroupDividedByZero] = errctx.LevelIgnore
+				l[errctx.ErrGroupAutoIncReadFailed] = errctx.LevelError
+				l[errctx.ErrGroupNoMatchedPartition] = errctx.LevelError
+				return
+			}(),
+		},
+		{
+			name:    "strict,select/union",
+			sqlMode: mysql.ModeStrictAllTables | mysql.ModeErrorForDivisionByZero,
+			stmt:    []ast.StmtNode{&ast.SelectStmt{}, &ast.SetOprStmt{}},
+			levels: func() (l errctx.LevelMap) {
+				l[errctx.ErrGroupTruncate] = errctx.LevelWarn
+				l[errctx.ErrGroupDupKey] = errctx.LevelError
+				l[errctx.ErrGroupBadNull] = errctx.LevelError
+				l[errctx.ErrGroupNoDefault] = errctx.LevelError
+				l[errctx.ErrGroupDividedByZero] = errctx.LevelWarn
+				l[errctx.ErrGroupAutoIncReadFailed] = errctx.LevelError
+				l[errctx.ErrGroupNoMatchedPartition] = errctx.LevelError
+				return
+			}(),
+		},
+		{
+			name:    "non-strict,select/union",
+			sqlMode: mysql.ModeErrorForDivisionByZero,
+			stmt:    []ast.StmtNode{&ast.SelectStmt{}, &ast.SetOprStmt{}},
+			levels: func() (l errctx.LevelMap) {
+				l[errctx.ErrGroupTruncate] = errctx.LevelWarn
+				l[errctx.ErrGroupDupKey] = errctx.LevelError
+				l[errctx.ErrGroupBadNull] = errctx.LevelError
+				l[errctx.ErrGroupNoDefault] = errctx.LevelError
+				l[errctx.ErrGroupDividedByZero] = errctx.LevelWarn
+				l[errctx.ErrGroupAutoIncReadFailed] = errctx.LevelError
+				l[errctx.ErrGroupNoMatchedPartition] = errctx.LevelError
+				return
+			}(),
+		},
+		{
+			name:    "strict,load_data",
+			sqlMode: mysql.ModeStrictAllTables | mysql.ModeErrorForDivisionByZero,
+			stmt:    []ast.StmtNode{&ast.LoadDataStmt{}},
+			levels: func() (l errctx.LevelMap) {
+				l[errctx.ErrGroupTruncate] = errctx.LevelError
+				l[errctx.ErrGroupDupKey] = errctx.LevelError
+				l[errctx.ErrGroupBadNull] = errctx.LevelError
+				l[errctx.ErrGroupNoDefault] = errctx.LevelError
+				l[errctx.ErrGroupDividedByZero] = errctx.LevelWarn
+				l[errctx.ErrGroupAutoIncReadFailed] = errctx.LevelError
+				l[errctx.ErrGroupNoMatchedPartition] = errctx.LevelWarn
+				return
+			}(),
+		},
+		{
+			name:    "non-strict,load_data",
+			sqlMode: mysql.SQLMode(0),
+			stmt:    []ast.StmtNode{&ast.LoadDataStmt{}},
+			levels: func() (l errctx.LevelMap) {
+				l[errctx.ErrGroupTruncate] = errctx.LevelError
+				l[errctx.ErrGroupDupKey] = errctx.LevelError
+				l[errctx.ErrGroupBadNull] = errctx.LevelError
+				l[errctx.ErrGroupNoDefault] = errctx.LevelError
+				l[errctx.ErrGroupDividedByZero] = errctx.LevelWarn
+				l[errctx.ErrGroupAutoIncReadFailed] = errctx.LevelError
+				l[errctx.ErrGroupNoMatchedPartition] = errctx.LevelWarn
+				return
+			}(),
+		},
+	}
+
+	for i, c := range cases {
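+		// Reset the statement context once per statement type and verify that the
+		// resulting error-level map matches the expectation exactly.
+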
require.NotEmpty(t, c.stmt, c.name) + for _, stmt := range c.stmt { + msg := fmt.Sprintf("%d: %s, stmt: %T", i, c.name, stmt) + ctx.GetSessionVars().SQLMode = c.sqlMode + require.NoError(t, ResetContextOfStmt(ctx, stmt), msg) + ec := ctx.GetSessionVars().StmtCtx.ErrCtx() + require.Equal(t, c.levels, ec.LevelMap(), msg) + } + } +} diff --git a/pkg/executor/insert_test.go b/pkg/executor/insert_test.go new file mode 100644 index 0000000000000..4731fae2f8fcd --- /dev/null +++ b/pkg/executor/insert_test.go @@ -0,0 +1,687 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package executor_test + +import ( + "fmt" + "strconv" + "testing" + "time" + + "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/pkg/executor" + "github.com/pingcap/tidb/pkg/meta/autoid" + "github.com/pingcap/tidb/pkg/sessionctx/variable" + "github.com/pingcap/tidb/pkg/table" + "github.com/pingcap/tidb/pkg/testkit" + "github.com/pingcap/tidb/pkg/util" + "github.com/pingcap/tidb/pkg/util/execdetails" + "github.com/stretchr/testify/require" +) + +func TestInsertOnDuplicateKeyWithBinlog(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + failpoint.Enable("github.com/pingcap/tidb/pkg/table/tblsession/forceWriteBinlog", "return") + defer failpoint.Disable("github.com/pingcap/tidb/pkg/table/tblsession/forceWriteBinlog") + testInsertOnDuplicateKey(t, tk) +} + +func testInsertOnDuplicateKey(t *testing.T, tk *testkit.TestKit) { + tk.MustExec("use test") + + tk.MustExec(`drop table if exists t1, t2;`) + tk.MustExec(`create table t1(a1 bigint primary key, b1 bigint);`) + tk.MustExec(`create table t2(a2 bigint primary key, b2 bigint);`) + tk.MustExec(`insert into t1 values(1, 100);`) + require.Equal(t, uint64(1), tk.Session().AffectedRows()) + tk.CheckLastMessage("") + tk.MustExec(`insert into t2 values(1, 200);`) + require.Equal(t, uint64(1), tk.Session().AffectedRows()) + tk.CheckLastMessage("") + + tk.MustExec(`insert into t1 select a2, b2 from t2 on duplicate key update b1 = a2;`) + require.Equal(t, uint64(2), tk.Session().AffectedRows()) + tk.CheckLastMessage("Records: 1 Duplicates: 1 Warnings: 0") + tk.MustQuery(`select * from t1;`).Check(testkit.Rows("1 1")) + + tk.MustExec(`insert into t1 select a2, b2 from t2 on duplicate key update b1 = b2;`) + require.Equal(t, uint64(2), tk.Session().AffectedRows()) + tk.CheckLastMessage("Records: 1 Duplicates: 1 Warnings: 0") + tk.MustQuery(`select * from t1;`).Check(testkit.Rows("1 200")) + + tk.MustExec(`insert into t1 select a2, b2 from t2 on duplicate key update a1 = a2;`) + require.Equal(t, uint64(0), tk.Session().AffectedRows()) + tk.CheckLastMessage("Records: 1 Duplicates: 0 Warnings: 0") + tk.MustQuery(`select * from t1;`).Check(testkit.Rows("1 200")) + + tk.MustExec(`insert into t1 select a2, b2 from t2 on duplicate key update b1 = 300;`) + require.Equal(t, uint64(2), tk.Session().AffectedRows()) + tk.CheckLastMessage("Records: 1 Duplicates: 1 Warnings: 0") + tk.MustQuery(`select * from 
t1;`).Check(testkit.Rows("1 300")) + + tk.MustExec(`insert into t1 values(1, 1) on duplicate key update b1 = 400;`) + require.Equal(t, uint64(2), tk.Session().AffectedRows()) + tk.CheckLastMessage("") + tk.MustQuery(`select * from t1;`).Check(testkit.Rows("1 400")) + + tk.MustExec(`insert into t1 select 1, 500 from t2 on duplicate key update b1 = 400;`) + require.Equal(t, uint64(0), tk.Session().AffectedRows()) + tk.CheckLastMessage("Records: 1 Duplicates: 0 Warnings: 0") + tk.MustQuery(`select * from t1;`).Check(testkit.Rows("1 400")) + + tk.MustExec(`drop table if exists t1, t2;`) + tk.MustExec(`create table t1(a bigint primary key, b bigint);`) + tk.MustExec(`create table t2(a bigint primary key, b bigint);`) + tk.MustGetErrMsg(`insert into t1 select * from t2 on duplicate key update c = t2.b;`, + `[planner:1054]Unknown column 'c' in 'field list'`) + + tk.MustExec(`drop table if exists t1, t2;`) + tk.MustExec(`create table t1(a bigint primary key, b bigint);`) + tk.MustExec(`create table t2(a bigint primary key, b bigint);`) + tk.MustGetErrMsg(`insert into t1 select * from t2 on duplicate key update a = b;`, + `[planner:1052]Column 'b' in field list is ambiguous`) + + tk.MustExec(`drop table if exists t1, t2;`) + tk.MustExec(`create table t1(a bigint primary key, b bigint);`) + tk.MustExec(`create table t2(a bigint primary key, b bigint);`) + tk.MustGetErrMsg(`insert into t1 select * from t2 on duplicate key update c = b;`, + `[planner:1054]Unknown column 'c' in 'field list'`) + + tk.MustExec(`drop table if exists t1, t2;`) + tk.MustExec(`create table t1(a1 bigint primary key, b1 bigint);`) + tk.MustExec(`create table t2(a2 bigint primary key, b2 bigint);`) + tk.MustGetErrMsg(`insert into t1 select * from t2 on duplicate key update a1 = values(b2);`, + `[planner:1054]Unknown column 'b2' in 'field list'`) + + tk.MustExec(`drop table if exists t1, t2;`) + tk.MustExec(`create table t1(a1 bigint primary key, b1 bigint);`) + tk.MustExec(`create table t2(a2 bigint primary key, b2 bigint);`) + tk.MustExec(`insert into t1 values(1, 100);`) + require.Equal(t, uint64(1), tk.Session().AffectedRows()) + tk.CheckLastMessage("") + tk.MustExec(`insert into t2 values(1, 200);`) + require.Equal(t, uint64(1), tk.Session().AffectedRows()) + tk.CheckLastMessage("") + tk.MustExec(`insert into t1 select * from t2 on duplicate key update b1 = values(b1) + b2;`) + require.Equal(t, uint64(2), tk.Session().AffectedRows()) + tk.CheckLastMessage("Records: 1 Duplicates: 1 Warnings: 0") + tk.MustQuery(`select * from t1`).Check(testkit.Rows("1 400")) + tk.MustExec(`insert into t1 select * from t2 on duplicate key update b1 = values(b1) + b2;`) + require.Equal(t, uint64(0), tk.Session().AffectedRows()) + tk.CheckLastMessage("Records: 1 Duplicates: 0 Warnings: 0") + tk.MustQuery(`select * from t1`).Check(testkit.Rows("1 400")) + + tk.MustExec(`drop table if exists t;`) + tk.MustExec(`create table t(k1 bigint, k2 bigint, val bigint, primary key(k1, k2));`) + tk.MustExec(`insert into t (val, k1, k2) values (3, 1, 2);`) + require.Equal(t, uint64(1), tk.Session().AffectedRows()) + tk.CheckLastMessage("") + tk.MustQuery(`select * from t;`).Check(testkit.Rows(`1 2 3`)) + tk.MustExec(`insert into t (val, k1, k2) select c, a, b from (select 1 as a, 2 as b, 4 as c) tmp on duplicate key update val = tmp.c;`) + require.Equal(t, uint64(2), tk.Session().AffectedRows()) + tk.CheckLastMessage("Records: 1 Duplicates: 1 Warnings: 0") + tk.MustQuery(`select * from t;`).Check(testkit.Rows(`1 2 4`)) + + tk.MustExec(`drop table if exists 
t;`) + tk.MustExec(`create table t(k1 double, k2 double, v double, primary key(k1, k2));`) + tk.MustExec(`insert into t (v, k1, k2) select c, a, b from (select "3" c, "1" a, "2" b) tmp on duplicate key update v=c;`) + require.Equal(t, uint64(1), tk.Session().AffectedRows()) + tk.CheckLastMessage("Records: 1 Duplicates: 0 Warnings: 0") + tk.MustQuery(`select * from t;`).Check(testkit.Rows(`1 2 3`)) + tk.MustExec(`insert into t (v, k1, k2) select c, a, b from (select "3" c, "1" a, "2" b) tmp on duplicate key update v=c;`) + require.Equal(t, uint64(0), tk.Session().AffectedRows()) + tk.CheckLastMessage("Records: 1 Duplicates: 0 Warnings: 0") + tk.MustQuery(`select * from t;`).Check(testkit.Rows(`1 2 3`)) + + tk.MustExec(`drop table if exists t1, t2;`) + tk.MustExec(`create table t1(id int, a int, b int);`) + tk.MustExec(`insert into t1 values (1, 1, 1);`) + require.Equal(t, uint64(1), tk.Session().AffectedRows()) + tk.CheckLastMessage("") + tk.MustExec(`insert into t1 values (2, 2, 1);`) + require.Equal(t, uint64(1), tk.Session().AffectedRows()) + tk.CheckLastMessage("") + tk.MustExec(`insert into t1 values (3, 3, 1);`) + require.Equal(t, uint64(1), tk.Session().AffectedRows()) + tk.CheckLastMessage("") + tk.MustExec(`create table t2(a int primary key, b int, unique(b));`) + tk.MustExec(`insert into t2 select a, b from t1 order by id on duplicate key update a=t1.a, b=t1.b;`) + require.Equal(t, uint64(5), tk.Session().AffectedRows()) + tk.CheckLastMessage("Records: 3 Duplicates: 2 Warnings: 0") + tk.MustQuery(`select * from t2 order by a;`).Check(testkit.Rows(`3 1`)) + + tk.MustExec(`drop table if exists t1, t2;`) + tk.MustExec(`create table t1(id int, a int, b int);`) + tk.MustExec(`insert into t1 values (1, 1, 1);`) + require.Equal(t, uint64(1), tk.Session().AffectedRows()) + tk.CheckLastMessage("") + tk.MustExec(`insert into t1 values (2, 1, 2);`) + require.Equal(t, uint64(1), tk.Session().AffectedRows()) + tk.CheckLastMessage("") + tk.MustExec(`insert into t1 values (3, 3, 1);`) + require.Equal(t, uint64(1), tk.Session().AffectedRows()) + tk.CheckLastMessage("") + tk.MustExec(`create table t2(a int primary key, b int, unique(b));`) + tk.MustExec(`insert into t2 select a, b from t1 order by id on duplicate key update a=t1.a, b=t1.b;`) + require.Equal(t, uint64(4), tk.Session().AffectedRows()) + tk.CheckLastMessage("Records: 3 Duplicates: 1 Warnings: 0") + tk.MustQuery(`select * from t2 order by a;`).Check(testkit.Rows(`1 2`, `3 1`)) + + tk.MustExec(`drop table if exists t1, t2;`) + tk.MustExec(`create table t1(id int, a int, b int, c int);`) + tk.MustExec(`insert into t1 values (1, 1, 1, 1);`) + tk.MustExec(`insert into t1 values (2, 2, 1, 2);`) + tk.MustExec(`insert into t1 values (3, 3, 2, 2);`) + tk.MustExec(`insert into t1 values (4, 4, 2, 2);`) + tk.MustExec(`create table t2(a int primary key, b int, c int, unique(b), unique(c));`) + tk.MustExec(`insert into t2 select a, b, c from t1 order by id on duplicate key update b=t2.b, c=t2.c;`) + require.Equal(t, uint64(2), tk.Session().AffectedRows()) + tk.CheckLastMessage("Records: 4 Duplicates: 0 Warnings: 0") + tk.MustQuery(`select * from t2 order by a;`).Check(testkit.Rows(`1 1 1`, `3 2 2`)) + + tk.MustExec(`drop table if exists t1`) + tk.MustExec(`create table t1(a int primary key, b int);`) + tk.MustExec(`insert into t1 values(1,1),(2,2),(3,3),(4,4),(5,5);`) + require.Equal(t, uint64(5), tk.Session().AffectedRows()) + tk.CheckLastMessage("Records: 5 Duplicates: 0 Warnings: 0") + tk.MustExec(`insert into t1 
values(4,14),(5,15),(6,16),(7,17),(8,18) on duplicate key update b=b+10`)
+	require.Equal(t, uint64(7), tk.Session().AffectedRows())
+	tk.CheckLastMessage("Records: 5 Duplicates: 2 Warnings: 0")
+
+	tk.MustExec("drop table if exists a, b")
+	tk.MustExec("create table a(x int primary key)")
+	tk.MustExec("create table b(x int, y int)")
+	tk.MustExec("insert into a values(1)")
+	tk.MustExec("insert into b values(1, 2)")
+	tk.MustExec("insert into a select x from b ON DUPLICATE KEY UPDATE a.x=b.y")
+	require.Equal(t, uint64(2), tk.Session().AffectedRows())
+	tk.MustQuery("select * from a").Check(testkit.Rows("2"))
+
+	// Test issue 28078.
+	// Use different types of columns so that an error is likely if the types mismatch.
+	tk.MustExec("drop table if exists a, b")
+	tk.MustExec("create table a(id int, a1 timestamp, a2 varchar(10), a3 float, unique(id))")
+	tk.MustExec("create table b(id int, b1 time, b2 varchar(10), b3 int)")
+	tk.MustExec("insert into a values (1, '2022-01-04 07:02:04', 'a', 1.1), (2, '2022-01-04 07:02:05', 'b', 2.2)")
+	tk.MustExec("insert into b values (2, '12:34:56', 'c', 10), (3, '01:23:45', 'd', 20)")
+	tk.MustExec("insert into a (id) select id from b on duplicate key update a.a2 = b.b2, a.a3 = 3.3")
+	require.Equal(t, uint64(3), tk.Session().AffectedRows())
+	tk.MustQuery("select * from a").Check(testkit.RowsWithSep("/",
+		"1/2022-01-04 07:02:04/a/1.1",
+		"2/2022-01-04 07:02:05/c/3.3",
+		"3///"))
+	tk.MustExec("insert into a (id) select 4 from b where b3 = 20 on duplicate key update a.a3 = b.b3")
+	require.Equal(t, uint64(1), tk.Session().AffectedRows())
+	tk.MustQuery("select * from a").Check(testkit.RowsWithSep("/",
+		"1/2022-01-04 07:02:04/a/1.1",
+		"2/2022-01-04 07:02:05/c/3.3",
+		"3///",
+		"4///"))
+	tk.MustExec("insert into a (a2, a3) select 'x', 1.2 from b on duplicate key update a.a2 = b.b3")
+	require.Equal(t, uint64(2), tk.Session().AffectedRows())
+	tk.MustQuery("select * from a").Check(testkit.RowsWithSep("/",
+		"1/2022-01-04 07:02:04/a/1.1",
+		"2/2022-01-04 07:02:05/c/3.3",
+		"3///",
+		"4///",
+		"//x/1.2",
+		"//x/1.2"))
+
+	// reproduce insert on duplicate key update bug under new row format.
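+	// The repro uses a decimal clustered primary key: ON DUPLICATE KEY UPDATE
+	// rewrites the key column, and reading the row back through the primary index
+	// checks that the value was re-encoded consistently under the new row format.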
+ tk.MustExec(`drop table if exists t1`) + tk.MustExec(`create table t1(c1 decimal(6,4), primary key(c1))`) + tk.MustExec(`insert into t1 set c1 = 0.1`) + tk.MustExec(`insert into t1 set c1 = 0.1 on duplicate key update c1 = 1`) + tk.MustQuery(`select * from t1 use index(primary)`).Check(testkit.Rows(`1.0000`)) +} + +func TestAllocateContinuousRowID(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec(`use test`) + tk.MustExec(`create table t1 (a int,b int, key I_a(a));`) + var wg util.WaitGroupWrapper + for i := 0; i < 5; i++ { + idx := i + wg.Run(func() { + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + for j := 0; j < 10; j++ { + k := strconv.Itoa(idx*100 + j) + sql := "insert into t1(a,b) values (" + k + ", 2)" + for t := 0; t < 20; t++ { + sql += ",(" + k + ",2)" + } + tk.MustExec(sql) + q := "select _tidb_rowid from t1 where a=" + k + rows := tk.MustQuery(q).Rows() + require.Equal(t, 21, len(rows)) + last := 0 + for _, r := range rows { + require.Equal(t, 1, len(r)) + v, err := strconv.Atoi(r[0].(string)) + require.Equal(t, nil, err) + if last > 0 { + require.Equal(t, v, last+1) + } + last = v + } + } + }) + } + wg.Wait() +} + +func TestAutoRandomID(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec(`use test`) + tk.MustExec(`drop table if exists ar`) + tk.MustExec(`create table ar (id bigint key clustered auto_random, name char(10))`) + + tk.MustExec(`insert into ar(id) values (null)`) + rs := tk.MustQuery(`select id from ar`) + require.Equal(t, 1, len(rs.Rows())) + firstValue, err := strconv.Atoi(rs.Rows()[0][0].(string)) + require.NoError(t, err) + require.Greater(t, firstValue, 0) + tk.MustQuery(`select last_insert_id()`).Check(testkit.Rows(fmt.Sprintf("%d", firstValue))) + tk.MustExec(`delete from ar`) + + tk.MustExec(`insert into ar(id) values (0)`) + rs = tk.MustQuery(`select id from ar`) + require.Equal(t, 1, len(rs.Rows())) + firstValue, err = strconv.Atoi(rs.Rows()[0][0].(string)) + require.NoError(t, err) + require.Greater(t, firstValue, 0) + tk.MustQuery(`select last_insert_id()`).Check(testkit.Rows(fmt.Sprintf("%d", firstValue))) + tk.MustExec(`delete from ar`) + + tk.MustExec(`insert into ar(name) values ('a')`) + rs = tk.MustQuery(`select id from ar`) + require.Equal(t, 1, len(rs.Rows())) + firstValue, err = strconv.Atoi(rs.Rows()[0][0].(string)) + require.NoError(t, err) + require.Greater(t, firstValue, 0) + tk.MustQuery(`select last_insert_id()`).Check(testkit.Rows(fmt.Sprintf("%d", firstValue))) + + tk.MustExec(`drop table ar`) + tk.MustExec(`create table ar (id bigint key clustered auto_random(15), name char(10))`) + overflowVal := 1 << (64 - 5) + errMsg := fmt.Sprintf(autoid.AutoRandomRebaseOverflow, overflowVal, 1<<(64-16)-1) + tk.MustContainErrMsg(fmt.Sprintf("alter table ar auto_random_base = %d", overflowVal), errMsg) +} + +func TestMultiAutoRandomID(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec(`use test`) + tk.MustExec(`drop table if exists ar`) + tk.MustExec(`create table ar (id bigint key clustered auto_random, name char(10))`) + + tk.MustExec(`insert into ar(id) values (null),(null),(null)`) + rs := tk.MustQuery(`select id from ar order by id`) + require.Equal(t, 3, len(rs.Rows())) + firstValue, err := strconv.Atoi(rs.Rows()[0][0].(string)) + require.NoError(t, err) + require.Greater(t, firstValue, 0) + require.Equal(t, fmt.Sprintf("%d", firstValue+1), rs.Rows()[1][0].(string)) 
+ require.Equal(t, fmt.Sprintf("%d", firstValue+2), rs.Rows()[2][0].(string)) + tk.MustQuery(`select last_insert_id()`).Check(testkit.Rows(fmt.Sprintf("%d", firstValue))) + tk.MustExec(`delete from ar`) + + tk.MustExec(`insert into ar(id) values (0),(0),(0)`) + rs = tk.MustQuery(`select id from ar order by id`) + require.Equal(t, 3, len(rs.Rows())) + firstValue, err = strconv.Atoi(rs.Rows()[0][0].(string)) + require.NoError(t, err) + require.Greater(t, firstValue, 0) + require.Equal(t, fmt.Sprintf("%d", firstValue+1), rs.Rows()[1][0].(string)) + require.Equal(t, fmt.Sprintf("%d", firstValue+2), rs.Rows()[2][0].(string)) + tk.MustQuery(`select last_insert_id()`).Check(testkit.Rows(fmt.Sprintf("%d", firstValue))) + tk.MustExec(`delete from ar`) + + tk.MustExec(`insert into ar(name) values ('a'),('a'),('a')`) + rs = tk.MustQuery(`select id from ar order by id`) + require.Equal(t, 3, len(rs.Rows())) + firstValue, err = strconv.Atoi(rs.Rows()[0][0].(string)) + require.NoError(t, err) + require.Greater(t, firstValue, 0) + require.Equal(t, fmt.Sprintf("%d", firstValue+1), rs.Rows()[1][0].(string)) + require.Equal(t, fmt.Sprintf("%d", firstValue+2), rs.Rows()[2][0].(string)) + tk.MustQuery(`select last_insert_id()`).Check(testkit.Rows(fmt.Sprintf("%d", firstValue))) + + tk.MustExec(`drop table ar`) +} + +func TestAutoRandomIDAllowZero(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec(`use test`) + tk.MustExec(`drop table if exists ar`) + tk.MustExec(`create table ar (id bigint key clustered auto_random, name char(10))`) + + rs := tk.MustQuery(`select @@session.sql_mode`) + sqlMode := rs.Rows()[0][0].(string) + tk.MustExec(fmt.Sprintf(`set session sql_mode="%s,%s"`, sqlMode, "NO_AUTO_VALUE_ON_ZERO")) + + tk.MustExec(`insert into ar(id) values (0)`) + rs = tk.MustQuery(`select id from ar`) + require.Equal(t, 1, len(rs.Rows())) + firstValue, err := strconv.Atoi(rs.Rows()[0][0].(string)) + require.NoError(t, err) + require.Equal(t, 0, firstValue) + tk.MustQuery(`select last_insert_id()`).Check(testkit.Rows(fmt.Sprintf("%d", firstValue))) + tk.MustExec(`delete from ar`) + + tk.MustExec(`insert into ar(id) values (null)`) + rs = tk.MustQuery(`select id from ar`) + require.Equal(t, 1, len(rs.Rows())) + firstValue, err = strconv.Atoi(rs.Rows()[0][0].(string)) + require.NoError(t, err) + require.Greater(t, firstValue, 0) + tk.MustQuery(`select last_insert_id()`).Check(testkit.Rows(fmt.Sprintf("%d", firstValue))) + + tk.MustExec(`drop table ar`) +} +func TestInsertRuntimeStat(t *testing.T) { + stats := &executor.InsertRuntimeStat{ + BasicRuntimeStats: &execdetails.BasicRuntimeStats{}, + SnapshotRuntimeStats: nil, + CheckInsertTime: 2 * time.Second, + Prefetch: 1 * time.Second, + } + stats.BasicRuntimeStats.Record(5*time.Second, 1) + require.Equal(t, "prepare: 3s, check_insert: {total_time: 2s, mem_insert_time: 1s, prefetch: 1s}", stats.String()) + require.Equal(t, stats.Clone().String(), stats.String()) + stats.Merge(stats.Clone()) + require.Equal(t, "prepare: 6s, check_insert: {total_time: 4s, mem_insert_time: 2s, prefetch: 2s}", stats.String()) + stats.FKCheckTime = time.Second + require.Equal(t, "prepare: 6s, check_insert: {total_time: 4s, mem_insert_time: 2s, prefetch: 2s, fk_check: 1s}", stats.String()) +} + +func TestDuplicateEntryMessage(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test;") + for _, enable := range []variable.ClusteredIndexDefMode{variable.ClusteredIndexDefModeOn, 
variable.ClusteredIndexDefModeOff, variable.ClusteredIndexDefModeIntOnly} { + tk.Session().GetSessionVars().EnableClusteredIndex = enable + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t(a int, b char(10), unique key(b)) collate utf8mb4_general_ci;") + tk.MustExec("insert into t value (34, '12Ak');") + tk.MustGetErrMsg("insert into t value (34, '12Ak');", "[kv:1062]Duplicate entry '12Ak' for key 't.b'") + + tk.MustExec("begin optimistic;") + tk.MustExec("insert into t value (34, '12ak');") + tk.MustExec("delete from t where b = '12ak';") + tk.MustGetErrMsg("commit;", "previous statement: delete from t where b = '12ak';: [kv:1062]Duplicate entry '12ak' for key 't.b'") + + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t (a datetime primary key);") + tk.MustExec("insert into t values ('2020-01-01');") + tk.MustGetErrMsg("insert into t values ('2020-01-01');", "[kv:1062]Duplicate entry '2020-01-01 00:00:00' for key 't.PRIMARY'") + + tk.MustExec("begin optimistic;") + tk.MustExec("insert into t values ('2020-01-01');") + tk.MustExec("delete from t where a = '2020-01-01';") + tk.MustGetErrMsg("commit;", "previous statement: delete from t where a = '2020-01-01';: [kv:1062]Duplicate entry '2020-01-01 00:00:00' for key 't.PRIMARY'") + + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t (a int primary key );") + tk.MustExec("insert into t value (1);") + tk.MustGetErrMsg("insert into t value (1);", "[kv:1062]Duplicate entry '1' for key 't.PRIMARY'") + + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t (a datetime unique);") + tk.MustExec("insert into t values ('2020-01-01');") + tk.MustGetErrMsg("insert into t values ('2020-01-01');", "[kv:1062]Duplicate entry '2020-01-01 00:00:00' for key 't.a'") + + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t (a datetime, b int, c varchar(10), primary key (a, b, c)) collate utf8mb4_general_ci;") + tk.MustExec("insert into t values ('2020-01-01', 1, 'aSDd');") + tk.MustGetErrMsg("insert into t values ('2020-01-01', 1, 'ASDD');", "[kv:1062]Duplicate entry '2020-01-01 00:00:00-1-ASDD' for key 't.PRIMARY'") + + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t (a datetime, b int, c varchar(10), unique key (a, b, c)) collate utf8mb4_general_ci;") + tk.MustExec("insert into t values ('2020-01-01', 1, 'aSDd');") + tk.MustGetErrMsg("insert into t values ('2020-01-01', 1, 'ASDD');", "[kv:1062]Duplicate entry '2020-01-01 00:00:00-1-ASDD' for key 't.a'") + + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t (a char(10) collate utf8mb4_unicode_ci, b char(20) collate utf8mb4_general_ci, c int(11), primary key (a, b, c), unique key (a));") + tk.MustExec("insert ignore into t values ('$', 'C', 10);") + tk.MustExec("insert ignore into t values ('$', 'C', 10);") + tk.MustQuery("show warnings;").Check(testkit.RowsWithSep("|", "Warning|1062|Duplicate entry '$-C-10' for key 't.PRIMARY'")) + + tk.MustExec("begin pessimistic;") + tk.MustExec("insert into t values ('a7', 'a', 10);") + tk.MustGetErrMsg("insert into t values ('a7', 'a', 10);", "[kv:1062]Duplicate entry 'a7-a-10' for key 't.PRIMARY'") + tk.MustExec("rollback;") + + // Test for large unsigned integer handle. + // See https://github.com/pingcap/tidb/issues/12420. 
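+	// 18446744073709551615 is MaxUint64; if the handle were treated as a signed
+	// int64 it would print as -1, so the duplicate entry message must format the
+	// handle as unsigned.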
+ tk.MustExec("drop table if exists t;") + tk.MustExec("create table t(a bigint unsigned primary key);") + tk.MustExec("insert into t values(18446744073709551615);") + tk.MustGetErrMsg("insert into t values(18446744073709551615);", "[kv:1062]Duplicate entry '18446744073709551615' for key 't.PRIMARY'") + } +} + +func TestGlobalTempTableParallel(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec(`use test`) + tk.MustExec("drop table if exists temp_test") + tk.MustExec("create global temporary table temp_test(id int primary key auto_increment) on commit delete rows") + defer tk.MustExec("drop table if exists temp_test") + + threads := 8 + loops := 1 + var wg util.WaitGroupWrapper + + insertFunc := func() { + newTk := testkit.NewTestKit(t, store) + newTk.MustExec("use test") + newTk.MustExec("begin") + for i := 0; i < loops; i++ { + newTk.MustExec("insert temp_test value(0)") + newTk.MustExec("insert temp_test value(0), (0)") + } + maxID := strconv.Itoa(loops * 3) + newTk.MustQuery("select max(id) from temp_test").Check(testkit.Rows(maxID)) + newTk.MustExec("commit") + } + + for i := 0; i < threads; i++ { + wg.Run(insertFunc) + } + wg.Wait() +} + +func TestInsertLockUnchangedKeys(t *testing.T) { + store := testkit.CreateMockStore(t) + tk1 := testkit.NewTestKit(t, store) + tk2 := testkit.NewTestKit(t, store) + tk1.MustExec("use test") + tk2.MustExec("use test") + + for _, shouldLock := range []bool{false} { + for _, tt := range []struct { + name string + ddl string + dml string + isClusteredPK bool + }{ + { + "replace-pk", + "create table t (c int primary key clustered)", + "replace into t values (1)", + true, + }, + { + "replace-uk", + "create table t (c int unique key)", + "replace into t values (1)", + false, + }, + { + "insert-ignore-pk", + "create table t (c int primary key clustered)", + "insert ignore into t values (1)", + true, + }, + { + "insert-ignore-uk", + "create table t (c int unique key)", + "insert ignore into t values (1)", + false, + }, + { + "insert-update-pk", + "create table t (c int primary key clustered)", + "insert into t values (1) on duplicate key update c = values(c)", + true, + }, + { + "insert-update-uk", + "create table t (c int unique key)", + "insert into t values (1) on duplicate key update c = values(c)", + false, + }, + } { + t.Run( + tt.name+"-"+strconv.FormatBool(shouldLock), func(t *testing.T) { + tk1.MustExec(fmt.Sprintf("set @@tidb_lock_unchanged_keys = %v", shouldLock)) + tk1.MustExec("drop table if exists t") + tk1.MustExec(tt.ddl) + tk1.MustExec("insert into t values (1)") + tk1.MustExec("begin") + tk1.MustExec(tt.dml) + errCh := make(chan error) + go func() { + _, err := tk2.Exec("insert into t values (1)") + errCh <- err + }() + select { + case <-errCh: + if shouldLock { + require.Failf(t, "txn2 is not blocked by %q", tt.dml) + } + close(errCh) + case <-time.After(200 * time.Millisecond): + if !shouldLock && !tt.isClusteredPK { + require.Failf(t, "txn2 is blocked by %q", tt.dml) + } + } + tk1.MustExec("commit") + <-errCh + tk1.MustQuery("select * from t").Check(testkit.Rows("1")) + }, + ) + } + } +} + +func TestMySQLInsertID(t *testing.T) { + // mysql_insert_id() differs from LAST_INSERT_ID() + // See https://github.com/pingcap/tidb/issues/55965 + // mysql_insert_id() is got from tk.Session().LastInsertID() + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec(`use test`) + tk.MustExec("drop table if exists tb") + tk.MustExec("create table tb(pk int primary key 
auto_increment, a int, b int, unique(a))")
+	defer tk.MustExec("drop table if exists tb")
+
+	tk.MustExec("insert into tb (a, b) values (1, 1) on duplicate key update b = values(b)")
+	require.Equal(t, tk.Session().LastInsertID(), uint64(1))
+
+	tk.MustExec("insert into tb (a, b) values (2, 2) on duplicate key update b = values(b)")
+	require.Equal(t, tk.Session().LastInsertID(), uint64(2))
+
+	// If there is an AUTO_INCREMENT column in the table and there were some successfully
+	// inserted explicit values or some updated values, return the last of the inserted or updated values.
+	// Ref https://dev.mysql.com/doc/c-api/5.7/en/mysql-insert-id.html#:~:text=When%20called%20after%20an%20INSERT%20...%20ON,of%20the%20inserted%20or%20updated%20values
+	tk.MustExec("insert into tb (a, b) values (1, 2) on duplicate key update b = values(b)")
+	require.Equal(t, tk.Session().LastInsertID(), uint64(1))
+	tk.MustQuery("select LAST_INSERT_ID()").Check(testkit.Rows("2"))
+
+	tk.MustQuery("select * from tb").Sort().Check(testkit.Rows("1 1 2", "2 2 2"))
+
+	// When the new row and the old row are exactly the same (no inserted or updated values), mysql_insert_id() is 0.
+	tk.MustExec("insert into tb (a, b) values (1, 2) on duplicate key update b = 2")
+	require.Equal(t, tk.Session().LastInsertID(), uint64(0))
+	tk.MustQuery("select LAST_INSERT_ID()").Check(testkit.Rows("2"))
+
+	// When the value of the auto increment column is assigned explicitly, LAST_INSERT_ID() is unchanged.
+	// mysql_insert_id() is set to the explicitly assigned value.
+	tk.MustExec("insert into tb values (6, 6, 6)")
+	require.Equal(t, tk.Session().LastInsertID(), uint64(6))
+	tk.MustQuery("select LAST_INSERT_ID()").Check(testkit.Rows("2"))
+
+	// An UPDATE statement touches neither mysql_insert_id() nor LAST_INSERT_ID().
+	tk.MustExec("update tb set b = 7, pk = pk + 1 where b = 6")
+	require.Equal(t, tk.Session().LastInsertID(), uint64(0))
+	tk.MustQuery("select LAST_INSERT_ID()").Check(testkit.Rows("2"))
+
+	// How to distinguish LAST_INSERT_ID() and mysql_insert_id()?
+	// In short, LAST_INSERT_ID() always comes from an auto-allocated value, while mysql_insert_id() can be
+	// auto-allocated or explicitly specified.
+
+	// Another scenario mentioned by @lcwangcao:
+	// what's the behaviour when a transaction conflict is involved?
+	tk.MustExec("truncate table tb")
+	tk.MustExec("insert into tb (a, b) values (1, 1), (2, 2)")
+
+	tk1 := testkit.NewTestKit(t, store)
+	tk1.MustExec("use test")
+	tk1.MustExec("begin")
+	tk1.MustExec("update tb set b = 2 where a = 1")
+	go func() {
+		time.Sleep(100 * time.Millisecond)
+		tk1.MustExec("commit")
+	}()
+	// The first attempt updates one row. Then the transaction conflicts and
+	// retries; the second attempt modifies nothing.
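+	// If the retried statement ends up changing nothing (no inserted or updated
+	// value), mysql_insert_id() is expected to be 0, matching the "exactly the
+	// same row" case above.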
+ tk.MustExec("insert into tb(a, b) values(1,2) on duplicate key update b = 2;") + require.Equal(t, tk.Session().LastInsertID(), uint64(0)) +} + +func TestInsertNullInNonStrictMode(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("create table t1 (id int primary key, col1 varchar(10) not null default '')") + tk.MustExec("create table t2 (id int primary key, col1 varchar(10))") + tk.MustExec("insert into t2 values (1, null)") + tk.MustExec("insert ignore into t1 values(5, null)") + + tk.MustExec("set session sql_mode = ''") + + err := tk.ExecToErr("insert into t1 values(1, null)") + require.EqualError(t, err, table.ErrColumnCantNull.GenWithStackByArgs("col1").Error()) + + err = tk.ExecToErr("insert into t1 set id = 1, col1 = null") + require.EqualError(t, err, table.ErrColumnCantNull.GenWithStackByArgs("col1").Error()) + + err = tk.ExecToErr("insert t1 VALUES (5, 5) ON DUPLICATE KEY UPDATE col1 = null") + require.EqualError(t, err, table.ErrColumnCantNull.GenWithStackByArgs("col1").Error()) + + tk.MustExec("insert into t1 select * from t2") + tk.MustExec("insert into t1 values(2, null), (3, 3), (4, 4)") + tk.MustExec("update t1 set col1 = null where id = 3") + tk.MustExec("insert ignore t1 VALUES (4, 4) ON DUPLICATE KEY UPDATE col1 = null") + tk.MustQuery("select * from t1").Check(testkit.RowsWithSep("|", "1|", "2|", "3|", "4|", "5|")) +} diff --git a/pkg/executor/load_data.go b/pkg/executor/load_data.go new file mode 100644 index 0000000000000..e034098518532 --- /dev/null +++ b/pkg/executor/load_data.go @@ -0,0 +1,786 @@ +// Copyright 2018 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package executor + +import ( + "context" + "fmt" + "io" + "math" + "strings" + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/br/pkg/storage" + "github.com/pingcap/tidb/pkg/errctx" + "github.com/pingcap/tidb/pkg/executor/importer" + "github.com/pingcap/tidb/pkg/executor/internal/exec" + "github.com/pingcap/tidb/pkg/expression" + "github.com/pingcap/tidb/pkg/lightning/mydump" + "github.com/pingcap/tidb/pkg/meta/model" + "github.com/pingcap/tidb/pkg/parser/ast" + "github.com/pingcap/tidb/pkg/parser/mysql" + "github.com/pingcap/tidb/pkg/parser/terror" + plannercore "github.com/pingcap/tidb/pkg/planner/core" + "github.com/pingcap/tidb/pkg/sessionctx" + "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" + "github.com/pingcap/tidb/pkg/sessiontxn" + "github.com/pingcap/tidb/pkg/table" + "github.com/pingcap/tidb/pkg/types" + "github.com/pingcap/tidb/pkg/util" + "github.com/pingcap/tidb/pkg/util/chunk" + contextutil "github.com/pingcap/tidb/pkg/util/context" + "github.com/pingcap/tidb/pkg/util/dbterror/exeerrors" + "github.com/pingcap/tidb/pkg/util/logutil" + "github.com/pingcap/tidb/pkg/util/sqlkiller" + "go.uber.org/zap" + "golang.org/x/sync/errgroup" +) + +// LoadDataVarKey is a variable key for load data. 
+const LoadDataVarKey loadDataVarKeyType = 0 + +// LoadDataReaderBuilderKey stores the reader channel that reads from the connection. +const LoadDataReaderBuilderKey loadDataVarKeyType = 1 + +var ( + taskQueueSize = 16 // the maximum number of pending tasks to commit in queue +) + +// LoadDataReaderBuilder is a function type that builds a reader from a file path. +type LoadDataReaderBuilder func(filepath string) ( + r io.ReadCloser, err error, +) + +// LoadDataExec represents a load data executor. +type LoadDataExec struct { + exec.BaseExecutor + + FileLocRef ast.FileLocRefTp + loadDataWorker *LoadDataWorker + + // fields for loading local file + infileReader io.ReadCloser +} + +// Open implements the Executor interface. +func (e *LoadDataExec) Open(_ context.Context) error { + if rb, ok := e.Ctx().Value(LoadDataReaderBuilderKey).(LoadDataReaderBuilder); ok { + var err error + e.infileReader, err = rb(e.loadDataWorker.GetInfilePath()) + if err != nil { + return err + } + } + return nil +} + +// Close implements the Executor interface. +func (e *LoadDataExec) Close() error { + return e.closeLocalReader(nil) +} + +func (e *LoadDataExec) closeLocalReader(originalErr error) error { + err := originalErr + if e.infileReader != nil { + if err2 := e.infileReader.Close(); err2 != nil { + logutil.BgLogger().Error( + "close local reader failed", zap.Error(err2), + zap.NamedError("original error", originalErr), + ) + if err == nil { + err = err2 + } + } + e.infileReader = nil + } + return err +} + +// Next implements the Executor Next interface. +func (e *LoadDataExec) Next(ctx context.Context, _ *chunk.Chunk) (err error) { + switch e.FileLocRef { + case ast.FileLocServerOrRemote: + return e.loadDataWorker.loadRemote(ctx) + case ast.FileLocClient: + // This is for legacy test only + // TODO: adjust tests to remove LoadDataVarKey + sctx := e.loadDataWorker.UserSctx + sctx.SetValue(LoadDataVarKey, e.loadDataWorker) + + err = e.loadDataWorker.LoadLocal(ctx, e.infileReader) + if err != nil { + logutil.Logger(ctx).Error("load local data failed", zap.Error(err)) + err = e.closeLocalReader(err) + return err + } + } + return nil +} + +type planInfo struct { + ID int + Columns []*ast.ColumnName + GenColExprs []expression.Expression +} + +// LoadDataWorker does a LOAD DATA job. +type LoadDataWorker struct { + UserSctx sessionctx.Context + + controller *importer.LoadDataController + planInfo planInfo + + table table.Table +} + +func setNonRestrictiveFlags(stmtCtx *stmtctx.StatementContext) { + // TODO: DupKeyAsWarning represents too many "ignore error" paths, the + // meaning of this flag is not clear. I can only reuse it here. + levels := stmtCtx.ErrLevels() + levels[errctx.ErrGroupDupKey] = errctx.LevelWarn + levels[errctx.ErrGroupBadNull] = errctx.LevelWarn + levels[errctx.ErrGroupNoDefault] = errctx.LevelWarn + stmtCtx.SetErrLevels(levels) + stmtCtx.SetTypeFlags(stmtCtx.TypeFlags().WithTruncateAsWarning(true)) +} + +// NewLoadDataWorker creates a new LoadDataWorker that is ready to work. 
+func NewLoadDataWorker(
+	userSctx sessionctx.Context,
+	plan *plannercore.LoadData,
+	tbl table.Table,
+) (w *LoadDataWorker, err error) {
+	importPlan, err := importer.NewPlanFromLoadDataPlan(userSctx, plan)
+	if err != nil {
+		return nil, err
+	}
+	astArgs := importer.ASTArgsFromPlan(plan)
+	controller, err := importer.NewLoadDataController(importPlan, tbl, astArgs)
+	if err != nil {
+		return nil, err
+	}
+
+	if !controller.Restrictive {
+		setNonRestrictiveFlags(userSctx.GetSessionVars().StmtCtx)
+	}
+
+	loadDataWorker := &LoadDataWorker{
+		UserSctx:   userSctx,
+		table:      tbl,
+		controller: controller,
+		planInfo: planInfo{
+			ID:          plan.ID(),
+			Columns:     plan.Columns,
+			GenColExprs: plan.GenCols.Exprs,
+		},
+	}
+	return loadDataWorker, nil
+}
+
+func (e *LoadDataWorker) loadRemote(ctx context.Context) error {
+	if err2 := e.controller.InitDataFiles(ctx); err2 != nil {
+		return err2
+	}
+	return e.load(ctx, e.controller.GetLoadDataReaderInfos())
+}
+
+// LoadLocal reads from the client connection and does the load data job.
+func (e *LoadDataWorker) LoadLocal(ctx context.Context, r io.ReadCloser) error {
+	if r == nil {
+		return errors.New("load local data, reader is nil")
+	}
+
+	compressTp := mydump.ParseCompressionOnFileExtension(e.GetInfilePath())
+	compressTp2, err := mydump.ToStorageCompressType(compressTp)
+	if err != nil {
+		return err
+	}
+	readers := []importer.LoadDataReaderInfo{{
+		Opener: func(_ context.Context) (io.ReadSeekCloser, error) {
+			addedSeekReader := NewSimpleSeekerOnReadCloser(r)
+			return storage.InterceptDecompressReader(addedSeekReader, compressTp2, storage.DecompressConfig{
+				ZStdDecodeConcurrency: 1,
+			})
+		}}}
+	return e.load(ctx, readers)
+}
+
+func (e *LoadDataWorker) load(ctx context.Context, readerInfos []importer.LoadDataReaderInfo) error {
+	group, groupCtx := errgroup.WithContext(ctx)
+
+	encoder, committer, err := initEncodeCommitWorkers(e)
+	if err != nil {
+		return err
+	}
+
+	// main goroutine -> readerInfoCh -> processOneStream goroutines
+	readerInfoCh := make(chan importer.LoadDataReaderInfo, 1)
+	// processOneStream goroutines -> commitTaskCh -> commitWork goroutines
+	commitTaskCh := make(chan commitTask, taskQueueSize)
+	// commitWork goroutines -> done -> UpdateJobProgress goroutine
+
+	// processOneStream goroutines.
+	group.Go(func() error {
+		err2 := encoder.processStream(groupCtx, readerInfoCh, commitTaskCh)
+		if err2 == nil {
+			close(commitTaskCh)
+		}
+		return err2
+	})
+	// commitWork goroutines.
+	group.Go(func() error {
+		failpoint.Inject("BeforeCommitWork", nil)
+		return committer.commitWork(groupCtx, commitTaskCh)
+	})
+
+sendReaderInfoLoop:
+	for _, info := range readerInfos {
+		select {
+		case <-groupCtx.Done():
+			break sendReaderInfoLoop
+		case readerInfoCh <- info:
+		}
+	}
+	close(readerInfoCh)
+	err = group.Wait()
+	e.setResult(encoder.exprWarnings)
+	return err
+}
+
+func (e *LoadDataWorker) setResult(colAssignExprWarnings []contextutil.SQLWarn) {
+	stmtCtx := e.UserSctx.GetSessionVars().StmtCtx
+	numWarnings := uint64(stmtCtx.WarningCount())
+	numRecords := stmtCtx.RecordRows()
+	numDeletes := stmtCtx.DeletedRows()
+	numSkipped := stmtCtx.RecordRows() - stmtCtx.CopiedRows()
+
+	// Column assignment expression warnings are generated during init and are static;
+	// we need to replicate them for each processed row.
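+	// For example, with 2 static SET-clause warnings and 3 processed records, the
+	// line below adds 3*2 = 6 warnings on top of the statement-level ones (the
+	// total is capped at math.MaxUint16 just after).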
+	numWarnings += numRecords * uint64(len(colAssignExprWarnings))
+
+	if numWarnings > math.MaxUint16 {
+		numWarnings = math.MaxUint16
+	}
+
+	msg := fmt.Sprintf(mysql.MySQLErrName[mysql.ErrLoadInfo].Raw, numRecords, numDeletes, numSkipped, numWarnings)
+	warns := make([]contextutil.SQLWarn, numWarnings)
+	n := copy(warns, stmtCtx.GetWarnings())
+	for i := 0; i < int(numRecords) && n < len(warns); i++ {
+		n += copy(warns[n:], colAssignExprWarnings)
+	}
+
+	stmtCtx.SetMessage(msg)
+	stmtCtx.SetWarnings(warns)
+}
+
+func initEncodeCommitWorkers(e *LoadDataWorker) (*encodeWorker, *commitWorker, error) {
+	insertValues, err2 := createInsertValues(e)
+	if err2 != nil {
+		return nil, nil, err2
+	}
+	colAssignExprs, exprWarnings, err2 := e.controller.CreateColAssignExprs(insertValues.Ctx().GetPlanCtx())
+	if err2 != nil {
+		return nil, nil, err2
+	}
+	enc := &encodeWorker{
+		InsertValues:   insertValues,
+		controller:     e.controller,
+		colAssignExprs: colAssignExprs,
+		exprWarnings:   exprWarnings,
+		killer:         &e.UserSctx.GetSessionVars().SQLKiller,
+	}
+	enc.resetBatch()
+	com := &commitWorker{
+		InsertValues: insertValues,
+		controller:   e.controller,
+	}
+	return enc, com, nil
+}
+
+// createInsertValues creates InsertValues from userSctx.
+func createInsertValues(e *LoadDataWorker) (insertVal *InsertValues, err error) {
+	insertColumns := e.controller.InsertColumns
+	hasExtraHandle := false
+	for _, col := range insertColumns {
+		if col.Name.L == model.ExtraHandleName.L {
+			if !e.UserSctx.GetSessionVars().AllowWriteRowID {
+				return nil, errors.Errorf("load data statement for _tidb_rowid is not supported")
+			}
+			hasExtraHandle = true
+			break
+		}
+	}
+	ret := &InsertValues{
+		BaseExecutor:   exec.NewBaseExecutor(e.UserSctx, nil, e.planInfo.ID),
+		Table:          e.table,
+		Columns:        e.planInfo.Columns,
+		GenExprs:       e.planInfo.GenColExprs,
+		maxRowsInBatch: 1000,
+		insertColumns:  insertColumns,
+		rowLen:         len(insertColumns),
+		hasExtraHandle: hasExtraHandle,
+	}
+	if len(insertColumns) > 0 {
+		ret.initEvalBuffer()
+	}
+	ret.collectRuntimeStatsEnabled()
+	return ret, nil
+}
+
+// encodeWorker is a sub-worker of LoadDataWorker that is dedicated to encoding data.
+type encodeWorker struct {
+	*InsertValues
+	controller     *importer.LoadDataController
+	colAssignExprs []expression.Expression
+	// The session context generates warnings when rewriting AST nodes into expressions;
+	// we should replay such warnings for each encoded row.
+	exprWarnings []contextutil.SQLWarn
+	killer       *sqlkiller.SQLKiller
+	rows         [][]types.Datum
+}
+
+// commitTask is used for passing data from the processStream goroutine to the commitWork goroutine.
+type commitTask struct {
+	cnt  uint64
+	rows [][]types.Datum
+}
+
+// processStream repeatedly builds a parser from the channel and processes it. When
+// it returns nil, all data has been read.
+func (w *encodeWorker) processStream(
+	ctx context.Context,
+	inCh <-chan importer.LoadDataReaderInfo,
+	outCh chan<- commitTask,
+) error {
+	for {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		case readerInfo, ok := <-inCh:
+			if !ok {
+				return nil
+			}
+			dataParser, err := w.controller.GetParser(ctx, readerInfo)
+			if err != nil {
+				return err
+			}
+			if err = w.controller.HandleSkipNRows(dataParser); err != nil {
+				return err
+			}
+			err = w.processOneStream(ctx, dataParser, outCh)
+			terror.Log(dataParser.Close())
+			if err != nil {
+				return err
+			}
+		}
+	}
+}
+
+// processOneStream processes the input stream from the parser. When it returns nil,
+// all data has been read.
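+// Internally it loops: fill w.rows via readOneBatchRows, then hand the batch to the
+// commit goroutine through outCh; while the send blocks, a 30-second ticker polls
+// the kill signal so an interrupted query can abort promptly.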
+func (w *encodeWorker) processOneStream(
+	ctx context.Context,
+	parser mydump.Parser,
+	outCh chan<- commitTask,
+) (err error) {
+	defer func() {
+		r := recover()
+		if r != nil {
+			logutil.Logger(ctx).Error("process routine panicked",
+				zap.Any("r", r),
+				zap.Stack("stack"))
+			err = util.GetRecoverError(r)
+		}
+	}()
+
+	checkKilled := time.NewTicker(30 * time.Second)
+	defer checkKilled.Stop()
+
+	for {
+		// prepare batch and enqueue task
+		if err = w.readOneBatchRows(ctx, parser); err != nil {
+			return
+		}
+		if w.curBatchCnt == 0 {
+			return
+		}
+
+	TrySendTask:
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		case <-checkKilled.C:
+			if err := w.killer.HandleSignal(); err != nil {
+				logutil.Logger(ctx).Info("load data query interrupted, quit data processing")
+				return err
+			}
+			goto TrySendTask
+		case outCh <- commitTask{
+			cnt:  w.curBatchCnt,
+			rows: w.rows,
+		}:
+		}
+		// Reset the rows buffer; it will be reallocated, NOT reused.
+		w.resetBatch()
+	}
+}
+
+func (w *encodeWorker) resetBatch() {
+	w.rows = make([][]types.Datum, 0, w.maxRowsInBatch)
+	w.curBatchCnt = 0
+}
+
+// readOneBatchRows reads rows from the parser. When the parser's reader meets EOF,
+// it returns nil. Other errors are returned directly. When the rows batch is full
+// it also returns nil.
+// The result rows are saved in w.rows and some members are updated; the caller can
+// check whether curBatchCnt == 0 to know whether EOF was reached.
+func (w *encodeWorker) readOneBatchRows(ctx context.Context, parser mydump.Parser) error {
+	for {
+		if err := parser.ReadRow(); err != nil {
+			if errors.Cause(err) == io.EOF {
+				return nil
+			}
+			return exeerrors.ErrLoadDataCantRead.GenWithStackByArgs(
+				err.Error(),
+				"Only the following formats delimited text file (csv, tsv), parquet, sql are supported. Please provide the valid source file(s)",
+			)
+		}
+		// rowCount will be used in fillRow(); the last insert ID is assigned according to rowCount = 1,
+		// so it should be incremented first here.
+		w.rowCount++
+		r, err := w.parserData2TableData(ctx, parser.LastRow().Row)
+		if err != nil {
+			return err
+		}
+		parser.RecycleRow(parser.LastRow())
+		w.rows = append(w.rows, r)
+		w.curBatchCnt++
+		if w.maxRowsInBatch != 0 && w.rowCount%w.maxRowsInBatch == 0 {
+			logutil.Logger(ctx).Info("batch limit hit when inserting rows", zap.Int("maxBatchRows", w.MaxChunkSize()),
+				zap.Uint64("totalRows", w.rowCount))
+			return nil
+		}
+	}
+}
+
+// parserData2TableData encodes the data of parser output.
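+// For example, given a (hypothetical) statement such as
+//
+//	LOAD DATA ... (a, @v) SET b = @v + 1
+//
+// field 1 maps to column a, field 2 is bound to user variable v, and the SET
+// expression is evaluated afterwards to fill column b.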
+func (w *encodeWorker) parserData2TableData(
+	ctx context.Context,
+	parserData []types.Datum,
+) ([]types.Datum, error) {
+	var errColNumMismatch error
+	switch {
+	case len(parserData) < w.controller.GetFieldCount():
+		errColNumMismatch = exeerrors.ErrWarnTooFewRecords.GenWithStackByArgs(w.rowCount)
+	case len(parserData) > w.controller.GetFieldCount():
+		errColNumMismatch = exeerrors.ErrWarnTooManyRecords.GenWithStackByArgs(w.rowCount)
+	}
+
+	if errColNumMismatch != nil {
+		if w.controller.Restrictive {
+			return nil, errColNumMismatch
+		}
+		w.handleWarning(errColNumMismatch)
+	}
+
+	row := make([]types.Datum, 0, len(w.insertColumns))
+	sessionVars := w.Ctx().GetSessionVars()
+	setVar := func(name string, col *types.Datum) {
+		// User variable names are not case-sensitive
+		// https://dev.mysql.com/doc/refman/8.0/en/user-variables.html
+		name = strings.ToLower(name)
+		if col == nil || col.IsNull() {
+			sessionVars.UnsetUserVar(name)
+		} else {
+			sessionVars.SetUserVarVal(name, *col)
+		}
+	}
+
+	fieldMappings := w.controller.FieldMappings
+	for i := 0; i < len(fieldMappings); i++ {
+		if i >= len(parserData) {
+			if fieldMappings[i].Column == nil {
+				setVar(fieldMappings[i].UserVar.Name, nil)
+				continue
+			}
+
+			// If a column is missing, its type is time, and it has the NOT NULL flag, it should be set to the current time.
+			if types.IsTypeTime(fieldMappings[i].Column.GetType()) && mysql.HasNotNullFlag(fieldMappings[i].Column.GetFlag()) {
+				row = append(row, types.NewTimeDatum(types.CurrentTime(fieldMappings[i].Column.GetType())))
+				continue
+			}
+
+			row = append(row, types.NewDatum(nil))
+			continue
+		}
+
+		if fieldMappings[i].Column == nil {
+			setVar(fieldMappings[i].UserVar.Name, &parserData[i])
+			continue
+		}
+
+		// Don't set the value for generated columns.
+		if fieldMappings[i].Column.IsGenerated() {
+			row = append(row, types.NewDatum(nil))
+			continue
+		}
+
+		row = append(row, parserData[i])
+	}
+	for i := 0; i < len(w.colAssignExprs); i++ {
+		// eval expression of `SET` clause
+		d, err := w.colAssignExprs[i].Eval(w.Ctx().GetExprCtx().GetEvalCtx(), chunk.Row{})
+		if err != nil {
+			if w.controller.Restrictive {
+				return nil, err
+			}
+			w.handleWarning(err)
+		}
+		row = append(row, d)
+	}
+
+	// a new row buffer will be allocated in getRow
+	newRow, err := w.getRow(ctx, row)
+	if err != nil {
+		if w.controller.Restrictive {
+			return nil, err
+		}
+		w.handleWarning(err)
+		logutil.Logger(ctx).Error("failed to get row", zap.Error(err))
+		// TODO: should not return nil! The caller will panic when looking up the index.
+		return nil, nil
+	}
+
+	return newRow, nil
+}
+
+// commitWorker is a sub-worker of LoadDataWorker that is dedicated to committing data.
+type commitWorker struct {
+	*InsertValues
+	controller *importer.LoadDataController
+}
+
+// commitWork commits batches sequentially. When it returns nil, the job is
+// finished.
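+// It drains inCh until the channel is closed or the context is cancelled, and logs
+// the time spent committing each task.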
+func (w *commitWorker) commitWork(ctx context.Context, inCh <-chan commitTask) (err error) {
+	defer func() {
+		r := recover()
+		if r != nil {
+			logutil.Logger(ctx).Error("commitWork panicked",
+				zap.Any("r", r),
+				zap.Stack("stack"))
+			err = util.GetRecoverError(r)
+		}
+	}()
+
+	var (
+		taskCnt uint64
+	)
+	for {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		case task, ok := <-inCh:
+			if !ok {
+				return nil
+			}
+			start := time.Now()
+			if err = w.commitOneTask(ctx, task); err != nil {
+				return err
+			}
+			taskCnt++
+			logutil.Logger(ctx).Info("commit one task success",
+				zap.Duration("commit time usage", time.Since(start)),
+				zap.Uint64("keys processed", task.cnt),
+				zap.Uint64("taskCnt processed", taskCnt),
+			)
+		}
+	}
+}
+
+// commitOneTask inserts data from LoadDataWorker.rows, then commits the modification
+// as if it were a statement.
+func (w *commitWorker) commitOneTask(ctx context.Context, task commitTask) error {
+	err := w.checkAndInsertOneBatch(ctx, task.rows, task.cnt)
+	if err != nil {
+		logutil.Logger(ctx).Error("commit error CheckAndInsert", zap.Error(err))
+		return err
+	}
+	failpoint.Inject("commitOneTaskErr", func() {
+		failpoint.Return(errors.New("mock commit one task error"))
+	})
+	return nil
+}
+
+func (w *commitWorker) checkAndInsertOneBatch(ctx context.Context, rows [][]types.Datum, cnt uint64) error {
+	if w.stats != nil && w.stats.BasicRuntimeStats != nil {
+		// Since this method is not called by the executor's Next,
+		// we need to record the basic executor runtime stats ourselves.
+		start := time.Now()
+		defer func() {
+			w.stats.BasicRuntimeStats.Record(time.Since(start), 0)
+		}()
+	}
+	var err error
+	if cnt == 0 {
+		return err
+	}
+	w.Ctx().GetSessionVars().StmtCtx.AddRecordRows(cnt)
+
+	switch w.controller.OnDuplicate {
+	case ast.OnDuplicateKeyHandlingReplace:
+		return w.batchCheckAndInsert(ctx, rows[0:cnt], w.addRecordLD, true)
+	case ast.OnDuplicateKeyHandlingIgnore:
+		return w.batchCheckAndInsert(ctx, rows[0:cnt], w.addRecordLD, false)
+	case ast.OnDuplicateKeyHandlingError:
+		txn, err := w.Ctx().Txn(true)
+		if err != nil {
+			return err
+		}
+		dupKeyCheck := optimizeDupKeyCheckForNormalInsert(w.Ctx().GetSessionVars(), txn)
+		for i, row := range rows[0:cnt] {
+			sizeHintStep := int(w.Ctx().GetSessionVars().ShardAllocateStep)
+			if sizeHintStep > 0 && i%sizeHintStep == 0 {
+				sizeHint := sizeHintStep
+				remain := len(rows[0:cnt]) - i
+				if sizeHint > remain {
+					sizeHint = remain
+				}
+				err = w.addRecordWithAutoIDHint(ctx, row, sizeHint, dupKeyCheck)
+			} else {
+				err = w.addRecord(ctx, row, dupKeyCheck)
+			}
+			if err != nil {
+				return err
+			}
+			w.Ctx().GetSessionVars().StmtCtx.AddCopiedRows(1)
+		}
+		return nil
+	default:
+		return errors.Errorf("unknown on duplicate key handling: %v", w.controller.OnDuplicate)
+	}
+}
+
+func (w *commitWorker) addRecordLD(ctx context.Context, row []types.Datum, dupKeyCheck table.DupKeyCheckMode) error {
+	if row == nil {
+		return nil
+	}
+	return w.addRecord(ctx, row, dupKeyCheck)
+}
+
+// GetInfilePath gets the infile path.
+func (e *LoadDataWorker) GetInfilePath() string {
+	return e.controller.Path
+}
+
+// GetController gets the load data controller.
+// It is used in unit tests.
+func (e *LoadDataWorker) GetController() *importer.LoadDataController {
+	return e.controller
+}
+
+// TestLoadLocal is a helper function for unit tests.
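+// A rough sketch of how a test might drive it (the parser construction is elided
+// and w is a placeholder *LoadDataWorker):
+//
+//	parser := ... // a mydump.Parser over the test input
+//	require.NoError(t, w.TestLoadLocal(parser))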
+func (e *LoadDataWorker) TestLoadLocal(parser mydump.Parser) error { + if err := ResetContextOfStmt(e.UserSctx, &ast.LoadDataStmt{}); err != nil { + return err + } + setNonRestrictiveFlags(e.UserSctx.GetSessionVars().StmtCtx) + encoder, committer, err := initEncodeCommitWorkers(e) + if err != nil { + return err + } + + ctx := context.Background() + err = sessiontxn.NewTxn(ctx, e.UserSctx) + if err != nil { + return err + } + + for i := uint64(0); i < e.controller.IgnoreLines; i++ { + //nolint: errcheck + _ = parser.ReadRow() + } + + err = encoder.readOneBatchRows(ctx, parser) + if err != nil { + return err + } + + err = committer.checkAndInsertOneBatch( + ctx, + encoder.rows, + encoder.curBatchCnt) + if err != nil { + return err + } + encoder.resetBatch() + committer.Ctx().StmtCommit(ctx) + err = committer.Ctx().CommitTxn(ctx) + if err != nil { + return err + } + e.setResult(encoder.exprWarnings) + return nil +} + +var _ io.ReadSeekCloser = (*SimpleSeekerOnReadCloser)(nil) + +// SimpleSeekerOnReadCloser provides Seek(0, SeekCurrent) on ReadCloser. +type SimpleSeekerOnReadCloser struct { + r io.ReadCloser + pos int +} + +// NewSimpleSeekerOnReadCloser creates a SimpleSeekerOnReadCloser. +func NewSimpleSeekerOnReadCloser(r io.ReadCloser) *SimpleSeekerOnReadCloser { + return &SimpleSeekerOnReadCloser{r: r} +} + +// Read implements io.Reader. +func (s *SimpleSeekerOnReadCloser) Read(p []byte) (n int, err error) { + n, err = s.r.Read(p) + s.pos += n + return +} + +// Seek implements io.Seeker. +func (s *SimpleSeekerOnReadCloser) Seek(offset int64, whence int) (int64, error) { + // only support get reader's current offset + if offset == 0 && whence == io.SeekCurrent { + return int64(s.pos), nil + } + return 0, errors.Errorf("unsupported seek on SimpleSeekerOnReadCloser, offset: %d whence: %d", offset, whence) +} + +// Close implements io.Closer. +func (s *SimpleSeekerOnReadCloser) Close() error { + return s.r.Close() +} + +// GetFileSize implements storage.ExternalFileReader. +func (*SimpleSeekerOnReadCloser) GetFileSize() (int64, error) { + return 0, errors.Errorf("unsupported GetFileSize on SimpleSeekerOnReadCloser") +} + +// loadDataVarKeyType is a dummy type to avoid naming collision in context. +type loadDataVarKeyType int + +// String defines a Stringer function for debugging and pretty printing. +func (loadDataVarKeyType) String() string { + return "load_data_var" +} diff --git a/pkg/executor/select.go b/pkg/executor/select.go new file mode 100644 index 0000000000000..c27b7e498047c --- /dev/null +++ b/pkg/executor/select.go @@ -0,0 +1,1273 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package executor + +import ( + "context" + stderrors "errors" + "runtime/pprof" + "strings" + "sync/atomic" + "time" + + "github.com/opentracing/opentracing-go" + "github.com/pingcap/errors" + "github.com/pingcap/failpoint" + "github.com/pingcap/kvproto/pkg/kvrpcpb" + "github.com/pingcap/tidb/pkg/config" + "github.com/pingcap/tidb/pkg/ddl/schematracker" + "github.com/pingcap/tidb/pkg/domain" + "github.com/pingcap/tidb/pkg/errctx" + "github.com/pingcap/tidb/pkg/executor/aggregate" + "github.com/pingcap/tidb/pkg/executor/internal/exec" + "github.com/pingcap/tidb/pkg/executor/internal/pdhelper" + "github.com/pingcap/tidb/pkg/executor/sortexec" + "github.com/pingcap/tidb/pkg/expression" + "github.com/pingcap/tidb/pkg/infoschema" + "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta/model" + "github.com/pingcap/tidb/pkg/parser/ast" + "github.com/pingcap/tidb/pkg/parser/mysql" + "github.com/pingcap/tidb/pkg/parser/terror" + plannercore "github.com/pingcap/tidb/pkg/planner/core" + "github.com/pingcap/tidb/pkg/planner/core/base" + "github.com/pingcap/tidb/pkg/planner/core/operator/logicalop" + "github.com/pingcap/tidb/pkg/planner/planctx" + plannerutil "github.com/pingcap/tidb/pkg/planner/util" + "github.com/pingcap/tidb/pkg/planner/util/fixcontrol" + "github.com/pingcap/tidb/pkg/sessionctx" + "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" + "github.com/pingcap/tidb/pkg/sessionctx/variable" + "github.com/pingcap/tidb/pkg/sessiontxn" + "github.com/pingcap/tidb/pkg/table" + "github.com/pingcap/tidb/pkg/tablecodec" + "github.com/pingcap/tidb/pkg/types" + "github.com/pingcap/tidb/pkg/util" + "github.com/pingcap/tidb/pkg/util/chunk" + "github.com/pingcap/tidb/pkg/util/dbterror/exeerrors" + "github.com/pingcap/tidb/pkg/util/deadlockhistory" + "github.com/pingcap/tidb/pkg/util/disk" + "github.com/pingcap/tidb/pkg/util/execdetails" + "github.com/pingcap/tidb/pkg/util/intest" + "github.com/pingcap/tidb/pkg/util/logutil" + "github.com/pingcap/tidb/pkg/util/memory" + "github.com/pingcap/tidb/pkg/util/syncutil" + "github.com/pingcap/tidb/pkg/util/topsql" + topsqlstate "github.com/pingcap/tidb/pkg/util/topsql/state" + "github.com/pingcap/tidb/pkg/util/tracing" + tikverr "github.com/tikv/client-go/v2/error" + tikvstore "github.com/tikv/client-go/v2/kv" + tikvutil "github.com/tikv/client-go/v2/util" + "go.uber.org/zap" +) + +var ( + _ exec.Executor = &aggregate.HashAggExec{} + _ exec.Executor = &IndexLookUpExecutor{} + _ exec.Executor = &IndexReaderExecutor{} + _ exec.Executor = &LimitExec{} + _ exec.Executor = &MaxOneRowExec{} + _ exec.Executor = &ProjectionExec{} + _ exec.Executor = &SelectionExec{} + _ exec.Executor = &SelectLockExec{} + _ exec.Executor = &sortexec.SortExec{} + _ exec.Executor = &aggregate.StreamAggExec{} + _ exec.Executor = &TableDualExec{} + _ exec.Executor = &TableReaderExecutor{} + _ exec.Executor = &TableScanExec{} + _ exec.Executor = &sortexec.TopNExec{} + + // GlobalMemoryUsageTracker is the ancestor of all the Executors' memory tracker and GlobalMemory Tracker + GlobalMemoryUsageTracker *memory.Tracker + // GlobalDiskUsageTracker is the ancestor of all the Executors' disk tracker + GlobalDiskUsageTracker *disk.Tracker + // GlobalAnalyzeMemoryTracker is the ancestor of all the Analyze jobs' memory tracker and child of global Tracker + GlobalAnalyzeMemoryTracker *memory.Tracker +) + +var ( + _ dataSourceExecutor = &TableReaderExecutor{} + _ dataSourceExecutor = &IndexReaderExecutor{} + _ dataSourceExecutor = &IndexLookUpExecutor{} + _ dataSourceExecutor = 
&IndexMergeReaderExecutor{} + + // CheckTableFastBucketSize is the bucket size of fast check table. + CheckTableFastBucketSize = atomic.Int64{} +) + +// dataSourceExecutor is a table DataSource converted Executor. +// Currently, there are TableReader/IndexReader/IndexLookUp/IndexMergeReader. +// Note, partition reader is special and the caller should handle it carefully. +type dataSourceExecutor interface { + exec.Executor + Table() table.Table +} + +const ( + // globalPanicStorageExceed represents the panic message when out of storage quota. + globalPanicStorageExceed string = "Out Of Quota For Local Temporary Space!" + // globalPanicMemoryExceed represents the panic message when out of memory limit. + globalPanicMemoryExceed string = "Out Of Global Memory Limit!" + // globalPanicAnalyzeMemoryExceed represents the panic message when out of analyze memory limit. + globalPanicAnalyzeMemoryExceed string = "Out Of Global Analyze Memory Limit!" +) + +// globalPanicOnExceed panics when GlobalDisTracker storage usage exceeds storage quota. +type globalPanicOnExceed struct { + memory.BaseOOMAction + mutex syncutil.Mutex // For synchronization. +} + +func init() { + action := &globalPanicOnExceed{} + GlobalMemoryUsageTracker = memory.NewGlobalTracker(memory.LabelForGlobalMemory, -1) + GlobalMemoryUsageTracker.SetActionOnExceed(action) + GlobalDiskUsageTracker = disk.NewGlobalTrcaker(memory.LabelForGlobalStorage, -1) + GlobalDiskUsageTracker.SetActionOnExceed(action) + GlobalAnalyzeMemoryTracker = memory.NewTracker(memory.LabelForGlobalAnalyzeMemory, -1) + GlobalAnalyzeMemoryTracker.SetActionOnExceed(action) + // register quota funcs + variable.SetMemQuotaAnalyze = GlobalAnalyzeMemoryTracker.SetBytesLimit + variable.GetMemQuotaAnalyze = GlobalAnalyzeMemoryTracker.GetBytesLimit + // TODO: do not attach now to avoid impact to global, will attach later when analyze memory track is stable + //GlobalAnalyzeMemoryTracker.AttachToGlobalTracker(GlobalMemoryUsageTracker) + + schematracker.ConstructResultOfShowCreateDatabase = ConstructResultOfShowCreateDatabase + schematracker.ConstructResultOfShowCreateTable = ConstructResultOfShowCreateTable + + // CheckTableFastBucketSize is used to set the fast analyze bucket size for check table. + CheckTableFastBucketSize.Store(1024) +} + +// Start the backend components +func Start() { + pdhelper.GlobalPDHelper.Start() +} + +// Stop the backend components +func Stop() { + pdhelper.GlobalPDHelper.Stop() +} + +// Action panics when storage usage exceeds storage quota. +func (a *globalPanicOnExceed) Action(t *memory.Tracker) { + a.mutex.Lock() + defer a.mutex.Unlock() + msg := "" + switch t.Label() { + case memory.LabelForGlobalStorage: + msg = globalPanicStorageExceed + case memory.LabelForGlobalMemory: + msg = globalPanicMemoryExceed + case memory.LabelForGlobalAnalyzeMemory: + msg = globalPanicAnalyzeMemoryExceed + default: + msg = "Out of Unknown Resource Quota!" + } + // TODO(hawkingrei): should return error instead. + panic(msg) +} + +// GetPriority get the priority of the Action +func (*globalPanicOnExceed) GetPriority() int64 { + return memory.DefPanicPriority +} + +// SelectLockExec represents a select lock executor. +// It is built from the "SELECT .. FOR UPDATE" or the "SELECT .. LOCK IN SHARE MODE" statement. +// For "SELECT .. FOR UPDATE" statement, it locks every row key from source Executor. +// After the execution, the keys are buffered in transaction, and will be sent to KV +// when doing commit. 
If there is any key already locked by another transaction,
+// the transaction will roll back and retry.
+type SelectLockExec struct {
+	exec.BaseExecutor
+
+	Lock *ast.SelectLockInfo
+	keys []kv.Key
+
+	// The children may be a join of multiple tables, so we need a map.
+	tblID2Handle map[int64][]plannerutil.HandleCols
+
+	// When SelectLock works on a partitioned table, we need the partition ID
+	// (Physical Table ID) instead of the 'logical' table ID to calculate
+	// the lock KV. In that case, the Physical Table ID is extracted
+	// from the row key in the store and returned as an extra column in the chunk row.
+
+	// tblID2PhyTblIDCol is used for partitioned tables.
+	// The child executor needs to return an extra column containing
+	// the Physical Table ID (i.e. which partition the row came from).
+	// Used during building.
+	tblID2PhysTblIDCol map[int64]*expression.Column
+
+	// Used during execution.
+	// Map from logical table ID to the column index where the physical table ID is stored.
+	// For dynamic prune mode, model.ExtraPhysTblID columns are requested from
+	// storage and used as the physical table ID.
+	// For static prune mode, model.ExtraPhysTblID is still sent to storage/Protobuf
+	// but could be filled in by the partition's TableReaderExecutor
+	// due to issues with chunk handling between the TableReaderExecutor and the
+	// SelectReader result.
+	tblID2PhysTblIDColIdx map[int64]int
+}
+
+// Open implements the Executor Open interface.
+func (e *SelectLockExec) Open(ctx context.Context) error {
+	if len(e.tblID2PhysTblIDCol) > 0 {
+		e.tblID2PhysTblIDColIdx = make(map[int64]int)
+		cols := e.Schema().Columns
+		for i := len(cols) - 1; i >= 0; i-- {
+			if cols[i].ID == model.ExtraPhysTblID {
+				for tblID, col := range e.tblID2PhysTblIDCol {
+					if cols[i].UniqueID == col.UniqueID {
+						e.tblID2PhysTblIDColIdx[tblID] = i
+						break
+					}
+				}
+			}
+		}
+	}
+	return e.BaseExecutor.Open(ctx)
+}
+
+// Next implements the Executor Next interface.
+func (e *SelectLockExec) Next(ctx context.Context, req *chunk.Chunk) error {
+	req.GrowAndReset(e.MaxChunkSize())
+	err := exec.Next(ctx, e.Children(0), req)
+	if err != nil {
+		return err
+	}
+	// If there's no handle or it's not a `SELECT FOR UPDATE` or `SELECT FOR SHARE` statement.
+	if len(e.tblID2Handle) == 0 || (!logicalop.IsSupportedSelectLockType(e.Lock.LockType)) {
+		return nil
+	}
+
+	if req.NumRows() > 0 {
+		iter := chunk.NewIterator4Chunk(req)
+		for row := iter.Begin(); row != iter.End(); row = iter.Next() {
+			for tblID, cols := range e.tblID2Handle {
+				for _, col := range cols {
+					handle, err := col.BuildHandle(row)
+					if err != nil {
+						return err
+					}
+					physTblID := tblID
+					if physTblColIdx, ok := e.tblID2PhysTblIDColIdx[tblID]; ok {
+						physTblID = row.GetInt64(physTblColIdx)
+						if physTblID == 0 {
+							// select * from t1 left join t2 on t1.c = t2.c for update
+							// The right side of the join might be NULL-padded in a left join.
+							// In that case, physTblID is 0, so skip adding the lock.
+							//
+							// Note, we can't distinguish whether it's the left join case,
+							// or a bug where TiKV returns rows without the correct physical ID column.
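+							// Either way, a zero physTblID cannot be mapped to a
+							// real partition, so skipping the lock is the safe choice.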
+ continue
+ }
+ }
+ e.keys = append(e.keys, tablecodec.EncodeRowKeyWithHandle(physTblID, handle))
+ }
+ }
+ }
+ return nil
+ }
+ lockWaitTime := e.Ctx().GetSessionVars().LockWaitTimeout
+ if e.Lock.LockType == ast.SelectLockForUpdateNoWait || e.Lock.LockType == ast.SelectLockForShareNoWait {
+ lockWaitTime = tikvstore.LockNoWait
+ } else if e.Lock.LockType == ast.SelectLockForUpdateWaitN {
+ lockWaitTime = int64(e.Lock.WaitSec) * 1000
+ }
+
+ for id := range e.tblID2Handle {
+ e.UpdateDeltaForTableID(id)
+ }
+ lockCtx, err := newLockCtx(e.Ctx(), lockWaitTime, len(e.keys))
+ if err != nil {
+ return err
+ }
+ return doLockKeys(ctx, e.Ctx(), lockCtx, e.keys...)
+}
+
+func newLockCtx(sctx sessionctx.Context, lockWaitTime int64, numKeys int) (*tikvstore.LockCtx, error) {
+ seVars := sctx.GetSessionVars()
+ forUpdateTS, err := sessiontxn.GetTxnManager(sctx).GetStmtForUpdateTS()
+ if err != nil {
+ return nil, err
+ }
+ lockCtx := tikvstore.NewLockCtx(forUpdateTS, lockWaitTime, seVars.StmtCtx.GetLockWaitStartTime())
+ lockCtx.Killed = &seVars.SQLKiller.Signal
+ lockCtx.PessimisticLockWaited = &seVars.StmtCtx.PessimisticLockWaited
+ lockCtx.LockKeysDuration = &seVars.StmtCtx.LockKeysDuration
+ lockCtx.LockKeysCount = &seVars.StmtCtx.LockKeysCount
+ lockCtx.LockExpired = &seVars.TxnCtx.LockExpire
+ lockCtx.ResourceGroupTagger = func(req *kvrpcpb.PessimisticLockRequest) []byte {
+ if req == nil {
+ return nil
+ }
+ if len(req.Mutations) == 0 {
+ return nil
+ }
+ if mutation := req.Mutations[0]; mutation != nil {
+ normalized, digest := seVars.StmtCtx.SQLDigest()
+ if len(normalized) == 0 {
+ return nil
+ }
+ _, planDigest := seVars.StmtCtx.GetPlanDigest()
+
+ return kv.NewResourceGroupTagBuilder().
+ SetPlanDigest(planDigest).
+ SetSQLDigest(digest).
+ EncodeTagWithKey(mutation.Key)
+ }
+ return nil
+ }
+ lockCtx.OnDeadlock = func(deadlock *tikverr.ErrDeadlock) {
+ cfg := config.GetGlobalConfig()
+ if deadlock.IsRetryable && !cfg.PessimisticTxn.DeadlockHistoryCollectRetryable {
+ return
+ }
+ rec := deadlockhistory.ErrDeadlockToDeadlockRecord(deadlock)
+ deadlockhistory.GlobalDeadlockHistory.Push(rec)
+ }
+ if lockCtx.ForUpdateTS > 0 && seVars.AssertionLevel != variable.AssertionLevelOff {
+ lockCtx.InitCheckExistence(numKeys)
+ }
+ return lockCtx, nil
+}
+
+// doLockKeys is the main entry for locking keys pessimistically.
+// waitTime is how long the lock operation will wait, in milliseconds, if the target key is
+// already locked by others; it is used for the (SELECT ... FOR UPDATE NOWAIT) situation.
+func doLockKeys(ctx context.Context, se sessionctx.Context, lockCtx *tikvstore.LockCtx, keys ...kv.Key) error {
+ sessVars := se.GetSessionVars()
+ sctx := sessVars.StmtCtx
+ if !sctx.InUpdateStmt && !sctx.InDeleteStmt {
+ atomic.StoreUint32(&se.GetSessionVars().TxnCtx.ForUpdate, 1)
+ }
+ // Lock keys only once when finished fetching all results.
+ txn, err := se.Txn(true)
+ if err != nil {
+ return err
+ }
+
+ // Skip the temporary table keys.
+ keys = filterTemporaryTableKeys(sessVars, keys)
+
+ keys = filterLockTableKeys(sessVars.StmtCtx, keys)
+ var lockKeyStats *tikvutil.LockKeysDetails
+ ctx = context.WithValue(ctx, tikvutil.LockKeysDetailCtxKey, &lockKeyStats)
+ err = txn.LockKeys(tikvutil.SetSessionID(ctx, se.GetSessionVars().ConnectionID), lockCtx, keys...)
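+ // lockKeyStats is filled in by the KV client through the context value
+ // installed above; merge it into the statement context so the lock-keys
+ // details are included in this statement's execution details.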
+ if lockKeyStats != nil { + sctx.MergeLockKeysExecDetails(lockKeyStats) + } + return err +} + +func filterTemporaryTableKeys(vars *variable.SessionVars, keys []kv.Key) []kv.Key { + txnCtx := vars.TxnCtx + if txnCtx == nil || txnCtx.TemporaryTables == nil { + return keys + } + + newKeys := keys[:0:len(keys)] + for _, key := range keys { + tblID := tablecodec.DecodeTableID(key) + if _, ok := txnCtx.TemporaryTables[tblID]; !ok { + newKeys = append(newKeys, key) + } + } + return newKeys +} + +func filterLockTableKeys(stmtCtx *stmtctx.StatementContext, keys []kv.Key) []kv.Key { + if len(stmtCtx.LockTableIDs) == 0 { + return keys + } + newKeys := keys[:0:len(keys)] + for _, key := range keys { + tblID := tablecodec.DecodeTableID(key) + if _, ok := stmtCtx.LockTableIDs[tblID]; ok { + newKeys = append(newKeys, key) + } + } + return newKeys +} + +// LimitExec represents limit executor +// It ignores 'Offset' rows from src, then returns 'Count' rows at maximum. +type LimitExec struct { + exec.BaseExecutor + + begin uint64 + end uint64 + cursor uint64 + + // meetFirstBatch represents whether we have met the first valid Chunk from child. + meetFirstBatch bool + + childResult *chunk.Chunk + + // columnIdxsUsedByChild keep column indexes of child executor used for inline projection + columnIdxsUsedByChild []int + columnSwapHelper *chunk.ColumnSwapHelper + + // Log the close time when opentracing is enabled. + span opentracing.Span +} + +// Next implements the Executor Next interface. +func (e *LimitExec) Next(ctx context.Context, req *chunk.Chunk) error { + req.Reset() + if e.cursor >= e.end { + return nil + } + for !e.meetFirstBatch { + // transfer req's requiredRows to childResult and then adjust it in childResult + e.childResult = e.childResult.SetRequiredRows(req.RequiredRows(), e.MaxChunkSize()) + err := exec.Next(ctx, e.Children(0), e.adjustRequiredRows(e.childResult)) + if err != nil { + return err + } + batchSize := uint64(e.childResult.NumRows()) + // no more data. + if batchSize == 0 { + return nil + } + if newCursor := e.cursor + batchSize; newCursor >= e.begin { + e.meetFirstBatch = true + begin, end := e.begin-e.cursor, batchSize + if newCursor > e.end { + end = e.end - e.cursor + } + e.cursor += end + if begin == end { + break + } + if e.columnIdxsUsedByChild != nil { + req.Append(e.childResult.Prune(e.columnIdxsUsedByChild), int(begin), int(end)) + } else { + req.Append(e.childResult, int(begin), int(end)) + } + return nil + } + e.cursor += batchSize + } + e.childResult.Reset() + e.childResult = e.childResult.SetRequiredRows(req.RequiredRows(), e.MaxChunkSize()) + e.adjustRequiredRows(e.childResult) + err := exec.Next(ctx, e.Children(0), e.childResult) + if err != nil { + return err + } + batchSize := uint64(e.childResult.NumRows()) + // no more data. + if batchSize == 0 { + return nil + } + if e.cursor+batchSize > e.end { + e.childResult.TruncateTo(int(e.end - e.cursor)) + batchSize = e.end - e.cursor + } + e.cursor += batchSize + + if e.columnIdxsUsedByChild != nil { + err = e.columnSwapHelper.SwapColumns(e.childResult, req) + if err != nil { + return err + } + } else { + req.SwapColumns(e.childResult) + } + return nil +} + +// Open implements the Executor Open interface. 
+func (e *LimitExec) Open(ctx context.Context) error {
+ if err := e.BaseExecutor.Open(ctx); err != nil {
+ return err
+ }
+ e.childResult = exec.TryNewCacheChunk(e.Children(0))
+ e.cursor = 0
+ e.meetFirstBatch = e.begin == 0
+ if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil {
+ e.span = span
+ }
+ return nil
+}
+
+// Close implements the Executor Close interface.
+func (e *LimitExec) Close() error {
+ start := time.Now()
+
+ e.childResult = nil
+ err := e.BaseExecutor.Close()
+
+ elapsed := time.Since(start)
+ if elapsed > time.Millisecond {
+ logutil.BgLogger().Info("limit executor close takes a long time",
+ zap.Duration("elapsed", elapsed))
+ if e.span != nil {
+ span1 := e.span.Tracer().StartSpan("limitExec.Close", opentracing.ChildOf(e.span.Context()), opentracing.StartTime(start))
+ defer span1.Finish()
+ }
+ }
+ return err
+}
+
+func (e *LimitExec) adjustRequiredRows(chk *chunk.Chunk) *chunk.Chunk {
+ // limitTotal is the maximum number of rows the LimitExec should still read.
+ limitTotal := int(e.end - e.cursor)
+
+ var limitRequired int
+ if e.cursor < e.begin {
+ // If the cursor is less than begin, it has to read (begin-cursor) rows to ignore
+ // and then read chk.RequiredRows() rows to return,
+ // so the limit is (begin-cursor)+chk.RequiredRows().
+ limitRequired = int(e.begin) - int(e.cursor) + chk.RequiredRows()
+ } else {
+ // If the cursor is equal to or larger than begin, just read chk.RequiredRows() rows to return.
+ limitRequired = chk.RequiredRows()
+ }
+
+ return chk.SetRequiredRows(min(limitTotal, limitRequired), e.MaxChunkSize())
+}
+
+func init() {
+ // While doing optimization in the plan package, we need to execute uncorrelated subqueries,
+ // but the plan package cannot import the executor package because of the dependency cycle.
+ // So we assign a function implemented in the executor package to the plan package to avoid the dependency cycle.
+ plannercore.EvalSubqueryFirstRow = func(ctx context.Context, p base.PhysicalPlan, is infoschema.InfoSchema, pctx planctx.PlanContext) ([]types.Datum, error) {
+ if fixcontrol.GetBoolWithDefault(pctx.GetSessionVars().OptimizerFixControl, fixcontrol.Fix43817, false) {
+ return nil, errors.NewNoStackError("evaluate non-correlated sub-queries during optimization phase is not allowed by fix-control 43817")
+ }
+
+ defer func(begin time.Time) {
+ s := pctx.GetSessionVars()
+ s.StmtCtx.SetSkipPlanCache("query has uncorrelated sub-queries is un-cacheable")
+ s.RewritePhaseInfo.PreprocessSubQueries++
+ s.RewritePhaseInfo.DurationPreprocessSubQuery += time.Since(begin)
+ }(time.Now())
+
+ r, ctx := tracing.StartRegionEx(ctx, "executor.EvalSubQuery")
+ defer r.End()
+
+ sctx, err := plannercore.AsSctx(pctx)
+ intest.AssertNoError(err)
+ if err != nil {
+ return nil, err
+ }
+
+ e := newExecutorBuilder(sctx, is)
+ executor := e.build(p)
+ if e.err != nil {
+ return nil, e.err
+ }
+ err = exec.Open(ctx, executor)
+ defer func() { terror.Log(exec.Close(executor)) }()
+ if err != nil {
+ return nil, err
+ }
+ if pi, ok := sctx.(processinfoSetter); ok {
+ // Before executing the sub-query, we need to update the process info to make the progress bar more accurate,
+ // because the sub-query may take a long time.
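+ // (The process info backs SHOW PROCESSLIST, so refreshing it here keeps
+ // the reported state accurate while the sub-query runs.)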
+ pi.UpdateProcessInfo() + } + chk := exec.TryNewCacheChunk(executor) + err = exec.Next(ctx, executor, chk) + if err != nil { + return nil, err + } + if chk.NumRows() == 0 { + return nil, nil + } + row := chk.GetRow(0).GetDatumRow(exec.RetTypes(executor)) + return row, err + } +} + +// TableDualExec represents a dual table executor. +type TableDualExec struct { + exec.BaseExecutorV2 + + // numDualRows can only be 0 or 1. + numDualRows int + numReturned int +} + +// Open implements the Executor Open interface. +func (e *TableDualExec) Open(context.Context) error { + e.numReturned = 0 + return nil +} + +// Next implements the Executor Next interface. +func (e *TableDualExec) Next(_ context.Context, req *chunk.Chunk) error { + req.Reset() + if e.numReturned >= e.numDualRows { + return nil + } + if e.Schema().Len() == 0 { + req.SetNumVirtualRows(1) + } else { + for i := range e.Schema().Columns { + req.AppendNull(i) + } + } + e.numReturned = e.numDualRows + return nil +} + +type selectionExecutorContext struct { + stmtMemTracker *memory.Tracker + evalCtx expression.EvalContext + enableVectorizedExpression bool +} + +func newSelectionExecutorContext(sctx sessionctx.Context) selectionExecutorContext { + return selectionExecutorContext{ + stmtMemTracker: sctx.GetSessionVars().StmtCtx.MemTracker, + evalCtx: sctx.GetExprCtx().GetEvalCtx(), + enableVectorizedExpression: sctx.GetSessionVars().EnableVectorizedExpression, + } +} + +// SelectionExec represents a filter executor. +type SelectionExec struct { + selectionExecutorContext + exec.BaseExecutorV2 + + batched bool + filters []expression.Expression + selected []bool + inputIter *chunk.Iterator4Chunk + inputRow chunk.Row + childResult *chunk.Chunk + + memTracker *memory.Tracker +} + +// Open implements the Executor Open interface. +func (e *SelectionExec) Open(ctx context.Context) error { + if err := e.BaseExecutorV2.Open(ctx); err != nil { + return err + } + failpoint.Inject("mockSelectionExecBaseExecutorOpenReturnedError", func(val failpoint.Value) { + if val.(bool) { + failpoint.Return(errors.New("mock SelectionExec.baseExecutor.Open returned error")) + } + }) + return e.open(ctx) +} + +func (e *SelectionExec) open(context.Context) error { + if e.memTracker != nil { + e.memTracker.Reset() + } else { + e.memTracker = memory.NewTracker(e.ID(), -1) + } + e.memTracker.AttachTo(e.stmtMemTracker) + e.childResult = exec.TryNewCacheChunk(e.Children(0)) + e.memTracker.Consume(e.childResult.MemoryUsage()) + e.batched = expression.Vectorizable(e.filters) + if e.batched { + e.selected = make([]bool, 0, chunk.InitialCapacity) + } + e.inputIter = chunk.NewIterator4Chunk(e.childResult) + e.inputRow = e.inputIter.End() + return nil +} + +// Close implements plannercore.Plan Close interface. +func (e *SelectionExec) Close() error { + if e.childResult != nil { + e.memTracker.Consume(-e.childResult.MemoryUsage()) + e.childResult = nil + } + e.selected = nil + return e.BaseExecutorV2.Close() +} + +// Next implements the Executor Next interface. 
+func (e *SelectionExec) Next(ctx context.Context, req *chunk.Chunk) error { + req.GrowAndReset(e.MaxChunkSize()) + + if !e.batched { + return e.unBatchedNext(ctx, req) + } + + for { + for ; e.inputRow != e.inputIter.End(); e.inputRow = e.inputIter.Next() { + if req.IsFull() { + return nil + } + + if !e.selected[e.inputRow.Idx()] { + continue + } + + req.AppendRow(e.inputRow) + } + mSize := e.childResult.MemoryUsage() + err := exec.Next(ctx, e.Children(0), e.childResult) + e.memTracker.Consume(e.childResult.MemoryUsage() - mSize) + if err != nil { + return err + } + // no more data. + if e.childResult.NumRows() == 0 { + return nil + } + e.selected, err = expression.VectorizedFilter(e.evalCtx, e.enableVectorizedExpression, e.filters, e.inputIter, e.selected) + if err != nil { + return err + } + e.inputRow = e.inputIter.Begin() + } +} + +// unBatchedNext filters input rows one by one and returns once an input row is selected. +// For sql with "SETVAR" in filter and "GETVAR" in projection, for example: "SELECT @a FROM t WHERE (@a := 2) > 0", +// we have to set batch size to 1 to do the evaluation of filter and projection. +func (e *SelectionExec) unBatchedNext(ctx context.Context, chk *chunk.Chunk) error { + evalCtx := e.evalCtx + for { + for ; e.inputRow != e.inputIter.End(); e.inputRow = e.inputIter.Next() { + selected, _, err := expression.EvalBool(evalCtx, e.filters, e.inputRow) + if err != nil { + return err + } + if selected { + chk.AppendRow(e.inputRow) + e.inputRow = e.inputIter.Next() + return nil + } + } + mSize := e.childResult.MemoryUsage() + err := exec.Next(ctx, e.Children(0), e.childResult) + e.memTracker.Consume(e.childResult.MemoryUsage() - mSize) + if err != nil { + return err + } + e.inputRow = e.inputIter.Begin() + // no more data. + if e.childResult.NumRows() == 0 { + return nil + } + } +} + +// TableScanExec is a table scan executor without result fields. +type TableScanExec struct { + exec.BaseExecutor + + t table.Table + columns []*model.ColumnInfo + virtualTableChunkList *chunk.List + virtualTableChunkIdx int +} + +// Next implements the Executor Next interface. +func (e *TableScanExec) Next(ctx context.Context, req *chunk.Chunk) error { + req.GrowAndReset(e.MaxChunkSize()) + return e.nextChunk4InfoSchema(ctx, req) +} + +func (e *TableScanExec) nextChunk4InfoSchema(ctx context.Context, chk *chunk.Chunk) error { + chk.GrowAndReset(e.MaxChunkSize()) + if e.virtualTableChunkList == nil { + e.virtualTableChunkList = chunk.NewList(exec.RetTypes(e), e.InitCap(), e.MaxChunkSize()) + columns := make([]*table.Column, e.Schema().Len()) + for i, colInfo := range e.columns { + columns[i] = table.ToColumn(colInfo) + } + mutableRow := chunk.MutRowFromTypes(exec.RetTypes(e)) + type tableIter interface { + IterRecords(ctx context.Context, sctx sessionctx.Context, cols []*table.Column, fn table.RecordIterFunc) error + } + err := (e.t.(tableIter)).IterRecords(ctx, e.Ctx(), columns, func(_ kv.Handle, rec []types.Datum, _ []*table.Column) (bool, error) { + mutableRow.SetDatums(rec...) + e.virtualTableChunkList.AppendRow(mutableRow.ToRow()) + return true, nil + }) + if err != nil { + return err + } + } + // no more data. + if e.virtualTableChunkIdx >= e.virtualTableChunkList.NumChunks() { + return nil + } + virtualTableChunk := e.virtualTableChunkList.GetChunk(e.virtualTableChunkIdx) + e.virtualTableChunkIdx++ + chk.SwapColumns(virtualTableChunk) + return nil +} + +// Open implements the Executor Open interface. 
+func (e *TableScanExec) Open(context.Context) error { + e.virtualTableChunkList = nil + return nil +} + +// MaxOneRowExec checks if the number of rows that a query returns is at maximum one. +// It's built from subquery expression. +type MaxOneRowExec struct { + exec.BaseExecutor + + evaluated bool +} + +// Open implements the Executor Open interface. +func (e *MaxOneRowExec) Open(ctx context.Context) error { + if err := e.BaseExecutor.Open(ctx); err != nil { + return err + } + e.evaluated = false + return nil +} + +// Next implements the Executor Next interface. +func (e *MaxOneRowExec) Next(ctx context.Context, req *chunk.Chunk) error { + req.Reset() + if e.evaluated { + return nil + } + e.evaluated = true + err := exec.Next(ctx, e.Children(0), req) + if err != nil { + return err + } + + if num := req.NumRows(); num == 0 { + for i := range e.Schema().Columns { + req.AppendNull(i) + } + return nil + } else if num != 1 { + return exeerrors.ErrSubqueryMoreThan1Row + } + + childChunk := exec.TryNewCacheChunk(e.Children(0)) + err = exec.Next(ctx, e.Children(0), childChunk) + if err != nil { + return err + } + if childChunk.NumRows() != 0 { + return exeerrors.ErrSubqueryMoreThan1Row + } + + return nil +} + +// ResetContextOfStmt resets the StmtContext and session variables. +// Before every execution, we must clear statement context. +func ResetContextOfStmt(ctx sessionctx.Context, s ast.StmtNode) (err error) { + defer func() { + if r := recover(); r != nil { + logutil.BgLogger().Warn("ResetContextOfStmt panicked", zap.Stack("stack"), zap.Any("recover", r), zap.Error(err)) + if err != nil { + err = stderrors.Join(err, util.GetRecoverError(r)) + } else { + err = util.GetRecoverError(r) + } + } + }() + vars := ctx.GetSessionVars() + for name, val := range vars.StmtCtx.SetVarHintRestore { + err := vars.SetSystemVar(name, val) + if err != nil { + logutil.BgLogger().Warn("Failed to restore the variable after SET_VAR hint", zap.String("variable name", name), zap.String("expected value", val)) + } + } + vars.StmtCtx.SetVarHintRestore = nil + var sc *stmtctx.StatementContext + if vars.TxnCtx.CouldRetry || vars.HasStatusFlag(mysql.ServerStatusCursorExists) { + // Must construct new statement context object, the retry history need context for every statement. + // TODO: Maybe one day we can get rid of transaction retry, then this logic can be deleted. 
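+ // (With an open cursor, the previous statement's context may still be read
+ // while its rows are being fetched, so it must not be reused either.)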
+ sc = stmtctx.NewStmtCtx() + } else { + sc = vars.InitStatementContext() + } + sc.SetTimeZone(vars.Location()) + sc.TaskID = stmtctx.AllocateTaskID() + if sc.CTEStorageMap == nil { + sc.CTEStorageMap = map[int]*CTEStorages{} + } else { + clear(sc.CTEStorageMap.(map[int]*CTEStorages)) + } + if sc.LockTableIDs == nil { + sc.LockTableIDs = make(map[int64]struct{}) + } else { + clear(sc.LockTableIDs) + } + if sc.TableStats == nil { + sc.TableStats = make(map[int64]any) + } else { + clear(sc.TableStats) + } + if sc.MDLRelatedTableIDs == nil { + sc.MDLRelatedTableIDs = make(map[int64]struct{}) + } else { + clear(sc.MDLRelatedTableIDs) + } + if sc.TblInfo2UnionScan == nil { + sc.TblInfo2UnionScan = make(map[*model.TableInfo]bool) + } else { + clear(sc.TblInfo2UnionScan) + } + sc.IsStaleness = false + sc.EnableOptimizeTrace = false + sc.OptimizeTracer = nil + sc.OptimizerCETrace = nil + sc.IsSyncStatsFailed = false + sc.IsExplainAnalyzeDML = false + sc.ResourceGroupName = vars.ResourceGroupName + // Firstly we assume that UseDynamicPruneMode can be enabled according session variable, then we will check other conditions + // in PlanBuilder.buildDataSource + if ctx.GetSessionVars().IsDynamicPartitionPruneEnabled() { + sc.UseDynamicPruneMode = true + } else { + sc.UseDynamicPruneMode = false + } + + sc.StatsLoad.Timeout = 0 + sc.StatsLoad.NeededItems = nil + sc.StatsLoad.ResultCh = nil + + sc.SysdateIsNow = ctx.GetSessionVars().SysdateIsNow + + vars.MemTracker.Detach() + vars.MemTracker.UnbindActions() + vars.MemTracker.SetBytesLimit(vars.MemQuotaQuery) + vars.MemTracker.ResetMaxConsumed() + vars.DiskTracker.Detach() + vars.DiskTracker.ResetMaxConsumed() + vars.MemTracker.SessionID.Store(vars.ConnectionID) + vars.MemTracker.Killer = &vars.SQLKiller + vars.DiskTracker.Killer = &vars.SQLKiller + vars.SQLKiller.Reset() + vars.SQLKiller.ConnID.Store(vars.ConnectionID) + + isAnalyze := false + if execStmt, ok := s.(*ast.ExecuteStmt); ok { + prepareStmt, err := plannercore.GetPreparedStmt(execStmt, vars) + if err != nil { + return err + } + _, isAnalyze = prepareStmt.PreparedAst.Stmt.(*ast.AnalyzeTableStmt) + } else if _, ok := s.(*ast.AnalyzeTableStmt); ok { + isAnalyze = true + } + if isAnalyze { + sc.InitMemTracker(memory.LabelForAnalyzeMemory, -1) + vars.MemTracker.SetBytesLimit(-1) + vars.MemTracker.AttachTo(GlobalAnalyzeMemoryTracker) + } else { + sc.InitMemTracker(memory.LabelForSQLText, -1) + } + logOnQueryExceedMemQuota := domain.GetDomain(ctx).ExpensiveQueryHandle().LogOnQueryExceedMemQuota + switch variable.OOMAction.Load() { + case variable.OOMActionCancel: + action := &memory.PanicOnExceed{ConnID: vars.ConnectionID, Killer: vars.MemTracker.Killer} + action.SetLogHook(logOnQueryExceedMemQuota) + vars.MemTracker.SetActionOnExceed(action) + case variable.OOMActionLog: + fallthrough + default: + action := &memory.LogOnExceed{ConnID: vars.ConnectionID} + action.SetLogHook(logOnQueryExceedMemQuota) + vars.MemTracker.SetActionOnExceed(action) + } + sc.MemTracker.SessionID.Store(vars.ConnectionID) + sc.MemTracker.AttachTo(vars.MemTracker) + sc.InitDiskTracker(memory.LabelForSQLText, -1) + globalConfig := config.GetGlobalConfig() + if variable.EnableTmpStorageOnOOM.Load() && sc.DiskTracker != nil { + sc.DiskTracker.AttachTo(vars.DiskTracker) + if GlobalDiskUsageTracker != nil { + vars.DiskTracker.AttachTo(GlobalDiskUsageTracker) + } + } + if execStmt, ok := s.(*ast.ExecuteStmt); ok { + prepareStmt, err := plannercore.GetPreparedStmt(execStmt, vars) + if err != nil { + return err + } + s = 
prepareStmt.PreparedAst.Stmt
+ // For `EXECUTE stmt` SQL, the SQL digest should be reset to the digest of the prepared statement.
+ sc.InitSQLDigest(prepareStmt.NormalizedSQL, prepareStmt.SQLDigest)
+ goCtx := context.Background()
+ if variable.EnablePProfSQLCPU.Load() && len(prepareStmt.NormalizedSQL) > 0 {
+ goCtx = pprof.WithLabels(goCtx, pprof.Labels("sql", FormatSQL(prepareStmt.NormalizedSQL).String()))
+ pprof.SetGoroutineLabels(goCtx)
+ }
+ if topsqlstate.TopSQLEnabled() && prepareStmt.SQLDigest != nil {
+ sc.IsSQLRegistered.Store(true)
+ topsql.AttachAndRegisterSQLInfo(goCtx, prepareStmt.NormalizedSQL, prepareStmt.SQLDigest, vars.InRestrictedSQL)
+ }
+ if s, ok := prepareStmt.PreparedAst.Stmt.(*ast.SelectStmt); ok {
+ if s.LockInfo == nil {
+ sc.WeakConsistency = isWeakConsistencyRead(ctx, execStmt)
+ }
+ }
+ }
+ // An EXECUTE statement that misses its stmtID uses an empty SQL text.
+ sc.OriginalSQL = s.Text()
+ if explainStmt, ok := s.(*ast.ExplainStmt); ok {
+ sc.InExplainStmt = true
+ sc.ExplainFormat = explainStmt.Format
+ sc.InExplainAnalyzeStmt = explainStmt.Analyze
+ sc.IgnoreExplainIDSuffix = strings.ToLower(explainStmt.Format) == types.ExplainFormatBrief
+ sc.InVerboseExplain = strings.ToLower(explainStmt.Format) == types.ExplainFormatVerbose
+ s = explainStmt.Stmt
+ } else {
+ sc.ExplainFormat = ""
+ }
+ if explainForStmt, ok := s.(*ast.ExplainForStmt); ok {
+ sc.InExplainStmt = true
+ sc.InExplainAnalyzeStmt = true
+ sc.InVerboseExplain = strings.ToLower(explainForStmt.Format) == types.ExplainFormatVerbose
+ }
+
+ // TODO: Many of these bool variables carry the same information.
+ // We should set only two variables (IgnoreErr and StrictSQLMode) instead of
+ // setting the same bool variables repeatedly and pushing them down to TiKV as flags.
+
+ sc.InRestrictedSQL = vars.InRestrictedSQL
+ strictSQLMode := vars.SQLMode.HasStrictMode()
+
+ errLevels := sc.ErrLevels()
+ errLevels[errctx.ErrGroupDividedByZero] = errctx.LevelWarn
+ switch stmt := s.(type) {
+ // `ResetUpdateStmtCtx` and `ResetDeleteStmtCtx` may modify the flags, so we'll need to store them.
+ case *ast.UpdateStmt:
+ ResetUpdateStmtCtx(sc, stmt, vars)
+ errLevels = sc.ErrLevels()
+ case *ast.DeleteStmt:
+ ResetDeleteStmtCtx(sc, stmt, vars)
+ errLevels = sc.ErrLevels()
+ case *ast.InsertStmt:
+ sc.InInsertStmt = true
+ // For INSERT statements (unlike UPDATE statements), disabling strict SQL mode
+ // should downgrade truncation and division-by-zero errors to warnings,
+ // but should not downgrade duplicate-key errors.
+ if stmt.IgnoreErr {
+ errLevels[errctx.ErrGroupDupKey] = errctx.LevelWarn
+ errLevels[errctx.ErrGroupAutoIncReadFailed] = errctx.LevelWarn
+ errLevels[errctx.ErrGroupNoMatchedPartition] = errctx.LevelWarn
+ }
+ // For single-row INSERT statements, ignore non-strict mode
+ // See https://dev.mysql.com/doc/refman/5.7/en/constraint-invalid-data.html
+ isSingleInsert := len(stmt.Lists) == 1
+ errLevels[errctx.ErrGroupBadNull] = errctx.ResolveErrLevel(false, (!strictSQLMode && !isSingleInsert) || stmt.IgnoreErr)
+ errLevels[errctx.ErrGroupNoDefault] = errctx.ResolveErrLevel(false, !strictSQLMode || stmt.IgnoreErr)
+ errLevels[errctx.ErrGroupDividedByZero] = errctx.ResolveErrLevel(
+ !vars.SQLMode.HasErrorForDivisionByZeroMode(),
+ !strictSQLMode || stmt.IgnoreErr,
+ )
+ sc.Priority = stmt.Priority
+ sc.SetTypeFlags(sc.TypeFlags().
+ WithTruncateAsWarning(!strictSQLMode || stmt.IgnoreErr).
+ WithIgnoreInvalidDateErr(vars.SQLMode.HasAllowInvalidDatesMode()).
+ WithIgnoreZeroInDate(!vars.SQLMode.HasNoZeroInDateMode() || + !vars.SQLMode.HasNoZeroDateMode() || !strictSQLMode || stmt.IgnoreErr || + vars.SQLMode.HasAllowInvalidDatesMode())) + case *ast.CreateTableStmt, *ast.AlterTableStmt: + sc.InCreateOrAlterStmt = true + sc.SetTypeFlags(sc.TypeFlags(). + WithTruncateAsWarning(!strictSQLMode). + WithIgnoreInvalidDateErr(vars.SQLMode.HasAllowInvalidDatesMode()). + WithIgnoreZeroInDate(!vars.SQLMode.HasNoZeroInDateMode() || !strictSQLMode || + vars.SQLMode.HasAllowInvalidDatesMode()). + WithIgnoreZeroDateErr(!vars.SQLMode.HasNoZeroDateMode() || !strictSQLMode)) + + case *ast.LoadDataStmt: + sc.InLoadDataStmt = true + // return warning instead of error when load data meet no partition for value + errLevels[errctx.ErrGroupNoMatchedPartition] = errctx.LevelWarn + case *ast.SelectStmt: + sc.InSelectStmt = true + + // Return warning for truncate error in selection. + sc.SetTypeFlags(sc.TypeFlags(). + WithTruncateAsWarning(true). + WithIgnoreZeroInDate(true). + WithIgnoreInvalidDateErr(vars.SQLMode.HasAllowInvalidDatesMode())) + if opts := stmt.SelectStmtOpts; opts != nil { + sc.Priority = opts.Priority + sc.NotFillCache = !opts.SQLCache + } + sc.WeakConsistency = isWeakConsistencyRead(ctx, stmt) + case *ast.SetOprStmt: + sc.InSelectStmt = true + sc.SetTypeFlags(sc.TypeFlags(). + WithTruncateAsWarning(true). + WithIgnoreZeroInDate(true). + WithIgnoreInvalidDateErr(vars.SQLMode.HasAllowInvalidDatesMode())) + case *ast.ShowStmt: + sc.SetTypeFlags(sc.TypeFlags(). + WithIgnoreTruncateErr(true). + WithIgnoreZeroInDate(true). + WithIgnoreInvalidDateErr(vars.SQLMode.HasAllowInvalidDatesMode())) + if stmt.Tp == ast.ShowWarnings || stmt.Tp == ast.ShowErrors || stmt.Tp == ast.ShowSessionStates { + sc.InShowWarning = true + sc.SetWarnings(vars.StmtCtx.GetWarnings()) + } + case *ast.SplitRegionStmt: + sc.SetTypeFlags(sc.TypeFlags(). + WithIgnoreTruncateErr(false). + WithIgnoreZeroInDate(true). + WithIgnoreInvalidDateErr(vars.SQLMode.HasAllowInvalidDatesMode())) + case *ast.SetSessionStatesStmt: + sc.InSetSessionStatesStmt = true + sc.SetTypeFlags(sc.TypeFlags(). + WithIgnoreTruncateErr(true). + WithIgnoreZeroInDate(true). + WithIgnoreInvalidDateErr(vars.SQLMode.HasAllowInvalidDatesMode())) + default: + sc.SetTypeFlags(sc.TypeFlags(). + WithIgnoreTruncateErr(true). + WithIgnoreZeroInDate(true). + WithIgnoreInvalidDateErr(vars.SQLMode.HasAllowInvalidDatesMode())) + } + + if errLevels != sc.ErrLevels() { + sc.SetErrLevels(errLevels) + } + + sc.SetTypeFlags(sc.TypeFlags(). + WithSkipUTF8Check(vars.SkipUTF8Check). + WithSkipSACIICheck(vars.SkipASCIICheck). + WithSkipUTF8MB4Check(!globalConfig.Instance.CheckMb4ValueInUTF8.Load()). + // WithAllowNegativeToUnsigned with false value indicates values less than 0 should be clipped to 0 for unsigned integer types. + // This is the case for `insert`, `update`, `alter table`, `create table` and `load data infile` statements, when not in strict SQL mode. 
+ // see https://dev.mysql.com/doc/refman/5.7/en/out-of-range-and-overflow.html + WithAllowNegativeToUnsigned(!sc.InInsertStmt && !sc.InLoadDataStmt && !sc.InUpdateStmt && !sc.InCreateOrAlterStmt), + ) + + vars.PlanCacheParams.Reset() + if priority := mysql.PriorityEnum(atomic.LoadInt32(&variable.ForcePriority)); priority != mysql.NoPriority { + sc.Priority = priority + } + if vars.StmtCtx.LastInsertID > 0 { + sc.PrevLastInsertID = vars.StmtCtx.LastInsertID + } else { + sc.PrevLastInsertID = vars.StmtCtx.PrevLastInsertID + } + sc.PrevAffectedRows = 0 + if vars.StmtCtx.InUpdateStmt || vars.StmtCtx.InDeleteStmt || vars.StmtCtx.InInsertStmt || vars.StmtCtx.InSetSessionStatesStmt { + sc.PrevAffectedRows = int64(vars.StmtCtx.AffectedRows()) + } else if vars.StmtCtx.InSelectStmt { + sc.PrevAffectedRows = -1 + } + if globalConfig.Instance.EnableCollectExecutionInfo.Load() { + // In ExplainFor case, RuntimeStatsColl should not be reset for reuse, + // because ExplainFor need to display the last statement information. + reuseObj := vars.StmtCtx.RuntimeStatsColl + if _, ok := s.(*ast.ExplainForStmt); ok { + reuseObj = nil + } + sc.RuntimeStatsColl = execdetails.NewRuntimeStatsColl(reuseObj) + + // also enable index usage collector + if sc.IndexUsageCollector == nil { + sc.IndexUsageCollector = ctx.NewStmtIndexUsageCollector() + } else { + sc.IndexUsageCollector.Reset() + } + } else { + // turn off the index usage collector + sc.IndexUsageCollector = nil + } + + sc.SetForcePlanCache(fixcontrol.GetBoolWithDefault(vars.OptimizerFixControl, fixcontrol.Fix49736, false)) + sc.SetAlwaysWarnSkipCache(sc.InExplainStmt && sc.ExplainFormat == "plan_cache") + errCount, warnCount := vars.StmtCtx.NumErrorWarnings() + vars.SysErrorCount = errCount + vars.SysWarningCount = warnCount + vars.ExchangeChunkStatus() + vars.StmtCtx = sc + vars.PrevFoundInPlanCache = vars.FoundInPlanCache + vars.FoundInPlanCache = false + vars.PrevFoundInBinding = vars.FoundInBinding + vars.FoundInBinding = false + vars.DurationWaitTS = 0 + vars.CurrInsertBatchExtraCols = nil + vars.CurrInsertValues = chunk.Row{} + + return +} + +// ResetUpdateStmtCtx resets statement context for UpdateStmt. +func ResetUpdateStmtCtx(sc *stmtctx.StatementContext, stmt *ast.UpdateStmt, vars *variable.SessionVars) { + strictSQLMode := vars.SQLMode.HasStrictMode() + sc.InUpdateStmt = true + errLevels := sc.ErrLevels() + errLevels[errctx.ErrGroupDupKey] = errctx.ResolveErrLevel(false, stmt.IgnoreErr) + errLevels[errctx.ErrGroupBadNull] = errctx.ResolveErrLevel(false, !strictSQLMode || stmt.IgnoreErr) + errLevels[errctx.ErrGroupNoDefault] = errLevels[errctx.ErrGroupBadNull] + errLevels[errctx.ErrGroupDividedByZero] = errctx.ResolveErrLevel( + !vars.SQLMode.HasErrorForDivisionByZeroMode(), + !strictSQLMode || stmt.IgnoreErr, + ) + errLevels[errctx.ErrGroupNoMatchedPartition] = errctx.ResolveErrLevel(false, stmt.IgnoreErr) + sc.SetErrLevels(errLevels) + sc.Priority = stmt.Priority + sc.SetTypeFlags(sc.TypeFlags(). + WithTruncateAsWarning(!strictSQLMode || stmt.IgnoreErr). + WithIgnoreInvalidDateErr(vars.SQLMode.HasAllowInvalidDatesMode()). + WithIgnoreZeroInDate(!vars.SQLMode.HasNoZeroInDateMode() || !vars.SQLMode.HasNoZeroDateMode() || + !strictSQLMode || stmt.IgnoreErr || vars.SQLMode.HasAllowInvalidDatesMode())) +} + +// ResetDeleteStmtCtx resets statement context for DeleteStmt. 
+func ResetDeleteStmtCtx(sc *stmtctx.StatementContext, stmt *ast.DeleteStmt, vars *variable.SessionVars) { + strictSQLMode := vars.SQLMode.HasStrictMode() + sc.InDeleteStmt = true + errLevels := sc.ErrLevels() + errLevels[errctx.ErrGroupDupKey] = errctx.ResolveErrLevel(false, stmt.IgnoreErr) + errLevels[errctx.ErrGroupBadNull] = errctx.ResolveErrLevel(false, !strictSQLMode || stmt.IgnoreErr) + errLevels[errctx.ErrGroupNoDefault] = errLevels[errctx.ErrGroupBadNull] + errLevels[errctx.ErrGroupDividedByZero] = errctx.ResolveErrLevel( + !vars.SQLMode.HasErrorForDivisionByZeroMode(), + !strictSQLMode || stmt.IgnoreErr, + ) + sc.SetErrLevels(errLevels) + sc.Priority = stmt.Priority + sc.SetTypeFlags(sc.TypeFlags(). + WithTruncateAsWarning(!strictSQLMode || stmt.IgnoreErr). + WithIgnoreInvalidDateErr(vars.SQLMode.HasAllowInvalidDatesMode()). + WithIgnoreZeroInDate(!vars.SQLMode.HasNoZeroInDateMode() || !vars.SQLMode.HasNoZeroDateMode() || + !strictSQLMode || stmt.IgnoreErr || vars.SQLMode.HasAllowInvalidDatesMode())) +} + +func setOptionForTopSQL(sc *stmtctx.StatementContext, snapshot kv.Snapshot) { + if snapshot == nil { + return + } + // pipelined dml may already flush in background, don't touch it to avoid race. + if txn, ok := snapshot.(kv.Transaction); ok && txn.IsPipelined() { + return + } + snapshot.SetOption(kv.ResourceGroupTagger, sc.GetResourceGroupTagger()) + if sc.KvExecCounter != nil { + snapshot.SetOption(kv.RPCInterceptor, sc.KvExecCounter.RPCInterceptor()) + } +} + +func isWeakConsistencyRead(ctx sessionctx.Context, node ast.Node) bool { + sessionVars := ctx.GetSessionVars() + return sessionVars.ConnectionID > 0 && sessionVars.ReadConsistency.IsWeak() && + plannercore.IsAutoCommitTxn(sessionVars) && plannercore.IsReadOnly(node, sessionVars) +} diff --git a/pkg/executor/test/writetest/write_test.go b/pkg/executor/test/writetest/write_test.go new file mode 100644 index 0000000000000..98def043bcea3 --- /dev/null +++ b/pkg/executor/test/writetest/write_test.go @@ -0,0 +1,551 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package writetest + +import ( + "context" + "errors" + "fmt" + "io" + "testing" + + "github.com/pingcap/tidb/pkg/errctx" + "github.com/pingcap/tidb/pkg/executor" + "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/lightning/mydump" + "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/parser/mysql" + "github.com/pingcap/tidb/pkg/session" + "github.com/pingcap/tidb/pkg/sessionctx" + "github.com/pingcap/tidb/pkg/sessiontxn" + "github.com/pingcap/tidb/pkg/store/mockstore" + "github.com/pingcap/tidb/pkg/table/tables" + "github.com/pingcap/tidb/pkg/testkit" + "github.com/pingcap/tidb/pkg/types" + "github.com/pingcap/tidb/pkg/util" + "github.com/pingcap/tidb/pkg/util/mock" + "github.com/stretchr/testify/require" +) + +func TestInsertIgnore(t *testing.T) { + store := testkit.CreateMockStore(t) + var cfg kv.InjectionConfig + tk := testkit.NewTestKit(t, kv.NewInjectedStore(store, &cfg)) + tk.MustExec("use test") + testSQL := `drop table if exists t; + create table t (id int PRIMARY KEY AUTO_INCREMENT, c1 int unique key);` + tk.MustExec(testSQL) + testSQL = `insert into t values (1, 2);` + tk.MustExec(testSQL) + require.Empty(t, tk.Session().LastMessage()) + + r := tk.MustQuery("select * from t;") + rowStr := fmt.Sprintf("%v %v", "1", "2") + r.Check(testkit.Rows(rowStr)) + + tk.MustExec("insert ignore into t values (1, 3), (2, 3)") + require.Equal(t, tk.Session().LastMessage(), "Records: 2 Duplicates: 1 Warnings: 1") + r = tk.MustQuery("select * from t;") + rowStr1 := fmt.Sprintf("%v %v", "2", "3") + r.Check(testkit.Rows(rowStr, rowStr1)) + + tk.MustExec("insert ignore into t values (3, 4), (3, 4)") + require.Equal(t, tk.Session().LastMessage(), "Records: 2 Duplicates: 1 Warnings: 1") + r = tk.MustQuery("select * from t;") + rowStr2 := fmt.Sprintf("%v %v", "3", "4") + r.Check(testkit.Rows(rowStr, rowStr1, rowStr2)) + + tk.MustExec("begin") + tk.MustExec("insert ignore into t values (4, 4), (4, 5), (4, 6)") + require.Equal(t, tk.Session().LastMessage(), "Records: 3 Duplicates: 2 Warnings: 2") + r = tk.MustQuery("select * from t;") + rowStr3 := fmt.Sprintf("%v %v", "4", "5") + r.Check(testkit.Rows(rowStr, rowStr1, rowStr2, rowStr3)) + tk.MustExec("commit") + + cfg.SetGetError(errors.New("foo")) + err := tk.ExecToErr("insert ignore into t values (1, 3)") + require.Error(t, err) + cfg.SetGetError(nil) + + // for issue 4268 + testSQL = `drop table if exists t; + create table t (a bigint);` + tk.MustExec(testSQL) + testSQL = "insert ignore into t select '1a';" + err = tk.ExecToErr(testSQL) + require.NoError(t, err) + require.Equal(t, tk.Session().LastMessage(), "Records: 1 Duplicates: 0 Warnings: 1") + r = tk.MustQuery("SHOW WARNINGS") + r.Check(testkit.Rows("Warning 1292 Truncated incorrect DOUBLE value: '1a'")) + testSQL = "insert ignore into t values ('1a')" + err = tk.ExecToErr(testSQL) + require.NoError(t, err) + require.Empty(t, tk.Session().LastMessage()) + r = tk.MustQuery("SHOW WARNINGS") + // TODO: MySQL8.0 reports Warning 1265 Data truncated for column 'a' at row 1 + r.Check(testkit.Rows("Warning 1366 Incorrect bigint value: '1a' for column 'a' at row 1")) + + // for duplicates with warning + testSQL = `drop table if exists t; + create table t(a int primary key, b int);` + tk.MustExec(testSQL) + testSQL = "insert ignore into t values (1,1);" + tk.MustExec(testSQL) + require.Empty(t, tk.Session().LastMessage()) + err = tk.ExecToErr(testSQL) + require.Empty(t, tk.Session().LastMessage()) + require.NoError(t, err) + r = tk.MustQuery("SHOW WARNINGS") + 
r.Check(testkit.Rows("Warning 1062 Duplicate entry '1' for key 't.PRIMARY'"))
+
+ testSQL = `drop table if exists test;
+create table test (i int primary key, j int unique);
+begin;
+insert into test values (1,1);
+insert ignore into test values (2,1);
+commit;`
+ tk.MustExec(testSQL)
+ testSQL = `select * from test;`
+ r = tk.MustQuery(testSQL)
+ r.Check(testkit.Rows("1 1"))
+
+ testSQL = `delete from test;
+insert into test values (1, 1);
+begin;
+delete from test where i = 1;
+insert ignore into test values (2, 1);
+commit;`
+ tk.MustExec(testSQL)
+ testSQL = `select * from test;`
+ r = tk.MustQuery(testSQL)
+ r.Check(testkit.Rows("2 1"))
+
+ testSQL = `delete from test;
+insert into test values (1, 1);
+begin;
+update test set i = 2, j = 2 where i = 1;
+insert ignore into test values (1, 3);
+insert ignore into test values (2, 4);
+commit;`
+ tk.MustExec(testSQL)
+ testSQL = `select * from test order by i;`
+ r = tk.MustQuery(testSQL)
+ r.Check(testkit.Rows("1 3", "2 2"))
+
+ testSQL = `create table badnull (i int not null)`
+ tk.MustExec(testSQL)
+ testSQL = `insert ignore into badnull values (null)`
+ tk.MustExec(testSQL)
+ require.Empty(t, tk.Session().LastMessage())
+ tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1048 Column 'i' cannot be null"))
+ testSQL = `select * from badnull`
+ tk.MustQuery(testSQL).Check(testkit.Rows("0"))
+
+ tk.MustExec("create table tp (id int) partition by range (id) (partition p0 values less than (1), partition p1 values less than(2))")
+ tk.MustExec("insert ignore into tp values (1), (3)")
+ tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1526 Table has no partition for value 3"))
+}
+
+type testCase struct {
+ data []byte
+ expected []string
+ expectedMsg string
+}
+
+func checkCases(
+ tests []testCase,
+ loadSQL string,
+ t *testing.T,
+ tk *testkit.TestKit,
+ ctx sessionctx.Context,
+ selectSQL, deleteSQL string,
+) {
+ for _, tt := range tests {
+ var reader io.ReadCloser = mydump.NewStringReader(string(tt.data))
+ var readerBuilder executor.LoadDataReaderBuilder = func(_ string) (
+ r io.ReadCloser, err error,
+ ) {
+ return reader, nil
+ }
+
+ ctx.SetValue(executor.LoadDataReaderBuilderKey, readerBuilder)
+ tk.MustExec(loadSQL)
+ warnings := tk.Session().GetSessionVars().StmtCtx.GetWarnings()
+ for _, w := range warnings {
+ fmt.Printf("warning: %#v\n", w.Err.Error())
+ }
+ require.Equal(t, tt.expectedMsg, tk.Session().LastMessage(), tt.expected)
+ tk.MustQuery(selectSQL).Check(testkit.RowsWithSep("|", tt.expected...))
+ tk.MustExec(deleteSQL)
+ }
+}
+
+func TestLoadDataMissingColumn(t *testing.T) {
+ store := testkit.CreateMockStore(t)
+ tk := testkit.NewTestKit(t, store)
+ tk.MustExec("use test")
+ createSQL := `create table load_data_missing (id int, t timestamp not null)`
+ tk.MustExec(createSQL)
+ loadSQL := "load data local infile '/tmp/nonexistence.csv' ignore into table load_data_missing"
+ ctx := tk.Session().(sessionctx.Context)
+
+ deleteSQL := "delete from load_data_missing"
+ selectSQL := "select id, hour(t), minute(t) from load_data_missing;"
+
+ curTime := types.CurrentTime(mysql.TypeTimestamp)
+ timeHour := curTime.Hour()
+ timeMinute := curTime.Minute()
+ tests := []testCase{
+ {[]byte(""), nil, "Records: 0 Deleted: 0 Skipped: 0 Warnings: 0"},
+ {[]byte("12\n"), []string{fmt.Sprintf("12|%v|%v", timeHour, timeMinute)}, "Records: 1 Deleted: 0 Skipped: 0 Warnings: 1"},
+ }
+ checkCases(tests, loadSQL, t, tk, ctx, selectSQL, deleteSQL)
+
+ tk.MustExec("alter table load_data_missing add column t2 timestamp
null") + curTime = types.CurrentTime(mysql.TypeTimestamp) + timeHour = curTime.Hour() + timeMinute = curTime.Minute() + selectSQL = "select id, hour(t), minute(t), t2 from load_data_missing;" + tests = []testCase{ + {[]byte("12\n"), []string{fmt.Sprintf("12|%v|%v|", timeHour, timeMinute)}, "Records: 1 Deleted: 0 Skipped: 0 Warnings: 1"}, + } + checkCases(tests, loadSQL, t, tk, ctx, selectSQL, deleteSQL) +} + +func TestIssue18681(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + createSQL := `drop table if exists load_data_test; + create table load_data_test (a bit(1),b bit(1),c bit(1),d bit(1),e bit(32),f bit(1));` + tk.MustExec(createSQL) + loadSQL := "load data local infile '/tmp/nonexistence.csv' ignore into table load_data_test" + ctx := tk.Session().(sessionctx.Context) + + deleteSQL := "delete from load_data_test" + selectSQL := "select bin(a), bin(b), bin(c), bin(d), bin(e), bin(f) from load_data_test;" + levels := ctx.GetSessionVars().StmtCtx.ErrLevels() + levels[errctx.ErrGroupDupKey] = errctx.LevelWarn + levels[errctx.ErrGroupBadNull] = errctx.LevelWarn + levels[errctx.ErrGroupNoDefault] = errctx.LevelWarn + + sc := ctx.GetSessionVars().StmtCtx + oldTypeFlags := sc.TypeFlags() + defer func() { + sc.SetTypeFlags(oldTypeFlags) + }() + sc.SetTypeFlags(oldTypeFlags.WithIgnoreTruncateErr(true)) + tests := []testCase{ + {[]byte("true\tfalse\t0\t1\tb'1'\tb'1'\n"), []string{"1|1|1|1|1100010001001110011000100100111|1"}, "Records: 1 Deleted: 0 Skipped: 0 Warnings: 5"}, + } + checkCases(tests, loadSQL, t, tk, ctx, selectSQL, deleteSQL) + require.Equal(t, uint16(0), sc.WarningCount()) +} + +func TestIssue34358(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + ctx := tk.Session().(sessionctx.Context) + defer ctx.SetValue(executor.LoadDataVarKey, nil) + + tk.MustExec("use test") + tk.MustExec("drop table if exists load_data_test") + tk.MustExec("create table load_data_test (a varchar(10), b varchar(10))") + + loadSQL := "load data local infile '/tmp/nonexistence.csv' into table load_data_test ( @v1, " + + "@v2 ) set a = @v1, b = @v2" + checkCases([]testCase{ + {[]byte("\\N\n"), []string{"|"}, "Records: 1 Deleted: 0 Skipped: 0 Warnings: 1"}, + }, loadSQL, t, tk, ctx, "select * from load_data_test", "delete from load_data_test", + ) +} + +func TestLatch(t *testing.T) { + store, err := mockstore.NewMockStore( + // Small latch slot size to make conflicts. + mockstore.WithTxnLocalLatches(64), + ) + require.NoError(t, err) + defer func() { + err := store.Close() + require.NoError(t, err) + }() + + dom, err1 := session.BootstrapSession(store) + require.Nil(t, err1) + defer dom.Close() + + setTxnTk := testkit.NewTestKit(t, store) + setTxnTk.MustExec("set global tidb_txn_mode=''") + tk1 := testkit.NewTestKit(t, store) + tk1.MustExec("use test") + tk1.MustExec("drop table if exists t") + tk1.MustExec("create table t (id int)") + tk1.MustExec("set @@tidb_disable_txn_auto_retry = true") + + tk2 := testkit.NewTestKit(t, store) + tk2.MustExec("use test") + tk1.MustExec("set @@tidb_disable_txn_auto_retry = true") + + fn := func() { + tk1.MustExec("begin") + for i := 0; i < 100; i++ { + tk1.MustExec(fmt.Sprintf("insert into t values (%d)", i)) + } + tk2.MustExec("begin") + for i := 100; i < 200; i++ { + tk1.MustExec(fmt.Sprintf("insert into t values (%d)", i)) + } + tk2.MustExec("commit") + } + + // txn1 and txn2 data range do not overlap, using latches should not + // result in txn conflict. 
+ fn()
+ tk1.MustExec("commit")
+
+ tk1.MustExec("truncate table t")
+ fn()
+ tk1.MustExec("commit")
+
+ // Test the error type of the latch; the commit could be retried if TiDB enables retry.
+ tk1.MustExec("begin")
+ tk1.MustExec("update t set id = id + 1")
+ tk2.MustExec("update t set id = id + 1")
+ tk1.MustGetDBError("commit", kv.ErrWriteConflictInTiDB)
+}
+
+func TestReplaceLog(t *testing.T) {
+ store, domain := testkit.CreateMockStoreAndDomain(t)
+ tk := testkit.NewTestKit(t, store)
+ tk.MustExec("use test")
+ tk.MustExec(`create table testLog (a int not null primary key, b int unique key);`)
+
+ // Make a dangling index entry.
+ ctx := mock.NewContext()
+ ctx.Store = store
+ is := domain.InfoSchema()
+ dbName := model.NewCIStr("test")
+ tblName := model.NewCIStr("testLog")
+ tbl, err := is.TableByName(context.Background(), dbName, tblName)
+ require.NoError(t, err)
+ tblInfo := tbl.Meta()
+ idxInfo := tblInfo.FindIndexByName("b")
+ indexOpr := tables.NewIndex(tblInfo.ID, tblInfo, idxInfo)
+
+ txn, err := store.Begin()
+ require.NoError(t, err)
+ _, err = indexOpr.Create(ctx.GetTableCtx(), txn, types.MakeDatums(1), kv.IntHandle(1), nil)
+ require.NoError(t, err)
+ err = txn.Commit(context.Background())
+ require.NoError(t, err)
+
+ err = tk.ExecToErr(`replace into testLog values (0, 0), (1, 1);`)
+ require.Error(t, err)
+ require.EqualError(t, err, `can not be duplicated row, due to old row not found. handle 1 not found`)
+ tk.MustQuery(`admin cleanup index testLog b;`).Check(testkit.Rows("1"))
+}
+
+// TestRebaseIfNeeded is for issue 7422.
+// There is no need to rebase the auto-increment ID when updating a record
+// if the ID is not changed; rebasing would make auto IDs grow faster than necessary.
+func TestRebaseIfNeeded(t *testing.T) {
+ store, domain := testkit.CreateMockStoreAndDomain(t)
+ tk := testkit.NewTestKit(t, store)
+ tk.MustExec("use test")
+ tk.MustExec(`create table t (a int not null primary key auto_increment, b int unique key);`)
+ tk.MustExec(`insert into t (b) values (1);`)
+
+ ctx := mock.NewContext()
+ ctx.Store = store
+ tbl, err := domain.InfoSchema().TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t"))
+ require.NoError(t, err)
+ require.Nil(t, sessiontxn.NewTxn(context.Background(), ctx))
+ txn, err := ctx.Txn(true)
+ require.NoError(t, err)
+ // Calling AddRecord directly skips the auto-ID rebase an INSERT statement would do,
+ // which simulates another TiDB instance inserting a row with a large auto ID.
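+ // For example: the allocator has only handed out ID 1 so far, and this
+ // writes a row with handle 30001 behind its back; the assertions below
+ // check that plain UPDATEs and no-op upserts keep allocating small IDs
+ // (2, 4, ...) and that only the `a = a + 1` upsert forces a rebase past 30001.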
+ _, err = tbl.AddRecord(ctx.GetTableCtx(), txn, types.MakeDatums(30001, 2)) + require.NoError(t, err) + require.NoError(t, txn.Commit(context.Background())) + + tk.MustExec(`update t set b = 3 where a = 30001;`) + tk.MustExec(`insert into t (b) values (4);`) + tk.MustQuery(`select a from t where b = 4;`).Check(testkit.Rows("2")) + + tk.MustExec(`insert into t set b = 3 on duplicate key update a = a;`) + tk.MustExec(`insert into t (b) values (5);`) + tk.MustQuery(`select a from t where b = 5;`).Check(testkit.Rows("4")) + + tk.MustExec(`insert into t set b = 3 on duplicate key update a = a + 1;`) + tk.MustExec(`insert into t (b) values (6);`) + tk.MustQuery(`select a from t where b = 6;`).Check(testkit.Rows("30003")) +} + +func TestDeferConstraintCheckForInsert(t *testing.T) { + store := testkit.CreateMockStore(t) + setTxnTk := testkit.NewTestKit(t, store) + setTxnTk.MustExec("set global tidb_txn_mode=''") + tk := testkit.NewTestKit(t, store) + tk.MustExec(`use test`) + + tk.MustExec(`drop table if exists t;create table t (a int primary key, b int);`) + tk.MustExec(`insert into t values (1,2),(2,2)`) + err := tk.ExecToErr("update t set a=a+1 where b=2") + require.Error(t, err) + + tk.MustExec(`drop table if exists t;create table t (i int key);`) + tk.MustExec(`insert t values (1);`) + tk.MustExec(`set tidb_constraint_check_in_place = 1;`) + tk.MustExec(`begin;`) + err = tk.ExecToErr(`insert t values (1);`) + require.Error(t, err) + tk.MustExec(`update t set i = 2 where i = 1;`) + tk.MustExec(`commit;`) + tk.MustQuery(`select * from t;`).Check(testkit.Rows("2")) + + tk.MustExec(`set tidb_constraint_check_in_place = 0;`) + tk.MustExec("replace into t values (1),(2)") + tk.MustExec("begin") + err = tk.ExecToErr("update t set i = 2 where i = 1") + require.Error(t, err) + err = tk.ExecToErr("insert into t values (1) on duplicate key update i = i + 1") + require.Error(t, err) + tk.MustExec("rollback") + + tk.MustExec(`drop table t; create table t (id int primary key, v int unique);`) + tk.MustExec(`insert into t values (1, 1)`) + tk.MustExec(`set tidb_constraint_check_in_place = 1;`) + tk.MustExec(`set @@autocommit = 0;`) + + err = tk.ExecToErr("insert into t values (3, 1)") + require.Error(t, err) + err = tk.ExecToErr("insert into t values (1, 3)") + require.Error(t, err) + tk.MustExec("commit") + + tk.MustExec(`set tidb_constraint_check_in_place = 0;`) + tk.MustExec("insert into t values (3, 1)") + tk.MustExec("insert into t values (1, 3)") + err = tk.ExecToErr("commit") + require.Error(t, err) + + // Cover the temporary table. 
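+ // The loop below repeats the constraint checks for global temporary and
+ // local temporary tables under both settings of tidb_constraint_check_in_place.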
+ for val := range []int{0, 1} { + tk.MustExec("set tidb_constraint_check_in_place = ?", val) + + tk.MustExec("drop table t") + tk.MustExec("create global temporary table t (a int primary key, b int) on commit delete rows") + tk.MustExec("begin") + tk.MustExec("insert into t values (1, 1)") + err = tk.ExecToErr(`insert into t values (1, 3)`) + require.Error(t, err) + tk.MustExec("insert into t values (2, 2)") + err = tk.ExecToErr("update t set a = a + 1 where a = 1") + require.Error(t, err) + err = tk.ExecToErr("insert into t values (1, 3) on duplicated key update a = a + 1") + require.Error(t, err) + tk.MustExec("commit") + + tk.MustExec("drop table t") + tk.MustExec("create global temporary table t (a int, b int unique) on commit delete rows") + tk.MustExec("begin") + tk.MustExec("insert into t values (1, 1)") + err = tk.ExecToErr(`insert into t values (3, 1)`) + require.Error(t, err) + tk.MustExec("insert into t values (2, 2)") + err = tk.ExecToErr("update t set b = b + 1 where a = 1") + require.Error(t, err) + err = tk.ExecToErr("insert into t values (3, 1) on duplicated key update b = b + 1") + require.Error(t, err) + tk.MustExec("commit") + + // cases for temporary table + tk.MustExec("drop table if exists tl") + tk.MustExec("create temporary table tl (a int primary key, b int)") + tk.MustExec("begin") + tk.MustExec("insert into tl values (1, 1)") + err = tk.ExecToErr(`insert into tl values (1, 3)`) + require.Error(t, err) + tk.MustExec("insert into tl values (2, 2)") + err = tk.ExecToErr("update tl set a = a + 1 where a = 1") + require.Error(t, err) + err = tk.ExecToErr("insert into tl values (1, 3) on duplicated key update a = a + 1") + require.Error(t, err) + tk.MustExec("commit") + + tk.MustExec("begin") + tk.MustQuery("select * from tl").Check(testkit.Rows("1 1", "2 2")) + err = tk.ExecToErr(`insert into tl values (1, 3)`) + require.Error(t, err) + err = tk.ExecToErr("update tl set a = a + 1 where a = 1") + require.Error(t, err) + err = tk.ExecToErr("insert into tl values (1, 3) on duplicated key update a = a + 1") + require.Error(t, err) + tk.MustExec("rollback") + + tk.MustExec("drop table tl") + tk.MustExec("create temporary table tl (a int, b int unique)") + tk.MustExec("begin") + tk.MustExec("insert into tl values (1, 1)") + err = tk.ExecToErr(`insert into tl values (3, 1)`) + require.Error(t, err) + tk.MustExec("insert into tl values (2, 2)") + err = tk.ExecToErr("update tl set b = b + 1 where a = 1") + require.Error(t, err) + err = tk.ExecToErr("insert into tl values (3, 1) on duplicated key update b = b + 1") + require.Error(t, err) + tk.MustExec("commit") + + tk.MustExec("begin") + tk.MustQuery("select * from tl").Check(testkit.Rows("1 1", "2 2")) + err = tk.ExecToErr(`insert into tl values (3, 1)`) + require.Error(t, err) + err = tk.ExecToErr("update tl set b = b + 1 where a = 1") + require.Error(t, err) + err = tk.ExecToErr("insert into tl values (3, 1) on duplicated key update b = b + 1") + require.Error(t, err) + tk.MustExec("rollback") + } +} + +func TestPessimisticDeleteYourWrites(t *testing.T) { + store := testkit.CreateMockStore(t) + + session1 := testkit.NewTestKit(t, store) + session1.MustExec("use test") + session2 := testkit.NewTestKit(t, store) + session2.MustExec("use test") + + session1.MustExec("drop table if exists x;") + session1.MustExec("create table x (id int primary key, c int);") + + session1.MustExec("set tidb_txn_mode = 'pessimistic'") + session2.MustExec("set tidb_txn_mode = 'pessimistic'") + + session1.MustExec("begin;") + 
session1.MustExec("insert into x select 1, 1") + session1.MustExec("delete from x where id = 1") + session2.MustExec("begin;") + var wg util.WaitGroupWrapper + wg.Run(func() { + session2.MustExec("insert into x select 1, 2") + }) + session1.MustExec("commit;") + wg.Wait() + session2.MustExec("commit;") + session2.MustQuery("select * from x").Check(testkit.Rows("1 2")) +} diff --git a/pkg/expression/exprstatic/evalctx_test.go b/pkg/expression/exprstatic/evalctx_test.go new file mode 100644 index 0000000000000..0a792e4ef3e87 --- /dev/null +++ b/pkg/expression/exprstatic/evalctx_test.go @@ -0,0 +1,670 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package exprstatic + +import ( + "fmt" + "strings" + "testing" + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/pkg/errctx" + "github.com/pingcap/tidb/pkg/expression/exprctx" + "github.com/pingcap/tidb/pkg/expression/expropt" + infoschema "github.com/pingcap/tidb/pkg/infoschema/context" + "github.com/pingcap/tidb/pkg/parser/auth" + "github.com/pingcap/tidb/pkg/parser/mysql" + "github.com/pingcap/tidb/pkg/sessionctx/variable" + "github.com/pingcap/tidb/pkg/types" + contextutil "github.com/pingcap/tidb/pkg/util/context" + "github.com/pingcap/tidb/pkg/util/deeptest" + "github.com/stretchr/testify/require" +) + +func TestNewStaticEvalCtx(t *testing.T) { + // default context + prevID := contextutil.GenContextID() + ctx := NewEvalContext() + require.Equal(t, prevID+1, ctx.CtxID()) + checkDefaultStaticEvalCtx(t, ctx) + + // with options + prevID = ctx.CtxID() + options, stateForTest := getEvalCtxOptionsForTest(t) + ctx = NewEvalContext(options...) 
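+ // Each construction is expected to allocate a fresh, monotonically
+ // increasing context ID, hence the prevID+1 assertions here and above.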
+ require.Equal(t, prevID+1, ctx.CtxID()) + checkOptionsStaticEvalCtx(t, ctx, stateForTest) +} + +func checkDefaultStaticEvalCtx(t *testing.T, ctx *EvalContext) { + mode, err := mysql.GetSQLMode(mysql.DefaultSQLMode) + require.NoError(t, err) + require.Equal(t, mode, ctx.SQLMode()) + require.Same(t, time.UTC, ctx.Location()) + require.Equal(t, types.NewContext(types.StrictFlags, time.UTC, ctx), ctx.TypeCtx()) + require.Equal(t, errctx.NewContextWithLevels(errctx.LevelMap{}, ctx), ctx.ErrCtx()) + require.Equal(t, "", ctx.CurrentDB()) + require.Equal(t, variable.DefMaxAllowedPacket, ctx.GetMaxAllowedPacket()) + require.Equal(t, variable.DefDefaultWeekFormat, ctx.GetDefaultWeekFormatMode()) + require.Equal(t, variable.DefDivPrecisionIncrement, ctx.GetDivPrecisionIncrement()) + require.Empty(t, ctx.AllParamValues()) + require.Equal(t, variable.NewUserVars(), ctx.GetUserVarsReader()) + require.True(t, ctx.GetOptionalPropSet().IsEmpty()) + p, ok := ctx.GetOptionalPropProvider(exprctx.OptPropAdvisoryLock) + require.Nil(t, p) + require.False(t, ok) + + tm, err := ctx.CurrentTime() + require.NoError(t, err) + require.Same(t, time.UTC, tm.Location()) + require.InDelta(t, time.Now().Unix(), tm.Unix(), 5) + + warnHandler, ok := ctx.warnHandler.(*contextutil.StaticWarnHandler) + require.True(t, ok) + require.Equal(t, 0, warnHandler.WarningCount()) +} + +type evalCtxOptionsTestState struct { + now time.Time + loc *time.Location + warnHandler *contextutil.StaticWarnHandler + userVars *variable.UserVars + ddlOwner bool +} + +func getEvalCtxOptionsForTest(t *testing.T) ([]EvalCtxOption, *evalCtxOptionsTestState) { + loc, err := time.LoadLocation("US/Eastern") + require.NoError(t, err) + s := &evalCtxOptionsTestState{ + now: time.Now(), + loc: loc, + warnHandler: contextutil.NewStaticWarnHandler(8), + userVars: variable.NewUserVars(), + } + + provider1 := expropt.CurrentUserPropProvider(func() (*auth.UserIdentity, []*auth.RoleIdentity) { + return &auth.UserIdentity{Username: "user1", Hostname: "host1"}, + []*auth.RoleIdentity{{Username: "role1", Hostname: "host2"}} + }) + + provider2 := expropt.DDLOwnerInfoProvider(func() bool { + return s.ddlOwner + }) + + return []EvalCtxOption{ + WithWarnHandler(s.warnHandler), + WithSQLMode(mysql.ModeNoZeroDate | mysql.ModeStrictTransTables), + WithTypeFlags(types.FlagAllowNegativeToUnsigned | types.FlagSkipASCIICheck), + WithErrLevelMap(errctx.LevelMap{ + errctx.ErrGroupBadNull: errctx.LevelError, + errctx.ErrGroupNoDefault: errctx.LevelError, + errctx.ErrGroupDividedByZero: errctx.LevelWarn, + }), + WithLocation(loc), + WithCurrentDB("db1"), + WithCurrentTime(func() (time.Time, error) { + return s.now, nil + }), + WithMaxAllowedPacket(12345), + WithDefaultWeekFormatMode("3"), + WithDivPrecisionIncrement(5), + WithUserVarsReader(s.userVars), + WithOptionalProperty(provider1, provider2), + }, s +} + +func checkOptionsStaticEvalCtx(t *testing.T, ctx *EvalContext, s *evalCtxOptionsTestState) { + require.Same(t, ctx.warnHandler, s.warnHandler) + require.Equal(t, mysql.ModeNoZeroDate|mysql.ModeStrictTransTables, ctx.SQLMode()) + require.Equal(t, + types.NewContext(types.FlagAllowNegativeToUnsigned|types.FlagSkipASCIICheck, s.loc, ctx), + ctx.TypeCtx(), + ) + require.Equal(t, errctx.NewContextWithLevels(errctx.LevelMap{ + errctx.ErrGroupBadNull: errctx.LevelError, + errctx.ErrGroupNoDefault: errctx.LevelError, + errctx.ErrGroupDividedByZero: errctx.LevelWarn, + }, ctx), ctx.ErrCtx()) + require.Same(t, s.loc, ctx.Location()) + require.Equal(t, "db1", ctx.CurrentDB()) + 
current, err := ctx.CurrentTime()
+ require.NoError(t, err)
+ require.Equal(t, current.UnixNano(), s.now.UnixNano())
+ require.Same(t, s.loc, current.Location())
+ require.Equal(t, uint64(12345), ctx.GetMaxAllowedPacket())
+ require.Equal(t, "3", ctx.GetDefaultWeekFormatMode())
+ require.Equal(t, 5, ctx.GetDivPrecisionIncrement())
+ require.Same(t, s.userVars, ctx.GetUserVarsReader())
+
+ var optSet exprctx.OptionalEvalPropKeySet
+ optSet = optSet.Add(exprctx.OptPropCurrentUser).Add(exprctx.OptPropDDLOwnerInfo)
+ require.Equal(t, optSet, ctx.GetOptionalPropSet())
+ p, ok := ctx.GetOptionalPropProvider(exprctx.OptPropCurrentUser)
+ require.True(t, ok)
+ user, roles := p.(expropt.CurrentUserPropProvider)()
+ require.Equal(t, &auth.UserIdentity{Username: "user1", Hostname: "host1"}, user)
+ require.Equal(t, []*auth.RoleIdentity{{Username: "role1", Hostname: "host2"}}, roles)
+ p, ok = ctx.GetOptionalPropProvider(exprctx.OptPropDDLOwnerInfo)
+ s.ddlOwner = true
+ require.True(t, ok)
+ require.True(t, p.(expropt.DDLOwnerInfoProvider)())
+ s.ddlOwner = false
+ require.False(t, p.(expropt.DDLOwnerInfoProvider)())
+ p, ok = ctx.GetOptionalPropProvider(exprctx.OptPropInfoSchema)
+ require.False(t, ok)
+ require.Nil(t, p)
+}
+
+func TestStaticEvalCtxCurrentTime(t *testing.T) {
+ loc1, err := time.LoadLocation("US/Eastern")
+ require.NoError(t, err)
+
+ tm := time.UnixMicro(123456789).In(loc1)
+ calls := 0
+ getTime := func() (time.Time, error) {
+ defer func() {
+ calls++
+ }()
+
+ if calls < 2 {
+ return time.Time{}, errors.NewNoStackError(fmt.Sprintf("err%d", calls))
+ }
+
+ if calls == 2 {
+ return tm, nil
+ }
+
+ return time.Time{}, errors.NewNoStackError("should not reach here")
+ }
+
+ ctx := NewEvalContext(WithCurrentTime(getTime))
+
+ // getting the time should fail for the first two calls
+ got, err := ctx.CurrentTime()
+ require.EqualError(t, err, "err0")
+ require.Equal(t, time.Time{}, got)
+
+ got, err = ctx.CurrentTime()
+ require.EqualError(t, err, "err1")
+ require.Equal(t, time.Time{}, got)
+
+ // the third call will succeed
+ got, err = ctx.CurrentTime()
+ require.Nil(t, err)
+ require.Equal(t, tm.UnixNano(), got.UnixNano())
+ require.Same(t, time.UTC, got.Location())
+ require.Equal(t, 3, calls)
+
+ // further calls should return the cached time without calling the inner function again
+ got, err = ctx.CurrentTime()
+ require.Nil(t, err)
+ require.Equal(t, tm.UnixNano(), got.UnixNano())
+ require.Same(t, time.UTC, got.Location())
+ require.Equal(t, 3, calls)
+
+ // CurrentTime should have the same location as `ctx.Location()`
+ loc2, err := time.LoadLocation("Australia/Sydney")
+ require.NoError(t, err)
+ ctx = NewEvalContext(
+ WithLocation(loc2),
+ WithCurrentTime(func() (time.Time, error) {
+ return tm, nil
+ }),
+ )
+ got, err = ctx.CurrentTime()
+ require.NoError(t, err)
+ require.Equal(t, tm.UnixNano(), got.UnixNano())
+ require.Same(t, loc2, got.Location())
+
+ // Apply should copy the current time
+ ctx2 := ctx.Apply()
+ got, err = ctx2.CurrentTime()
+ require.NoError(t, err)
+ require.Equal(t, tm.UnixNano(), got.UnixNano())
+ require.Same(t, loc2, got.Location())
+
+ // Apply with location should change current time's location
+ ctx2 = ctx.Apply(WithLocation(loc1))
+ got, err = ctx2.CurrentTime()
+ require.NoError(t, err)
+ require.Equal(t, tm.UnixNano(), got.UnixNano())
+ require.Same(t, loc1, got.Location())
+
+ // Apply will not affect previous current time
+ got, err = ctx.CurrentTime()
+ require.NoError(t, err)
+ require.Equal(t, tm.UnixNano(), got.UnixNano())
+ require.Same(t, loc2,
got.Location())
+
+ // Apply with a different current time func
+ ctx2 = ctx.Apply(WithCurrentTime(func() (time.Time, error) {
+ return time.UnixMicro(987654321), nil
+ }))
+ got, err = ctx2.CurrentTime()
+ require.NoError(t, err)
+ require.Equal(t, int64(987654321), got.UnixMicro())
+ require.Same(t, loc2, got.Location())
+
+ // Apply will not affect previous current time
+ got, err = ctx.CurrentTime()
+ require.NoError(t, err)
+ require.Equal(t, tm.UnixNano(), got.UnixNano())
+ require.Same(t, loc2, got.Location())
+}
+
+func TestStaticEvalCtxWarnings(t *testing.T) {
+ // the default context should have an empty StaticWarnHandler
+ ctx := NewEvalContext()
+ h, ok := ctx.warnHandler.(*contextutil.StaticWarnHandler)
+ require.True(t, ok)
+ require.Equal(t, 0, h.WarningCount())
+
+ // WithWarnHandler should work
+ ignoreHandler := contextutil.IgnoreWarn
+ ctx = NewEvalContext(WithWarnHandler(ignoreHandler))
+ require.True(t, ctx.warnHandler == ignoreHandler)
+
+ // the eval context and its type/error contexts should all share the same warning handler
+ h = contextutil.NewStaticWarnHandler(8)
+ ctx = NewEvalContext(WithWarnHandler(h))
+ tc, ec := ctx.TypeCtx(), ctx.ErrCtx()
+ h.AppendWarning(errors.NewNoStackError("warn0"))
+ ctx.AppendWarning(errors.NewNoStackError("warn1"))
+ ctx.AppendNote(errors.NewNoStackError("note1"))
+ tc.AppendWarning(errors.NewNoStackError("warn2"))
+ ec.AppendWarning(errors.NewNoStackError("warn3"))
+ require.Equal(t, 5, h.WarningCount())
+ require.Equal(t, h.WarningCount(), ctx.WarningCount())
+
+ // ctx.CopyWarnings
+ warnings := ctx.CopyWarnings(nil)
+ require.Equal(t, []contextutil.SQLWarn{
+ {Level: contextutil.WarnLevelWarning, Err: errors.NewNoStackError("warn0")},
+ {Level: contextutil.WarnLevelWarning, Err: errors.NewNoStackError("warn1")},
+ {Level: contextutil.WarnLevelNote, Err: errors.NewNoStackError("note1")},
+ {Level: contextutil.WarnLevelWarning, Err: errors.NewNoStackError("warn2")},
+ {Level: contextutil.WarnLevelWarning, Err: errors.NewNoStackError("warn3")},
+ }, warnings)
+ require.Equal(t, 5, h.WarningCount())
+ require.Equal(t, h.WarningCount(), ctx.WarningCount())
+
+ // ctx.TruncateWarnings
+ warnings = ctx.TruncateWarnings(2)
+ require.Equal(t, []contextutil.SQLWarn{
+ {Level: contextutil.WarnLevelNote, Err: errors.NewNoStackError("note1")},
+ {Level: contextutil.WarnLevelWarning, Err: errors.NewNoStackError("warn2")},
+ {Level: contextutil.WarnLevelWarning, Err: errors.NewNoStackError("warn3")},
+ }, warnings)
+ require.Equal(t, 2, h.WarningCount())
+ require.Equal(t, h.WarningCount(), ctx.WarningCount())
+ warnings = ctx.CopyWarnings(nil)
+ require.Equal(t, []contextutil.SQLWarn{
+ {Level: contextutil.WarnLevelWarning, Err: errors.NewNoStackError("warn0")},
+ {Level: contextutil.WarnLevelWarning, Err: errors.NewNoStackError("warn1")},
+ }, warnings)
+
+ // Apply should use the old warning handler by default
+ ctx2 := ctx.Apply()
+ require.NotSame(t, ctx, ctx2)
+ require.True(t, ctx.warnHandler == ctx2.warnHandler)
+ require.True(t, ctx.warnHandler == h)
+
+ // Apply with `WithWarnHandler`
+ h2 := contextutil.NewStaticWarnHandler(16)
+ ctx2 = ctx.Apply(WithWarnHandler(h2))
+ require.True(t, ctx2.warnHandler == h2)
+ require.True(t, ctx.warnHandler == h)
+
+ // The type contexts and error contexts should use their corresponding handlers.
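+ // drop the warnings collected so far so the assertions below only see newly appended ones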
+ ctx.TruncateWarnings(0)
+ tc, ec = ctx.TypeCtx(), ctx.ErrCtx()
+ tc2, ec2 := ctx2.TypeCtx(), ctx2.ErrCtx()
+ tc2.AppendWarning(errors.NewNoStackError("warn4"))
+ ec2.AppendWarning(errors.NewNoStackError("warn5"))
+ tc.AppendWarning(errors.NewNoStackError("warn6"))
+ ec.AppendWarning(errors.NewNoStackError("warn7"))
+ require.Equal(t, []contextutil.SQLWarn{
+ {Level: contextutil.WarnLevelWarning, Err: errors.NewNoStackError("warn4")},
+ {Level: contextutil.WarnLevelWarning, Err: errors.NewNoStackError("warn5")},
+ }, ctx2.CopyWarnings(nil))
+ require.Equal(t, []contextutil.SQLWarn{
+ {Level: contextutil.WarnLevelWarning, Err: errors.NewNoStackError("warn6")},
+ {Level: contextutil.WarnLevelWarning, Err: errors.NewNoStackError("warn7")},
+ }, ctx.CopyWarnings(nil))
+}
+
+func TestStaticEvalContextOptionalProps(t *testing.T) {
+ ctx := NewEvalContext()
+ require.True(t, ctx.GetOptionalPropSet().IsEmpty())
+
+ ctx2 := ctx.Apply(WithOptionalProperty(
+ expropt.CurrentUserPropProvider(func() (u *auth.UserIdentity, r []*auth.RoleIdentity) { return }),
+ ))
+ var emptySet exprctx.OptionalEvalPropKeySet
+ require.Equal(t, emptySet, ctx.GetOptionalPropSet())
+ require.Equal(t, emptySet.Add(exprctx.OptPropCurrentUser), ctx2.GetOptionalPropSet())
+
+ // Apply should override all optional properties
+ ctx3 := ctx2.Apply(WithOptionalProperty(
+ expropt.DDLOwnerInfoProvider(func() bool { return true }),
+ expropt.InfoSchemaPropProvider(func(isDomain bool) infoschema.MetaOnlyInfoSchema { return nil }),
+ ))
+ require.Equal(t,
+ emptySet.Add(exprctx.OptPropDDLOwnerInfo).Add(exprctx.OptPropInfoSchema),
+ ctx3.GetOptionalPropSet(),
+ )
+ require.Equal(t, emptySet, ctx.GetOptionalPropSet())
+ require.Equal(t, emptySet.Add(exprctx.OptPropCurrentUser), ctx2.GetOptionalPropSet())
+}
+
+func TestUpdateStaticEvalContext(t *testing.T) {
+ oldCtx := NewEvalContext()
+ ctx := oldCtx.Apply()
+
+ // Should return a different instance
+ require.NotSame(t, oldCtx, ctx)
+
+ // CtxID should be different
+ require.Greater(t, ctx.CtxID(), oldCtx.CtxID())
+
+ // the inner state should not share the same address
+ require.NotSame(t, &oldCtx.evalCtxState, &ctx.evalCtxState)
+
+ // compare a state object by excluding some changed fields
+ excludeChangedFields := func(s *evalCtxState) evalCtxState {
+ state := *s
+ state.typeCtx = types.DefaultStmtNoWarningContext
+ state.errCtx = errctx.StrictNoWarningContext
+ state.currentTime = nil
+ return state
+ }
+ require.Equal(t, excludeChangedFields(&oldCtx.evalCtxState), excludeChangedFields(&ctx.evalCtxState))
+
+ // check fields
+ checkDefaultStaticEvalCtx(t, ctx)
+
+ // apply options
+ opts, optState := getEvalCtxOptionsForTest(t)
+ ctx2 := oldCtx.Apply(opts...)
+ require.Greater(t, ctx2.CtxID(), ctx.CtxID())
+ checkOptionsStaticEvalCtx(t, ctx2, optState)
+
+ // the old ctx isn't affected
+ checkDefaultStaticEvalCtx(t, oldCtx)
+
+ // create with options
+ opts, optState = getEvalCtxOptionsForTest(t)
+ ctx3 := NewEvalContext(opts...)
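+ // a context created directly with options should still get a larger context ID than earlier contexts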
+ require.Greater(t, ctx3.CtxID(), ctx2.CtxID())
+ checkOptionsStaticEvalCtx(t, ctx3, optState)
+}
+
+func TestParamList(t *testing.T) {
+ paramList := variable.NewPlanCacheParamList()
+ paramList.Append(types.NewDatum(1))
+ paramList.Append(types.NewDatum(2))
+ paramList.Append(types.NewDatum(3))
+ ctx := NewEvalContext(
+ WithParamList(paramList),
+ )
+ for i := 0; i < 3; i++ {
+ val, err := ctx.GetParamValue(i)
+ require.NoError(t, err)
+ require.Equal(t, int64(i+1), val.GetInt64())
+ }
+
+ // after the paramList is reset and a new value appended, the original values are still persisted
+ paramList.Reset()
+ paramList.Append(types.NewDatum(4))
+ for i := 0; i < 3; i++ {
+ val, err := ctx.GetParamValue(i)
+ require.NoError(t, err)
+ require.Equal(t, int64(i+1), val.GetInt64())
+ }
+}
+
+func TestMakeEvalContextStatic(t *testing.T) {
+ // This test is to ensure that the `MakeEvalContextStatic` function works as expected.
+ // It requires the developer to create a special `EvalContext` whose fields are all
+ // non-empty. Then, the `MakeEvalContextStatic` function is called to create a new
+ // clone of it. Finally, the new clone is compared with the original one to ensure that
+ // the fields are copied correctly.
+ paramList := variable.NewPlanCacheParamList()
+ paramList.Append(types.NewDatum(1))
+
+ userVars := variable.NewUserVars()
+ userVars.SetUserVarVal("a", types.NewStringDatum("v1"))
+ userVars.SetUserVarVal("b", types.NewIntDatum(2))
+
+ provider := expropt.DDLOwnerInfoProvider(func() bool {
+ return true
+ })
+
+ obj := NewEvalContext(
+ WithWarnHandler(contextutil.NewStaticWarnHandler(16)),
+ WithSQLMode(mysql.ModeNoZeroDate|mysql.ModeStrictTransTables),
+ WithTypeFlags(types.FlagAllowNegativeToUnsigned|types.FlagSkipASCIICheck),
+ WithErrLevelMap(errctx.LevelMap{}),
+ WithLocation(time.UTC),
+ WithCurrentDB("db1"),
+ WithCurrentTime(func() (time.Time, error) {
+ return time.Now(), nil
+ }),
+ WithMaxAllowedPacket(12345),
+ WithDefaultWeekFormatMode("3"),
+ WithDivPrecisionIncrement(5),
+ WithParamList(paramList),
+ WithUserVarsReader(userVars),
+ WithOptionalProperty(provider),
+ WithEnableRedactLog("test"),
+ )
+ obj.AppendWarning(errors.New("test warning"))
+
+ ignorePath := []string{
+ "$.evalCtxState.warnHandler.**",
+ "$.evalCtxState.typeCtx.**",
+ "$.evalCtxState.errCtx.**",
+ "$.evalCtxState.currentTime.**",
+ "$.evalCtxState.userVars.lock",
+ "$.evalCtxState.props",
+ "$.id",
+ }
+ deeptest.AssertRecursivelyNotEqual(t, obj, NewEvalContext(),
+ deeptest.WithIgnorePath(ignorePath),
+ )
+
+ staticObj := MakeEvalContextStatic(obj)
+
+ deeptest.AssertDeepClonedEqual(t, obj, staticObj,
+ deeptest.WithIgnorePath(ignorePath),
+ deeptest.WithPointerComparePath([]string{
+ "$.evalCtxState.warnHandler",
+ "$.evalCtxState.paramList*.b",
+ }),
+ )
+
+ require.Equal(t, obj.GetWarnHandler(), staticObj.GetWarnHandler())
+ require.Equal(t, obj.typeCtx.Flags(), staticObj.typeCtx.Flags())
+ require.Equal(t, obj.errCtx.LevelMap(), staticObj.errCtx.LevelMap())
+
+ oldT, err := obj.CurrentTime()
+ require.NoError(t, err)
+ newT, err := staticObj.CurrentTime()
+ require.NoError(t, err)
+ require.Equal(t, oldT.Unix(), newT.Unix())
+
+ require.NotEqual(t, obj.GetOptionalPropSet(), staticObj.GetOptionalPropSet())
+ // For now, it does not copy any optional properties.
+ require.Equal(t, exprctx.OptionalEvalPropKeySet(0), staticObj.GetOptionalPropSet())
+}
+
+func TestEvalCtxLoadSystemVars(t *testing.T) {
+ vars := []struct {
+ name string
+ val string
+ field string
+ assert func(ctx *EvalContext, vars *variable.SessionVars)
+ }{
+ {
+ name: "time_zone",
+ val: "Europe/Berlin",
+ field: "$.typeCtx.loc",
+ assert: func(ctx *EvalContext, vars *variable.SessionVars) {
+ require.Equal(t, "Europe/Berlin", ctx.Location().String())
+ require.Equal(t, vars.Location().String(), ctx.Location().String())
+ },
+ },
+ {
+ name: "sql_mode",
+ val: "ALLOW_INVALID_DATES,ONLY_FULL_GROUP_BY",
+ field: "$.sqlMode",
+ assert: func(ctx *EvalContext, vars *variable.SessionVars) {
+ require.Equal(t, mysql.ModeAllowInvalidDates|mysql.ModeOnlyFullGroupBy, ctx.SQLMode())
+ require.Equal(t, vars.SQLMode, ctx.SQLMode())
+ },
+ },
+ {
+ name: "timestamp",
+ val: "1234567890.123456",
+ field: "$.currentTime",
+ assert: func(ctx *EvalContext, vars *variable.SessionVars) {
+ currentTime, err := ctx.CurrentTime()
+ require.NoError(t, err)
+ require.Equal(t, int64(1234567890123456), currentTime.UnixMicro())
+ require.Equal(t, vars.Location().String(), currentTime.Location().String())
+ },
+ },
+ {
+ name: strings.ToUpper("max_allowed_packet"), // test for setting an upper-case variable
+ val: "524288",
+ field: "$.maxAllowedPacket",
+ assert: func(ctx *EvalContext, vars *variable.SessionVars) {
+ require.Equal(t, uint64(524288), ctx.GetMaxAllowedPacket())
+ require.Equal(t, vars.MaxAllowedPacket, ctx.GetMaxAllowedPacket())
+ },
+ },
+ {
+ name: strings.ToUpper("tidb_redact_log"), // test for setting an upper-case variable
+ val: "on",
+ field: "$.enableRedactLog",
+ assert: func(ctx *EvalContext, vars *variable.SessionVars) {
+ require.Equal(t, "ON", ctx.GetTiDBRedactLog())
+ require.Equal(t, vars.EnableRedactLog, ctx.GetTiDBRedactLog())
+ },
+ },
+ {
+ name: "default_week_format",
+ val: "5",
+ field: "$.defaultWeekFormatMode",
+ assert: func(ctx *EvalContext, vars *variable.SessionVars) {
+ require.Equal(t, "5", ctx.GetDefaultWeekFormatMode())
+ mode, ok := vars.GetSystemVar(variable.DefaultWeekFormat)
+ require.True(t, ok)
+ require.Equal(t, mode, ctx.GetDefaultWeekFormatMode())
+ },
+ },
+ {
+ name: "div_precision_increment",
+ val: "12",
+ field: "$.divPrecisionIncrement",
+ assert: func(ctx *EvalContext, vars *variable.SessionVars) {
+ require.Equal(t, 12, ctx.GetDivPrecisionIncrement())
+ require.Equal(t, vars.DivPrecisionIncrement, ctx.GetDivPrecisionIncrement())
+ },
+ },
+ }
+
+ // nonVarRelatedFields means the fields not related to any system variables.
+ // To make sure that all the variables which affect the context state are covered in the above test list,
+ // we check that every inner field except those in `nonVarRelatedFields` changes after `LoadSystemVars`.
+ nonVarRelatedFields := []string{
+ "$.warnHandler",
+ "$.typeCtx.flags",
+ "$.typeCtx.warnHandler",
+ "$.errCtx",
+ "$.currentDB",
+ "$.paramList",
+ "$.userVars",
+ "$.props",
+ }
+
+ // varsRelatedFields means the fields related to the system variables in the above test list.
+ varsRelatedFields := make([]string, 0, len(vars))
+ varsMap := make(map[string]string)
+ sessionVars := variable.NewSessionVars(nil)
+ for _, sysVar := range vars {
+ varsMap[sysVar.name] = sysVar.val
+ if sysVar.field != "" {
+ varsRelatedFields = append(varsRelatedFields, sysVar.field)
+ }
+ require.NoError(t, sessionVars.SetSystemVar(sysVar.name, sysVar.val))
+ }
+
+ defaultEvalCtx := NewEvalContext()
+ ctx, err := defaultEvalCtx.LoadSystemVars(varsMap)
+ require.NoError(t, err)
+ require.Greater(t, ctx.CtxID(), defaultEvalCtx.CtxID())
+
+ // Check all fields except those in `nonVarRelatedFields` are changed after `LoadSystemVars` to make sure
+ // all system-variable-related fields are covered in the test list.
+ deeptest.AssertRecursivelyNotEqual(
+ t,
+ defaultEvalCtx.evalCtxState,
+ ctx.evalCtxState,
+ deeptest.WithIgnorePath(nonVarRelatedFields),
+ deeptest.WithPointerComparePath([]string{"$.currentTime"}),
+ )
+
+ // We need to compare the new context again with an empty one to make sure those values are set from sys vars,
+ // not inherited from the empty go value.
+ deeptest.AssertRecursivelyNotEqual(
+ t,
+ evalCtxState{},
+ ctx.evalCtxState,
+ deeptest.WithIgnorePath(nonVarRelatedFields),
+ deeptest.WithPointerComparePath([]string{"$.currentTime"}),
+ )
+
+ // Check that all fields unrelated to system vars are not changed after `LoadSystemVars`.
+ deeptest.AssertDeepClonedEqual(
+ t,
+ defaultEvalCtx.evalCtxState,
+ ctx.evalCtxState,
+ deeptest.WithIgnorePath(append(
+ varsRelatedFields,
+ // Do not check warnHandler in `typeCtx` and `errCtx` because they are changed even if
+ // they are not related to any system variable.
+ "$.typeCtx.warnHandler",
+ "$.errCtx.warnHandler",
+ )),
+ // LoadSystemVars only does a shallow copy for `EvalContext`, so we just need to compare the pointers.
+ deeptest.WithPointerComparePath(nonVarRelatedFields),
+ )
+
+ for _, sysVar := range vars {
+ sysVar.assert(ctx, sessionVars)
+ }
+
+ // additional check about @@timestamp:
+ // setting it to `variable.DefTimestamp` should return the current timestamp
+ ctx, err = defaultEvalCtx.LoadSystemVars(map[string]string{
+ "timestamp": variable.DefTimestamp,
+ })
+ require.NoError(t, err)
+ tm, err := ctx.CurrentTime()
+ require.NoError(t, err)
+ require.InDelta(t, time.Now().Unix(), tm.Unix(), 5)
+}
diff --git a/pkg/expression/sessionexpr/sessionctx_test.go b/pkg/expression/sessionexpr/sessionctx_test.go
new file mode 100644
index 0000000000000..d1ef9d7c64402
--- /dev/null
+++ b/pkg/expression/sessionexpr/sessionctx_test.go
@@ -0,0 +1,333 @@
+// Copyright 2024 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package sessionexpr_test + +import ( + "sync/atomic" + "testing" + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/pkg/errctx" + "github.com/pingcap/tidb/pkg/expression/exprctx" + "github.com/pingcap/tidb/pkg/expression/expropt" + "github.com/pingcap/tidb/pkg/expression/sessionexpr" + "github.com/pingcap/tidb/pkg/parser/auth" + "github.com/pingcap/tidb/pkg/parser/mysql" + "github.com/pingcap/tidb/pkg/privilege" + "github.com/pingcap/tidb/pkg/types" + contextutil "github.com/pingcap/tidb/pkg/util/context" + "github.com/pingcap/tidb/pkg/util/mathutil" + "github.com/pingcap/tidb/pkg/util/mock" + tmock "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/tikv/client-go/v2/oracle" +) + +func TestSessionEvalContextBasic(t *testing.T) { + ctx := mock.NewContext() + vars := ctx.GetSessionVars() + sc := vars.StmtCtx + impl := sessionexpr.NewEvalContext(ctx) + require.True(t, impl.GetOptionalPropSet().IsFull()) + + // should contain all the optional properties + for i := 0; i < exprctx.OptPropsCnt; i++ { + provider, ok := impl.GetOptionalPropProvider(exprctx.OptionalEvalPropKey(i)) + require.True(t, ok) + require.NotNil(t, provider) + require.Same(t, exprctx.OptionalEvalPropKey(i).Desc(), provider.Desc()) + } + + ctx.ResetSessionAndStmtTimeZone(time.FixedZone("UTC+11", 11*3600)) + vars.SQLMode = mysql.ModeStrictTransTables | mysql.ModeNoZeroDate + sc.SetTypeFlags(types.FlagIgnoreInvalidDateErr | types.FlagSkipUTF8Check) + sc.SetErrLevels(errctx.LevelMap{ + errctx.ErrGroupDupKey: errctx.LevelWarn, + errctx.ErrGroupBadNull: errctx.LevelIgnore, + errctx.ErrGroupNoDefault: errctx.LevelIgnore, + }) + vars.CurrentDB = "db1" + vars.MaxAllowedPacket = 123456 + + // basic fields + tc, ec := impl.TypeCtx(), sc.ErrCtx() + require.Equal(t, tc, sc.TypeCtx()) + require.Equal(t, ec, impl.ErrCtx()) + require.Equal(t, vars.SQLMode, impl.SQLMode()) + require.Same(t, vars.Location(), impl.Location()) + require.Same(t, sc.TimeZone(), impl.Location()) + require.Same(t, tc.Location(), impl.Location()) + require.Equal(t, "db1", impl.CurrentDB()) + require.Equal(t, uint64(123456), impl.GetMaxAllowedPacket()) + require.Equal(t, "0", impl.GetDefaultWeekFormatMode()) + require.NoError(t, ctx.GetSessionVars().SetSystemVar("default_week_format", "5")) + require.Equal(t, "5", impl.GetDefaultWeekFormatMode()) + require.Same(t, vars.UserVars, impl.GetUserVarsReader()) + + // handle warnings + require.Equal(t, 0, impl.WarningCount()) + impl.AppendWarning(errors.New("err1")) + require.Equal(t, 1, impl.WarningCount()) + tc.AppendWarning(errors.New("err2")) + require.Equal(t, 2, impl.WarningCount()) + ec.AppendWarning(errors.New("err3")) + require.Equal(t, 3, impl.WarningCount()) + + for _, dst := range [][]contextutil.SQLWarn{ + nil, + make([]contextutil.SQLWarn, 1), + make([]contextutil.SQLWarn, 3), + make([]contextutil.SQLWarn, 0, 3), + } { + warnings := impl.CopyWarnings(dst) + require.Equal(t, 3, len(warnings)) + require.Equal(t, contextutil.WarnLevelWarning, warnings[0].Level) + require.Equal(t, contextutil.WarnLevelWarning, warnings[1].Level) + require.Equal(t, contextutil.WarnLevelWarning, warnings[2].Level) + require.Equal(t, "err1", warnings[0].Err.Error()) + require.Equal(t, "err2", warnings[1].Err.Error()) + require.Equal(t, "err3", warnings[2].Err.Error()) + } + + warnings := impl.TruncateWarnings(1) + require.Equal(t, 2, len(warnings)) + require.Equal(t, contextutil.WarnLevelWarning, warnings[0].Level) + require.Equal(t, contextutil.WarnLevelWarning, 
warnings[1].Level)
+ require.Equal(t, "err2", warnings[0].Err.Error())
+ require.Equal(t, "err3", warnings[1].Err.Error())
+
+ warnings = impl.TruncateWarnings(0)
+ require.Equal(t, 1, len(warnings))
+ require.Equal(t, contextutil.WarnLevelWarning, warnings[0].Level)
+ require.Equal(t, "err1", warnings[0].Err.Error())
+}
+
+func TestSessionEvalContextCurrentTime(t *testing.T) {
+ ctx := mock.NewContext()
+ vars := ctx.GetSessionVars()
+ sc := vars.StmtCtx
+ impl := sessionexpr.NewEvalContext(ctx)
+
+ var now atomic.Pointer[time.Time]
+ sc.SetStaleTSOProvider(func() (uint64, error) {
+ v := time.UnixMilli(123456789)
+ // should only be called once
+ require.True(t, now.CompareAndSwap(nil, &v))
+ return oracle.GoTimeToTS(v), nil
+ })
+
+ // now should return the stale TSO if set
+ tm, err := impl.CurrentTime()
+ require.NoError(t, err)
+ v := now.Load()
+ require.NotNil(t, v)
+ require.Equal(t, v.UnixNano(), tm.UnixNano())
+
+ // The second call should return the same value
+ tm, err = impl.CurrentTime()
+ require.NoError(t, err)
+ require.Equal(t, v.UnixNano(), tm.UnixNano())
+
+ // now should return the system variable if "timestamp" is set
+ sc.SetStaleTSOProvider(nil)
+ sc.Reset()
+ require.NoError(t, vars.SetSystemVar("timestamp", "7654321.875"))
+ tm, err = impl.CurrentTime()
+ require.NoError(t, err)
+ require.Equal(t, int64(7654321_875_000_000), tm.UnixNano())
+
+ // The second call should return the same value
+ tm, err = impl.CurrentTime()
+ require.NoError(t, err)
+ require.Equal(t, int64(7654321_875_000_000), tm.UnixNano())
+
+ // now should return the current system time if neither the stale TSO nor "timestamp" is set
+ require.NoError(t, vars.SetSystemVar("timestamp", "0"))
+ sc.Reset()
+ tm, err = impl.CurrentTime()
+ require.NoError(t, err)
+ require.InDelta(t, time.Now().Unix(), tm.Unix(), 5)
+
+ // The second call should return the same value
+ tm2, err := impl.CurrentTime()
+ require.NoError(t, err)
+ require.Equal(t, tm.UnixNano(), tm2.UnixNano())
+}
+
+type mockPrivManager struct {
+ tmock.Mock
+ privilege.Manager
+}
+
+func (m *mockPrivManager) RequestVerification(
+ activeRole []*auth.RoleIdentity, db, table, column string, priv mysql.PrivilegeType,
+) bool {
+ return m.Called(activeRole, db, table, column, priv).Bool(0)
+}
+
+func (m *mockPrivManager) RequestDynamicVerification(
+ activeRoles []*auth.RoleIdentity, privName string, grantable bool,
+) bool {
+ return m.Called(activeRoles, privName, grantable).Bool(0)
+}
+
+func TestSessionEvalContextPrivilegeCheck(t *testing.T) {
+ ctx := mock.NewContext()
+ impl := sessionexpr.NewEvalContext(ctx)
+ activeRoles := []*auth.RoleIdentity{
+ {Username: "role1", Hostname: "host1"},
+ {Username: "role2", Hostname: "host2"},
+ }
+ ctx.GetSessionVars().ActiveRoles = activeRoles
+
+ // without a privilege manager, privilege checks should always return true
+ privilege.BindPrivilegeManager(ctx, nil)
+ require.True(t, impl.RequestVerification("test", "tbl1", "col1", mysql.SuperPriv))
+ require.True(t, impl.RequestDynamicVerification("RESTRICTED_TABLES_ADMIN", true))
+ require.True(t, impl.RequestDynamicVerification("RESTRICTED_TABLES_ADMIN", false))
+
+ // if a privilege manager is bound, checks should return the privilege manager's result
+ mgr := &mockPrivManager{}
+ privilege.BindPrivilegeManager(ctx, mgr)
+ mgr.On("RequestVerification", activeRoles, "db1", "t1", "c1", mysql.CreatePriv).
+ Return(true).Once() + require.True(t, impl.RequestVerification("db1", "t1", "c1", mysql.CreatePriv)) + mgr.AssertExpectations(t) + + mgr.On("RequestVerification", activeRoles, "db2", "t2", "c2", mysql.SuperPriv). + Return(false).Once() + require.False(t, impl.RequestVerification("db2", "t2", "c2", mysql.SuperPriv)) + mgr.AssertExpectations(t) + + mgr.On("RequestDynamicVerification", activeRoles, "RESTRICTED_USER_ADMIN", false). + Return(true).Once() + require.True(t, impl.RequestDynamicVerification("RESTRICTED_USER_ADMIN", false)) + + mgr.On("RequestDynamicVerification", activeRoles, "RESTRICTED_CONNECTION_ADMIN", true). + Return(false).Once() + require.False(t, impl.RequestDynamicVerification("RESTRICTED_CONNECTION_ADMIN", true)) +} + +func getProvider[T exprctx.OptionalEvalPropProvider]( + t *testing.T, + impl *sessionexpr.EvalContext, + key exprctx.OptionalEvalPropKey, +) T { + val, ok := impl.GetOptionalPropProvider(key) + require.True(t, ok) + p, ok := val.(T) + require.True(t, ok) + require.Equal(t, key, p.Desc().Key()) + return p +} + +func TestSessionEvalContextOptProps(t *testing.T) { + ctx := mock.NewContext() + impl := sessionexpr.NewEvalContext(ctx) + + // test for OptPropCurrentUser + ctx.GetSessionVars().User = &auth.UserIdentity{Username: "user1", Hostname: "host1"} + ctx.GetSessionVars().ActiveRoles = []*auth.RoleIdentity{ + {Username: "role1", Hostname: "host1"}, + {Username: "role2", Hostname: "host2"}, + } + user, roles := getProvider[expropt.CurrentUserPropProvider](t, impl, exprctx.OptPropCurrentUser)() + require.Equal(t, ctx.GetSessionVars().User, user) + require.Equal(t, ctx.GetSessionVars().ActiveRoles, roles) + + // test for OptPropSessionVars + sessVarsProvider := getProvider[*expropt.SessionVarsPropProvider](t, impl, exprctx.OptPropSessionVars) + require.NotNil(t, sessVarsProvider) + gotVars, err := expropt.SessionVarsPropReader{}.GetSessionVars(impl) + require.NoError(t, err) + require.Same(t, ctx.GetSessionVars(), gotVars) + + // test for OptPropAdvisoryLock + lockProvider := getProvider[*expropt.AdvisoryLockPropProvider](t, impl, exprctx.OptPropAdvisoryLock) + gotCtx, ok := lockProvider.AdvisoryLockContext.(*mock.Context) + require.True(t, ok) + require.Same(t, ctx, gotCtx) + + // test for OptPropDDLOwnerInfo + ddlInfoProvider := getProvider[expropt.DDLOwnerInfoProvider](t, impl, exprctx.OptPropDDLOwnerInfo) + require.False(t, ddlInfoProvider()) + ctx.SetIsDDLOwner(true) + require.True(t, ddlInfoProvider()) + + // test for OptPropPrivilegeChecker + privCheckerProvider := getProvider[expropt.PrivilegeCheckerProvider](t, impl, exprctx.OptPropPrivilegeChecker) + privChecker := privCheckerProvider() + require.NotNil(t, privChecker) + require.Same(t, impl, privChecker) +} + +func TestSessionBuildContext(t *testing.T) { + ctx := mock.NewContext() + impl := sessionexpr.NewExprContext(ctx) + evalCtx, ok := impl.GetEvalCtx().(*sessionexpr.EvalContext) + require.True(t, ok) + require.Same(t, evalCtx, impl.EvalContext) + require.True(t, evalCtx.GetOptionalPropSet().IsFull()) + require.Same(t, ctx, evalCtx.Sctx()) + + // charset and collation + vars := ctx.GetSessionVars() + err := vars.SetSystemVar("character_set_connection", "gbk") + require.NoError(t, err) + err = vars.SetSystemVar("collation_connection", "gbk_chinese_ci") + require.NoError(t, err) + vars.DefaultCollationForUTF8MB4 = "utf8mb4_0900_ai_ci" + + charset, collate := impl.GetCharsetInfo() + require.Equal(t, "gbk", charset) + require.Equal(t, "gbk_chinese_ci", collate) + require.Equal(t, 
"utf8mb4_0900_ai_ci", impl.GetDefaultCollationForUTF8MB4()) + + // SysdateIsNow + vars.SysdateIsNow = true + require.True(t, impl.GetSysdateIsNow()) + + // NoopFuncsMode + vars.NoopFuncsMode = 2 + require.Equal(t, 2, impl.GetNoopFuncsMode()) + + // Rng + vars.Rng = mathutil.NewWithSeed(123) + require.Same(t, vars.Rng, impl.Rng()) + + // PlanCache + vars.StmtCtx.EnablePlanCache() + require.True(t, impl.IsUseCache()) + impl.SetSkipPlanCache("mockReason") + require.False(t, impl.IsUseCache()) + + // Alloc column id + prevID := vars.PlanColumnID.Load() + colID := impl.AllocPlanColumnID() + require.Equal(t, colID, prevID+1) + colID = impl.AllocPlanColumnID() + require.Equal(t, colID, prevID+2) + vars.AllocPlanColumnID() + colID = impl.AllocPlanColumnID() + require.Equal(t, colID, prevID+4) + + // InNullRejectCheck + require.False(t, impl.IsInNullRejectCheck()) + + // ConnID + vars.ConnectionID = 123 + require.Equal(t, uint64(123), impl.ConnectionID()) +} diff --git a/pkg/lightning/backend/kv/context.go b/pkg/lightning/backend/kv/context.go new file mode 100644 index 0000000000000..14c1963723c21 --- /dev/null +++ b/pkg/lightning/backend/kv/context.go @@ -0,0 +1,255 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kv + +import ( + "maps" + "math/rand" + "sync" + "time" + + "github.com/pingcap/tidb/pkg/errctx" + "github.com/pingcap/tidb/pkg/expression/exprctx" + "github.com/pingcap/tidb/pkg/expression/exprstatic" + "github.com/pingcap/tidb/pkg/meta/autoid" + "github.com/pingcap/tidb/pkg/meta/model" + "github.com/pingcap/tidb/pkg/parser/mysql" + "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" + "github.com/pingcap/tidb/pkg/sessionctx/variable" + "github.com/pingcap/tidb/pkg/table" + "github.com/pingcap/tidb/pkg/table/tblctx" + "github.com/pingcap/tidb/pkg/types" + contextutil "github.com/pingcap/tidb/pkg/util/context" + "github.com/pingcap/tidb/pkg/util/intest" + "github.com/pingcap/tidb/pkg/util/timeutil" +) + +var _ exprctx.ExprContext = &litExprContext{} + +// litExprContext implements the `exprctx.ExprContext` interface for lightning import. +// It provides the context to build and evaluate expressions, furthermore, it allows to set user variables +// for `IMPORT INTO ...` statements. +type litExprContext struct { + *exprstatic.ExprContext + userVars *variable.UserVars +} + +// NewExpressionContext creates a new `*ExprContext` for lightning import. +func newLitExprContext(sqlMode mysql.SQLMode, sysVars map[string]string, timestamp int64) (*litExprContext, error) { + flags := types.DefaultStmtFlags. + WithTruncateAsWarning(!sqlMode.HasStrictMode()). + WithIgnoreInvalidDateErr(sqlMode.HasAllowInvalidDatesMode()). 
+ WithIgnoreZeroInDate(!sqlMode.HasStrictMode() || sqlMode.HasAllowInvalidDatesMode() ||
+ !sqlMode.HasNoZeroInDateMode() || !sqlMode.HasNoZeroDateMode())
+
+ errLevels := stmtctx.DefaultStmtErrLevels
+ errLevels[errctx.ErrGroupTruncate] = errctx.ResolveErrLevel(flags.IgnoreTruncateErr(), flags.TruncateAsWarning())
+ errLevels[errctx.ErrGroupBadNull] = errctx.ResolveErrLevel(false, !sqlMode.HasStrictMode())
+ errLevels[errctx.ErrGroupNoDefault] = errctx.ResolveErrLevel(false, !sqlMode.HasStrictMode())
+ errLevels[errctx.ErrGroupDividedByZero] =
+ errctx.ResolveErrLevel(!sqlMode.HasErrorForDivisionByZeroMode(), !sqlMode.HasStrictMode())
+
+ userVars := variable.NewUserVars()
+ evalCtx := exprstatic.NewEvalContext(
+ exprstatic.WithSQLMode(sqlMode),
+ exprstatic.WithTypeFlags(flags),
+ exprstatic.WithLocation(timeutil.SystemLocation()),
+ exprstatic.WithErrLevelMap(errLevels),
+ exprstatic.WithUserVarsReader(userVars),
+ )
+
+ // no need to build for the plan cache.
+ planCacheTracker := contextutil.NewPlanCacheTracker(contextutil.IgnoreWarn)
+ intest.Assert(!planCacheTracker.UseCache())
+ ctx := exprstatic.NewExprContext(
+ exprstatic.WithEvalCtx(evalCtx),
+ exprstatic.WithPlanCacheTracker(&planCacheTracker),
+ )
+
+ if len(sysVars) > 0 {
+ var err error
+ ctx, err = ctx.LoadSystemVars(sysVars)
+ if err != nil {
+ return nil, err
+ }
+ evalCtx = ctx.GetStaticEvalCtx()
+ }
+
+ currentTime := func() (time.Time, error) { return time.Now(), nil }
+ if timestamp > 0 {
+ currentTime = func() (time.Time, error) { return time.Unix(timestamp, 0), nil }
+ }
+
+ evalCtx = evalCtx.Apply(exprstatic.WithCurrentTime(currentTime))
+ ctx = ctx.Apply(exprstatic.WithEvalCtx(evalCtx))
+
+ return &litExprContext{
+ ExprContext: ctx,
+ userVars: userVars,
+ }, nil
+}
+
+// setUserVarVal sets the value of a user variable.
+func (ctx *litExprContext) setUserVarVal(name string, dt types.Datum) {
+ ctx.userVars.SetUserVarVal(name, dt)
+}
+
+// unsetUserVar unsets a user variable.
+func (ctx *litExprContext) unsetUserVar(varName string) {
+ ctx.userVars.UnsetUserVar(varName)
+}
+
+var _ table.MutateContext = &litTableMutateContext{}
+
+// litTableMutateContext implements the `table.MutateContext` interface for lightning import.
+type litTableMutateContext struct {
+ exprCtx *litExprContext
+ encodingConfig tblctx.RowEncodingConfig
+ mutateBuffers *tblctx.MutateBuffers
+ shardID *variable.RowIDShardGenerator
+ reservedRowIDAlloc stmtctx.ReservedRowIDAlloc
+ enableMutationChecker bool
+ assertionLevel variable.AssertionLevel
+ tableDelta struct {
+ sync.Mutex
+ // tblID -> (colID -> deltaSize)
+ m map[int64]map[int64]int64
+ }
+}
+
+// AlternativeAllocators implements the `table.MutateContext` interface.
+func (*litTableMutateContext) AlternativeAllocators(*model.TableInfo) (autoid.Allocators, bool) {
+ // lightning does not support temporary tables, so we don't need to provide alternative allocators.
+ return autoid.Allocators{}, false
+}
+
+// GetExprCtx implements the `table.MutateContext` interface.
+func (ctx *litTableMutateContext) GetExprCtx() exprctx.ExprContext {
+ return ctx.exprCtx
+}
+
+// ConnectionID implements the `table.MutateContext` interface.
+func (*litTableMutateContext) ConnectionID() uint64 {
+ // Just return 0 because lightning import does not run in any connection.
+ return 0
+}
+
+// InRestrictedSQL implements the `table.MutateContext` interface.
+func (*litTableMutateContext) InRestrictedSQL() bool {
+ // Just return false because lightning import does not run inside any SQL statement.
+ return false
+}
+
+// TxnAssertionLevel implements the `table.MutateContext` interface.
+func (ctx *litTableMutateContext) TxnAssertionLevel() variable.AssertionLevel {
+ return ctx.assertionLevel
+}
+
+// EnableMutationChecker implements the `table.MutateContext` interface.
+func (ctx *litTableMutateContext) EnableMutationChecker() bool {
+ return ctx.enableMutationChecker
+}
+
+// GetRowEncodingConfig implements the `table.MutateContext` interface.
+func (ctx *litTableMutateContext) GetRowEncodingConfig() tblctx.RowEncodingConfig {
+ return ctx.encodingConfig
+}
+
+// GetMutateBuffers implements the `table.MutateContext` interface.
+func (ctx *litTableMutateContext) GetMutateBuffers() *tblctx.MutateBuffers {
+ return ctx.mutateBuffers
+}
+
+// GetRowIDShardGenerator implements the `table.MutateContext` interface.
+func (ctx *litTableMutateContext) GetRowIDShardGenerator() *variable.RowIDShardGenerator {
+ return ctx.shardID
+}
+
+// GetReservedRowIDAlloc implements the `table.MutateContext` interface.
+func (ctx *litTableMutateContext) GetReservedRowIDAlloc() (*stmtctx.ReservedRowIDAlloc, bool) {
+ return &ctx.reservedRowIDAlloc, true
+}
+
+// GetStatisticsSupport implements the `table.MutateContext` interface.
+func (ctx *litTableMutateContext) GetStatisticsSupport() (tblctx.StatisticsSupport, bool) {
+ return ctx, true
+}
+
+// UpdatePhysicalTableDelta implements the `table.StatisticsSupport` interface.
+func (ctx *litTableMutateContext) UpdatePhysicalTableDelta(
+ physicalTableID int64, _ int64,
+ _ int64, cols variable.DeltaCols,
+) {
+ ctx.tableDelta.Lock()
+ defer ctx.tableDelta.Unlock()
+ if ctx.tableDelta.m == nil {
+ ctx.tableDelta.m = make(map[int64]map[int64]int64)
+ }
+ tableMap := ctx.tableDelta.m
+ colSize := tableMap[physicalTableID]
+ tableMap[physicalTableID] = cols.UpdateColSizeMap(colSize)
+}
+
+// GetColumnSize returns the column size map (colID -> deltaSize) for the given table ID.
+func (ctx *litTableMutateContext) GetColumnSize(tblID int64) (ret map[int64]int64) {
+ ctx.tableDelta.Lock()
+ defer ctx.tableDelta.Unlock()
+ return maps.Clone(ctx.tableDelta.m[tblID])
+}
+
+// GetCachedTableSupport implements the `table.MutateContext` interface.
+func (*litTableMutateContext) GetCachedTableSupport() (tblctx.CachedTableSupport, bool) {
+ // lightning import does not support cached tables.
+ return nil, false
+}
+
+func (*litTableMutateContext) GetTemporaryTableSupport() (tblctx.TemporaryTableSupport, bool) {
+ // lightning import does not support temporary tables.
+ return nil, false
+}
+
+func (*litTableMutateContext) GetExchangePartitionDMLSupport() (tblctx.ExchangePartitionDMLSupport, bool) {
+ // lightning import is not a DML query, so we do not need to support it.
+ return nil, false
+}
+
+// newLitTableMutateContext creates a new `*litTableMutateContext` for lightning import.
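+// The given sysVars are loaded into a fresh SessionVars instance, from which the row-encoding,
+// mutation-checker, assertion-level and shard-step settings of the returned context are derived.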
+func newLitTableMutateContext(exprCtx *litExprContext, sysVars map[string]string) (*litTableMutateContext, error) {
+ intest.AssertNotNil(exprCtx)
+ sessVars := variable.NewSessionVars(nil)
+ for k, v := range sysVars {
+ if err := sessVars.SetSystemVar(k, v); err != nil {
+ return nil, err
+ }
+ }
+
+ return &litTableMutateContext{
+ exprCtx: exprCtx,
+ encodingConfig: tblctx.RowEncodingConfig{
+ IsRowLevelChecksumEnabled: sessVars.IsRowLevelChecksumEnabled(),
+ RowEncoder: &sessVars.RowEncoder,
+ },
+ mutateBuffers: tblctx.NewMutateBuffers(sessVars.GetWriteStmtBufs()),
+ // Though the row ID is generated by lightning itself and `GetRowIDShardGenerator` is currently unused,
+ // we still return a valid object to keep the context complete and to avoid potential panics
+ // if it is used after future changes.
+ shardID: variable.NewRowIDShardGenerator(
+ rand.New(rand.NewSource(time.Now().UnixNano())), // #nosec G404
+ int(sessVars.ShardAllocateStep),
+ ),
+ enableMutationChecker: sessVars.EnableMutationChecker,
+ assertionLevel: sessVars.AssertionLevel,
+ }, nil
+}
diff --git a/pkg/lightning/backend/kv/context_test.go b/pkg/lightning/backend/kv/context_test.go
new file mode 100644
index 0000000000000..bb04a4f3d5bbe
--- /dev/null
+++ b/pkg/lightning/backend/kv/context_test.go
@@ -0,0 +1,315 @@
+// Copyright 2024 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package kv + +import ( + "strconv" + "strings" + "testing" + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/pkg/errctx" + "github.com/pingcap/tidb/pkg/expression/exprctx" + "github.com/pingcap/tidb/pkg/meta/model" + "github.com/pingcap/tidb/pkg/parser/mysql" + "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" + "github.com/pingcap/tidb/pkg/sessionctx/variable" + "github.com/pingcap/tidb/pkg/table/tblctx" + "github.com/pingcap/tidb/pkg/types" + contextutil "github.com/pingcap/tidb/pkg/util/context" + "github.com/pingcap/tidb/pkg/util/rowcodec" + "github.com/pingcap/tidb/pkg/util/timeutil" + "github.com/stretchr/testify/require" +) + +func TestLitExprContext(t *testing.T) { + cases := []struct { + sqlMode mysql.SQLMode + sysVars map[string]string + timestamp int64 + checkFlags types.Flags + checkErrLevel errctx.LevelMap + check func(types.Flags, errctx.LevelMap) + }{ + { + sqlMode: mysql.ModeNone, + timestamp: 1234567, + checkFlags: types.DefaultStmtFlags | types.FlagTruncateAsWarning | types.FlagIgnoreZeroInDateErr, + checkErrLevel: func() errctx.LevelMap { + m := stmtctx.DefaultStmtErrLevels + m[errctx.ErrGroupTruncate] = errctx.LevelWarn + m[errctx.ErrGroupBadNull] = errctx.LevelWarn + m[errctx.ErrGroupNoDefault] = errctx.LevelWarn + m[errctx.ErrGroupDividedByZero] = errctx.LevelIgnore + return m + }(), + sysVars: map[string]string{ + "max_allowed_packet": "10240", + "div_precision_increment": "5", + "time_zone": "Europe/Berlin", + "default_week_format": "2", + "block_encryption_mode": "aes-128-ofb", + "group_concat_max_len": "2048", + }, + }, + { + sqlMode: mysql.ModeStrictTransTables | mysql.ModeNoZeroDate | mysql.ModeNoZeroInDate | + mysql.ModeErrorForDivisionByZero, + checkFlags: types.DefaultStmtFlags, + checkErrLevel: func() errctx.LevelMap { + m := stmtctx.DefaultStmtErrLevels + m[errctx.ErrGroupTruncate] = errctx.LevelError + m[errctx.ErrGroupBadNull] = errctx.LevelError + m[errctx.ErrGroupNoDefault] = errctx.LevelError + m[errctx.ErrGroupDividedByZero] = errctx.LevelError + return m + }(), + }, + { + sqlMode: mysql.ModeNoZeroDate | mysql.ModeNoZeroInDate | mysql.ModeErrorForDivisionByZero, + checkFlags: types.DefaultStmtFlags | types.FlagTruncateAsWarning | types.FlagIgnoreZeroInDateErr, + checkErrLevel: func() errctx.LevelMap { + m := stmtctx.DefaultStmtErrLevels + m[errctx.ErrGroupTruncate] = errctx.LevelWarn + m[errctx.ErrGroupBadNull] = errctx.LevelWarn + m[errctx.ErrGroupNoDefault] = errctx.LevelWarn + m[errctx.ErrGroupDividedByZero] = errctx.LevelWarn + return m + }(), + }, + { + sqlMode: mysql.ModeStrictTransTables | mysql.ModeNoZeroInDate, + checkFlags: types.DefaultStmtFlags | types.FlagIgnoreZeroInDateErr, + checkErrLevel: func() errctx.LevelMap { + m := stmtctx.DefaultStmtErrLevels + m[errctx.ErrGroupTruncate] = errctx.LevelError + m[errctx.ErrGroupBadNull] = errctx.LevelError + m[errctx.ErrGroupNoDefault] = errctx.LevelError + m[errctx.ErrGroupDividedByZero] = errctx.LevelIgnore + return m + }(), + }, + { + sqlMode: mysql.ModeStrictTransTables | mysql.ModeNoZeroDate, + checkFlags: types.DefaultStmtFlags | types.FlagIgnoreZeroInDateErr, + checkErrLevel: func() errctx.LevelMap { + m := stmtctx.DefaultStmtErrLevels + m[errctx.ErrGroupTruncate] = errctx.LevelError + m[errctx.ErrGroupBadNull] = errctx.LevelError + m[errctx.ErrGroupNoDefault] = errctx.LevelError + m[errctx.ErrGroupDividedByZero] = errctx.LevelIgnore + return m + }(), + }, + { + sqlMode: mysql.ModeStrictTransTables | mysql.ModeAllowInvalidDates, + checkFlags: types.DefaultStmtFlags | 
types.FlagIgnoreZeroInDateErr | types.FlagIgnoreInvalidDateErr, + checkErrLevel: func() errctx.LevelMap { + m := stmtctx.DefaultStmtErrLevels + m[errctx.ErrGroupTruncate] = errctx.LevelError + m[errctx.ErrGroupBadNull] = errctx.LevelError + m[errctx.ErrGroupNoDefault] = errctx.LevelError + m[errctx.ErrGroupDividedByZero] = errctx.LevelIgnore + return m + }(), + }, + } + + for i, c := range cases { + t.Run("case-"+strconv.Itoa(i), func(t *testing.T) { + ctx, err := newLitExprContext(c.sqlMode, c.sysVars, c.timestamp) + require.NoError(t, err) + evalCtx := ctx.GetEvalCtx() + require.Equal(t, c.sqlMode, evalCtx.SQLMode()) + tc, ec := evalCtx.TypeCtx(), evalCtx.ErrCtx() + require.Same(t, evalCtx.Location(), tc.Location()) + require.Equal(t, c.checkFlags, tc.Flags()) + require.Equal(t, c.checkErrLevel, ec.LevelMap()) + + // shares the same warning handler + warns := []contextutil.SQLWarn{ + {Level: contextutil.WarnLevelWarning, Err: errors.New("mockErr1")}, + {Level: contextutil.WarnLevelWarning, Err: errors.New("mockErr2")}, + {Level: contextutil.WarnLevelWarning, Err: errors.New("mockErr3")}, + } + require.Equal(t, 0, evalCtx.WarningCount()) + evalCtx.AppendWarning(warns[0].Err) + tc.AppendWarning(warns[1].Err) + ec.AppendWarning(warns[2].Err) + require.Equal(t, warns, evalCtx.CopyWarnings(nil)) + + // system vars + timeZone := "SYSTEM" + expectedMaxAllowedPacket := variable.DefMaxAllowedPacket + expectedDivPrecisionInc := variable.DefDivPrecisionIncrement + expectedDefaultWeekFormat := variable.DefDefaultWeekFormat + expectedBlockEncryptionMode := variable.DefBlockEncryptionMode + expectedGroupConcatMaxLen := variable.DefGroupConcatMaxLen + for k, v := range c.sysVars { + switch strings.ToLower(k) { + case "time_zone": + timeZone = v + case "max_allowed_packet": + expectedMaxAllowedPacket, err = strconv.ParseUint(v, 10, 64) + case "div_precision_increment": + expectedDivPrecisionInc, err = strconv.Atoi(v) + case "default_week_format": + expectedDefaultWeekFormat = v + case "block_encryption_mode": + expectedBlockEncryptionMode = v + case "group_concat_max_len": + expectedGroupConcatMaxLen, err = strconv.ParseUint(v, 10, 64) + } + require.NoError(t, err) + } + if strings.ToLower(timeZone) == "system" { + require.Same(t, timeutil.SystemLocation(), evalCtx.Location()) + } else { + require.Equal(t, timeZone, evalCtx.Location().String()) + } + require.Equal(t, expectedMaxAllowedPacket, evalCtx.GetMaxAllowedPacket()) + require.Equal(t, expectedDivPrecisionInc, evalCtx.GetDivPrecisionIncrement()) + require.Equal(t, expectedDefaultWeekFormat, evalCtx.GetDefaultWeekFormatMode()) + require.Equal(t, expectedBlockEncryptionMode, ctx.GetBlockEncryptionMode()) + require.Equal(t, expectedGroupConcatMaxLen, ctx.GetGroupConcatMaxLen()) + + now := time.Now() + tm, err := evalCtx.CurrentTime() + require.NoError(t, err) + require.Same(t, evalCtx.Location(), tm.Location()) + if c.timestamp == 0 { + // timestamp == 0 means use the current time. 
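+ // allow a small delta because `now` is captured slightly before CurrentTime is evaluated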
+ require.InDelta(t, now.Unix(), tm.Unix(), 2)
+ } else {
+ require.Equal(t, c.timestamp*1000000000, tm.UnixNano())
+ }
+ // CurrentTime should return the same value on each call
+ tm2, err := evalCtx.CurrentTime()
+ require.NoError(t, err)
+ require.Equal(t, tm.Nanosecond(), tm2.Nanosecond())
+ require.Same(t, tm.Location(), tm2.Location())
+
+ // currently we don't support optional properties
+ require.Equal(t, exprctx.OptionalEvalPropKeySet(0), evalCtx.GetOptionalPropSet())
+ // not built for the plan cache
+ require.False(t, ctx.IsUseCache())
+ // rng not nil
+ require.NotNil(t, ctx.Rng())
+ // ConnectionID
+ require.Equal(t, uint64(0), ctx.ConnectionID())
+ // user vars
+ userVars := evalCtx.GetUserVarsReader()
+ _, ok := userVars.GetUserVarVal("a")
+ require.False(t, ok)
+ ctx.setUserVarVal("a", types.NewIntDatum(123))
+ d, ok := userVars.GetUserVarVal("a")
+ require.True(t, ok)
+ require.Equal(t, types.NewIntDatum(123), d)
+ ctx.unsetUserVar("a")
+ _, ok = userVars.GetUserVarVal("a")
+ require.False(t, ok)
+ })
+ }
+}
+
+func TestLitTableMutateContext(t *testing.T) {
+ exprCtx, err := newLitExprContext(mysql.ModeNone, nil, 0)
+ require.NoError(t, err)
+
+ checkCommon := func(t *testing.T, tblCtx *litTableMutateContext) {
+ require.Same(t, exprCtx, tblCtx.GetExprCtx())
+ _, ok := tblCtx.AlternativeAllocators(&model.TableInfo{ID: 1})
+ require.False(t, ok)
+ require.Equal(t, uint64(0), tblCtx.ConnectionID())
+ require.Equal(t, tblCtx.GetExprCtx().ConnectionID(), tblCtx.ConnectionID())
+ require.False(t, tblCtx.InRestrictedSQL())
+ require.NotNil(t, tblCtx.GetMutateBuffers())
+ require.NotNil(t, tblCtx.GetMutateBuffers().GetWriteStmtBufs())
+ alloc, ok := tblCtx.GetReservedRowIDAlloc()
+ require.True(t, ok)
+ require.NotNil(t, alloc)
+ require.Equal(t, &stmtctx.ReservedRowIDAlloc{}, alloc)
+ require.True(t, alloc.Exhausted())
+ _, ok = tblCtx.GetCachedTableSupport()
+ require.False(t, ok)
+ _, ok = tblCtx.GetTemporaryTableSupport()
+ require.False(t, ok)
+ stats, ok := tblCtx.GetStatisticsSupport()
+ require.True(t, ok)
+ // test for `UpdatePhysicalTableDelta` and `GetColumnSize`
+ stats.UpdatePhysicalTableDelta(123, 5, 2, variable.DeltaColsMap{1: 2, 3: 4})
+ r := tblCtx.GetColumnSize(123)
+ require.Equal(t, map[int64]int64{1: 2, 3: 4}, r)
+ stats.UpdatePhysicalTableDelta(123, 8, 2, variable.DeltaColsMap{3: 5, 4: 3})
+ r = tblCtx.GetColumnSize(123)
+ require.Equal(t, map[int64]int64{1: 2, 3: 9, 4: 3}, r)
+ // the result should be a cloned value
+ r[1] = 100
+ require.Equal(t, map[int64]int64{1: 2, 3: 9, 4: 3}, tblCtx.GetColumnSize(123))
+ // test getting a non-existent table
+ require.Empty(t, tblCtx.GetColumnSize(456))
+ }
+
+ // test for default
+ tblCtx, err := newLitTableMutateContext(exprCtx, nil)
+ require.NoError(t, err)
+ checkCommon(t, tblCtx)
+ require.Equal(t, variable.AssertionLevelOff, tblCtx.TxnAssertionLevel())
+ require.Equal(t, variable.DefTiDBEnableMutationChecker, tblCtx.EnableMutationChecker())
+ require.False(t, tblCtx.EnableMutationChecker())
+ require.Equal(t, tblctx.RowEncodingConfig{
+ IsRowLevelChecksumEnabled: false,
+ RowEncoder: &rowcodec.Encoder{Enable: false},
+ }, tblCtx.GetRowEncodingConfig())
+ g := tblCtx.GetRowIDShardGenerator()
+ require.NotNil(t, g)
+ require.Equal(t, variable.DefTiDBShardAllocateStep, g.GetShardStep())
+
+ // test for loading vars
+ sysVars := map[string]string{
+ "tidb_txn_assertion_level": "STRICT",
+ "tidb_enable_mutation_checker": "ON",
+ "tidb_row_format_version": "2",
+ "tidb_shard_allocate_step": "1234567",
+ }
+ tblCtx, err =
newLitTableMutateContext(exprCtx, sysVars)
+ require.NoError(t, err)
+ checkCommon(t, tblCtx)
+ require.Equal(t, variable.AssertionLevelStrict, tblCtx.TxnAssertionLevel())
+ require.True(t, tblCtx.EnableMutationChecker())
+ require.Equal(t, tblctx.RowEncodingConfig{
+ IsRowLevelChecksumEnabled: false,
+ RowEncoder: &rowcodec.Encoder{Enable: true},
+ }, tblCtx.GetRowEncodingConfig())
+ g = tblCtx.GetRowIDShardGenerator()
+ require.NotNil(t, g)
+ require.NotEqual(t, variable.DefTiDBShardAllocateStep, g.GetShardStep())
+ require.Equal(t, 1234567, g.GetShardStep())
+
+ // test for `RowEncodingConfig.IsRowLevelChecksumEnabled`, which should be loaded from the global variable.
+ require.False(t, variable.EnableRowLevelChecksum.Load())
+ defer variable.EnableRowLevelChecksum.Store(false)
+ variable.EnableRowLevelChecksum.Store(true)
+ sysVars = map[string]string{
+ "tidb_row_format_version": "2",
+ }
+ tblCtx, err = newLitTableMutateContext(exprCtx, sysVars)
+ require.NoError(t, err)
+ require.Equal(t, tblctx.RowEncodingConfig{
+ IsRowLevelChecksumEnabled: true,
+ RowEncoder: &rowcodec.Encoder{Enable: true},
+ }, tblCtx.GetRowEncodingConfig())
+}
diff --git a/pkg/lightning/errormanager/errormanager.go b/pkg/lightning/errormanager/errormanager.go
new file mode 100644
index 0000000000000..5649bf7be1279
--- /dev/null
+++ b/pkg/lightning/errormanager/errormanager.go
@@ -0,0 +1,1141 @@
+// Copyright 2021 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package errormanager
+
+import (
+ "bytes"
+ "context"
+ "database/sql"
+ "fmt"
+ "math"
+ "strings"
+ "sync"
+
+ "github.com/jedib0t/go-pretty/v6/table"
+ "github.com/jedib0t/go-pretty/v6/text"
+ "github.com/pingcap/errors"
+ "github.com/pingcap/tidb/br/pkg/logutil"
+ "github.com/pingcap/tidb/pkg/lightning/backend/encode"
+ "github.com/pingcap/tidb/pkg/lightning/backend/kv"
+ "github.com/pingcap/tidb/pkg/lightning/common"
+ "github.com/pingcap/tidb/pkg/lightning/config"
+ "github.com/pingcap/tidb/pkg/lightning/log"
+ "github.com/pingcap/tidb/pkg/parser/mysql"
+ tidbtbl "github.com/pingcap/tidb/pkg/table"
+ "github.com/pingcap/tidb/pkg/table/tables"
+ "github.com/pingcap/tidb/pkg/tablecodec"
+ "github.com/pingcap/tidb/pkg/types"
+ "github.com/pingcap/tidb/pkg/util"
+ "github.com/pingcap/tidb/pkg/util/redact"
+ tikverr "github.com/tikv/client-go/v2/error"
+ "go.uber.org/atomic"
+ "go.uber.org/multierr"
+ "go.uber.org/zap"
+ "golang.org/x/exp/slices"
+ "golang.org/x/sync/errgroup"
+)
+
+const (
+ createSchema = `
+ CREATE SCHEMA IF NOT EXISTS %s;
+ `
+
+ syntaxErrorTableName = "syntax_error_v1"
+ typeErrorTableName = "type_error_v1"
+ // ConflictErrorTableName is the table name for duplicate detection.
+ ConflictErrorTableName = "conflict_error_v3"
+ // DupRecordTableName is the table name to record duplicate data that is displayed to the user.
+ DupRecordTableName = "conflict_records"
+ // ConflictViewName is the view name for presenting the union information of ConflictErrorTable and DupRecordTable.
+ ConflictViewName = "conflict_view" + + createSyntaxErrorTable = ` + CREATE TABLE IF NOT EXISTS %s.` + syntaxErrorTableName + ` ( + task_id bigint NOT NULL, + create_time datetime(6) NOT NULL DEFAULT now(6), + table_name varchar(261) NOT NULL, + path varchar(2048) NOT NULL, + offset bigint NOT NULL, + error text NOT NULL, + context text + ); + ` + + createTypeErrorTable = ` + CREATE TABLE IF NOT EXISTS %s.` + typeErrorTableName + ` ( + task_id bigint NOT NULL, + create_time datetime(6) NOT NULL DEFAULT now(6), + table_name varchar(261) NOT NULL, + path varchar(2048) NOT NULL, + offset bigint NOT NULL, + error text NOT NULL, + row_data text NOT NULL + ); + ` + + createConflictErrorTable = ` + CREATE TABLE IF NOT EXISTS %s.` + ConflictErrorTableName + ` ( + task_id bigint NOT NULL, + create_time datetime(6) NOT NULL DEFAULT now(6), + table_name varchar(261) NOT NULL, + index_name varchar(128) NOT NULL, + key_data text NOT NULL COMMENT 'decoded from raw_key, human readable only, not for machine use', + row_data text NOT NULL COMMENT 'decoded from raw_row, human readable only, not for machine use', + raw_key mediumblob NOT NULL COMMENT 'the conflicted key', + raw_value mediumblob NOT NULL COMMENT 'the value of the conflicted key', + raw_handle mediumblob NOT NULL COMMENT 'the data handle derived from the conflicted key or value', + raw_row mediumblob NOT NULL COMMENT 'the data retrieved from the handle', + kv_type tinyint(1) NOT NULL COMMENT '0 for index kv, 1 for data kv, 2 for additionally inserted data kv', + INDEX (task_id, table_name), + INDEX (index_name), + INDEX (table_name, index_name), + INDEX (kv_type) + ); + ` + + createDupRecordTableName = ` + CREATE TABLE IF NOT EXISTS %s.` + DupRecordTableName + ` ( + task_id bigint NOT NULL, + create_time datetime(6) NOT NULL DEFAULT now(6), + table_name varchar(261) NOT NULL, + path varchar(2048) NOT NULL, + offset bigint NOT NULL, + error text NOT NULL, + row_id bigint NOT NULL COMMENT 'the row id of the conflicted row', + row_data text NOT NULL COMMENT 'the row data of the conflicted row', + KEY (task_id, table_name) + ); + ` + + createConflictV1View = ` + CREATE OR REPLACE VIEW %s.` + ConflictViewName + ` + AS SELECT 0 AS is_precheck_conflict, task_id, create_time, table_name, index_name, key_data, row_data, + raw_key, raw_value, raw_handle, raw_row, kv_type, NULL AS path, NULL AS offset, NULL AS error, NULL AS row_id + FROM %s.` + ConflictErrorTableName + `; + ` + + createConflictV2View = ` + CREATE OR REPLACE VIEW %s.` + ConflictViewName + ` + AS SELECT 1 AS is_precheck_conflict, task_id, create_time, table_name, NULL AS index_name, NULL AS key_data, + row_data, NULL AS raw_key, NULL AS raw_value, NULL AS raw_handle, NULL AS raw_row, NULL AS kv_type, path, + offset, error, row_id FROM %s.` + DupRecordTableName + `; + ` + + createConflictV1V2View = ` + CREATE OR REPLACE VIEW %s.` + ConflictViewName + ` + AS SELECT 0 AS is_precheck_conflict, task_id, create_time, table_name, index_name, key_data, row_data, + raw_key, raw_value, raw_handle, raw_row, kv_type, NULL AS path, NULL AS offset, NULL AS error, NULL AS row_id + FROM %s.` + ConflictErrorTableName + ` + UNION ALL SELECT 1 AS is_precheck_conflict, task_id, create_time, table_name, NULL AS index_name, NULL AS key_data, + row_data, NULL AS raw_key, NULL AS raw_value, NULL AS raw_handle, NULL AS raw_row, NULL AS kv_type, path, + offset, error, row_id FROM %s.` + DupRecordTableName + `; + ` + + insertIntoTypeError = ` + INSERT INTO %s.` + typeErrorTableName + ` + (task_id, table_name, 
path, offset, error, row_data)
+		VALUES (?, ?, ?, ?, ?, ?);
+	`
+
+	insertIntoConflictErrorData = `
+		INSERT IGNORE INTO %s.` + ConflictErrorTableName + `
+		(task_id, table_name, index_name, key_data, row_data, raw_key, raw_value, raw_handle, raw_row, kv_type)
+		VALUES
+	`
+
+	sqlValuesConflictErrorData = "(?,?,'PRIMARY',?,?,?,?,raw_key,raw_value,?)"
+
+	insertIntoConflictErrorIndex = `
+		INSERT INTO %s.` + ConflictErrorTableName + `
+		(task_id, table_name, index_name, key_data, row_data, raw_key, raw_value, raw_handle, raw_row, kv_type)
+		VALUES
+	`
+
+	sqlValuesConflictErrorIndex = "(?,?,?,?,?,?,?,?,?,?)"
+
+	selectIndexConflictKeysReplace = `
+		SELECT _tidb_rowid, raw_key, index_name, raw_value, raw_handle
+		FROM %s.` + ConflictErrorTableName + `
+		WHERE table_name = ? AND kv_type = 0 AND _tidb_rowid >= ? and _tidb_rowid < ?
+		ORDER BY _tidb_rowid LIMIT ?;
+	`
+
+	selectDataConflictKeysReplace = `
+		SELECT _tidb_rowid, raw_key, raw_value
+		FROM %s.` + ConflictErrorTableName + `
+		WHERE table_name = ? AND kv_type <> 0 AND _tidb_rowid >= ? and _tidb_rowid < ?
+		ORDER BY _tidb_rowid LIMIT ?;
+	`
+
+	deleteNullDataRow = `
+		DELETE FROM %s.` + ConflictErrorTableName + `
+		WHERE kv_type = 2
+		LIMIT ?;
+	`
+
+	insertIntoDupRecord = `
+		INSERT INTO %s.` + DupRecordTableName + `
+		(task_id, table_name, path, offset, error, row_id, row_data)
+		VALUES (?, ?, ?, ?, ?, ?, ?);
+	`
+)
+
+// ErrorManager records errors during the import process.
+type ErrorManager struct {
+	db             *sql.DB
+	taskID         int64
+	schema         string
+	configError    *config.MaxError
+	remainingError config.MaxError
+
+	configConflict        *config.Conflict
+	conflictErrRemain     *atomic.Int64
+	conflictRecordsRemain *atomic.Int64
+	conflictV1Enabled     bool
+	conflictV2Enabled     bool
+	logger                log.Logger
+	recordErrorOnce       *atomic.Bool
+}
+
+// TypeErrorsRemain returns the number of type errors that can still be recorded.
+func (em *ErrorManager) TypeErrorsRemain() int64 {
+	return em.remainingError.Type.Load()
+}
+
+// ConflictErrorsRemain returns the number of conflict errors that can still be recorded.
+func (em *ErrorManager) ConflictErrorsRemain() int64 {
+	return em.conflictErrRemain.Load()
+}
+
+// ConflictRecordsRemain returns the number of conflict records that can still be recorded.
+func (em *ErrorManager) ConflictRecordsRemain() int64 {
+	return em.conflictRecordsRemain.Load()
+}
+
+// RecordErrorOnce returns whether RecordDuplicateOnce has been called. Note that this
+// method is not atomic with RecordDuplicateOnce.
+func (em *ErrorManager) RecordErrorOnce() bool {
+	return em.recordErrorOnce.Load()
+}
+
+// New creates a new error manager.
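+// The manager only persists errors to the target database when
+// cfg.App.TaskInfoSchemaName is non-empty; otherwise it degrades to pure
+// in-memory threshold counting. A minimal usage sketch (hypothetical wiring;
+// db, cfg, ctx, and the encode failure are assumed to come from the caller):
+//
+//	em := New(db, cfg, log.L())
+//	if err := em.Init(ctx); err != nil {
+//		return err
+//	}
+//	// record one row that failed to encode:
+//	err = em.RecordTypeError(ctx, log.L(), "`db`.`tbl`", "1.csv", 0, rowText, encodeErr)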
+func New(db *sql.DB, cfg *config.Config, logger log.Logger) *ErrorManager {
+	conflictErrRemain := atomic.NewInt64(cfg.Conflict.Threshold)
+	conflictRecordsRemain := atomic.NewInt64(cfg.Conflict.MaxRecordRows)
+	em := &ErrorManager{
+		taskID:                cfg.TaskID,
+		configError:           &cfg.App.MaxError,
+		remainingError:        cfg.App.MaxError,
+		conflictV1Enabled:     cfg.TikvImporter.Backend == config.BackendLocal && cfg.Conflict.Strategy != config.NoneOnDup,
+		configConflict:        &cfg.Conflict,
+		conflictErrRemain:     conflictErrRemain,
+		conflictRecordsRemain: conflictRecordsRemain,
+		logger:                logger,
+		recordErrorOnce:       atomic.NewBool(false),
+	}
+	switch cfg.TikvImporter.Backend {
+	case config.BackendLocal:
+		if cfg.Conflict.PrecheckConflictBeforeImport && cfg.Conflict.Strategy != config.NoneOnDup {
+			em.conflictV2Enabled = true
+		}
+	case config.BackendTiDB:
+		em.conflictV2Enabled = true
+	}
+	if len(cfg.App.TaskInfoSchemaName) != 0 {
+		em.db = db
+		em.schema = cfg.App.TaskInfoSchemaName
+	}
+	return em
+}
+
+// Init creates the schemas and tables to store the task information.
+func (em *ErrorManager) Init(ctx context.Context) error {
+	if em.db == nil {
+		return nil
+	}
+
+	exec := common.SQLWithRetry{
+		DB:     em.db,
+		Logger: em.logger,
+	}
+
+	sqls := make([][2]string, 0)
+	sqls = append(sqls, [2]string{"create task info schema", createSchema})
+	if em.remainingError.Syntax.Load() > 0 {
+		sqls = append(sqls, [2]string{"create syntax error table", createSyntaxErrorTable})
+	}
+	if em.remainingError.Type.Load() > 0 {
+		sqls = append(sqls, [2]string{"create type error table", createTypeErrorTable})
+	}
+	if em.conflictV1Enabled {
+		sqls = append(sqls, [2]string{"create conflict error table", createConflictErrorTable})
+	}
+	if em.conflictV2Enabled {
+		sqls = append(sqls, [2]string{"create duplicate records table", createDupRecordTableName})
+	}
+
+	// No need to create the task info schema if no error is allowed.
+	if len(sqls) == 1 {
+		return nil
+	}
+
+	for _, sql := range sqls {
+		// trim spaces for unit test pattern matching
+		err := exec.Exec(ctx, sql[0], strings.TrimSpace(common.SprintfWithIdentifiers(sql[1], em.schema)))
+		if err != nil {
+			return err
+		}
+	}
+
+	if em.conflictV1Enabled && em.conflictV2Enabled {
+		err := exec.Exec(ctx, "create conflict view", strings.TrimSpace(common.SprintfWithIdentifiers(createConflictV1V2View, em.schema, em.schema, em.schema)))
+		if err != nil {
+			return err
+		}
+	} else if em.conflictV1Enabled {
+		err := exec.Exec(ctx, "create conflict view", strings.TrimSpace(common.SprintfWithIdentifiers(createConflictV1View, em.schema, em.schema)))
+		if err != nil {
+			return err
+		}
+	} else if em.conflictV2Enabled {
+		err := exec.Exec(ctx, "create conflict view", strings.TrimSpace(common.SprintfWithIdentifiers(createConflictV2View, em.schema, em.schema)))
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// RecordTypeError records a type error.
+// If the number of recorded type errors exceeds the max-error count, it also returns `encodeErr` directly.
+func (em *ErrorManager) RecordTypeError(
+	ctx context.Context,
+	logger log.Logger,
+	tableName string,
+	path string,
+	offset int64,
+	rowText string,
+	encodeErr error,
+) error {
+	// elide the encode error if needed.
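+	// The remaining budget is decremented first and checked afterwards, so each
+	// caller consumes one slot; once the counter goes negative, the encode error
+	// is surfaced to the caller instead of being recorded.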
+	if em.remainingError.Type.Dec() < 0 {
+		threshold := em.configError.Type.Load()
+		if threshold > 0 {
+			encodeErr = errors.Annotatef(encodeErr,
+				"The number of type errors exceeds the threshold configured by `max-error.type`: '%d'",
+				em.configError.Type.Load())
+		}
+		return encodeErr
+	}
+
+	if em.db != nil {
+		errMsg := encodeErr.Error()
+		logger = logger.With(
+			zap.Int64("offset", offset),
+			zap.String("row", redact.Value(rowText)),
+			zap.String("message", errMsg))
+
+		// put it into the database.
+		exec := common.SQLWithRetry{
+			DB:           em.db,
+			Logger:       logger,
+			HideQueryLog: redact.NeedRedact(),
+		}
+		if err := exec.Exec(ctx, "insert type error record",
+			common.SprintfWithIdentifiers(insertIntoTypeError, em.schema),
+			em.taskID,
+			tableName,
+			path,
+			offset,
+			errMsg,
+			rowText,
+		); err != nil {
+			return multierr.Append(encodeErr, err)
+		}
+	}
+	return nil
+}
+
+// DataConflictInfo is the information of a data conflict error.
+type DataConflictInfo struct {
+	RawKey   []byte
+	RawValue []byte
+	KeyData  string
+	Row      string
+}
+
+// RecordDataConflictError records a data conflict error.
+func (em *ErrorManager) RecordDataConflictError(
+	ctx context.Context,
+	logger log.Logger,
+	tableName string,
+	conflictInfos []DataConflictInfo,
+) error {
+	var gerr error
+	if len(conflictInfos) == 0 {
+		return nil
+	}
+
+	if em.conflictErrRemain.Sub(int64(len(conflictInfos))) < 0 {
+		threshold := em.configConflict.Threshold
+		// Still need to record this batch of conflict records, and then return this error at last.
+		// Otherwise, if max-error.conflict is set to a very small value, none of the conflict errors will be recorded.
+		gerr = errors.Errorf(
+			"The number of conflict errors exceeds the threshold configured by `conflict.threshold`: '%d'",
+			threshold)
+	}
+
+	if em.db == nil {
+		return gerr
+	}
+
+	exec := common.SQLWithRetry{
+		DB:           em.db,
+		Logger:       logger,
+		HideQueryLog: redact.NeedRedact(),
+	}
+	if err := exec.Transact(ctx, "insert data conflict error record", func(c context.Context, txn *sql.Tx) error {
+		sb := &strings.Builder{}
+		_, err := common.FprintfWithIdentifiers(sb, insertIntoConflictErrorData, em.schema)
+		if err != nil {
+			return err
+		}
+		var sqlArgs []any
+		for i, conflictInfo := range conflictInfos {
+			if i > 0 {
+				sb.WriteByte(',')
+			}
+			sb.WriteString(sqlValuesConflictErrorData)
+			sqlArgs = append(sqlArgs,
+				em.taskID,
+				tableName,
+				conflictInfo.KeyData,
+				conflictInfo.Row,
+				conflictInfo.RawKey,
+				conflictInfo.RawValue,
+				tablecodec.IsRecordKey(conflictInfo.RawKey),
+			)
+		}
+		_, err = txn.ExecContext(c, sb.String(), sqlArgs...)
+		return err
+	}); err != nil {
+		gerr = err
+	}
+	return gerr
+}
+
+// RecordIndexConflictError records an index conflict error.
+func (em *ErrorManager) RecordIndexConflictError(
+	ctx context.Context,
+	logger log.Logger,
+	tableName string,
+	indexNames []string,
+	conflictInfos []DataConflictInfo,
+	rawHandles, rawRows [][]byte,
+) error {
+	var gerr error
+	if len(conflictInfos) == 0 {
+		return nil
+	}
+
+	if em.conflictErrRemain.Sub(int64(len(conflictInfos))) < 0 {
+		threshold := em.configConflict.Threshold
+		// Still need to record this batch of conflict records, and then return this error at last.
+		// Otherwise, if max-error.conflict is set to a very small value, none of the conflict errors will be recorded.
+		gerr = errors.Errorf(
+			"The number of conflict errors exceeds the threshold configured by `conflict.threshold`: '%d'",
+			threshold)
+	}
+
+	if em.db == nil {
+		return gerr
+	}
+
+	exec := common.SQLWithRetry{
+		DB:           em.db,
+		Logger:       logger,
+		HideQueryLog: redact.NeedRedact(),
+	}
+	if err := exec.Transact(ctx, "insert index conflict error record", func(c context.Context, txn *sql.Tx) error {
+		sb := &strings.Builder{}
+		_, err := common.FprintfWithIdentifiers(sb, insertIntoConflictErrorIndex, em.schema)
+		if err != nil {
+			return err
+		}
+		var sqlArgs []any
+		for i, conflictInfo := range conflictInfos {
+			if i > 0 {
+				sb.WriteByte(',')
+			}
+			sb.WriteString(sqlValuesConflictErrorIndex)
+			sqlArgs = append(sqlArgs,
+				em.taskID,
+				tableName,
+				indexNames[i],
+				conflictInfo.KeyData,
+				conflictInfo.Row,
+				conflictInfo.RawKey,
+				conflictInfo.RawValue,
+				rawHandles[i],
+				rawRows[i],
+				tablecodec.IsRecordKey(conflictInfo.RawKey),
+			)
+		}
+		_, err = txn.ExecContext(c, sb.String(), sqlArgs...)
+		return err
+	}); err != nil {
+		gerr = err
+	}
+	return gerr
+}
+
+// ReplaceConflictKeys queries all conflicting rows (handles and their
+// values) from the current error report and resolves them
+// by replacing the necessary rows and preserving the others.
+func (em *ErrorManager) ReplaceConflictKeys(
+	ctx context.Context,
+	tbl tidbtbl.Table,
+	tableName string,
+	pool *util.WorkerPool,
+	fnGetLatest func(ctx context.Context, key []byte) ([]byte, error),
+	fnDeleteKeys func(ctx context.Context, key [][]byte) error,
+) error {
+	if em.db == nil {
+		return nil
+	}
+
+	exec := common.SQLWithRetry{
+		DB:           em.db,
+		Logger:       em.logger,
+		HideQueryLog: redact.NeedRedact(),
+	}
+
+	const rowLimit = 1000
+	indexTaskCh := make(chan [2]int64)
+	indexTaskWg := &sync.WaitGroup{}
+	indexG, indexGCtx := errgroup.WithContext(ctx)
+
+	go func() {
+		//nolint:staticcheck
+		//lint:ignore SA2000
+		indexTaskWg.Add(1)
+		indexTaskCh <- [2]int64{0, math.MaxInt64}
+		indexTaskWg.Wait()
+		close(indexTaskCh)
+	}()
+
+	// TODO: provide a detailed document to explain the algorithm and link it here
+	// demo for "replace" algorithm: https://github.com/lyzx2001/tidb-conflict-replace
+	// check index KV
+	for t := range indexTaskCh {
+		start, end := t[0], t[1]
+		pool.ApplyOnErrorGroup(indexG, func() error {
+			defer indexTaskWg.Done()
+
+			sessionOpts := encode.SessionOptions{
+				// TODO: need to find the correct value for SQLMode
+				SQLMode: mysql.ModeStrictAllTables,
+			}
+			encoder, err := kv.NewBaseKVEncoder(&encode.EncodingConfig{
+				Table:          tbl,
+				SessionOptions: sessionOpts,
+				Logger:         em.logger,
+			})
+			if err != nil {
+				return errors.Trace(err)
+			}
+
+			var handleKeys [][]byte
+			var insertRows [][2][]byte
+			for start < end {
+				indexKvRows, err := em.db.QueryContext(
+					indexGCtx, common.SprintfWithIdentifiers(selectIndexConflictKeysReplace, em.schema),
+					tableName, start, end, rowLimit)
+				if err != nil {
+					return errors.Trace(err)
+				}
+
+				var lastRowID int64
+				for indexKvRows.Next() {
+					var rawKey, rawValue, rawHandle []byte
+					var indexName string
+					if err := indexKvRows.Scan(&lastRowID, &rawKey, &indexName, &rawValue, &rawHandle); err != nil {
+						return errors.Trace(err)
+					}
+					em.logger.Debug("got raw_key, index_name, raw_value, raw_handle from table",
+						zap.Binary("raw_key", rawKey),
+						zap.String("index_name", indexName),
+						zap.Binary("raw_value", rawValue),
+						zap.Binary("raw_handle", rawHandle))
+
+					// get the latest value of rawKey from downstream TiDB
+					latestValue, err := fnGetLatest(indexGCtx, rawKey)
+					if tikverr.IsErrNotFound(err) {
+						continue
+					}
+					if err != nil {
+						return errors.Trace(err)
+					}
+
+					// if the latest value of rawKey is equal to rawValue, that means this index KV is maintained in downstream TiDB
+					// if not, that means this index KV has been overwritten, and its corresponding data KV needs to be deleted
+					if bytes.Equal(rawValue, latestValue) {
+						continue
+					}
+
+					// rawHandle is the row key of the data KV that needs to be deleted
+					// get the latest value of the row key of the data KV that needs to be deleted
+					overwritten, err := fnGetLatest(indexGCtx, rawHandle)
+					// if the latest value cannot be found, that means the data KV has been deleted
+					if tikverr.IsErrNotFound(err) {
+						continue
+					}
+					if err != nil {
+						return errors.Trace(err)
+					}
+
+					overwrittenHandle, err := tablecodec.DecodeRowKey(rawHandle)
+					if err != nil {
+						return errors.Trace(err)
+					}
+					decodedData, _, err := tables.DecodeRawRowData(encoder.SessionCtx.GetExprCtx(),
+						tbl.Meta(), overwrittenHandle, tbl.Cols(), overwritten)
+					if err != nil {
+						return errors.Trace(err)
+					}
+					if !tbl.Meta().HasClusteredIndex() {
+						// for nonclustered PK, need to append handle to decodedData for AddRecord
+						decodedData = append(decodedData, types.NewIntDatum(overwrittenHandle.IntValue()))
+					}
+					_, err = encoder.AddRecord(decodedData)
+					if err != nil {
+						return errors.Trace(err)
+					}
+
+					// find out all the KV pairs that are contained in the data KV
+					kvPairs := encoder.SessionCtx.TakeKvPairs()
+
+					for _, kvPair := range kvPairs.Pairs {
+						em.logger.Debug("got encoded KV",
+							logutil.Key("key", kvPair.Key),
+							zap.Binary("value", kvPair.Val),
+							logutil.Key("rawKey", rawKey),
+							zap.Binary("rawValue", rawValue))
+
+						// If rawKey is equal to the KV pair's key and rawValue is equal to the KV pair's value,
+						// this latest data KV of the index KV needs to be deleted;
+						// if not, this latest data KV of the index KV was inserted by other rows,
+						// so it is unrelated to the index KV that needs to be deleted, and we cannot delete it.
+
+						// An example is:
+						// (pk, uk)
+						// (1, a)
+						// (1, b)
+						// (2, a)
+
+						// (1, a) is overwritten by (2, a). We found a->1 is an overwritten index KV,
+						// and we are considering if its data KV with key "1" can be deleted.
+						// We get the latest value of key "1", which is (1, b),
+						// and encode it to get all its KV pairs, which are [1->b, b->1].
+						// Only if a->1 is among them do we dare to delete the data KV with key "1".
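+						// In other words, the equality check below confirms that the
+						// overwritten row still produces exactly the index KV being
+						// resolved before its handle is queued for deletion and
+						// re-insertion as a kv_type = 2 record.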
+ + if bytes.Equal(kvPair.Key, rawKey) && bytes.Equal(kvPair.Val, rawValue) { + handleKeys = append(handleKeys, rawHandle) + var insertRow [2][]byte + insertRow[0] = rawHandle + insertRow[1] = overwritten + insertRows = append(insertRows, insertRow) + break + } + } + } + if err := indexKvRows.Err(); err != nil { + _ = indexKvRows.Close() + return errors.Trace(err) + } + if err := indexKvRows.Close(); err != nil { + return errors.Trace(err) + } + if len(handleKeys) == 0 { + break + } + if err := fnDeleteKeys(indexGCtx, handleKeys); err != nil { + return errors.Trace(err) + } + if err := exec.Transact(ctx, "insert data conflict record for conflict detection 'replace' mode", + func(c context.Context, txn *sql.Tx) error { + sb := &strings.Builder{} + _, err2 := common.FprintfWithIdentifiers(sb, insertIntoConflictErrorData, em.schema) + if err2 != nil { + return errors.Trace(err2) + } + var sqlArgs []any + for i, insertRow := range insertRows { + if i > 0 { + sb.WriteByte(',') + } + sb.WriteString(sqlValuesConflictErrorData) + sqlArgs = append(sqlArgs, + em.taskID, + tableName, + nil, + nil, + insertRow[0], + insertRow[1], + 2, + ) + } + _, err := txn.ExecContext(c, sb.String(), sqlArgs...) + return errors.Trace(err) + }); err != nil { + return errors.Trace(err) + } + start = lastRowID + 1 + // If the remaining tasks cannot be processed at once, split the task + // into two subtasks and send one of them to the other idle worker if possible. + if end-start > rowLimit { + mid := start + (end-start)/2 + indexTaskWg.Add(1) + select { + case indexTaskCh <- [2]int64{mid, end}: + end = mid + default: + indexTaskWg.Done() + } + } + handleKeys = handleKeys[:0] + } + return nil + }) + } + if err := indexG.Wait(); err != nil { + return errors.Trace(err) + } + + dataTaskCh := make(chan [2]int64) + dataTaskWg := &sync.WaitGroup{} + dataG, dataGCtx := errgroup.WithContext(ctx) + + go func() { + //nolint:staticcheck + //lint:ignore SA2000 + dataTaskWg.Add(1) + dataTaskCh <- [2]int64{0, math.MaxInt64} + dataTaskWg.Wait() + close(dataTaskCh) + }() + + // check data KV + for t := range dataTaskCh { + start, end := t[0], t[1] + pool.ApplyOnErrorGroup(dataG, func() error { + defer dataTaskWg.Done() + + sessionOpts := encode.SessionOptions{ + // TODO: need to find the correct value for SQLMode + SQLMode: mysql.ModeStrictAllTables, + } + encoder, err := kv.NewBaseKVEncoder(&encode.EncodingConfig{ + Table: tbl, + SessionOptions: sessionOpts, + Logger: em.logger, + }) + if err != nil { + return errors.Trace(err) + } + + var handleKeys [][]byte + for start < end { + dataKvRows, err := em.db.QueryContext( + dataGCtx, common.SprintfWithIdentifiers(selectDataConflictKeysReplace, em.schema), + tableName, start, end, rowLimit) + if err != nil { + return errors.Trace(err) + } + + var lastRowID int64 + var previousRawKey, latestValue []byte + var mustKeepKvPairs *kv.Pairs + + for dataKvRows.Next() { + var rawKey, rawValue []byte + if err := dataKvRows.Scan(&lastRowID, &rawKey, &rawValue); err != nil { + return errors.Trace(err) + } + em.logger.Debug("got group raw_key, raw_value from table", + logutil.Key("raw_key", rawKey), + zap.Binary("raw_value", rawValue)) + + if !bytes.Equal(rawKey, previousRawKey) { + previousRawKey = rawKey + // get the latest value of rawKey from downstream TiDB + latestValue, err = fnGetLatest(dataGCtx, rawKey) + if err != nil && !tikverr.IsErrNotFound(err) { + return errors.Trace(err) + } + if latestValue != nil { + handle, err := tablecodec.DecodeRowKey(rawKey) + if err != nil { + return 
errors.Trace(err)
+							}
+							decodedData, _, err := tables.DecodeRawRowData(encoder.SessionCtx.GetExprCtx(),
+								tbl.Meta(), handle, tbl.Cols(), latestValue)
+							if err != nil {
+								return errors.Trace(err)
+							}
+							if !tbl.Meta().HasClusteredIndex() {
+								// for nonclustered PK, need to append handle to decodedData for AddRecord
+								decodedData = append(decodedData, types.NewIntDatum(handle.IntValue()))
+							}
+							_, err = encoder.AddRecord(decodedData)
+							if err != nil {
+								return errors.Trace(err)
+							}
+							// calculate the new mustKeepKvPairs corresponding to the new rawKey
+							// find out all the KV pairs that are contained in the data KV
+							mustKeepKvPairs = encoder.SessionCtx.TakeKvPairs()
+						}
+					}
+
+					// if the latest value of rawKey is equal to rawValue, that means this data KV is maintained in downstream TiDB
+					// if not, that means this data KV has been deleted due to an overwritten index KV
+					if bytes.Equal(rawValue, latestValue) {
+						continue
+					}
+
+					handle, err := tablecodec.DecodeRowKey(rawKey)
+					if err != nil {
+						return errors.Trace(err)
+					}
+					decodedData, _, err := tables.DecodeRawRowData(encoder.SessionCtx.GetExprCtx(),
+						tbl.Meta(), handle, tbl.Cols(), rawValue)
+					if err != nil {
+						return errors.Trace(err)
+					}
+					if !tbl.Meta().HasClusteredIndex() {
+						// for nonclustered PK, need to append handle to decodedData for AddRecord
+						decodedData = append(decodedData, types.NewIntDatum(handle.IntValue()))
+					}
+					_, err = encoder.AddRecord(decodedData)
+					if err != nil {
+						return errors.Trace(err)
+					}
+
+					// find out all the KV pairs that are contained in the data KV
+					kvPairs := encoder.SessionCtx.TakeKvPairs()
+					for _, kvPair := range kvPairs.Pairs {
+						em.logger.Debug("got encoded KV",
+							logutil.Key("key", kvPair.Key),
+							zap.Binary("value", kvPair.Val))
+						kvLatestValue, err := fnGetLatest(dataGCtx, kvPair.Key)
+						if tikverr.IsErrNotFound(err) {
+							continue
+						}
+						if err != nil {
+							return errors.Trace(err)
+						}
+
+						// if the value of the KV pair is not equal to the latest value of the key of the KV pair,
+						// that means the value of the KV pair has been overwritten, so it needs no extra operation
+						if !bytes.Equal(kvLatestValue, kvPair.Val) {
+							continue
+						}
+
+						// if the KV pair is contained in mustKeepKvPairs, we cannot delete it
+						// if not, delete the KV pair
+						if mustKeepKvPairs != nil {
+							isContained := slices.ContainsFunc(mustKeepKvPairs.Pairs, func(mustKeepKvPair common.KvPair) bool {
+								return bytes.Equal(mustKeepKvPair.Key, kvPair.Key) && bytes.Equal(mustKeepKvPair.Val, kvPair.Val)
+							})
+							if isContained {
+								continue
+							}
+						}
+
+						handleKeys = append(handleKeys, kvPair.Key)
+					}
+				}
+				if err := dataKvRows.Err(); err != nil {
+					_ = dataKvRows.Close()
+					return errors.Trace(err)
+				}
+				if err := dataKvRows.Close(); err != nil {
+					return errors.Trace(err)
+				}
+				if len(handleKeys) == 0 {
+					break
+				}
+				if err := fnDeleteKeys(dataGCtx, handleKeys); err != nil {
+					return errors.Trace(err)
+				}
+				start = lastRowID + 1
+				// If the remaining tasks cannot be processed at once, split the task
+				// into two subtasks and send one of them to the other idle worker if possible.
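+				// A sketch of the split below: the remaining range [start, end)
+				// shrinks to [start, mid) for this worker, while [mid, end) is
+				// handed to another idle worker when the non-blocking send succeeds.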
+				if end-start > rowLimit {
+					mid := start + (end-start)/2
+					dataTaskWg.Add(1)
+					select {
+					case dataTaskCh <- [2]int64{mid, end}:
+						end = mid
+					default:
+						dataTaskWg.Done()
+					}
+				}
+				handleKeys = handleKeys[:0]
+			}
+			return nil
+		})
+	}
+	if err := dataG.Wait(); err != nil {
+		return errors.Trace(err)
+	}
+
+	hasRow := true
+	for {
+		// delete the additionally inserted rows for nonclustered PK
+		if err := exec.Transact(ctx, "delete additionally inserted rows for conflict detection 'replace' mode",
+			func(c context.Context, txn *sql.Tx) error {
+				sb := &strings.Builder{}
+				_, err2 := common.FprintfWithIdentifiers(sb, deleteNullDataRow, em.schema)
+				if err2 != nil {
+					return errors.Trace(err2)
+				}
+				result, err := txn.ExecContext(c, sb.String(), rowLimit)
+				if err != nil {
+					return errors.Trace(err)
+				}
+				affected, err := result.RowsAffected()
+				if err != nil {
+					return errors.Trace(err)
+				}
+				if affected == 0 {
+					hasRow = false
+				}
+				return nil
+			}); err != nil {
+			return errors.Trace(err)
+		}
+		if !hasRow {
+			break
+		}
+	}
+
+	return nil
+}
+
+// RecordDuplicateCount reduces the counter of "duplicate entry" errors.
+// Currently, the count will not be shared across multiple lightning instances.
+func (em *ErrorManager) RecordDuplicateCount(cnt int64) error {
+	if em.conflictErrRemain.Sub(cnt) < 0 {
+		threshold := em.configConflict.Threshold
+		return errors.Errorf(
+			"The number of conflict errors exceeds the threshold configured by `conflict.threshold`: '%d'",
+			threshold)
+	}
+	return nil
+}
+
+// RecordDuplicate records a "duplicate entry" error so the user can query it later.
+// Currently, the error will not be shared across multiple lightning instances.
+func (em *ErrorManager) RecordDuplicate(
+	ctx context.Context,
+	logger log.Logger,
+	tableName string,
+	path string,
+	offset int64,
+	errMsg string,
+	rowID int64,
+	rowData string,
+) error {
+	if em.conflictErrRemain.Dec() < 0 {
+		threshold := em.configConflict.Threshold
+		return errors.Errorf(
+			"The number of conflict errors exceeds the threshold configured by `conflict.threshold`: '%d'",
+			threshold)
+	}
+	if em.db == nil {
+		return nil
+	}
+	if em.conflictRecordsRemain.Add(-1) < 0 {
+		return nil
+	}
+
+	return em.recordDuplicate(ctx, logger, tableName, path, offset, errMsg, rowID, rowData)
+}
+
+func (em *ErrorManager) recordDuplicate(
+	ctx context.Context,
+	logger log.Logger,
+	tableName string,
+	path string,
+	offset int64,
+	errMsg string,
+	rowID int64,
+	rowData string,
+) error {
+	exec := common.SQLWithRetry{
+		DB:           em.db,
+		Logger:       logger,
+		HideQueryLog: redact.NeedRedact(),
+	}
+	return exec.Exec(ctx, "insert duplicate record",
+		common.SprintfWithIdentifiers(insertIntoDupRecord, em.schema),
+		em.taskID,
+		tableName,
+		path,
+		offset,
+		errMsg,
+		rowID,
+		rowData,
+	)
+}
+
+// RecordDuplicateOnce records a "duplicate entry" error so the user can query it later.
+// Currently, the error will not be shared across multiple lightning instances.
+// Different from RecordDuplicate, this function is used when conflict.strategy
+// is "error" and will only write the first conflict error to the table.
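+// The write is best-effort: at most one record is inserted (guarded by a
+// CompareAndSwap on recordErrorOnce), and an insert failure is only logged,
+// not returned.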
+func (em *ErrorManager) RecordDuplicateOnce(
+	ctx context.Context,
+	logger log.Logger,
+	tableName string,
+	path string,
+	offset int64,
+	errMsg string,
+	rowID int64,
+	rowData string,
+) {
+	ok := em.recordErrorOnce.CompareAndSwap(false, true)
+	if !ok {
+		return
+	}
+	err := em.recordDuplicate(ctx, logger, tableName, path, offset, errMsg, rowID, rowData)
+	if err != nil {
+		logger.Warn("failed to record the duplicate entry error", zap.Error(err))
+	}
+}
+
+func (em *ErrorManager) errorCount(typeVal func(*config.MaxError) int64) int64 {
+	cfgVal := typeVal(em.configError)
+	val := typeVal(&em.remainingError)
+	if val < 0 {
+		val = 0
+	}
+	return cfgVal - val
+}
+
+func (em *ErrorManager) typeErrors() int64 {
+	return em.errorCount(func(maxError *config.MaxError) int64 {
+		return maxError.Type.Load()
+	})
+}
+
+func (em *ErrorManager) syntaxError() int64 {
+	return em.errorCount(func(maxError *config.MaxError) int64 {
+		return maxError.Syntax.Load()
+	})
+}
+
+func (em *ErrorManager) conflictError() int64 {
+	val := em.conflictErrRemain.Load()
+	if val < 0 {
+		val = 0
+	}
+	return em.configConflict.Threshold - val
+}
+
+func (em *ErrorManager) charsetError() int64 {
+	return em.errorCount(func(maxError *config.MaxError) int64 {
+		return maxError.Charset.Load()
+	})
+}
+
+// HasError returns true if any type of error has been recorded.
+func (em *ErrorManager) HasError() bool {
+	return em.typeErrors() > 0 || em.syntaxError() > 0 ||
+		em.charsetError() > 0 || em.conflictError() > 0
+}
+
+// LogErrorDetails logs a warning for each error type that has recorded errors.
+func (em *ErrorManager) LogErrorDetails() {
+	fmtErrMsg := func(cnt int64, errType, tblName string) string {
+		return fmt.Sprintf("Detect %d %s errors in total, please refer to table %s for more details",
+			cnt, errType, em.fmtTableName(tblName))
+	}
+	if errCnt := em.typeErrors(); errCnt > 0 {
+		em.logger.Warn(fmtErrMsg(errCnt, "data type", typeErrorTableName))
+	}
+	if errCnt := em.syntaxError(); errCnt > 0 {
+		em.logger.Warn(fmtErrMsg(errCnt, "data syntax", syntaxErrorTableName))
+	}
+	if errCnt := em.charsetError(); errCnt > 0 {
+		// TODO: add charset table name
+		em.logger.Warn(fmtErrMsg(errCnt, "data charset", ""))
+	}
+	errCnt := em.conflictError()
+	if errCnt > 0 && (em.conflictV1Enabled || em.conflictV2Enabled) {
+		em.logger.Warn(fmtErrMsg(errCnt, "conflict", ConflictViewName))
+	}
+}
+
+func (em *ErrorManager) fmtTableName(t string) string {
+	return common.UniqueTable(em.schema, t)
+}
+
+// Output renders a table which contains an error summary for each error type.
+func (em *ErrorManager) Output() string {
+	if !em.HasError() {
+		return ""
+	}
+
+	t := table.NewWriter()
+	t.AppendHeader(table.Row{"#", "Error Type", "Error Count", "Error Data Table"})
+	t.SetColumnConfigs([]table.ColumnConfig{
+		{Name: "#", WidthMax: 6},
+		{Name: "Error Type", WidthMax: 20},
+		{Name: "Error Count", WidthMax: 12},
+		{Name: "Error Data Table", WidthMax: 42},
+	})
+	t.SetRowPainter(func(table.Row) text.Colors {
+		return text.Colors{text.FgRed}
+	})
+
+	count := 0
+	if errCnt := em.typeErrors(); errCnt > 0 {
+		count++
+		t.AppendRow(table.Row{count, "Data Type", errCnt, em.fmtTableName(typeErrorTableName)})
+	}
+	if errCnt := em.syntaxError(); errCnt > 0 {
+		count++
+		t.AppendRow(table.Row{count, "Data Syntax", errCnt, em.fmtTableName(syntaxErrorTableName)})
+	}
+	if errCnt := em.charsetError(); errCnt > 0 {
+		count++
+		// recording charset errors is not supported yet.
+ t.AppendRow(table.Row{count, "Charset Error", errCnt, ""}) + } + if errCnt := em.conflictError(); errCnt > 0 { + count++ + if em.conflictV1Enabled || em.conflictV2Enabled { + t.AppendRow(table.Row{count, "Unique Key Conflict", errCnt, em.fmtTableName(ConflictViewName)}) + } + } + + res := "\nImport Data Error Summary: \n" + res += t.Render() + res += "\n" + + return res +} diff --git a/pkg/lightning/errormanager/errormanager_test.go b/pkg/lightning/errormanager/errormanager_test.go new file mode 100644 index 0000000000000..0dc0c2368912d --- /dev/null +++ b/pkg/lightning/errormanager/errormanager_test.go @@ -0,0 +1,710 @@ +// Copyright 2021 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package errormanager + +import ( + "bytes" + "context" + "database/sql/driver" + "fmt" + "io" + "testing" + + "github.com/DATA-DOG/go-sqlmock" + "github.com/pingcap/tidb/pkg/lightning/backend/encode" + tidbkv "github.com/pingcap/tidb/pkg/lightning/backend/kv" + "github.com/pingcap/tidb/pkg/lightning/config" + "github.com/pingcap/tidb/pkg/lightning/log" + "github.com/pingcap/tidb/pkg/meta/model" + pmodel "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/parser/mysql" + "github.com/pingcap/tidb/pkg/table/tables" + "github.com/pingcap/tidb/pkg/types" + "github.com/pingcap/tidb/pkg/util" + "github.com/stretchr/testify/require" + "go.uber.org/atomic" +) + +func TestInit(t *testing.T) { + db, mock, err := sqlmock.New() + require.NoError(t, err) + + cfg := config.NewConfig() + cfg.TikvImporter.Backend = config.BackendLocal + cfg.TikvImporter.DuplicateResolution = config.NoneOnDup + cfg.Conflict.Strategy = config.ReplaceOnDup + cfg.Conflict.PrecheckConflictBeforeImport = true + cfg.App.MaxError.Type.Store(10) + cfg.Conflict.Threshold = 20 + cfg.App.TaskInfoSchemaName = "lightning_errors" + + em := New(db, cfg, log.L()) + require.True(t, em.conflictV1Enabled) + require.True(t, em.conflictV2Enabled) + require.Equal(t, cfg.App.MaxError.Type.Load(), em.remainingError.Type.Load()) + require.Equal(t, cfg.Conflict.Threshold, em.conflictErrRemain.Load()) + + em.remainingError.Type.Store(0) + em.conflictV1Enabled = false + em.conflictV2Enabled = false + ctx := context.Background() + err = em.Init(ctx) + require.NoError(t, err) + + em.conflictV1Enabled = true + mock.ExpectExec("CREATE SCHEMA IF NOT EXISTS `lightning_errors`;"). + WillReturnResult(sqlmock.NewResult(1, 1)) + mock.ExpectExec("CREATE TABLE IF NOT EXISTS `lightning_errors`\\.conflict_error_v3.*"). + WillReturnResult(sqlmock.NewResult(2, 1)) + mock.ExpectExec("CREATE OR REPLACE VIEW `lightning_errors`\\.conflict_view.*"). + WillReturnResult(sqlmock.NewResult(3, 1)) + err = em.Init(ctx) + require.NoError(t, err) + require.NoError(t, mock.ExpectationsWereMet()) + + em.conflictV2Enabled = true + em.remainingError.Type.Store(1) + mock.ExpectExec("CREATE SCHEMA IF NOT EXISTS `lightning_errors`.*"). 
+ WillReturnResult(sqlmock.NewResult(5, 1)) + mock.ExpectExec("CREATE TABLE IF NOT EXISTS `lightning_errors`\\.type_error_v1.*"). + WillReturnResult(sqlmock.NewResult(6, 1)) + mock.ExpectExec("CREATE TABLE IF NOT EXISTS `lightning_errors`\\.conflict_error_v3.*"). + WillReturnResult(sqlmock.NewResult(7, 1)) + mock.ExpectExec("CREATE TABLE IF NOT EXISTS `lightning_errors`\\.conflict_records.*"). + WillReturnResult(sqlmock.NewResult(7, 1)) + mock.ExpectExec("CREATE OR REPLACE VIEW `lightning_errors`\\.conflict_view.*"). + WillReturnResult(sqlmock.NewResult(7, 1)) + err = em.Init(ctx) + require.NoError(t, err) + require.NoError(t, mock.ExpectationsWereMet()) +} + +type mockDriver struct { + driver.Driver + totalRows int64 +} + +func (m mockDriver) Open(_ string) (driver.Conn, error) { + return mockConn{totalRows: m.totalRows}, nil +} + +type mockConn struct { + driver.Conn + driver.ExecerContext + driver.QueryerContext + totalRows int64 +} + +func (c mockConn) ExecContext(_ context.Context, _ string, _ []driver.NamedValue) (driver.Result, error) { + return sqlmock.NewResult(1, 1), nil +} + +func (mockConn) Close() error { return nil } + +type mockRows struct { + driver.Rows + start int64 + end int64 +} + +func (r *mockRows) Columns() []string { + return []string{"_tidb_rowid", "raw_handle", "raw_row"} +} + +func (r *mockRows) Close() error { return nil } + +func (r *mockRows) Next(dest []driver.Value) error { + if r.start >= r.end { + return io.EOF + } + dest[0] = r.start // _tidb_rowid + dest[1] = []byte{} // raw_handle + dest[2] = []byte{} // raw_row + r.start++ + return nil +} + +func (c mockConn) QueryContext(_ context.Context, query string, args []driver.NamedValue) (driver.Rows, error) { + expectedQuery := "SELECT _tidb_rowid, raw_handle, raw_row.*" + if err := sqlmock.QueryMatcherRegexp.Match(expectedQuery, query); err != nil { + return &mockRows{}, nil + } + if len(args) != 4 { + return &mockRows{}, nil + } + // args are tableName, start, end, and limit. 
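+	// Clamp the requested scan window to the mocked table's row domain
+	// [1, totalRows] before applying the LIMIT, mirroring how the real query
+	// pages through _tidb_rowid ranges.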
+ start := args[1].Value.(int64) + if start < 1 { + start = 1 + } + end := args[2].Value.(int64) + if end > c.totalRows+1 { + end = c.totalRows + 1 + } + limit := args[3].Value.(int64) + if start+limit < end { + end = start + limit + } + return &mockRows{start: start, end: end}, nil +} + +func TestReplaceConflictOneKey(t *testing.T) { + column1 := &model.ColumnInfo{ + ID: 1, + Name: pmodel.NewCIStr("a"), + Offset: 0, + DefaultValue: 0, + FieldType: *types.NewFieldType(mysql.TypeLong), + Hidden: true, + State: model.StatePublic, + } + column1.AddFlag(mysql.PriKeyFlag) + + column2 := &model.ColumnInfo{ + ID: 2, + Name: pmodel.NewCIStr("b"), + Offset: 1, + DefaultValue: 0, + FieldType: *types.NewFieldType(mysql.TypeLong), + Hidden: true, + State: model.StatePublic, + } + + column3 := &model.ColumnInfo{ + ID: 3, + Name: pmodel.NewCIStr("c"), + Offset: 2, + DefaultValue: 0, + FieldType: *types.NewFieldType(mysql.TypeBlob), + Hidden: true, + State: model.StatePublic, + } + + index := &model.IndexInfo{ + ID: 1, + Name: pmodel.NewCIStr("key_b"), + Table: pmodel.NewCIStr(""), + Columns: []*model.IndexColumn{ + { + Name: pmodel.NewCIStr("b"), + Offset: 1, + Length: -1, + }}, + Unique: false, + Primary: false, + State: model.StatePublic, + } + + table := &model.TableInfo{ + ID: 104, + Name: pmodel.NewCIStr("a"), + Charset: "utf8mb4", + Collate: "utf8mb4_bin", + Columns: []*model.ColumnInfo{column1, column2, column3}, + Indices: []*model.IndexInfo{index}, + PKIsHandle: true, + State: model.StatePublic, + } + + tbl, err := tables.TableFromMeta(tidbkv.NewPanickingAllocators(table.SepAutoInc()), table) + require.NoError(t, err) + + sessionOpts := encode.SessionOptions{ + SQLMode: mysql.ModeStrictAllTables, + Timestamp: 1234567890, + } + + encoder, err := tidbkv.NewBaseKVEncoder(&encode.EncodingConfig{ + Table: tbl, + SessionOptions: sessionOpts, + Logger: log.L(), + }) + require.NoError(t, err) + encoder.SessionCtx.GetTableCtx().GetRowEncodingConfig().RowEncoder.Enable = true + + data1 := []types.Datum{ + types.NewIntDatum(1), + types.NewIntDatum(6), + types.NewStringDatum("1.csv"), + } + data2 := []types.Datum{ + types.NewIntDatum(2), + types.NewIntDatum(6), + types.NewStringDatum("2.csv"), + } + data3 := []types.Datum{ + types.NewIntDatum(3), + types.NewIntDatum(3), + types.NewStringDatum("3.csv"), + } + data4 := []types.Datum{ + types.NewIntDatum(3), + types.NewIntDatum(4), + types.NewStringDatum("4.csv"), + } + data5 := []types.Datum{ + types.NewIntDatum(5), + types.NewIntDatum(4), + types.NewStringDatum("5.csv"), + } + _, err = encoder.AddRecord(data1) + require.NoError(t, err) + _, err = encoder.AddRecord(data2) + require.NoError(t, err) + _, err = encoder.AddRecord(data3) + require.NoError(t, err) + _, err = encoder.AddRecord(data4) + require.NoError(t, err) + _, err = encoder.AddRecord(data5) + require.NoError(t, err) + kvPairs := encoder.SessionCtx.TakeKvPairs() + + data1IndexKey := kvPairs.Pairs[7].Key + data1IndexValue := kvPairs.Pairs[7].Val + data1RowKey := kvPairs.Pairs[4].Key + data1RowValue := kvPairs.Pairs[4].Val + data2RowValue := kvPairs.Pairs[6].Val + + db, mockDB, err := sqlmock.New() + require.NoError(t, err) + defer func() { + _ = db.Close() + }() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mockDB.ExpectExec("CREATE SCHEMA IF NOT EXISTS `lightning_task_info`"). + WillReturnResult(sqlmock.NewResult(1, 1)) + mockDB.ExpectExec("CREATE TABLE IF NOT EXISTS `lightning_task_info`\\.conflict_error_v3.*"). 
+ WillReturnResult(sqlmock.NewResult(2, 1)) + mockDB.ExpectExec("CREATE OR REPLACE VIEW `lightning_task_info`\\.conflict_view.*"). + WillReturnResult(sqlmock.NewResult(3, 1)) + mockDB.ExpectQuery("\\QSELECT _tidb_rowid, raw_key, index_name, raw_value, raw_handle FROM `lightning_task_info`.conflict_error_v3 WHERE table_name = ? AND kv_type = 0 AND _tidb_rowid >= ? and _tidb_rowid < ? ORDER BY _tidb_rowid LIMIT ?\\E"). + WillReturnRows(sqlmock.NewRows([]string{"_tidb_rowid", "raw_key", "index_name", "raw_value", "raw_handle"})) + mockDB.ExpectQuery("\\QSELECT _tidb_rowid, raw_key, raw_value FROM `lightning_task_info`.conflict_error_v3 WHERE table_name = ? AND kv_type <> 0 AND _tidb_rowid >= ? and _tidb_rowid < ? ORDER BY _tidb_rowid LIMIT ?\\E"). + WillReturnRows(sqlmock.NewRows([]string{"_tidb_rowid", "raw_key", "raw_value"}). + AddRow(1, data1RowKey, data1RowValue). + AddRow(2, data1RowKey, data2RowValue)) + for i := 0; i < 2; i++ { + mockDB.ExpectQuery("\\QSELECT _tidb_rowid, raw_key, raw_value FROM `lightning_task_info`.conflict_error_v3 WHERE table_name = ? AND kv_type <> 0 AND _tidb_rowid >= ? and _tidb_rowid < ? ORDER BY _tidb_rowid LIMIT ?\\E"). + WillReturnRows(sqlmock.NewRows([]string{"_tidb_rowid", "raw_key", "raw_value"})) + } + mockDB.ExpectBegin() + mockDB.ExpectExec("DELETE FROM `lightning_task_info`\\.conflict_error_v3.*"). + WillReturnResult(sqlmock.NewResult(0, 0)) + mockDB.ExpectCommit() + + cfg := config.NewConfig() + cfg.Conflict.Strategy = config.ReplaceOnDup + cfg.TikvImporter.Backend = config.BackendLocal + cfg.App.TaskInfoSchemaName = "lightning_task_info" + em := New(db, cfg, log.L()) + err = em.Init(ctx) + require.NoError(t, err) + + fnGetLatestCount := atomic.NewInt32(0) + fnDeleteKeyCount := atomic.NewInt32(0) + pool := util.NewWorkerPool(16, "resolve duplicate rows by replace") + err = em.ReplaceConflictKeys( + ctx, tbl, "test", pool, + func(ctx context.Context, key []byte) ([]byte, error) { + fnGetLatestCount.Add(1) + switch { + case bytes.Equal(key, data1IndexKey): + return data1IndexValue, nil + case bytes.Equal(key, data1RowKey): + return data1RowValue, nil + default: + return nil, fmt.Errorf("key %v is not expected", key) + } + }, + func(ctx context.Context, keys [][]byte) error { + fnDeleteKeyCount.Add(int32(len(keys))) + for _, key := range keys { + if !bytes.Equal(key, data1IndexKey) { + return fmt.Errorf("key %v is not expected", key) + } + } + return nil + }, + ) + require.NoError(t, err) + require.Equal(t, int32(3), fnGetLatestCount.Load()) + require.Equal(t, int32(1), fnDeleteKeyCount.Load()) + err = mockDB.ExpectationsWereMet() + require.NoError(t, err) +} + +func TestReplaceConflictOneUniqueKey(t *testing.T) { + column1 := &model.ColumnInfo{ + ID: 1, + Name: pmodel.NewCIStr("a"), + Offset: 0, + DefaultValue: 0, + FieldType: *types.NewFieldType(mysql.TypeLong), + Hidden: true, + State: model.StatePublic, + } + column1.AddFlag(mysql.PriKeyFlag) + + column2 := &model.ColumnInfo{ + ID: 2, + Name: pmodel.NewCIStr("b"), + Offset: 1, + DefaultValue: 0, + FieldType: *types.NewFieldType(mysql.TypeLong), + Hidden: true, + State: model.StatePublic, + } + column2.AddFlag(mysql.UniqueKeyFlag) + + column3 := &model.ColumnInfo{ + ID: 3, + Name: pmodel.NewCIStr("c"), + Offset: 2, + DefaultValue: 0, + FieldType: *types.NewFieldType(mysql.TypeBlob), + Hidden: true, + State: model.StatePublic, + } + + index := &model.IndexInfo{ + ID: 1, + Name: pmodel.NewCIStr("uni_b"), + Table: pmodel.NewCIStr(""), + Columns: []*model.IndexColumn{ + { + Name: pmodel.NewCIStr("b"), + 
Offset: 1, + Length: -1, + }}, + Unique: true, + Primary: false, + State: model.StatePublic, + } + + table := &model.TableInfo{ + ID: 104, + Name: pmodel.NewCIStr("a"), + Charset: "utf8mb4", + Collate: "utf8mb4_bin", + Columns: []*model.ColumnInfo{column1, column2, column3}, + Indices: []*model.IndexInfo{index}, + PKIsHandle: true, + State: model.StatePublic, + } + + tbl, err := tables.TableFromMeta(tidbkv.NewPanickingAllocators(table.SepAutoInc()), table) + require.NoError(t, err) + + sessionOpts := encode.SessionOptions{ + SQLMode: mysql.ModeStrictAllTables, + Timestamp: 1234567890, + } + + encoder, err := tidbkv.NewBaseKVEncoder(&encode.EncodingConfig{ + Table: tbl, + SessionOptions: sessionOpts, + Logger: log.L(), + }) + require.NoError(t, err) + encoder.SessionCtx.GetTableCtx().GetRowEncodingConfig().RowEncoder.Enable = true + + data1 := []types.Datum{ + types.NewIntDatum(1), + types.NewIntDatum(6), + types.NewStringDatum("1.csv"), + } + data2 := []types.Datum{ + types.NewIntDatum(2), + types.NewIntDatum(6), + types.NewStringDatum("2.csv"), + } + data3 := []types.Datum{ + types.NewIntDatum(3), + types.NewIntDatum(3), + types.NewStringDatum("3.csv"), + } + data4 := []types.Datum{ + types.NewIntDatum(3), + types.NewIntDatum(4), + types.NewStringDatum("4.csv"), + } + data5 := []types.Datum{ + types.NewIntDatum(5), + types.NewIntDatum(4), + types.NewStringDatum("5.csv"), + } + _, err = encoder.AddRecord(data1) + require.NoError(t, err) + _, err = encoder.AddRecord(data2) + require.NoError(t, err) + _, err = encoder.AddRecord(data3) + require.NoError(t, err) + _, err = encoder.AddRecord(data4) + require.NoError(t, err) + _, err = encoder.AddRecord(data5) + require.NoError(t, err) + kvPairs := encoder.SessionCtx.TakeKvPairs() + + data1IndexKey := kvPairs.Pairs[7].Key + data3IndexKey := kvPairs.Pairs[1].Key + data1IndexValue := kvPairs.Pairs[7].Val + data2IndexValue := kvPairs.Pairs[9].Val + data3IndexValue := kvPairs.Pairs[1].Val + data4IndexValue := kvPairs.Pairs[3].Val + data1RowKey := kvPairs.Pairs[4].Key + data2RowKey := kvPairs.Pairs[8].Key + data3RowKey := kvPairs.Pairs[0].Key + data4RowKey := kvPairs.Pairs[2].Key + data1RowValue := kvPairs.Pairs[4].Val + data2RowValue := kvPairs.Pairs[8].Val + data3RowValue := kvPairs.Pairs[6].Val + data4RowValue := kvPairs.Pairs[2].Val + + db, mockDB, err := sqlmock.New() + require.NoError(t, err) + defer func() { + _ = db.Close() + }() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mockDB.ExpectExec("CREATE SCHEMA IF NOT EXISTS `lightning_task_info`"). + WillReturnResult(sqlmock.NewResult(1, 1)) + mockDB.ExpectExec("CREATE TABLE IF NOT EXISTS `lightning_task_info`\\.conflict_error_v3.*"). + WillReturnResult(sqlmock.NewResult(2, 1)) + mockDB.ExpectExec("CREATE OR REPLACE VIEW `lightning_task_info`\\.conflict_view.*"). + WillReturnResult(sqlmock.NewResult(3, 1)) + mockDB.ExpectQuery("\\QSELECT _tidb_rowid, raw_key, index_name, raw_value, raw_handle FROM `lightning_task_info`.conflict_error_v3 WHERE table_name = ? AND kv_type = 0 AND _tidb_rowid >= ? and _tidb_rowid < ? ORDER BY _tidb_rowid LIMIT ?\\E"). + WillReturnRows(sqlmock.NewRows([]string{"_tidb_rowid", "raw_key", "index_name", "raw_value", "raw_handle"}). + AddRow(1, data1IndexKey, "uni_b", data1IndexValue, data1RowKey). + AddRow(2, data1IndexKey, "uni_b", data2IndexValue, data2RowKey). + AddRow(3, data3IndexKey, "uni_b", data3IndexValue, data3RowKey). 
+ AddRow(4, data3IndexKey, "uni_b", data4IndexValue, data4RowKey)) + mockDB.ExpectBegin() + mockDB.ExpectExec("INSERT IGNORE INTO `lightning_task_info`\\.conflict_error_v3.*"). + WithArgs(0, "test", nil, nil, data2RowKey, data2RowValue, 2, + 0, "test", nil, nil, data4RowKey, data4RowValue, 2). + WillReturnResult(driver.ResultNoRows) + mockDB.ExpectCommit() + for i := 0; i < 2; i++ { + mockDB.ExpectQuery("\\QSELECT _tidb_rowid, raw_key, index_name, raw_value, raw_handle FROM `lightning_task_info`.conflict_error_v3 WHERE table_name = ? AND kv_type = 0 AND _tidb_rowid >= ? and _tidb_rowid < ? ORDER BY _tidb_rowid LIMIT ?\\E"). + WillReturnRows(sqlmock.NewRows([]string{"_tidb_rowid", "raw_key", "index_name", "raw_value", "raw_handle"})) + } + mockDB.ExpectQuery("\\QSELECT _tidb_rowid, raw_key, raw_value FROM `lightning_task_info`.conflict_error_v3 WHERE table_name = ? AND kv_type <> 0 AND _tidb_rowid >= ? and _tidb_rowid < ? ORDER BY _tidb_rowid LIMIT ?\\E"). + WillReturnRows(sqlmock.NewRows([]string{"_tidb_rowid", "raw_key", "raw_value"}). + AddRow(1, data1RowKey, data1RowValue). + AddRow(2, data1RowKey, data3RowValue)) + for i := 0; i < 2; i++ { + mockDB.ExpectQuery("\\QSELECT _tidb_rowid, raw_key, raw_value FROM `lightning_task_info`.conflict_error_v3 WHERE table_name = ? AND kv_type <> 0 AND _tidb_rowid >= ? and _tidb_rowid < ? ORDER BY _tidb_rowid LIMIT ?\\E"). + WillReturnRows(sqlmock.NewRows([]string{"_tidb_rowid", "raw_key", "raw_value"})) + } + mockDB.ExpectBegin() + mockDB.ExpectExec("DELETE FROM `lightning_task_info`\\.conflict_error_v3.*"). + WillReturnResult(sqlmock.NewResult(0, 2)) + mockDB.ExpectCommit() + mockDB.ExpectBegin() + mockDB.ExpectExec("DELETE FROM `lightning_task_info`\\.conflict_error_v3.*"). + WillReturnResult(sqlmock.NewResult(0, 0)) + mockDB.ExpectCommit() + + cfg := config.NewConfig() + cfg.Conflict.Strategy = config.ReplaceOnDup + cfg.TikvImporter.Backend = config.BackendLocal + cfg.App.TaskInfoSchemaName = "lightning_task_info" + em := New(db, cfg, log.L()) + err = em.Init(ctx) + require.NoError(t, err) + + fnGetLatestCount := atomic.NewInt32(0) + fnDeleteKeyCount := atomic.NewInt32(0) + pool := util.NewWorkerPool(16, "resolve duplicate rows by replace") + err = em.ReplaceConflictKeys( + ctx, tbl, "test", pool, + func(ctx context.Context, key []byte) ([]byte, error) { + fnGetLatestCount.Add(1) + switch { + case bytes.Equal(key, data1IndexKey): + return data1IndexValue, nil + case bytes.Equal(key, data3IndexKey): + return data3IndexValue, nil + case bytes.Equal(key, data1RowKey): + return data1RowValue, nil + case bytes.Equal(key, data2RowKey): + return data2RowValue, nil + case bytes.Equal(key, data4RowKey): + return data4RowValue, nil + default: + return nil, fmt.Errorf("key %v is not expected", key) + } + }, + func(ctx context.Context, keys [][]byte) error { + fnDeleteKeyCount.Add(int32(len(keys))) + for _, key := range keys { + if !bytes.Equal(key, data1IndexKey) && !bytes.Equal(key, data2RowKey) && !bytes.Equal(key, data4RowKey) { + return fmt.Errorf("key %v is not expected", key) + } + } + return nil + }, + ) + require.NoError(t, err) + require.Equal(t, int32(9), fnGetLatestCount.Load()) + require.Equal(t, int32(3), fnDeleteKeyCount.Load()) + err = mockDB.ExpectationsWereMet() + require.NoError(t, err) +} + +func TestErrorMgrHasError(t *testing.T) { + cfg := &config.Config{} + cfg.App.MaxError = config.MaxError{ + Syntax: *atomic.NewInt64(100), + Charset: *atomic.NewInt64(100), + Type: *atomic.NewInt64(100), + } + cfg.Conflict.Threshold = 100 + em := 
&ErrorManager{ + configError: &cfg.App.MaxError, + remainingError: cfg.App.MaxError, + configConflict: &cfg.Conflict, + conflictErrRemain: atomic.NewInt64(100), + } + + // no field changes, should return false + require.False(t, em.HasError()) + + // change single field + em.remainingError.Syntax.Sub(1) + require.True(t, em.HasError()) + + em.remainingError = cfg.App.MaxError + em.remainingError.Charset.Sub(1) + require.True(t, em.HasError()) + + em.remainingError = cfg.App.MaxError + em.remainingError.Type.Sub(1) + require.True(t, em.HasError()) + + em.remainingError = cfg.App.MaxError + em.conflictErrRemain.Sub(1) + require.True(t, em.HasError()) + + // change multiple keys + em.remainingError = cfg.App.MaxError + em.remainingError.Syntax.Store(0) + em.remainingError.Charset.Store(0) + em.remainingError.Type.Store(0) + em.conflictErrRemain.Store(0) + require.True(t, em.HasError()) +} + +func TestErrorMgrErrorOutput(t *testing.T) { + cfg := &config.Config{} + cfg.App.MaxError = config.MaxError{ + Syntax: *atomic.NewInt64(100), + Charset: *atomic.NewInt64(100), + Type: *atomic.NewInt64(100), + } + cfg.Conflict.Threshold = 100 + + em := &ErrorManager{ + configError: &cfg.App.MaxError, + remainingError: cfg.App.MaxError, + configConflict: &cfg.Conflict, + conflictErrRemain: atomic.NewInt64(100), + schema: "error_info", + conflictV1Enabled: true, + } + + output := em.Output() + require.Equal(t, output, "") + + em.remainingError.Syntax.Sub(1) + output = em.Output() + expected := "\n" + + "Import Data Error Summary: \n" + + "+---+-------------+-------------+--------------------------------+\n" + + "| # | ERROR TYPE | ERROR COUNT | ERROR DATA TABLE |\n" + + "+---+-------------+-------------+--------------------------------+\n" + + "|\x1b[31m 1 \x1b[0m|\x1b[31m Data Syntax \x1b[0m|\x1b[31m 1 \x1b[0m|\x1b[31m `error_info`.`syntax_error_v1` \x1b[0m|\n" + + "+---+-------------+-------------+--------------------------------+\n" + require.Equal(t, expected, output) + + em.remainingError = cfg.App.MaxError + em.remainingError.Syntax.Sub(10) + em.remainingError.Type.Store(10) + output = em.Output() + expected = "\n" + + "Import Data Error Summary: \n" + + "+---+-------------+-------------+--------------------------------+\n" + + "| # | ERROR TYPE | ERROR COUNT | ERROR DATA TABLE |\n" + + "+---+-------------+-------------+--------------------------------+\n" + + "|\x1b[31m 1 \x1b[0m|\x1b[31m Data Type \x1b[0m|\x1b[31m 90 \x1b[0m|\x1b[31m `error_info`.`type_error_v1` \x1b[0m|\n" + + "|\x1b[31m 2 \x1b[0m|\x1b[31m Data Syntax \x1b[0m|\x1b[31m 10 \x1b[0m|\x1b[31m `error_info`.`syntax_error_v1` \x1b[0m|\n" + + "+---+-------------+-------------+--------------------------------+\n" + require.Equal(t, expected, output) + + // change multiple keys + em.remainingError = cfg.App.MaxError + em.remainingError.Syntax.Store(0) + em.remainingError.Charset.Store(0) + em.remainingError.Type.Store(0) + em.conflictErrRemain.Store(0) + output = em.Output() + expected = "\n" + + "Import Data Error Summary: \n" + + "+---+---------------------+-------------+--------------------------------+\n" + + "| # | ERROR TYPE | ERROR COUNT | ERROR DATA TABLE |\n" + + "+---+---------------------+-------------+--------------------------------+\n" + + "|\x1b[31m 1 \x1b[0m|\x1b[31m Data Type \x1b[0m|\x1b[31m 100 \x1b[0m|\x1b[31m `error_info`.`type_error_v1` \x1b[0m|\n" + + "|\x1b[31m 2 \x1b[0m|\x1b[31m Data Syntax \x1b[0m|\x1b[31m 100 \x1b[0m|\x1b[31m `error_info`.`syntax_error_v1` \x1b[0m|\n" + + "|\x1b[31m 3 \x1b[0m|\x1b[31m Charset Error 
\x1b[0m|\x1b[31m 100 \x1b[0m|\x1b[31m \x1b[0m|\n" + + "|\x1b[31m 4 \x1b[0m|\x1b[31m Unique Key Conflict \x1b[0m|\x1b[31m 100 \x1b[0m|\x1b[31m `error_info`.`conflict_view` \x1b[0m|\n" + + "+---+---------------------+-------------+--------------------------------+\n" + require.Equal(t, expected, output) + + em.conflictV2Enabled = true + em.conflictV1Enabled = false + output = em.Output() + expected = "\n" + + "Import Data Error Summary: \n" + + "+---+---------------------+-------------+--------------------------------+\n" + + "| # | ERROR TYPE | ERROR COUNT | ERROR DATA TABLE |\n" + + "+---+---------------------+-------------+--------------------------------+\n" + + "|\x1b[31m 1 \x1b[0m|\x1b[31m Data Type \x1b[0m|\x1b[31m 100 \x1b[0m|\x1b[31m `error_info`.`type_error_v1` \x1b[0m|\n" + + "|\x1b[31m 2 \x1b[0m|\x1b[31m Data Syntax \x1b[0m|\x1b[31m 100 \x1b[0m|\x1b[31m `error_info`.`syntax_error_v1` \x1b[0m|\n" + + "|\x1b[31m 3 \x1b[0m|\x1b[31m Charset Error \x1b[0m|\x1b[31m 100 \x1b[0m|\x1b[31m \x1b[0m|\n" + + "|\x1b[31m 4 \x1b[0m|\x1b[31m Unique Key Conflict \x1b[0m|\x1b[31m 100 \x1b[0m|\x1b[31m `error_info`.`conflict_view` \x1b[0m|\n" + + "+---+---------------------+-------------+--------------------------------+\n" + require.Equal(t, expected, output) + + em.conflictV2Enabled = true + em.conflictV1Enabled = true + output = em.Output() + expected = "\n" + + "Import Data Error Summary: \n" + + "+---+---------------------+-------------+--------------------------------+\n" + + "| # | ERROR TYPE | ERROR COUNT | ERROR DATA TABLE |\n" + + "+---+---------------------+-------------+--------------------------------+\n" + + "|\x1b[31m 1 \x1b[0m|\x1b[31m Data Type \x1b[0m|\x1b[31m 100 \x1b[0m|\x1b[31m `error_info`.`type_error_v1` \x1b[0m|\n" + + "|\x1b[31m 2 \x1b[0m|\x1b[31m Data Syntax \x1b[0m|\x1b[31m 100 \x1b[0m|\x1b[31m `error_info`.`syntax_error_v1` \x1b[0m|\n" + + "|\x1b[31m 3 \x1b[0m|\x1b[31m Charset Error \x1b[0m|\x1b[31m 100 \x1b[0m|\x1b[31m \x1b[0m|\n" + + "|\x1b[31m 4 \x1b[0m|\x1b[31m Unique Key Conflict \x1b[0m|\x1b[31m 100 \x1b[0m|\x1b[31m `error_info`.`conflict_view` \x1b[0m|\n" + + "+---+---------------------+-------------+--------------------------------+\n" + require.Equal(t, expected, output) +} diff --git a/pkg/lightning/errormanager/resolveconflict_test.go b/pkg/lightning/errormanager/resolveconflict_test.go new file mode 100644 index 0000000000000..7dee78af9a715 --- /dev/null +++ b/pkg/lightning/errormanager/resolveconflict_test.go @@ -0,0 +1,848 @@ +// Copyright 2021 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package errormanager_test + +import ( + "bytes" + "context" + "database/sql/driver" + "fmt" + "testing" + + "github.com/DATA-DOG/go-sqlmock" + "github.com/pingcap/tidb/pkg/ddl" + "github.com/pingcap/tidb/pkg/lightning/backend/encode" + tidbkv "github.com/pingcap/tidb/pkg/lightning/backend/kv" + "github.com/pingcap/tidb/pkg/lightning/config" + "github.com/pingcap/tidb/pkg/lightning/errormanager" + "github.com/pingcap/tidb/pkg/lightning/log" + "github.com/pingcap/tidb/pkg/meta/model" + "github.com/pingcap/tidb/pkg/parser" + "github.com/pingcap/tidb/pkg/parser/ast" + "github.com/pingcap/tidb/pkg/parser/mysql" + "github.com/pingcap/tidb/pkg/sessionctx/variable" + "github.com/pingcap/tidb/pkg/table/tables" + "github.com/pingcap/tidb/pkg/types" + "github.com/pingcap/tidb/pkg/util" + "github.com/pingcap/tidb/pkg/util/mock" + "github.com/stretchr/testify/require" + tikverr "github.com/tikv/client-go/v2/error" + "go.uber.org/atomic" +) + +func TestReplaceConflictMultipleKeysNonclusteredPk(t *testing.T) { + p := parser.New() + node, _, err := p.ParseSQL("create table a (a int primary key nonclustered, b int not null, c int not null, d text, key key_b(b), key key_c(c));") + require.NoError(t, err) + mockSctx := mock.NewContext() + mockSctx.GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOff + info, err := ddl.MockTableInfo(mockSctx, node[0].(*ast.CreateTableStmt), 108) + require.NoError(t, err) + info.State = model.StatePublic + require.False(t, info.PKIsHandle) + tbl, err := tables.TableFromMeta(tidbkv.NewPanickingAllocators(info.SepAutoInc()), info) + require.NoError(t, err) + require.False(t, tbl.Meta().HasClusteredIndex()) + + sessionOpts := encode.SessionOptions{ + SQLMode: mysql.ModeStrictAllTables, + Timestamp: 1234567890, + } + + encoder, err := tidbkv.NewBaseKVEncoder(&encode.EncodingConfig{ + Table: tbl, + SessionOptions: sessionOpts, + Logger: log.L(), + }) + require.NoError(t, err) + encoder.SessionCtx.GetTableCtx().GetRowEncodingConfig().RowEncoder.Enable = true + + data1 := []types.Datum{ + types.NewIntDatum(1), + types.NewIntDatum(1), + types.NewIntDatum(1), + types.NewStringDatum("1.csv"), + types.NewIntDatum(1), + } + data2 := []types.Datum{ + types.NewIntDatum(1), + types.NewIntDatum(1), + types.NewIntDatum(2), + types.NewStringDatum("2.csv"), + types.NewIntDatum(2), + } + data3 := []types.Datum{ + types.NewIntDatum(2), + types.NewIntDatum(2), + types.NewIntDatum(3), + types.NewStringDatum("3.csv"), + types.NewIntDatum(3), + } + data4 := []types.Datum{ + types.NewIntDatum(3), + types.NewIntDatum(3), + types.NewIntDatum(3), + types.NewStringDatum("3.csv"), + types.NewIntDatum(4), + } + data5 := []types.Datum{ + types.NewIntDatum(3), + types.NewIntDatum(3), + types.NewIntDatum(3), + types.NewStringDatum("4.csv"), + types.NewIntDatum(5), + } + data6 := []types.Datum{ + types.NewIntDatum(4), + types.NewIntDatum(4), + types.NewIntDatum(4), + types.NewStringDatum("4.csv"), + types.NewIntDatum(6), + } + data7 := []types.Datum{ + types.NewIntDatum(5), + types.NewIntDatum(4), + types.NewIntDatum(5), + types.NewStringDatum("5.csv"), + types.NewIntDatum(7), + } + _, err = encoder.AddRecord(data1) + require.NoError(t, err) + _, err = encoder.AddRecord(data2) + require.NoError(t, err) + _, err = encoder.AddRecord(data3) + require.NoError(t, err) + _, err = encoder.AddRecord(data4) + require.NoError(t, err) + _, err = encoder.AddRecord(data5) + require.NoError(t, err) + _, err = encoder.AddRecord(data6) + require.NoError(t, err) + _, err = encoder.AddRecord(data7) + 
require.NoError(t, err) + kvPairs := encoder.SessionCtx.TakeKvPairs() + + data2IndexKey := kvPairs.Pairs[5].Key + data2IndexValue := kvPairs.Pairs[5].Val + data6IndexKey := kvPairs.Pairs[17].Key + + data1RowKey := kvPairs.Pairs[0].Key + data2RowKey := kvPairs.Pairs[3].Key + data2RowValue := kvPairs.Pairs[3].Val + data3RowKey := kvPairs.Pairs[6].Key + data3RowValue := kvPairs.Pairs[6].Val + data5RowKey := kvPairs.Pairs[12].Key + data6RowKey := kvPairs.Pairs[15].Key + data6RowValue := kvPairs.Pairs[15].Val + data7RowKey := kvPairs.Pairs[18].Key + data7RowValue := kvPairs.Pairs[18].Val + + data2NonclusteredKey := kvPairs.Pairs[4].Key + data2NonclusteredValue := kvPairs.Pairs[4].Val + data3NonclusteredValue := kvPairs.Pairs[7].Val + data6NonclusteredKey := kvPairs.Pairs[16].Key + data6NonclusteredValue := kvPairs.Pairs[16].Val + data7NonclusteredValue := kvPairs.Pairs[19].Val + + db, mockDB, err := sqlmock.New() + require.NoError(t, err) + defer func() { + _ = db.Close() + }() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mockDB.ExpectExec("CREATE SCHEMA IF NOT EXISTS `lightning_task_info`"). + WillReturnResult(sqlmock.NewResult(1, 1)) + mockDB.ExpectExec("CREATE TABLE IF NOT EXISTS `lightning_task_info`\\.conflict_error_v3.*"). + WillReturnResult(sqlmock.NewResult(2, 1)) + mockDB.ExpectExec("CREATE OR REPLACE VIEW `lightning_task_info`\\.conflict_view.*"). + WillReturnResult(sqlmock.NewResult(3, 1)) + mockDB.ExpectQuery("\\QSELECT _tidb_rowid, raw_key, index_name, raw_value, raw_handle FROM `lightning_task_info`.conflict_error_v3 WHERE table_name = ? AND kv_type = 0 AND _tidb_rowid >= ? and _tidb_rowid < ? ORDER BY _tidb_rowid LIMIT ?\\E"). + WillReturnRows(sqlmock.NewRows([]string{"_tidb_rowid", "raw_key", "index_name", "raw_value", "raw_handle"}). + AddRow(1, data2RowKey, "PRIMARY", data2RowValue, data1RowKey). + AddRow(2, data2RowKey, "PRIMARY", data3NonclusteredValue, data2NonclusteredKey). + AddRow(3, data6RowKey, "PRIMARY", data6RowValue, data5RowKey). + AddRow(4, data6RowKey, "PRIMARY", data7NonclusteredValue, data6NonclusteredKey)) + mockDB.ExpectBegin() + mockDB.ExpectExec("INSERT IGNORE INTO `lightning_task_info`\\.conflict_error_v3.*"). + WithArgs(0, "a", nil, nil, data2NonclusteredKey, data2NonclusteredValue, 2, + 0, "a", nil, nil, data6NonclusteredKey, data6NonclusteredValue, 2). + WillReturnResult(driver.ResultNoRows) + mockDB.ExpectCommit() + for i := 0; i < 2; i++ { + mockDB.ExpectQuery("\\QSELECT _tidb_rowid, raw_key, index_name, raw_value, raw_handle FROM `lightning_task_info`.conflict_error_v3 WHERE table_name = ? AND kv_type = 0 AND _tidb_rowid >= ? and _tidb_rowid < ? ORDER BY _tidb_rowid LIMIT ?\\E"). + WillReturnRows(sqlmock.NewRows([]string{"_tidb_rowid", "raw_key", "index_name", "raw_value", "raw_handle"})) + } + mockDB.ExpectQuery("\\QSELECT _tidb_rowid, raw_key, raw_value FROM `lightning_task_info`.conflict_error_v3 WHERE table_name = ? AND kv_type <> 0 AND _tidb_rowid >= ? and _tidb_rowid < ? ORDER BY _tidb_rowid LIMIT ?\\E"). + WillReturnRows(sqlmock.NewRows([]string{"_tidb_rowid", "raw_key", "raw_value"}). + AddRow(1, data2NonclusteredKey, data2NonclusteredValue). + AddRow(2, data6NonclusteredKey, data6NonclusteredValue)) + for i := 0; i < 2; i++ { + mockDB.ExpectQuery("\\QSELECT _tidb_rowid, raw_key, raw_value FROM `lightning_task_info`.conflict_error_v3 WHERE table_name = ? AND kv_type <> 0 AND _tidb_rowid >= ? and _tidb_rowid < ? ORDER BY _tidb_rowid LIMIT ?\\E"). 
+ WillReturnRows(sqlmock.NewRows([]string{"_tidb_rowid", "raw_key", "raw_value"})) + } + mockDB.ExpectBegin() + mockDB.ExpectExec("DELETE FROM `lightning_task_info`\\.conflict_error_v3.*"). + WillReturnResult(sqlmock.NewResult(0, 2)) + mockDB.ExpectCommit() + mockDB.ExpectBegin() + mockDB.ExpectExec("DELETE FROM `lightning_task_info`\\.conflict_error_v3.*"). + WillReturnResult(sqlmock.NewResult(0, 0)) + mockDB.ExpectCommit() + + cfg := config.NewConfig() + cfg.Conflict.Strategy = config.ReplaceOnDup + cfg.TikvImporter.Backend = config.BackendLocal + cfg.App.TaskInfoSchemaName = "lightning_task_info" + em := errormanager.New(db, cfg, log.L()) + err = em.Init(ctx) + require.NoError(t, err) + + fnGetLatestCount := atomic.NewInt32(0) + fnDeleteKeyCount := atomic.NewInt32(0) + pool := util.NewWorkerPool(16, "resolve duplicate rows by replace") + err = em.ReplaceConflictKeys( + ctx, tbl, "a", pool, + func(ctx context.Context, key []byte) ([]byte, error) { + fnGetLatestCount.Add(1) + switch { + case bytes.Equal(key, data2RowKey): + return data2RowValue, nil + case bytes.Equal(key, data2NonclusteredKey): + if fnGetLatestCount.String() == "3" { + return data2NonclusteredValue, nil + } + return nil, tikverr.ErrNotExist + case bytes.Equal(key, data6RowKey): + return data6RowValue, nil + case bytes.Equal(key, data6NonclusteredKey): + if fnGetLatestCount.String() == "6" { + return data6NonclusteredValue, nil + } + return nil, tikverr.ErrNotExist + case bytes.Equal(key, data2IndexKey): + return data2IndexValue, nil + case bytes.Equal(key, data3RowKey): + return data3RowValue, nil + case bytes.Equal(key, data6IndexKey): + return data3RowValue, nil + case bytes.Equal(key, data7RowKey): + return data7RowValue, nil + default: + return nil, fmt.Errorf("key %v is not expected", key) + } + }, + func(ctx context.Context, keys [][]byte) error { + fnDeleteKeyCount.Add(int32(len(keys))) + for _, key := range keys { + if !bytes.Equal(key, data2NonclusteredKey) && !bytes.Equal(key, data6NonclusteredKey) && !bytes.Equal(key, data2IndexKey) && !bytes.Equal(key, data3RowKey) && !bytes.Equal(key, data6IndexKey) && !bytes.Equal(key, data7RowKey) { + return fmt.Errorf("key %v is not expected", key) + } + } + return nil + }, + ) + require.NoError(t, err) + require.Equal(t, int32(16), fnGetLatestCount.Load()) + require.Equal(t, int32(6), fnDeleteKeyCount.Load()) + err = mockDB.ExpectationsWereMet() + require.NoError(t, err) +} + +func TestReplaceConflictOneKeyNonclusteredPk(t *testing.T) { + p := parser.New() + node, _, err := p.ParseSQL("create table a (a int primary key nonclustered, b int not null, c text, key key_b(b));") + require.NoError(t, err) + mockSctx := mock.NewContext() + mockSctx.GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOff + info, err := ddl.MockTableInfo(mockSctx, node[0].(*ast.CreateTableStmt), 108) + require.NoError(t, err) + info.State = model.StatePublic + require.False(t, info.PKIsHandle) + tbl, err := tables.TableFromMeta(tidbkv.NewPanickingAllocators(info.SepAutoInc()), info) + require.NoError(t, err) + require.False(t, tbl.Meta().HasClusteredIndex()) + + sessionOpts := encode.SessionOptions{ + SQLMode: mysql.ModeStrictAllTables, + Timestamp: 1234567890, + } + + encoder, err := tidbkv.NewBaseKVEncoder(&encode.EncodingConfig{ + Table: tbl, + SessionOptions: sessionOpts, + Logger: log.L(), + }) + require.NoError(t, err) + encoder.SessionCtx.GetTableCtx().GetRowEncodingConfig().RowEncoder.Enable = true + + data1 := []types.Datum{ + types.NewIntDatum(1), + 
types.NewIntDatum(6), + types.NewStringDatum("1.csv"), + types.NewIntDatum(1), + } + data2 := []types.Datum{ + types.NewIntDatum(2), + types.NewIntDatum(6), + types.NewStringDatum("2.csv"), + types.NewIntDatum(2), + } + data3 := []types.Datum{ + types.NewIntDatum(3), + types.NewIntDatum(3), + types.NewStringDatum("3.csv"), + types.NewIntDatum(3), + } + data4 := []types.Datum{ + types.NewIntDatum(3), + types.NewIntDatum(4), + types.NewStringDatum("4.csv"), + types.NewIntDatum(4), + } + data5 := []types.Datum{ + types.NewIntDatum(5), + types.NewIntDatum(4), + types.NewStringDatum("5.csv"), + types.NewIntDatum(5), + } + _, err = encoder.AddRecord(data1) + require.NoError(t, err) + _, err = encoder.AddRecord(data2) + require.NoError(t, err) + _, err = encoder.AddRecord(data3) + require.NoError(t, err) + _, err = encoder.AddRecord(data4) + require.NoError(t, err) + _, err = encoder.AddRecord(data5) + require.NoError(t, err) + kvPairs := encoder.SessionCtx.TakeKvPairs() + + data3IndexKey := kvPairs.Pairs[8].Key + data3IndexValue := kvPairs.Pairs[8].Val + data4IndexValue := kvPairs.Pairs[11].Val + data3RowKey := kvPairs.Pairs[6].Key + data4RowKey := kvPairs.Pairs[9].Key + data4RowValue := kvPairs.Pairs[9].Val + data4NonclusteredKey := kvPairs.Pairs[10].Key + data4NonclusteredValue := kvPairs.Pairs[10].Val + + db, mockDB, err := sqlmock.New() + require.NoError(t, err) + defer func() { + _ = db.Close() + }() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mockDB.ExpectExec("CREATE SCHEMA IF NOT EXISTS `lightning_task_info`"). + WillReturnResult(sqlmock.NewResult(1, 1)) + mockDB.ExpectExec("CREATE TABLE IF NOT EXISTS `lightning_task_info`\\.conflict_error_v3.*"). + WillReturnResult(sqlmock.NewResult(2, 1)) + mockDB.ExpectExec("CREATE OR REPLACE VIEW `lightning_task_info`\\.conflict_view.*"). + WillReturnResult(sqlmock.NewResult(3, 1)) + mockDB.ExpectQuery("\\QSELECT _tidb_rowid, raw_key, index_name, raw_value, raw_handle FROM `lightning_task_info`.conflict_error_v3 WHERE table_name = ? AND kv_type = 0 AND _tidb_rowid >= ? and _tidb_rowid < ? ORDER BY _tidb_rowid LIMIT ?\\E"). + WillReturnRows(sqlmock.NewRows([]string{"_tidb_rowid", "raw_key", "index_name", "raw_value", "raw_handle"}). + AddRow(1, data3IndexKey, "PRIMARY", data3IndexValue, data3RowKey). + AddRow(2, data3IndexKey, "PRIMARY", data4IndexValue, data4RowKey)) + mockDB.ExpectBegin() + mockDB.ExpectExec("INSERT IGNORE INTO `lightning_task_info`\\.conflict_error_v3.*"). + WithArgs(0, "a", nil, nil, data4RowKey, data4RowValue, 2). + WillReturnResult(driver.ResultNoRows) + mockDB.ExpectCommit() + for i := 0; i < 2; i++ { + mockDB.ExpectQuery("\\QSELECT _tidb_rowid, raw_key, index_name, raw_value, raw_handle FROM `lightning_task_info`.conflict_error_v3 WHERE table_name = ? AND kv_type = 0 AND _tidb_rowid >= ? and _tidb_rowid < ? ORDER BY _tidb_rowid LIMIT ?\\E"). + WillReturnRows(sqlmock.NewRows([]string{"_tidb_rowid", "raw_key", "index_name", "raw_value", "raw_handle"})) + } + mockDB.ExpectQuery("\\QSELECT _tidb_rowid, raw_key, raw_value FROM `lightning_task_info`.conflict_error_v3 WHERE table_name = ? AND kv_type <> 0 AND _tidb_rowid >= ? and _tidb_rowid < ? ORDER BY _tidb_rowid LIMIT ?\\E"). + WillReturnRows(sqlmock.NewRows([]string{"_tidb_rowid", "raw_key", "raw_value"}). + AddRow(1, data4RowKey, data4RowValue)) + for i := 0; i < 2; i++ { + mockDB.ExpectQuery("\\QSELECT _tidb_rowid, raw_key, raw_value FROM `lightning_task_info`.conflict_error_v3 WHERE table_name = ? AND kv_type <> 0 AND _tidb_rowid >= ? 
and _tidb_rowid < ? ORDER BY _tidb_rowid LIMIT ?\\E"). + WillReturnRows(sqlmock.NewRows([]string{"_tidb_rowid", "raw_key", "raw_value"})) + } + mockDB.ExpectBegin() + mockDB.ExpectExec("DELETE FROM `lightning_task_info`\\.conflict_error_v3.*"). + WillReturnResult(sqlmock.NewResult(0, 1)) + mockDB.ExpectCommit() + mockDB.ExpectBegin() + mockDB.ExpectExec("DELETE FROM `lightning_task_info`\\.conflict_error_v3.*"). + WillReturnResult(sqlmock.NewResult(0, 0)) + mockDB.ExpectCommit() + + cfg := config.NewConfig() + cfg.Conflict.Strategy = config.ReplaceOnDup + cfg.TikvImporter.Backend = config.BackendLocal + cfg.App.TaskInfoSchemaName = "lightning_task_info" + em := errormanager.New(db, cfg, log.L()) + err = em.Init(ctx) + require.NoError(t, err) + + fnGetLatestCount := atomic.NewInt32(0) + fnDeleteKeyCount := atomic.NewInt32(0) + pool := util.NewWorkerPool(16, "resolve duplicate rows by replace") + err = em.ReplaceConflictKeys( + ctx, tbl, "a", pool, + func(ctx context.Context, key []byte) ([]byte, error) { + fnGetLatestCount.Add(1) + switch { + case bytes.Equal(key, data3IndexKey): + return data3IndexValue, nil + case bytes.Equal(key, data4RowKey): + if fnGetLatestCount.String() == "3" { + return data4RowValue, nil + } + return nil, tikverr.ErrNotExist + case bytes.Equal(key, data4NonclusteredKey): + return data4NonclusteredValue, nil + default: + return nil, fmt.Errorf("key %v is not expected", key) + } + }, + func(ctx context.Context, keys [][]byte) error { + fnDeleteKeyCount.Add(int32(len(keys))) + for _, key := range keys { + if !bytes.Equal(key, data4RowKey) && !bytes.Equal(key, data4NonclusteredKey) { + return fmt.Errorf("key %v is not expected", key) + } + } + return nil + }, + ) + require.NoError(t, err) + require.Equal(t, int32(7), fnGetLatestCount.Load()) + require.Equal(t, int32(2), fnDeleteKeyCount.Load()) + err = mockDB.ExpectationsWereMet() + require.NoError(t, err) +} + +func TestReplaceConflictOneUniqueKeyNonclusteredPk(t *testing.T) { + p := parser.New() + node, _, err := p.ParseSQL("create table a (a int primary key nonclustered, b int not null, c text, unique key uni_b(b));") + require.NoError(t, err) + mockSctx := mock.NewContext() + mockSctx.GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOff + info, err := ddl.MockTableInfo(mockSctx, node[0].(*ast.CreateTableStmt), 108) + require.NoError(t, err) + info.State = model.StatePublic + require.False(t, info.PKIsHandle) + tbl, err := tables.TableFromMeta(tidbkv.NewPanickingAllocators(info.SepAutoInc()), info) + require.NoError(t, err) + require.False(t, tbl.Meta().HasClusteredIndex()) + + sessionOpts := encode.SessionOptions{ + SQLMode: mysql.ModeStrictAllTables, + Timestamp: 1234567890, + } + + encoder, err := tidbkv.NewBaseKVEncoder(&encode.EncodingConfig{ + Table: tbl, + SessionOptions: sessionOpts, + Logger: log.L(), + }) + require.NoError(t, err) + encoder.SessionCtx.GetTableCtx().GetRowEncodingConfig().RowEncoder.Enable = true + + data1 := []types.Datum{ + types.NewIntDatum(1), + types.NewIntDatum(6), + types.NewStringDatum("1.csv"), + types.NewIntDatum(1), + } + data2 := []types.Datum{ + types.NewIntDatum(2), + types.NewIntDatum(6), + types.NewStringDatum("2.csv"), + types.NewIntDatum(2), + } + data3 := []types.Datum{ + types.NewIntDatum(3), + types.NewIntDatum(3), + types.NewStringDatum("3.csv"), + types.NewIntDatum(3), + } + data4 := []types.Datum{ + types.NewIntDatum(3), + types.NewIntDatum(4), + types.NewStringDatum("4.csv"), + types.NewIntDatum(4), + } + data5 := []types.Datum{ + 
types.NewIntDatum(5), + types.NewIntDatum(4), + types.NewStringDatum("5.csv"), + types.NewIntDatum(5), + } + _, err = encoder.AddRecord(data1) + require.NoError(t, err) + _, err = encoder.AddRecord(data2) + require.NoError(t, err) + _, err = encoder.AddRecord(data3) + require.NoError(t, err) + _, err = encoder.AddRecord(data4) + require.NoError(t, err) + _, err = encoder.AddRecord(data5) + require.NoError(t, err) + kvPairs := encoder.SessionCtx.TakeKvPairs() + + data1RowKey := kvPairs.Pairs[0].Key + data2RowKey := kvPairs.Pairs[3].Key + data2RowValue := kvPairs.Pairs[3].Val + data3RowKey := kvPairs.Pairs[6].Key + data4RowKey := kvPairs.Pairs[9].Key + data4RowValue := kvPairs.Pairs[9].Val + data5RowKey := kvPairs.Pairs[12].Key + data5RowValue := kvPairs.Pairs[12].Val + + data2IndexKey := kvPairs.Pairs[5].Key + data2IndexValue := kvPairs.Pairs[5].Val + data3IndexKey := kvPairs.Pairs[8].Key + data3IndexValue := kvPairs.Pairs[8].Val + data5IndexKey := kvPairs.Pairs[14].Key + data5IndexValue := kvPairs.Pairs[14].Val + + data1NonclusteredKey := kvPairs.Pairs[1].Key + data1NonclusteredValue := kvPairs.Pairs[1].Val + data2NonclusteredValue := kvPairs.Pairs[4].Val + data4NonclusteredKey := kvPairs.Pairs[10].Key + data4NonclusteredValue := kvPairs.Pairs[10].Val + data5NonclusteredValue := kvPairs.Pairs[13].Val + + db, mockDB, err := sqlmock.New() + require.NoError(t, err) + defer func() { + _ = db.Close() + }() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mockDB.ExpectExec("CREATE SCHEMA IF NOT EXISTS `lightning_task_info`"). + WillReturnResult(sqlmock.NewResult(1, 1)) + mockDB.ExpectExec("CREATE TABLE IF NOT EXISTS `lightning_task_info`\\.conflict_error_v3.*"). + WillReturnResult(sqlmock.NewResult(2, 1)) + mockDB.ExpectExec("CREATE OR REPLACE VIEW `lightning_task_info`\\.conflict_view.*"). + WillReturnResult(sqlmock.NewResult(3, 1)) + mockDB.ExpectQuery("\\QSELECT _tidb_rowid, raw_key, index_name, raw_value, raw_handle FROM `lightning_task_info`.conflict_error_v3 WHERE table_name = ? AND kv_type = 0 AND _tidb_rowid >= ? and _tidb_rowid < ? ORDER BY _tidb_rowid LIMIT ?\\E"). + WillReturnRows(sqlmock.NewRows([]string{"_tidb_rowid", "raw_key", "index_name", "raw_value", "raw_handle"}). + AddRow(1, data4NonclusteredKey, "uni_b", data4NonclusteredValue, data4RowKey). + AddRow(2, data4NonclusteredKey, "uni_b", data5NonclusteredValue, data5RowKey). + AddRow(3, data1NonclusteredKey, "uni_b", data1NonclusteredValue, data1RowKey). + AddRow(4, data1NonclusteredKey, "uni_b", data2NonclusteredValue, data2RowKey). + AddRow(5, data3IndexKey, "PRIMARY", data3IndexValue, data3RowKey). + AddRow(6, data3IndexKey, "PRIMARY", data4NonclusteredValue, data4RowKey)) + mockDB.ExpectBegin() + mockDB.ExpectExec("INSERT IGNORE INTO `lightning_task_info`\\.conflict_error_v3.*"). + WithArgs(0, "a", nil, nil, data5RowKey, data5RowValue, 2, + 0, "a", nil, nil, data2RowKey, data2RowValue, 2, + 0, "a", nil, nil, data4RowKey, data4RowValue, 2). + WillReturnResult(driver.ResultNoRows) + mockDB.ExpectCommit() + for i := 0; i < 2; i++ { + mockDB.ExpectQuery("\\QSELECT _tidb_rowid, raw_key, index_name, raw_value, raw_handle FROM `lightning_task_info`.conflict_error_v3 WHERE table_name = ? AND kv_type = 0 AND _tidb_rowid >= ? and _tidb_rowid < ? ORDER BY _tidb_rowid LIMIT ?\\E"). 
+ WillReturnRows(sqlmock.NewRows([]string{"_tidb_rowid", "raw_key", "index_name", "raw_value", "raw_handle"})) + } + mockDB.ExpectQuery("\\QSELECT _tidb_rowid, raw_key, raw_value FROM `lightning_task_info`.conflict_error_v3 WHERE table_name = ? AND kv_type <> 0 AND _tidb_rowid >= ? and _tidb_rowid < ? ORDER BY _tidb_rowid LIMIT ?\\E"). + WillReturnRows(sqlmock.NewRows([]string{"_tidb_rowid", "raw_key", "raw_value"}). + AddRow(1, data5RowKey, data5RowValue). + AddRow(2, data2RowKey, data2RowValue). + AddRow(3, data4RowKey, data4RowValue)) + for i := 0; i < 2; i++ { + mockDB.ExpectQuery("\\QSELECT _tidb_rowid, raw_key, raw_value FROM `lightning_task_info`.conflict_error_v3 WHERE table_name = ? AND kv_type <> 0 AND _tidb_rowid >= ? and _tidb_rowid < ? ORDER BY _tidb_rowid LIMIT ?\\E"). + WillReturnRows(sqlmock.NewRows([]string{"_tidb_rowid", "raw_key", "raw_value"})) + } + mockDB.ExpectBegin() + mockDB.ExpectExec("DELETE FROM `lightning_task_info`\\.conflict_error_v3.*"). + WillReturnResult(sqlmock.NewResult(0, 3)) + mockDB.ExpectCommit() + mockDB.ExpectBegin() + mockDB.ExpectExec("DELETE FROM `lightning_task_info`\\.conflict_error_v3.*"). + WillReturnResult(sqlmock.NewResult(0, 0)) + mockDB.ExpectCommit() + + cfg := config.NewConfig() + cfg.Conflict.Strategy = config.ReplaceOnDup + cfg.TikvImporter.Backend = config.BackendLocal + cfg.App.TaskInfoSchemaName = "lightning_task_info" + em := errormanager.New(db, cfg, log.L()) + err = em.Init(ctx) + require.NoError(t, err) + + fnGetLatestCount := atomic.NewInt32(0) + fnDeleteKeyCount := atomic.NewInt32(0) + pool := util.NewWorkerPool(16, "resolve duplicate rows by replace") + err = em.ReplaceConflictKeys( + ctx, tbl, "a", pool, + func(ctx context.Context, key []byte) ([]byte, error) { + fnGetLatestCount.Add(1) + switch { + case bytes.Equal(key, data4NonclusteredKey): + if fnGetLatestCount.String() != "20" { + return data4NonclusteredValue, nil + } + return nil, tikverr.ErrNotExist + case bytes.Equal(key, data5RowKey): + if fnGetLatestCount.String() == "3" { + return data5RowValue, nil + } + return nil, tikverr.ErrNotExist + case bytes.Equal(key, data1NonclusteredKey): + return data1NonclusteredValue, nil + case bytes.Equal(key, data2RowKey): + if fnGetLatestCount.String() == "6" { + return data2RowValue, nil + } + return nil, tikverr.ErrNotExist + case bytes.Equal(key, data3IndexKey): + return data3IndexValue, nil + case bytes.Equal(key, data4RowKey): + return data4RowValue, nil + case bytes.Equal(key, data2IndexKey): + return data2IndexValue, nil + case bytes.Equal(key, data5IndexKey): + return data5IndexValue, nil + default: + return nil, fmt.Errorf("key %x is not expected", key) + } + }, + func(ctx context.Context, keys [][]byte) error { + fnDeleteKeyCount.Add(int32(len(keys))) + for _, key := range keys { + if !bytes.Equal(key, data5RowKey) && !bytes.Equal(key, data2RowKey) && !bytes.Equal(key, data4RowKey) && !bytes.Equal(key, data2IndexKey) && !bytes.Equal(key, data4NonclusteredKey) && !bytes.Equal(key, data5IndexKey) { + return fmt.Errorf("key %v is not expected", key) + } + } + return nil + }, + ) + require.NoError(t, err) + require.Equal(t, int32(18), fnGetLatestCount.Load()) + require.Equal(t, int32(5), fnDeleteKeyCount.Load()) + err = mockDB.ExpectationsWereMet() + require.NoError(t, err) +} + +func TestReplaceConflictOneUniqueKeyNonclusteredVarcharPk(t *testing.T) { + p := parser.New() + node, _, err := p.ParseSQL("create table a (a varchar(20) primary key nonclustered, b int not null, c text, unique key uni_b(b));") + 
require.NoError(t, err) + mockSctx := mock.NewContext() + mockSctx.GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOff + info, err := ddl.MockTableInfo(mockSctx, node[0].(*ast.CreateTableStmt), 108) + require.NoError(t, err) + info.State = model.StatePublic + require.False(t, info.PKIsHandle) + tbl, err := tables.TableFromMeta(tidbkv.NewPanickingAllocators(info.SepAutoInc()), info) + require.NoError(t, err) + require.False(t, tbl.Meta().HasClusteredIndex()) + + sessionOpts := encode.SessionOptions{ + SQLMode: mysql.ModeStrictAllTables, + Timestamp: 1234567890, + } + + encoder, err := tidbkv.NewBaseKVEncoder(&encode.EncodingConfig{ + Table: tbl, + SessionOptions: sessionOpts, + Logger: log.L(), + }) + require.NoError(t, err) + encoder.SessionCtx.GetTableCtx().GetRowEncodingConfig().RowEncoder.Enable = true + + data1 := []types.Datum{ + types.NewStringDatum("x"), + types.NewIntDatum(6), + types.NewStringDatum("1.csv"), + types.NewIntDatum(1), + } + data2 := []types.Datum{ + types.NewStringDatum("y"), + types.NewIntDatum(6), + types.NewStringDatum("2.csv"), + types.NewIntDatum(2), + } + data3 := []types.Datum{ + types.NewStringDatum("z"), + types.NewIntDatum(3), + types.NewStringDatum("3.csv"), + types.NewIntDatum(3), + } + data4 := []types.Datum{ + types.NewStringDatum("z"), + types.NewIntDatum(4), + types.NewStringDatum("4.csv"), + types.NewIntDatum(4), + } + data5 := []types.Datum{ + types.NewStringDatum("t"), + types.NewIntDatum(4), + types.NewStringDatum("5.csv"), + types.NewIntDatum(5), + } + _, err = encoder.AddRecord(data1) + require.NoError(t, err) + _, err = encoder.AddRecord(data2) + require.NoError(t, err) + _, err = encoder.AddRecord(data3) + require.NoError(t, err) + _, err = encoder.AddRecord(data4) + require.NoError(t, err) + _, err = encoder.AddRecord(data5) + require.NoError(t, err) + kvPairs := encoder.SessionCtx.TakeKvPairs() + + data1RowKey := kvPairs.Pairs[0].Key + data2RowKey := kvPairs.Pairs[3].Key + data2RowValue := kvPairs.Pairs[3].Val + data3RowKey := kvPairs.Pairs[6].Key + data4RowKey := kvPairs.Pairs[9].Key + data4RowValue := kvPairs.Pairs[9].Val + data5RowKey := kvPairs.Pairs[12].Key + data5RowValue := kvPairs.Pairs[12].Val + + data2IndexKey := kvPairs.Pairs[5].Key + data2IndexValue := kvPairs.Pairs[5].Val + data3IndexKey := kvPairs.Pairs[8].Key + data3IndexValue := kvPairs.Pairs[8].Val + data4IndexValue := kvPairs.Pairs[11].Val + data5IndexKey := kvPairs.Pairs[14].Key + data5IndexValue := kvPairs.Pairs[14].Val + + data1NonclusteredKey := kvPairs.Pairs[1].Key + data1NonclusteredValue := kvPairs.Pairs[1].Val + data2NonclusteredValue := kvPairs.Pairs[4].Val + data4NonclusteredKey := kvPairs.Pairs[10].Key + data4NonclusteredValue := kvPairs.Pairs[10].Val + data5NonclusteredValue := kvPairs.Pairs[13].Val + + db, mockDB, err := sqlmock.New() + require.NoError(t, err) + defer func() { + _ = db.Close() + }() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mockDB.ExpectExec("CREATE SCHEMA IF NOT EXISTS `lightning_task_info`"). + WillReturnResult(sqlmock.NewResult(1, 1)) + mockDB.ExpectExec("CREATE TABLE IF NOT EXISTS `lightning_task_info`\\.conflict_error_v3.*"). + WillReturnResult(sqlmock.NewResult(2, 1)) + mockDB.ExpectExec("CREATE OR REPLACE VIEW `lightning_task_info`\\.conflict_view.*"). + WillReturnResult(sqlmock.NewResult(3, 1)) + mockDB.ExpectQuery("\\QSELECT _tidb_rowid, raw_key, index_name, raw_value, raw_handle FROM `lightning_task_info`.conflict_error_v3 WHERE table_name = ? 
AND kv_type = 0 AND _tidb_rowid >= ? and _tidb_rowid < ? ORDER BY _tidb_rowid LIMIT ?\\E"). + WillReturnRows(sqlmock.NewRows([]string{"_tidb_rowid", "raw_key", "index_name", "raw_value", "raw_handle"}). + AddRow(1, data4NonclusteredKey, "uni_b", data4NonclusteredValue, data4RowKey). + AddRow(2, data4NonclusteredKey, "uni_b", data5NonclusteredValue, data5RowKey). + AddRow(3, data1NonclusteredKey, "uni_b", data1NonclusteredValue, data1RowKey). + AddRow(4, data1NonclusteredKey, "uni_b", data2NonclusteredValue, data2RowKey). + AddRow(5, data3IndexKey, "PRIMARY", data3IndexValue, data3RowKey). + AddRow(6, data3IndexKey, "PRIMARY", data4IndexValue, data4RowKey)) + mockDB.ExpectBegin() + mockDB.ExpectExec("INSERT IGNORE INTO `lightning_task_info`\\.conflict_error_v3.*"). + WithArgs(0, "a", nil, nil, data5RowKey, data5RowValue, 2, + 0, "a", nil, nil, data2RowKey, data2RowValue, 2, + 0, "a", nil, nil, data4RowKey, data4RowValue, 2). + WillReturnResult(driver.ResultNoRows) + mockDB.ExpectCommit() + for i := 0; i < 2; i++ { + mockDB.ExpectQuery("\\QSELECT _tidb_rowid, raw_key, index_name, raw_value, raw_handle FROM `lightning_task_info`.conflict_error_v3 WHERE table_name = ? AND kv_type = 0 AND _tidb_rowid >= ? and _tidb_rowid < ? ORDER BY _tidb_rowid LIMIT ?\\E"). + WillReturnRows(sqlmock.NewRows([]string{"_tidb_rowid", "raw_key", "index_name", "raw_value", "raw_handle"})) + } + mockDB.ExpectQuery("\\QSELECT _tidb_rowid, raw_key, raw_value FROM `lightning_task_info`.conflict_error_v3 WHERE table_name = ? AND kv_type <> 0 AND _tidb_rowid >= ? and _tidb_rowid < ? ORDER BY _tidb_rowid LIMIT ?\\E"). + WillReturnRows(sqlmock.NewRows([]string{"_tidb_rowid", "raw_key", "raw_value"}). + AddRow(1, data5RowKey, data5RowValue). + AddRow(2, data2RowKey, data2RowValue). + AddRow(3, data4RowKey, data4RowValue)) + for i := 0; i < 2; i++ { + mockDB.ExpectQuery("\\QSELECT _tidb_rowid, raw_key, raw_value FROM `lightning_task_info`.conflict_error_v3 WHERE table_name = ? AND kv_type <> 0 AND _tidb_rowid >= ? and _tidb_rowid < ? ORDER BY _tidb_rowid LIMIT ?\\E"). + WillReturnRows(sqlmock.NewRows([]string{"_tidb_rowid", "raw_key", "raw_value"})) + } + mockDB.ExpectBegin() + mockDB.ExpectExec("DELETE FROM `lightning_task_info`\\.conflict_error_v3.*"). + WillReturnResult(sqlmock.NewResult(0, 3)) + mockDB.ExpectCommit() + mockDB.ExpectBegin() + mockDB.ExpectExec("DELETE FROM `lightning_task_info`\\.conflict_error_v3.*"). 
+ WillReturnResult(sqlmock.NewResult(0, 0)) + mockDB.ExpectCommit() + + cfg := config.NewConfig() + cfg.Conflict.Strategy = config.ReplaceOnDup + cfg.TikvImporter.Backend = config.BackendLocal + cfg.App.TaskInfoSchemaName = "lightning_task_info" + em := errormanager.New(db, cfg, log.L()) + err = em.Init(ctx) + require.NoError(t, err) + + fnGetLatestCount := atomic.NewInt32(0) + fnDeleteKeyCount := atomic.NewInt32(0) + pool := util.NewWorkerPool(16, "resolve duplicate rows by replace") + err = em.ReplaceConflictKeys( + ctx, tbl, "a", pool, + func(ctx context.Context, key []byte) ([]byte, error) { + fnGetLatestCount.Add(1) + switch { + case bytes.Equal(key, data4NonclusteredKey): + if fnGetLatestCount.String() != "20" { + return data4NonclusteredValue, nil + } + return nil, tikverr.ErrNotExist + case bytes.Equal(key, data5RowKey): + if fnGetLatestCount.String() == "3" { + return data5RowValue, nil + } + return nil, tikverr.ErrNotExist + case bytes.Equal(key, data1NonclusteredKey): + return data1NonclusteredValue, nil + case bytes.Equal(key, data2RowKey): + if fnGetLatestCount.String() == "6" { + return data2RowValue, nil + } + return nil, tikverr.ErrNotExist + case bytes.Equal(key, data3IndexKey): + return data3IndexValue, nil + case bytes.Equal(key, data4RowKey): + if fnGetLatestCount.String() == "9" { + return data4RowValue, nil + } + return nil, tikverr.ErrNotExist + case bytes.Equal(key, data2IndexKey): + return data2IndexValue, nil + case bytes.Equal(key, data5IndexKey): + return data5IndexValue, nil + default: + return nil, fmt.Errorf("key %x is not expected", key) + } + }, + func(ctx context.Context, keys [][]byte) error { + fnDeleteKeyCount.Add(int32(len(keys))) + for _, key := range keys { + if !bytes.Equal(key, data5RowKey) && !bytes.Equal(key, data2RowKey) && !bytes.Equal(key, data4RowKey) && !bytes.Equal(key, data2IndexKey) && !bytes.Equal(key, data4NonclusteredKey) && !bytes.Equal(key, data5IndexKey) { + return fmt.Errorf("key %v is not expected", key) + } + } + return nil + }, + ) + require.NoError(t, err) + require.Equal(t, int32(21), fnGetLatestCount.Load()) + require.Equal(t, int32(5), fnDeleteKeyCount.Load()) + err = mockDB.ExpectationsWereMet() + require.NoError(t, err) +} diff --git a/table/column.go b/table/column.go index 83ebdfae51745..abd9bacd96bee 100644 --- a/table/column.go +++ b/table/column.go @@ -491,7 +491,14 @@ func (c *Column) CheckNotNull(data *types.Datum, rowCntInLoadData uint64) error // error is ErrWarnNullToNotnull. // Otherwise, the error is ErrColumnCantNull. // If BadNullAsWarning is true, it will append the error as a warning, else return the error. +<<<<<<< HEAD:table/column.go func (c *Column) HandleBadNull(d *types.Datum, sc *stmtctx.StatementContext, rowCntInLoadData uint64) error { +======= +func (c *Column) HandleBadNull( + ec errctx.Context, + d *types.Datum, + rowCntInLoadData uint64) error { +>>>>>>> 91beef4bb14 (*: disable insert null to not-null column for single-row insertion in non-strict mode (#55477)):pkg/table/column.go if err := c.CheckNotNull(d, rowCntInLoadData); err != nil { if sc.BadNullAsWarning { sc.AppendWarning(err) @@ -535,7 +542,12 @@ func GetColOriginDefaultValueWithoutStrictSQLMode(ctx sessionctx.Context, col *m // But CheckNoDefaultValueForInsert logic should only check before insert. 
func CheckNoDefaultValueForInsert(sc *stmtctx.StatementContext, col *model.ColumnInfo) error { if mysql.HasNoDefaultValueFlag(col.GetFlag()) && !col.DefaultIsExpr && col.GetDefaultValue() == nil && col.GetType() != mysql.TypeEnum { +<<<<<<< HEAD:table/column.go if !sc.BadNullAsWarning { +======= + ignoreErr := sc.ErrGroupLevel(errctx.ErrGroupNoDefault) != errctx.LevelError + if !ignoreErr { +>>>>>>> 91beef4bb14 (*: disable insert null to not-null column for single-row insertion in non-strict mode (#55477)):pkg/table/column.go return ErrNoDefaultValue.GenWithStackByArgs(col.Name) } if !mysql.HasNotNullFlag(col.GetFlag()) { diff --git a/table/column_test.go b/table/column_test.go index a0c14a2360e6e..4ca8cf740cd9e 100644 --- a/table/column_test.go +++ b/table/column_test.go @@ -464,7 +464,19 @@ func TestGetDefaultValue(t *testing.T) { }() for _, tt := range tests { +<<<<<<< HEAD:table/column_test.go ctx.GetSessionVars().StmtCtx.BadNullAsWarning = !tt.strict +======= + sc := ctx.GetSessionVars().StmtCtx + if tt.strict { + ctx.GetSessionVars().SQLMode = defaultMode + } else { + ctx.GetSessionVars().SQLMode = mysql.DelSQLMode(defaultMode, mysql.ModeStrictAllTables|mysql.ModeStrictTransTables) + } + levels := sc.ErrLevels() + levels[errctx.ErrGroupNoDefault] = errctx.ResolveErrLevel(false, !tt.strict) + sc.SetErrLevels(levels) +>>>>>>> 91beef4bb14 (*: disable insert null to not-null column for single-row insertion in non-strict mode (#55477)):pkg/table/column_test.go val, err := GetColDefaultValue(ctx, tt.colInfo) if err != nil { require.Errorf(t, tt.err, "%v", err) @@ -478,7 +490,19 @@ func TestGetDefaultValue(t *testing.T) { } for _, tt := range tests { +<<<<<<< HEAD:table/column_test.go ctx.GetSessionVars().StmtCtx.BadNullAsWarning = !tt.strict +======= + sc := ctx.GetSessionVars().StmtCtx + if tt.strict { + ctx.GetSessionVars().SQLMode = defaultMode + } else { + ctx.GetSessionVars().SQLMode = mysql.DelSQLMode(defaultMode, mysql.ModeStrictAllTables|mysql.ModeStrictTransTables) + } + levels := sc.ErrLevels() + levels[errctx.ErrGroupNoDefault] = errctx.ResolveErrLevel(false, !tt.strict) + sc.SetErrLevels(levels) +>>>>>>> 91beef4bb14 (*: disable insert null to not-null column for single-row insertion in non-strict mode (#55477)):pkg/table/column_test.go val, err := GetColOriginDefaultValue(ctx, tt.colInfo) if err != nil { require.Errorf(t, tt.err, "%v", err) diff --git a/tests/integrationtest/r/executor/executor.result b/tests/integrationtest/r/executor/executor.result new file mode 100644 index 0000000000000..5a61cfa41c802 --- /dev/null +++ b/tests/integrationtest/r/executor/executor.result @@ -0,0 +1,4376 @@ +select 1 + 2*3; +1 + 2*3 +7 +select _utf8"string"; +string +string +select 1 order by 1; +1 +1 +SELECT 'a' as f1 having f1 = 'a'; +f1 +a +SELECT (SELECT * FROM (SELECT 'a') t) AS f1 HAVING (f1 = 'a' OR TRUE); +f1 +a +SELECT (SELECT * FROM (SELECT 'a') t) + 1 AS f1 HAVING (f1 = 'a' OR TRUE); +f1 +1 +create table t (c1 int, c2 int, c3 varchar(20)); +insert into t values (1, 2, 'abc'), (2, 1, 'bcd'); +select c1 as a, c1 as b from t order by c1; +a b +1 1 +2 2 +select c1 as a, t.c1 as a from t order by a desc; +a a +2 2 +1 1 +select c1 as c2 from t order by c2; +c2 +1 +2 +select sum(c1) from t order by sum(c1); +sum(c1) +3 +select c1 as c2 from t order by c2 + 1; +c2 +2 +1 +select * from t order by 1; +c1 c2 c3 +1 2 abc +2 1 bcd +select * from t order by 2; +c1 c2 c3 +2 1 bcd +1 2 abc +select c1, c3 from t order by binary c1 desc; +c1 c3 +2 bcd +1 abc +select c1, c2 from t order by binary c3; 
+c1 c2 +1 2 +2 1 +create table t1(a int, b int); +create table t2(a int, b int); +insert into t1 value(1, 1), (2, 2); +insert into t2 value(1, 1), (2, 2); +select sum(c) from (select t1.a as a, t1.a as c, length(t1.b) from t1 union select a, b, b from t2) t; +sum(c) +5 +drop table if exists t; +create table t(a bigint, b bigint, c bigint); +insert into t values(1, 1, 1), (2, 2, 2), (3, 3, 3); +select cast(count(a) as signed), a as another, a from t group by a order by cast(count(a) as signed), a limit 10; +cast(count(a) as signed) another a +1 1 1 +1 2 2 +1 3 3 +drop table if exists t; +create table t (a int primary key auto_increment, b int, index idx (b)); +insert t (b) values (0), (1), (2), (3), (4), (5), (6), (7), (8), (9); +select b from t order by b desc; +b +9 +8 +7 +6 +5 +4 +3 +2 +1 +0 +select b from t where b <3 or (b >=6 and b < 8) order by b desc; +b +7 +6 +2 +1 +0 +drop table if exists t; +create table t (a int, b int, index idx (b, a)); +insert t values (0, 2), (1, 2), (2, 2), (0, 1), (1, 1), (2, 1), (0, 0), (1, 0), (2, 0); +select b, a from t order by b, a desc; +b a +0 2 +0 1 +0 0 +1 2 +1 1 +1 0 +2 2 +2 1 +2 0 +drop table if exists t; +create table t (a int primary key auto_increment, b int); +insert t (b) values (1), (2), (3), (4), (5), (6), (7), (8), (9); +select b from t order by a desc; +b +9 +8 +7 +6 +5 +4 +3 +2 +1 +select a from t where a <3 or (a >=6 and a < 8) order by a desc; +a +7 +6 +2 +1 +drop table if exists t; +create table t (a int unsigned primary key, b int, c int, key idx_ba (b, c, a)); +insert t values (1, 1, 1); +select * from t; +a b c +1 1 1 +update t set c=2 where a=1; +select * from t where b=1; +a b c +1 1 2 +CREATE TABLE test_mu (a int primary key, b int, c int); +INSERT INTO test_mu VALUES (1, 2, 3), (4, 5, 6), (7, 8, 9); +INSERT INTO test_mu VALUES (1, 2, 3) ON DUPLICATE KEY UPDATE b = 3, c = b; +SELECT * FROM test_mu ORDER BY a; +a b c +1 3 3 +4 5 6 +7 8 9 +INSERT INTO test_mu VALUES (1, 2, 3) ON DUPLICATE KEY UPDATE c = 2, b = c+5; +SELECT * FROM test_mu ORDER BY a; +a b c +1 7 2 +4 5 6 +7 8 9 +UPDATE test_mu SET b = 0, c = b WHERE a = 4; +SELECT * FROM test_mu ORDER BY a; +a b c +1 7 2 +4 0 5 +7 8 9 +UPDATE test_mu SET c = 8, b = c WHERE a = 4; +SELECT * FROM test_mu ORDER BY a; +a b c +1 7 2 +4 5 8 +7 8 9 +UPDATE test_mu SET c = b, b = c WHERE a = 7; +SELECT * FROM test_mu ORDER BY a; +a b c +1 7 2 +4 5 8 +7 9 8 +drop table if exists tu; +CREATE TABLE tu(a int, b int, c int GENERATED ALWAYS AS (a + b) VIRTUAL, d int as (a * b) stored, e int GENERATED ALWAYS as (b * 2) VIRTUAL, PRIMARY KEY (a), UNIQUE KEY ukc (c), unique key ukd(d), key ke(e)); +insert into tu(a, b) values(1, 2); +insert into tu(a, b) values(5, 6); +select * from tu for update; +a b c d e +1 2 3 2 4 +5 6 11 30 12 +select * from tu where a = 1; +a b c d e +1 2 3 2 4 +select * from tu where a in (1, 2); +a b c d e +1 2 3 2 4 +select * from tu where c in (1, 2, 3); +a b c d e +1 2 3 2 4 +select * from tu where c = 3; +a b c d e +1 2 3 2 4 +select d, e from tu where c = 3; +d e +2 4 +select * from tu where d in (1, 2, 3); +a b c d e +1 2 3 2 4 +select * from tu where d = 2; +a b c d e +1 2 3 2 4 +select c, d from tu where d = 2; +c d +3 2 +select d, e from tu where e = 4; +d e +2 4 +select * from tu where e = 4; +a b c d e +1 2 3 2 4 +update tu set a = a + 1, b = b + 1 where c = 11; +select * from tu for update; +a b c d e +1 2 3 2 4 +6 7 13 42 14 +select * from tu where a = 6; +a b c d e +6 7 13 42 14 +select * from tu where c in (5, 6, 13); +a b c d e +6 7 13 42 14 +select b, c, 
e, d from tu where c = 13; +b c e d +7 13 14 42 +select a, e, d from tu where c in (5, 6, 13); +a e d +6 14 42 +drop table if exists tu; +drop table if exists t1,t2; +create table t1 (id int, i int, b bigint, d double, dd decimal); +create table t2 (id int, i int unsigned, b bigint unsigned, d double unsigned, dd decimal unsigned); +insert into t1 values(1, -1, -1, -1.1, -1); +insert into t2 values(2, 1, 1, 1.1, 1); +select * from t1 union select * from t2 order by id; +id i b d dd +1 -1 -1 -1.1 -1 +2 1 1 1.1 1 +select id, i, b, d, dd from t2 union select id, i, b, d, dd from t1 order by id; +id i b d dd +1 -1 -1 -1.1 -1 +2 1 1 1.1 1 +select id, i from t2 union select id, cast(i as unsigned int) from t1 order by id; +id i +1 18446744073709551615 +2 1 +select dd from t2 union all select dd from t2; +dd +1 +1 +drop table if exists t3,t4; +create table t3 (id int, v int); +create table t4 (id int, v double unsigned); +insert into t3 values (1, -1); +insert into t4 values (2, 1); +select id, v from t3 union select id, v from t4 order by id; +id v +1 -1 +2 1 +select id, v from t4 union select id, v from t3 order by id; +id v +1 -1 +2 1 +drop table if exists t5,t6,t7; +create table t5 (id int, v bigint unsigned); +create table t6 (id int, v decimal); +create table t7 (id int, v bigint); +insert into t5 values (1, 1); +insert into t6 values (2, -1); +insert into t7 values (3, -1); +select id, v from t5 union select id, v from t6 order by id; +id v +1 1 +2 -1 +select id, v from t5 union select id, v from t7 union select id, v from t6 order by id; +id v +1 1 +2 -1 +3 -1 +drop table if exists t1; +create table t1 (a int) partition by range (a) ( +partition p0 values less than (10), +partition p1 values less than (20), +partition p2 values less than (30), +partition p3 values less than (40), +partition p4 values less than MAXVALUE +); +insert into t1 values (1),(11),(21),(31); +delete from t1 partition (p4); +select * from t1 order by a; +a +1 +11 +21 +31 +delete from t1 partition (p0) where a > 10; +select * from t1 order by a; +a +1 +11 +21 +31 +delete from t1 partition (p0,p1,p2); +select * from t1; +a +31 +drop table if exists t_1; +create table t_1 (c1 int, c2 int, c3 int default 1, index (c1)) comment = 'test table'; +alter table `t_1` comment 'this is table comment'; +select table_comment from information_schema.tables where table_name = 't_1'; +table_comment +this is table comment +alter table `t_1` comment 'table t comment'; +select table_comment from information_schema.tables where table_name = 't_1'; +table_comment +table t comment +drop table if exists t; +create table t (c enum('a', 'b', 'c')); +insert into t values ('a'), (2), ('c'); +select * from t where c = 'a'; +c +a +select c + 1 from t where c = 2; +c + 1 +3 +delete from t; +insert into t values (); +insert into t values (null), ('1'); +select c + 1 from t where c = 1; +c + 1 +2 +delete from t; +insert into t values(1), (2), (3); +select * from t where c; +c +a +b +c +drop table if exists t; +create table t (c set('a', 'b', 'c')); +insert into t values ('a'), (2), ('c'), ('a,b'), ('b,a'); +select * from t where c = 'a'; +c +a +select * from t where c = 'a,b'; +c +a,b +a,b +select c + 1 from t where c = 2; +c + 1 +3 +delete from t; +insert into t values (); +insert into t values (null), ('1'); +select c + 1 from t where c = 1; +c + 1 +2 +delete from t; +insert into t values(3); +select * from t where c; +c +a,b +drop table if exists t; +create table t (id int, name varchar(20)); +drop table if exists t1; +create table t1 (gid int); 
+insert into t1 (gid) value (1); +insert into t (id, name) value ((select gid from t1) ,'asd'); +select * from t; +id name +1 asd +drop table if exists t; +create table t (a int primary key, b int); +insert into t values(1, 2), (2, 1); +select * from t where (a = 1 and b = 2) or (a = 2 and b = 1); +a b +1 2 +2 1 +select * from t where (a = 1 and b = 1) or (a = 2 and b = 2); +a b +drop table if exists t; +create table t(id int, PRIMARY KEY (id)); +insert into t values(1), (5), (10); +select * from t where id in(1, 2, 10); +id +1 +10 +drop table if exists admin_test; +create table admin_test (c1 int, c2 int, c3 int default 1, index (c1), unique key(c2)); +insert admin_test (c1, c2) values (1, 1), (2, 2), (NULL, NULL); +admin check table admin_test; +drop table if exists t; +create table t(a bigint, b bigint); +insert into t values(1, 1), (2, 2), (3, 30), (4, 40), (5, 5), (6, 6); +select * from t order by a limit 1, 1; +a b +2 2 +select * from t order by a limit 1, 2; +a b +2 2 +3 30 +select * from t order by a limit 1, 3; +a b +2 2 +3 30 +4 40 +select * from t order by a limit 1, 4; +a b +2 2 +3 30 +4 40 +5 5 +select a from t where a > 0 limit 1, 1; +a +2 +select a from t where a > 0 limit 1, 2; +a +2 +3 +select b from t where a > 0 limit 1, 3; +b +2 +30 +40 +select b from t where a > 0 limit 1, 4; +b +2 +30 +40 +5 +set @@tidb_init_chunk_size=2; +select * from t where a > 0 limit 2, 1; +a b +3 30 +select * from t where a > 0 limit 2, 2; +a b +3 30 +4 40 +select * from t where a > 0 limit 2, 3; +a b +3 30 +4 40 +5 5 +select * from t where a > 0 limit 2, 4; +a b +3 30 +4 40 +5 5 +6 6 +select a from t order by a limit 2, 1; +a +3 +select b from t order by a limit 2, 2; +b +30 +40 +select a from t order by a limit 2, 3; +a +3 +4 +5 +select b from t order by a limit 2, 4; +b +30 +40 +5 +6 +set @@tidb_init_chunk_size = default; +drop table if exists t; +create table t (a int unique); +insert t values (-1), (2), (3), (5), (6), (7), (8), (9); +select a from t where a < 0 or (a >= 2.1 and a < 5.1) or ( a > 5.9 and a <= 7.9) or a > '8.1'; +a +-1 +3 +5 +6 +7 +9 +drop table if exists t; +create table t (a int unique); +insert t values (0); +select NULL from t ; +NULL +NULL +drop table if exists t; +create table t (a int unique, b int); +insert t values (5, 0); +insert t values (4, 0); +insert t values (3, 0); +insert t values (2, 0); +insert t values (1, 0); +insert t values (0, 0); +select * from t order by a limit 3; +a b +0 0 +1 0 +2 0 +drop table if exists t; +create table t (a int unique, b int); +insert t values (0, 1); +insert t values (1, 2); +insert t values (2, 1); +insert t values (3, 2); +insert t values (4, 1); +insert t values (5, 2); +select * from t where a < 5 and b = 1 limit 2; +a b +0 1 +2 1 +drop table if exists tab1; +CREATE TABLE tab1(pk INTEGER PRIMARY KEY, col0 INTEGER, col1 FLOAT, col3 INTEGER, col4 FLOAT); +CREATE INDEX idx_tab1_0 on tab1 (col0); +CREATE INDEX idx_tab1_1 on tab1 (col1); +CREATE INDEX idx_tab1_3 on tab1 (col3); +CREATE INDEX idx_tab1_4 on tab1 (col4); +INSERT INTO tab1 VALUES(1,37,20.85,30,10.69); +SELECT pk FROM tab1 WHERE ((col3 <= 6 OR col3 < 29 AND (col0 < 41)) OR col3 > 42) AND col1 >= 96.1 AND col3 = 30 AND col3 > 17 AND (col0 BETWEEN 36 AND 42); +pk +drop table if exists tab1; +CREATE TABLE tab1(pk INTEGER PRIMARY KEY, a INTEGER, b INTEGER); +CREATE INDEX idx_tab1_0 on tab1 (a); +INSERT INTO tab1 VALUES(1,1,1); +INSERT INTO tab1 VALUES(2,2,1); +INSERT INTO tab1 VALUES(3,1,2); +INSERT INTO tab1 VALUES(4,2,2); +SELECT * FROM tab1 WHERE pk <= 3 AND a = 1; 
+pk a b +1 1 1 +3 1 2 +SELECT * FROM tab1 WHERE pk <= 4 AND a = 1 AND b = 2; +pk a b +3 1 2 +CREATE INDEX idx_tab1_1 on tab1 (b, a); +SELECT pk FROM tab1 WHERE b > 1; +pk +3 +4 +drop table if exists t; +CREATE TABLE t (a varchar(3), index(a)); +insert t values('aaa'), ('aab'); +select * from t where a >= 'aaaa' and a < 'aabb'; +a +aab +drop table if exists t; +CREATE TABLE t (a int primary key, b int, c int, index(c)); +insert t values(1, 1, 1), (2, 2, 2), (4, 4, 4), (3, 3, 3), (5, 5, 5); +select a from t where c >= 2 order by b desc limit 1; +a +5 +drop table if exists t; +create table t(a varchar(50) primary key, b int, c int, index idx(b)); +insert into t values('aa', 1, 1); +select * from t use index(idx) where a > 'a'; +a b c +aa 1 1 +drop table if exists t; +CREATE TABLE `t` (a int, KEY (a)); +SELECT * FROM (SELECT * FROM (SELECT a as d FROM t WHERE a IN ('100')) AS x WHERE x.d < "123" ) tmp_count; +d +drop table if exists t1; +drop table if exists t2; +drop table if exists t3; +drop table if exists t4; +drop table if exists t5; +create table t1(k int, v int); +create table t2(k int, v int); +create table t3(id int auto_increment, k int, v int, primary key(id)); +create table t4(k int, v int); +create table t5(v int, k int, primary key(k)); +insert into t1 values (1, 1); +insert into t4 values (3, 3); +drop table if exists t6; +drop table if exists t7; +create table t6 (id int, v longtext); +create table t7 (x int, id int, v longtext, primary key(id)); +update t1 set v = 0 where k = 1; +select k, v from t1 where k = 1; +k v +1 0 +update t1 left join t3 on t1.k = t3.k set t1.v = 1; +select k, v from t1; +k v +1 1 +select id, k, v from t3; +id k v +update t1 left join t2 on t1.k = t2.k set t1.v = t2.v, t2.v = 3; +select k, v from t1; +k v +1 NULL +select k, v from t2; +k v +update t1 left join t2 on t1.k = t2.k set t2.v = 3, t1.v = t2.v; +select k, v from t1; +k v +1 NULL +select k, v from t2; +k v +update t2 right join t1 on t2.k = t1.k set t2.v = 4, t1.v = 0; +select k, v from t1; +k v +1 0 +select k, v from t2; +k v +update t1 left join t2 on t1.k = t2.k right join t4 on t4.k = t2.k set t1.v = 4, t2.v = 4, t4.v = 4; +select k, v from t1; +k v +1 0 +select k, v from t2; +k v +select k, v from t4; +k v +3 4 +insert t2 values (1, 10); +update t1 left join t2 on t1.k = t2.k set t2.v = 11; +select k, v from t2; +k v +1 11 +update t1 t11 left join t2 on t11.k = t2.k left join t1 t12 on t2.v = t12.k set t12.v = 233, t11.v = 111; +select k, v from t1; +k v +1 111 +select k, v from t2; +k v +1 11 +delete from t1; +delete from t2; +insert into t1 values (null, null); +update t1 left join t2 on t1.k = t2.k set t1.v = 1; +select k, v from t1; +k v +NULL 1 +insert t5 values(0, 0); +update t1 left join t5 on t1.k = t5.k set t1.v = 2; +select k, v from t1; +k v +NULL 2 +select k, v from t5; +k v +0 0 +insert into t6 values (1, NULL); +insert into t7 values (5, 1, 'a'); +update t6, t7 set t6.v = t7.v where t6.id = t7.id and t7.x = 5; +select v from t6; +v +a +drop table if exists t1, t2; +create table t1(id int primary key, v int, gv int GENERATED ALWAYS AS (v * 2) STORED); +create table t2(id int, v int); +update t1 tt1 inner join (select count(t1.id) a, t1.id from t1 left join t2 on t1.id = t2.id group by t1.id) x on tt1.id = x.id set tt1.v = tt1.v + x.a; +drop table if exists t; +create table t(a int primary key, b int, c int, index idx_b(b)); +insert into t values (1, 1, 1), (2, 1, 1), (3, 1, 2), (4, 2, 3); +select (select count(1) k from t s where s.b = t1.c) from t t1; +(select count(1) k 
from t s where s.b = t1.c) +3 +3 +1 +0 +drop table if exists t; +create table t(a int primary key, b int, c int); +insert into t values (1, 1, 1), (2, 1, 1), (3, 1, 2), (4, 2, 3); +select a from t; +a +1 +2 +3 +4 +select * from t where a = 4; +a b c +4 2 3 +select a from t limit 1; +a +1 +select a from t order by a desc; +a +4 +3 +2 +1 +select a from t order by a desc limit 1; +a +4 +select a from t order by b desc limit 1; +a +4 +select a from t where a < 3; +a +1 +2 +select a from t where b > 1; +a +4 +select a from t where b > 1 and a < 3; +a +select count(*) from t where b > 1 and a < 3; +count(*) +0 +select count(*) from t; +count(*) +4 +select count(*), c from t group by c order by c; +count(*) c +2 1 +1 2 +1 3 +select sum(c) as s from t group by b order by s; +s +3 +4 +select avg(a) as s from t group by b order by s; +s +2.0000 +4.0000 +select sum(distinct c) from t group by b; +sum(distinct c) +3 +3 +create index i on t(c,b); +select a from t where c = 1; +a +1 +2 +select a from t where c = 1 and a < 2; +a +1 +select a from t where c = 1 order by a limit 1; +a +1 +select count(*) from t where c = 1 ; +count(*) +2 +create index i1 on t(b); +select c from t where b = 2; +c +3 +select * from t where b = 2; +a b c +4 2 3 +select count(*) from t where b = 1; +count(*) +3 +select * from t where b = 1 and a > 1 limit 1; +a b c +2 1 1 +drop table if exists t; +create table t (id int, c1 datetime); +insert into t values (1, '2015-06-07 12:12:12'); +select id from t where c1 = '2015-06-07 12:12:12'; +id +1 +drop table if exists t0; +CREATE TABLE t0(c0 INT); +INSERT INTO t0 VALUES (100000); +SELECT * FROM t0 WHERE NOT SPACE(t0.c0); +c0 +100000 +drop table if exists t; +create table t(a int, primary key(a)); +insert into t(a) values(1); +alter table t add column b int default 1; +alter table t alter b set default 2; +select b from t where a = 1; +b +1 +drop table if exists t1; +create table t1 (a int, b int as (a + 1) virtual not null, unique index idx(b)); +REPLACE INTO `t1` (`a`) VALUES (2); +REPLACE INTO `t1` (`a`) VALUES (2); +select * from t1; +a b +2 3 +insert into `t1` (`a`) VALUES (2) on duplicate key update a = 3; +select * from t1; +a b +3 4 +drop table if exists t1; +create table t1 (c_int int, c_str varchar(40), key(c_str)); +drop table if exists t2; +create table t2 like t1; +insert into t1 values (1, 'a'), (2, 'b'), (3, 'c'); +insert into t2 select * from t1; +select (select t2.c_str from t2 where t2.c_str <= t1.c_str and t2.c_int in (1, 2) order by t2.c_str limit 1) x from t1 order by c_int; +x +a +a +a +drop table if exists t1, t2; +create table t1 (c1 int); +create table t2 (c1 int primary key, c2 int); +insert into t1 values(3); +insert into t2 values(2, 2); +insert into t2 values(0, 0); +delete from t1, t2 using t1 left join t2 on t1.c1 = t2.c2; +select * from t1 order by c1; +c1 +select * from t2 order by c1; +c1 c2 +0 0 +2 2 +drop table if exists t1, t2; +create table t1 (c1 int); +create table t2 (c2 int); +insert into t1 values(null); +insert into t2 values(null); +delete from t1, t2 using t1 join t2 where t1.c1 is null; +select * from t1; +c1 +select * from t2; +c2 +drop table if exists t1, t2; +create table t1 (pk int(11) primary key, a int(11) not null, b int(11), key idx_b(b), key idx_a(a)); +insert into `t1` values (1,1,0),(2,7,6),(3,2,null),(4,1,null),(5,4,5); +create table t2 (a int); +insert into t2 values (1),(null); +select (select a from t1 use index(idx_a) where b >= t2.a order by a limit 1) as field from t2; +field +4 +NULL +drop table if exists t, s; 
+create table t(a date, b float);
+create table s(b float);
+insert into t values(NULL,-37), ("2011-11-04",105), ("2013-03-02",-22), ("2006-07-02",-56), (NULL,124), (NULL,111), ("2018-03-03",-5);
+insert into s values(-37),(105),(-22),(-56),(124),(105),(111),(-5);
+select count(distinct t.a, t.b) from t join s on t.b= s.b;
+count(distinct t.a, t.b)
+4
+drop table if exists t;
+create table t (a decimal(10,6), b decimal, index idx_b (b));
+set sql_mode = '';
+insert t values (1.1, 1.1);
+insert t values (2.4, 2.4);
+insert t values (3.3, 2.7);
+select * from t where a < 2.399999;
+a b
+1.100000 1
+select * from t where a > 1.5;
+a b
+2.400000 2
+3.300000 3
+select * from t where a <= 1.1;
+a b
+1.100000 1
+select * from t where b >= 3;
+a b
+3.300000 3
+select * from t where not (b = 1);
+a b
+2.400000 2
+3.300000 3
+select * from t where b&1 = a|1;
+a b
+1.100000 1
+select * from t where b != 2 and b <=> 3;
+a b
+3.300000 3
+select * from t where b in (3);
+a b
+3.300000 3
+select * from t where b not in (1, 2);
+a b
+3.300000 3
+drop table if exists t;
+create table t (a varchar(255), b int);
+insert t values ('abc123', 1);
+insert t values ('ab123', 2);
+select * from t where a like 'ab%';
+a b
+abc123 1
+ab123 2
+select * from t where a like 'ab_12';
+a b
+drop table if exists t;
+create table t (a int primary key);
+insert t values (1);
+insert t values (2);
+select * from t where not (a = 1);
+a
+2
+select * from t where not(not (a = 1));
+a
+1
+select * from t where not(a != 1 and a != 2);
+a
+1
+2
+set @@sql_mode = default;
+drop table if exists t;
+create table t (a decimal(10,6), b decimal, index idx_b (b));
+set sql_mode = '';
+insert t values (1.1, 1.1);
+insert t values (2.2, 2.2);
+insert t values (3.3, 2.7);
+select * from t where a > 1.5;
+a b
+2.200000 2
+3.300000 3
+select * from t where b > 1.5;
+a b
+2.200000 2
+3.300000 3
+drop table if exists t;
+create table t (a time(3), b time, index idx_a (a));
+insert t values ('11:11:11', '11:11:11');
+insert t values ('11:11:12', '11:11:12');
+insert t values ('11:11:13', '11:11:13');
+select * from t where a > '11:11:11.5';
+a b
+11:11:12.000 11:11:12
+11:11:13.000 11:11:13
+select * from t where b > '11:11:11.5';
+a b
+11:11:12.000 11:11:12
+11:11:13.000 11:11:13
+set @@sql_mode = default;
+Select 1;
+1
+1
+Select 1 from dual;
+1
+1
+Select count(*) from dual;
+count(*)
+1
+Select 1 from dual where 1;
+1
+1
+drop table if exists t;
+create table t(a int primary key);
+select t1.* from t t1, t t2 where t1.a=t2.a and 1=0;
+a
+drop table if exists t;
+create table t (c int, d int);
+insert t values (1, 1);
+insert t values (1, 3);
+insert t values (2, 1);
+insert t values (2, 3);
+select * from t where (c, d) < (2,2);
+c d
+1 1
+1 3
+2 1
+select * from t where (1,2,3) > (3,2,1);
+c d
+select * from t where row(1,2,3) > (3,2,1);
+c d
+select * from t where (c, d) = (select * from t where (c,d) = (1,1));
+c d
+1 1
+select * from t where (c, d) = (select * from t k where (t.c,t.d) = (c,d));
+c d
+1 1
+1 3
+2 1
+2 3
+select (1, 2, 3) < (2, 3, 4);
+(1, 2, 3) < (2, 3, 4)
+1
+select (2, 3, 4) <= (2, 3, 3);
+(2, 3, 4) <= (2, 3, 3)
+0
+select (2, 3, 4) <= (2, 3, 4);
+(2, 3, 4) <= (2, 3, 4)
+1
+select (2, 3, 4) <= (2, 1, 4);
+(2, 3, 4) <= (2, 1, 4)
+0
+select (2, 3, 4) >= (2, 3, 4);
+(2, 3, 4) >= (2, 3, 4)
+1
+select (2, 3, 4) = (2, 3, 4);
+(2, 3, 4) = (2, 3, 4)
+1
+select (2, 3, 4) != (2, 3, 4);
+(2, 3, 4) != (2, 3, 4)
+0
+select row(1, 1) in (row(1, 1));
+row(1, 1) in (row(1, 1))
+1
+select row(1, 0) in (row(1, 1));
+row(1, 0) in (row(1, 1))
+0
+select row(1, 1) in (select 1, 1);
+row(1, 1) in (select 1, 1)
+1
+select row(1, 1) > row(1, 0);
+row(1, 1) > row(1, 0)
+1
+select row(1, 1) > (select 1, 0);
+row(1, 1) > (select 1, 0)
+1
+select 1 > (select 1);
+1 > (select 1)
+0
+select (select 1);
+(select 1)
+1
+drop table if exists t1;
+create table t1 (a int, b int);
+insert t1 values (1,2),(1,null);
+drop table if exists t2;
+create table t2 (c int, d int);
+insert t2 values (0,0);
+select * from t2 where (1,2) in (select * from t1);
+c d
+0 0
+select * from t2 where (1,2) not in (select * from t1);
+c d
+select * from t2 where (1,1) not in (select * from t1);
+c d
+select * from t2 where (1,null) in (select * from t1);
+c d
+select * from t2 where (null,null) in (select * from t1);
+c d
+delete from t1 where a=1 and b=2;
+select (1,1) in (select * from t2) from t1;
+(1,1) in (select * from t2)
+0
+select (1,1) not in (select * from t2) from t1;
+(1,1) not in (select * from t2)
+1
+select (1,1) in (select 1,1 from t2) from t1;
+(1,1) in (select 1,1 from t2)
+1
+select (1,1) not in (select 1,1 from t2) from t1;
+(1,1) not in (select 1,1 from t2)
+0
+select (1,null) not in (select 1,1 from t2) from t1;
+(1,null) not in (select 1,1 from t2)
+NULL
+select (t1.a,null) not in (select 1,1 from t2) from t1;
+(t1.a,null) not in (select 1,1 from t2)
+NULL
+select (1,null) in (select * from t1);
+(1,null) in (select * from t1)
+NULL
+select (1,null) not in (select * from t1);
+(1,null) not in (select * from t1)
+NULL
+select str_to_date('20190101','%Y%m%d%!') from dual;
+str_to_date('20190101','%Y%m%d%!')
+2019-01-01
+select str_to_date('20190101','%Y%m%d%f') from dual;
+str_to_date('20190101','%Y%m%d%f')
+2019-01-01 00:00:00.000000
+select str_to_date('20190101','%Y%m%d%H%i%s') from dual;
+str_to_date('20190101','%Y%m%d%H%i%s')
+2019-01-01 00:00:00
+select str_to_date('18/10/22','%y/%m/%d') from dual;
+str_to_date('18/10/22','%y/%m/%d')
+2018-10-22
+select str_to_date('a18/10/22','%y/%m/%d') from dual;
+str_to_date('a18/10/22','%y/%m/%d')
+NULL
+select str_to_date('69/10/22','%y/%m/%d') from dual;
+str_to_date('69/10/22','%y/%m/%d')
+2069-10-22
+select str_to_date('70/10/22','%y/%m/%d') from dual;
+str_to_date('70/10/22','%y/%m/%d')
+1970-10-22
+select str_to_date('8/10/22','%y/%m/%d') from dual;
+str_to_date('8/10/22','%y/%m/%d')
+2008-10-22
+select str_to_date('8/10/22','%Y/%m/%d') from dual;
+str_to_date('8/10/22','%Y/%m/%d')
+2008-10-22
+select str_to_date('18/10/22','%Y/%m/%d') from dual;
+str_to_date('18/10/22','%Y/%m/%d')
+2018-10-22
+select str_to_date('a18/10/22','%Y/%m/%d') from dual;
+str_to_date('a18/10/22','%Y/%m/%d')
+NULL
+select str_to_date('69/10/22','%Y/%m/%d') from dual;
+str_to_date('69/10/22','%Y/%m/%d')
+2069-10-22
+select str_to_date('70/10/22','%Y/%m/%d') from dual;
+str_to_date('70/10/22','%Y/%m/%d')
+1970-10-22
+select str_to_date('018/10/22','%Y/%m/%d') from dual;
+str_to_date('018/10/22','%Y/%m/%d')
+0018-10-22
+select str_to_date('2018/10/22','%Y/%m/%d') from dual;
+str_to_date('2018/10/22','%Y/%m/%d')
+2018-10-22
+select str_to_date('018/10/22','%y/%m/%d') from dual;
+str_to_date('018/10/22','%y/%m/%d')
+NULL
+select str_to_date('18/10/22','%y0/%m/%d') from dual;
+str_to_date('18/10/22','%y0/%m/%d')
+NULL
+select str_to_date('18/10/22','%Y0/%m/%d') from dual;
+str_to_date('18/10/22','%Y0/%m/%d')
+NULL
+select str_to_date('18a/10/22','%y/%m/%d') from dual;
+str_to_date('18a/10/22','%y/%m/%d')
+NULL
+select str_to_date('18a/10/22','%Y/%m/%d') from dual;
+str_to_date('18a/10/22','%Y/%m/%d')
+NULL
+select str_to_date('20188/10/22','%Y/%m/%d') from dual;
+str_to_date('20188/10/22','%Y/%m/%d')
+NULL
+select str_to_date('2018510522','%Y5%m5%d') from dual;
+str_to_date('2018510522','%Y5%m5%d')
+2018-10-22
+select str_to_date('2018^10^22','%Y^%m^%d') from dual;
+str_to_date('2018^10^22','%Y^%m^%d')
+2018-10-22
+select str_to_date('2018@10@22','%Y@%m@%d') from dual;
+str_to_date('2018@10@22','%Y@%m@%d')
+2018-10-22
+select str_to_date('2018%10%22','%Y%%m%%d') from dual;
+str_to_date('2018%10%22','%Y%%m%%d')
+NULL
+select str_to_date('2018(10(22','%Y(%m(%d') from dual;
+str_to_date('2018(10(22','%Y(%m(%d')
+2018-10-22
+select str_to_date('2018\10\22','%Y\%m\%d') from dual;
+str_to_date('2018\10\22','%Y\%m\%d')
+NULL
+select str_to_date('2018=10=22','%Y=%m=%d') from dual;
+str_to_date('2018=10=22','%Y=%m=%d')
+2018-10-22
+select str_to_date('2018+10+22','%Y+%m+%d') from dual;
+str_to_date('2018+10+22','%Y+%m+%d')
+2018-10-22
+select str_to_date('2018_10_22','%Y_%m_%d') from dual;
+str_to_date('2018_10_22','%Y_%m_%d')
+2018-10-22
+select str_to_date('69510522','%y5%m5%d') from dual;
+str_to_date('69510522','%y5%m5%d')
+2069-10-22
+select str_to_date('69^10^22','%y^%m^%d') from dual;
+str_to_date('69^10^22','%y^%m^%d')
+2069-10-22
+select str_to_date('18@10@22','%y@%m@%d') from dual;
+str_to_date('18@10@22','%y@%m@%d')
+2018-10-22
+select str_to_date('18%10%22','%y%%m%%d') from dual;
+str_to_date('18%10%22','%y%%m%%d')
+NULL
+select str_to_date('18(10(22','%y(%m(%d') from dual;
+str_to_date('18(10(22','%y(%m(%d')
+2018-10-22
+select str_to_date('18\10\22','%y\%m\%d') from dual;
+str_to_date('18\10\22','%y\%m\%d')
+NULL
+select str_to_date('18+10+22','%y+%m+%d') from dual;
+str_to_date('18+10+22','%y+%m+%d')
+2018-10-22
+select str_to_date('18=10=22','%y=%m=%d') from dual;
+str_to_date('18=10=22','%y=%m=%d')
+2018-10-22
+select str_to_date('18_10_22','%y_%m_%d') from dual;
+str_to_date('18_10_22','%y_%m_%d')
+2018-10-22
+SELECT STR_TO_DATE('2020-07-04 11:22:33 PM', '%Y-%m-%d %r');
+STR_TO_DATE('2020-07-04 11:22:33 PM', '%Y-%m-%d %r')
+2020-07-04 23:22:33
+SELECT STR_TO_DATE('2020-07-04 12:22:33 AM', '%Y-%m-%d %r');
+STR_TO_DATE('2020-07-04 12:22:33 AM', '%Y-%m-%d %r')
+2020-07-04 00:22:33
+SELECT STR_TO_DATE('2020-07-04 12:22:33', '%Y-%m-%d %T');
+STR_TO_DATE('2020-07-04 12:22:33', '%Y-%m-%d %T')
+2020-07-04 12:22:33
+SELECT STR_TO_DATE('2020-07-04 00:22:33', '%Y-%m-%d %T');
+STR_TO_DATE('2020-07-04 00:22:33', '%Y-%m-%d %T')
+2020-07-04 00:22:33
+drop table if exists pt;
+create table pt (a int, b int, index i_b(b)) partition by range (a) (partition p1 values less than (2), partition p2 values less than (4), partition p3 values less than (6));
+insert into pt values(0, 0);
+insert into pt values(1, 1);
+insert into pt values(2, 2);
+insert into pt values(3, 3);
+insert into pt values(4, 4);
+insert into pt values(5, 5);
+select * from pt order by a;
+a b
+0 0
+1 1
+2 2
+3 3
+4 4
+5 5
+select b from pt where b = 3;
+b
+3
+select a from pt where b = 3;
+a
+3
+drop table if exists t1;
+create table t1(i int, j int, k int);
+insert into t1 VALUES (1,1,1),(2,2,2),(3,3,3),(4,4,4);
+INSERT INTO t1 SELECT 10*i,j,5*j FROM t1 UNION SELECT 20*i,j,5*j FROM t1 UNION SELECT 30*i,j,5*j FROM t1;
+set @@session.tidb_enable_window_function=1;
+SELECT SUM(i) OVER W FROM t1 WINDOW w AS (PARTITION BY j ORDER BY i) ORDER BY 1+SUM(i) OVER w;
+SUM(i) OVER W
+1
+2
+3
+4
+11
+22
+31
+33
+44
+61
+62
+93
+122
+124
+183
+244
+set @@session.tidb_enable_window_function=default;
+drop table if exists a;
+create table a (f1 int, f2 varchar(32), primary key (f1));
+insert into a (f1,f2) values (1,'a'), (2,'b'), (3,'c');
+select /*+ inl_merge_join(a) */ a.* from a inner join (select 1 as k1,'k2-1' as k2) as k on a.f1=k.k1;
+f1 f2
+1 a
+drop table if exists t1, t2;
+create table t1(a int);
+create table t2(a int);
+insert into t1(a) select 1;
+select b.n from t1 left join (select a as a, null as n from t2) b on b.a = t1.a order by t1.a;
+n
+NULL
+drop table if exists t;
+drop table if exists s;
+CREATE TABLE `t` ( `a` int(11) DEFAULT NULL, `b` int(11) DEFAULT NULL);
+CREATE TABLE `s` ( `a` int(11) DEFAULT NULL, `b` int(11) DEFAULT NULL);
+insert into t values(1,1),(2,2);
+insert into t select * from t;
+insert into t select * from t;
+insert into t select * from t;
+insert into t select * from t;
+insert into t select * from t;
+insert into t select * from t;
+insert into t select * from t;
+insert into t select * from t;
+insert into s values(3,3),(4,4),(1,null),(2,null),(null,null);
+insert into s select * from s;
+insert into s select * from s;
+insert into s select * from s;
+insert into s select * from s;
+insert into s select * from s;
+set @@tidb_max_chunk_size=32;
+set @@tidb_enable_null_aware_anti_join=true;
+select * from t where (a,b) not in (select a, b from s);
+a b
+set @@tidb_max_chunk_size=default;
+set @@tidb_enable_null_aware_anti_join=default;
+drop table if exists t;
+create table t(id int primary key, a int);
+insert into t values(1, 1);
+begin PESSIMISTIC;
+select a from t where id=1 for update;
+a
+1
+update t set a=a+1 where id=1;
+commit;
+select a from t where id=1;
+a
+2
+drop table if exists select_limit;
+create table select_limit(id int not null default 1, name varchar(255), PRIMARY KEY(id));
+insert INTO select_limit VALUES (1, "hello");
+insert into select_limit values (2, "hello");
+insert INTO select_limit VALUES (3, "hello");
+insert INTO select_limit VALUES (4, "hello");
+select * from select_limit limit 1;
+id name
+1 hello
+select id from (select * from select_limit limit 1) k where id != 1;
+id
+select * from select_limit limit 18446744073709551615 offset 0;
+id name
+1 hello
+2 hello
+3 hello
+4 hello
+select * from select_limit limit 18446744073709551615 offset 1;
+id name
+2 hello
+3 hello
+4 hello
+select * from select_limit limit 18446744073709551615 offset 3;
+id name
+4 hello
+select * from select_limit limit 18446744073709551616 offset 3;
+Error 1064 (42000): You have an error in your SQL syntax; check the manual that corresponds to your TiDB version for the right syntax to use line 1 column 53 near "18446744073709551616 offset 3;"
+drop table if exists select_order_test;
+create table select_order_test(id int not null default 1, name varchar(255), PRIMARY KEY(id));
+insert INTO select_order_test VALUES (1, "hello");
+insert into select_order_test values (2, "hello");
+select * from select_order_test where id = 1 order by id limit 1 offset 0;
+id name
+1 hello
+select id from select_order_test order by id desc limit 1 ;
+id
+2
+select id from select_order_test order by id + 1 desc limit 1 ;
+id
+2
+select * from select_order_test order by name, id limit 1 offset 0;
+id name
+1 hello
+select id as c1, name from select_order_test order by 2, id limit 1 offset 0;
+c1 name
+1 hello
+select * from select_order_test order by name, id limit 100 offset 0;
+id name
+1 hello
+2 hello
+select * from select_order_test order by name, id limit 1 offset 100;
+id name
+select id from select_order_test order by name, id limit 18446744073709551615;
+id
+1
+2
+select id, name from select_order_test where id = 1 group by id, name limit 1 offset 0;
+id name
+1 hello
+insert INTO select_order_test VALUES (3, "zz");
+insert INTO select_order_test VALUES (4, "zz");
+insert INTO select_order_test VALUES (5, "zz");
+insert INTO select_order_test VALUES (6, "zz");
+insert INTO select_order_test VALUES (7, "zz");
+insert INTO select_order_test VALUES (8, "zz");
+insert INTO select_order_test VALUES (9, "zz");
+insert INTO select_order_test VALUES (10, "zz");
+insert INTO select_order_test VALUES (10086, "hi");
+insert INTO select_order_test VALUES (11, "hh");
+insert INTO select_order_test VALUES (12, "hh");
+insert INTO select_order_test VALUES (13, "hh");
+insert INTO select_order_test VALUES (14, "hh");
+insert INTO select_order_test VALUES (15, "hh");
+insert INTO select_order_test VALUES (16, "hh");
+insert INTO select_order_test VALUES (17, "hh");
+insert INTO select_order_test VALUES (18, "hh");
+insert INTO select_order_test VALUES (19, "hh");
+insert INTO select_order_test VALUES (20, "hh");
+insert INTO select_order_test VALUES (21, "zz");
+insert INTO select_order_test VALUES (22, "zz");
+insert INTO select_order_test VALUES (23, "zz");
+insert INTO select_order_test VALUES (24, "zz");
+insert INTO select_order_test VALUES (25, "zz");
+insert INTO select_order_test VALUES (26, "zz");
+insert INTO select_order_test VALUES (27, "zz");
+insert INTO select_order_test VALUES (28, "zz");
+insert INTO select_order_test VALUES (29, "zz");
+insert INTO select_order_test VALUES (30, "zz");
+insert INTO select_order_test VALUES (1501, "aa");
+select * from select_order_test order by name, id limit 1 offset 3;
+id name
+11 hh
+drop table if exists select_order_test;
+drop table if exists t;
+create table t (c int, d int);
+insert t values (1, 1);
+insert t values (1, 2);
+insert t values (1, 3);
+select 1-d as d from t order by d;
+d
+-2
+-1
+0
+select 1-d as d from t order by d + 1;
+d
+0
+-1
+-2
+select t.d from t order by d;
+d
+1
+2
+3
+drop table if exists t;
+create table t (a int, b int, c int);
+insert t values (1, 2, 3);
+select b from (select a,b from t order by a,c) t;
+b
+2
+select b from (select a,b from t order by a,c limit 1) t;
+b
+2
+drop table if exists t;
+create table t(a int, b int, index idx(a));
+insert into t values(1, 1), (2, 2);
+select * from t where 1 order by b;
+a b
+1 1
+2 2
+select * from t where a between 1 and 2 order by a desc;
+a b
+2 2
+1 1
+drop table if exists t;
+create table t(a int primary key, b int, c int, index idx(b));
+insert into t values(1, 3, 1);
+insert into t values(2, 2, 2);
+insert into t values(3, 1, 3);
+select * from t use index(idx) order by a desc limit 1;
+a b c
+3 1 3
+drop table if exists t;
+create table t(a int, b int, key b (b));
+set @@tidb_index_lookup_size = 3;
+insert into t values(0, 10);
+insert into t values(1, 9);
+insert into t values(2, 8);
+insert into t values(3, 7);
+insert into t values(4, 6);
+insert into t values(5, 5);
+insert into t values(6, 4);
+insert into t values(7, 3);
+insert into t values(8, 2);
+insert into t values(9, 1);
+select a from t use index(b) order by b;
+a
+9
+8
+7
+6
+5
+4
+3
+2
+1
+0
+set @@tidb_index_lookup_size = default;
+select row(1, 1) from test;
+Error 1146 (42S02): Table 'executor__executor.test' doesn't exist
+select * from test group by row(1, 1);
+Error 1146 (42S02): Table 'executor__executor.test' doesn't exist
+select * from test order by row(1, 1);
+Error 1146 (42S02): Table 'executor__executor.test' doesn't exist
+select * from test having row(1, 1);
+Error 1146 (42S02): Table 'executor__executor.test' doesn't exist
+select (select 1, 1) from test;
+Error 1146 (42S02): Table 'executor__executor.test' doesn't exist
+select * from test group by (select 1, 1);
+Error 1146 (42S02): Table 'executor__executor.test' doesn't exist
+select * from test order by (select 1, 1);
+Error 1146 (42S02): Table 'executor__executor.test' doesn't exist
+select * from test having (select 1, 1);
+Error 1146 (42S02): Table 'executor__executor.test' doesn't exist
+drop table if exists t;
+create table t (c1 int primary key, c2 int, key c (c2));
+insert t values(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12), (13, 13), (14, 14), (15, 15), (16, 16), (17, 17), (18, 18), (19, 19), (20, 20), (21, 21), (22, 22), (23, 23), (24, 24), (25, 25), (26, 26), (27, 27), (28, 28), (29, 29), (30, 30), (31, 31), (32, 32), (33, 33), (34, 34), (35, 35), (36, 36), (37, 37), (38, 38), (39, 39), (40, 40), (41, 41), (42, 42), (43, 43), (44, 44), (45, 45), (46, 46), (47, 47), (48, 48), (49, 49), (50, 50), (51, 51), (52, 52), (53, 53), (54, 54), (55, 55), (56, 56), (57, 57), (58, 58), (59, 59), (60, 60), (61, 61), (62, 62), (63, 63), (64, 64), (65, 65), (66, 66), (67, 67), (68, 68), (69, 69), (70, 70), (71, 71), (72, 72), (73, 73), (74, 74), (75, 75), (76, 76), (77, 77), (78, 78), (79, 79), (80, 80), (81, 81), (82, 82), (83, 83), (84, 84), (85, 85), (86, 86), (87, 87), (88, 88), (89, 89), (90, 90), (91, 91), (92, 92), (93, 93), (94, 94), (95, 95), (96, 96), (97, 97), (98, 98), (99, 99), (100, 100), (101, 101), (102, 102), (103, 103), (104, 104), (105, 105), (106, 106), (107, 107), (108, 108), (109, 109), (110, 110), (111, 111), (112, 112), (113, 113), (114, 114), (115, 115), (116, 116), (117, 117), (118, 118), (119, 119), (120, 120), (121, 121), (122, 122), (123, 123), (124, 124), (125, 125), (126, 126), (127, 127), (128, 128), (129, 129), (130, 130), (131, 131), (132, 132), (133, 133), (134, 134), (135, 135), (136, 136), (137, 137), (138, 138), (139, 139), (140, 140), (141, 141), (142, 142), (143, 143), (144, 144), (145, 145), (146, 146), (147, 147), (148, 148), (149, 149), (150, 150), (151, 151), (152, 152), (153, 153), (154, 154), (155, 155), (156, 156), (157, 157), (158, 158), (159, 159), (160, 160), (161, 161), (162, 162), (163, 163), (164, 164), (165, 165), (166, 166), (167, 167), (168, 168), (169, 169), (170, 170), (171, 171), (172, 172), (173, 173), (174, 174), (175, 175), (176, 176), (177, 177), (178, 178), (179, 179), (180, 180), (181, 181), (182, 182), (183, 183), (184, 184), (185, 185), (186, 186), (187, 187), (188, 188), (189, 189), (190, 190), (191, 191), (192, 192), (193, 193), (194, 194), (195, 195), (196, 196), (197, 197), (198, 198), (199, 199), (200, 200);
+select c2 from t where c1 in ('7', '10', '112', '111', '98', '106', '100', '9', '18', '17') order by c2;
+c2
+7
+9
+10
+17
+18
+98
+100
+106
+111
+112
+select c2 from t where c1 in ('7a');
+c2
+7
+drop table if exists t;
+create table t (a int PRIMARY KEY AUTO_INCREMENT);
+insert t values (),();
+insert t values (-100),(0);
+select * from t;
+a
+-100
+1
+2
+3
+select * from t where a = 1;
+a
+1
+select * from t where a != 1;
+a
+-100
+2
+3
+select * from t where a >= '1.1';
+a
+2
+3
+select * from t where a < '1.1';
+a
+-100
+1
+select * from t where a > '-100.1' and a < 2;
+a
+-100
+1
+select * from t where a is null;
+a
+select * from t where a is true;
+a
+-100
+1
+2
+3
+select * from t where a is false;
+a
+select * from t where a in (1, 2);
+a
+1
+2
+select * from t where a between 1 and 2;
+a
+1
+2
+drop table if exists t;
+create table t (a int primary key auto_increment, b int default 1, c int);
+insert t values ();
+select * from t;
+a b c
+1 1 NULL
+update t set b = NULL where a = 1;
+select * from t;
+a b c
+1 NULL NULL
+update t set c = 1;
+select * from t ;
+a b c
+1 NULL 1
+delete from t where a = 1;
+insert t (a) values (1);
+select * from t;
+a b c
+1 1 NULL
+drop table if exists test_json;
+create table test_json (id int, a json);
+insert into test_json (id, a) values (1, '{"a":[1,"2",{"aa":"bb"},4],"b":true}');
+insert into test_json (id, a) values (2, "null");
+insert into test_json (id, a) values (3, null);
+insert into test_json (id, a) values (4, 'true');
+insert into test_json (id, a) values (5, '3');
+insert into test_json (id, a) values (5, '4.0');
+insert into test_json (id, a) values (6, '"string"');
+select tj.a from test_json tj order by tj.id;
+a
+{"a": [1, "2", {"aa": "bb"}, 4], "b": true}
+null
+NULL
+true
+3
+4
+"string"
+select json_type(a) from test_json tj order by tj.id;
+json_type(a)
+OBJECT
+NULL
+NULL
+BOOLEAN
+INTEGER
+DOUBLE
+STRING
+select a from test_json tj where a = 3;
+a
+3
+select a from test_json tj where a = 4.0;
+a
+4
+select a from test_json tj where a = true;
+a
+true
+select a from test_json tj where a = "string";
+a
+"string"
+select cast(true as JSON);
+cast(true as JSON)
+true
+select cast(false as JSON);
+cast(false as JSON)
+false
+select a->>'$.a[2].aa' as x, a->'$.b' as y from test_json having x is not null order by id;
+x y
+bb true
+select a->'$.a[2].aa' as x, a->>'$.b' as y from test_json having x is not null order by id;
+x y
+"bb" true
+create table test_bad_json(a json default '{}');
+Error 1101 (42000): BLOB/TEXT/JSON column 'a' can't have a default value
+create table test_bad_json(a blob default 'hello');
+Error 1101 (42000): BLOB/TEXT/JSON column 'a' can't have a default value
+create table test_bad_json(a text default 'world');
+Error 1101 (42000): BLOB/TEXT/JSON column 'a' can't have a default value
+create table test_bad_json(id int, a json, key (a));
+Error 3152 (42000): JSON column 'a' cannot be used in key specification.
+select CAST('3' AS JSON), CAST('{}' AS JSON), CAST(null AS JSON);
+CAST('3' AS JSON) CAST('{}' AS JSON) CAST(null AS JSON)
+3 {} NULL
+select a, count(1) from test_json group by a order by a;
+a count(1)
+NULL 1
+null 1
+3 1
+4 1
+"string" 1
+{"a": [1, "2", {"aa": "bb"}, 4], "b": true} 1
+true 1
+drop table if exists test_json;
+create table test_json ( a decimal(60,2) as (JSON_EXTRACT(b,'$.c')), b json );
+insert into test_json (b) values
+('{"c": "1267.1"}'),
+('{"c": "1267.01"}'),
+('{"c": "1267.1234"}'),
+('{"c": "1267.3456"}'),
+('{"c": "1234567890123456789012345678901234567890123456789012345"}'),
+('{"c": "1234567890123456789012345678901234567890123456789012345.12345"}');
+select a from test_json;
+a
+1267.10
+1267.01
+1267.12
+1267.35
+1234567890123456789012345678901234567890123456789012345.00
+1234567890123456789012345678901234567890123456789012345.12
+drop table if exists test_gc_write, test_gc_write_1;
+CREATE TABLE test_gc_write (a int primary key auto_increment, b int, c int as (a+8) virtual);
+Error 3109 (HY000): Generated column 'c' cannot refer to auto-increment column.
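+-- The failed CREATE above is expected: a generated column expression may not
+-- reference an auto-increment column (error 3109). The retry below computes c
+-- from the plain column b instead, so it is accepted.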
+CREATE TABLE test_gc_write (a int primary key auto_increment, b int, c int as (b+8) virtual);
+CREATE TABLE test_gc_write_1 (a int primary key, b int, c int);
+insert into test_gc_write (a, b, c) values (1, 1, 1);
+Error 3105 (HY000): The value specified for generated column 'c' in table 'test_gc_write' is not allowed.
+insert into test_gc_write values (1, 1, 1);
+Error 3105 (HY000): The value specified for generated column 'c' in table 'test_gc_write' is not allowed.
+insert into test_gc_write select 1, 1, 1;
+Error 3105 (HY000): The value specified for generated column 'c' in table 'test_gc_write' is not allowed.
+insert into test_gc_write (a, b) values (1, 1) on duplicate key update c = 1;
+Error 3105 (HY000): The value specified for generated column 'c' in table 'test_gc_write' is not allowed.
+insert into test_gc_write set a = 1, b = 1, c = 1;
+Error 3105 (HY000): The value specified for generated column 'c' in table 'test_gc_write' is not allowed.
+update test_gc_write set c = 1;
+Error 3105 (HY000): The value specified for generated column 'c' in table 'test_gc_write' is not allowed.
+update test_gc_write, test_gc_write_1 set test_gc_write.c = 1;
+Error 3105 (HY000): The value specified for generated column 'c' in table 'test_gc_write' is not allowed.
+insert into test_gc_write (a, b) values (1, 1);
+insert into test_gc_write set a = 2, b = 2;
+insert into test_gc_write (b) select c from test_gc_write;
+update test_gc_write set b = 2 where a = 2;
+update test_gc_write t1, test_gc_write_1 t2 set t1.b = 3, t2.b = 4;
+insert into test_gc_write values (1, 1);
+Error 1136 (21S01): Column count doesn't match value count at row 1
+insert into test_gc_write select 1, 1;
+Error 1136 (21S01): Column count doesn't match value count at row 1
+insert into test_gc_write (c) select a, b from test_gc_write;
+Error 1136 (21S01): Column count doesn't match value count at row 1
+insert into test_gc_write (b, c) select a, b from test_gc_write;
+Error 3105 (HY000): The value specified for generated column 'c' in table 'test_gc_write' is not allowed.
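+-- Summary of the write rules exercised above: a generated column can never be
+-- assigned an explicit value via INSERT, REPLACE, UPDATE or ON DUPLICATE KEY
+-- UPDATE (error 3105), and a bare VALUES list must still match the full column
+-- count including generated columns (error 1136). Writes that simply omit the
+-- generated column, such as "insert into test_gc_write (a, b) values (1, 1)",
+-- succeed and let the column be computed from its expression.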
+drop table if exists test_gc_read;
+CREATE TABLE test_gc_read(a int primary key, b int, c int as (a+b), d int as (a*b) stored, e int as (c*2));
+SELECT generation_expression FROM information_schema.columns WHERE table_name = 'test_gc_read' AND column_name = 'd';
+generation_expression
+`a` * `b`
+INSERT INTO test_gc_read (a, b) VALUES (0,null),(1,2),(3,4);
+SELECT * FROM test_gc_read ORDER BY a;
+a b c d e
+0 NULL NULL NULL NULL
+1 2 3 2 6
+3 4 7 12 14
+INSERT INTO test_gc_read SET a = 5, b = 10;
+SELECT * FROM test_gc_read ORDER BY a;
+a b c d e
+0 NULL NULL NULL NULL
+1 2 3 2 6
+3 4 7 12 14
+5 10 15 50 30
+REPLACE INTO test_gc_read (a, b) VALUES (5, 6);
+SELECT * FROM test_gc_read ORDER BY a;
+a b c d e
+0 NULL NULL NULL NULL
+1 2 3 2 6
+3 4 7 12 14
+5 6 11 30 22
+INSERT INTO test_gc_read (a, b) VALUES (5, 8) ON DUPLICATE KEY UPDATE b = 9;
+SELECT * FROM test_gc_read ORDER BY a;
+a b c d e
+0 NULL NULL NULL NULL
+1 2 3 2 6
+3 4 7 12 14
+5 9 14 45 28
+SELECT c, d FROM test_gc_read;
+c d
+NULL NULL
+3 2
+7 12
+14 45
+SELECT e FROM test_gc_read;
+e
+NULL
+6
+14
+28
+INSERT INTO test_gc_read (a, b) VALUES (5, 8) ON DUPLICATE KEY UPDATE a = 6, b = a;
+SELECT * FROM test_gc_read ORDER BY a;
+a b c d e
+0 NULL NULL NULL NULL
+1 2 3 2 6
+3 4 7 12 14
+6 6 12 36 24
+INSERT INTO test_gc_read (a, b) VALUES (6, 8) ON DUPLICATE KEY UPDATE b = 8, a = b;
+SELECT * FROM test_gc_read ORDER BY a;
+a b c d e
+0 NULL NULL NULL NULL
+1 2 3 2 6
+3 4 7 12 14
+8 8 16 64 32
+SELECT * FROM test_gc_read WHERE c = 7;
+a b c d e
+3 4 7 12 14
+SELECT * FROM test_gc_read WHERE d = 64;
+a b c d e
+8 8 16 64 32
+SELECT * FROM test_gc_read WHERE e = 6;
+a b c d e
+1 2 3 2 6
+UPDATE test_gc_read SET a = a + 100 WHERE c = 7;
+SELECT * FROM test_gc_read WHERE c = 107;
+a b c d e
+103 4 107 412 214
+UPDATE test_gc_read m SET m.a = m.a + 100 WHERE c = 107;
+SELECT * FROM test_gc_read WHERE c = 207;
+a b c d e
+203 4 207 812 414
+UPDATE test_gc_read SET a = a - 200 WHERE d = 812;
+SELECT * FROM test_gc_read WHERE d = 12;
+a b c d e
+3 4 7 12 14
+INSERT INTO test_gc_read set a = 4, b = d + 1;
+SELECT * FROM test_gc_read ORDER BY a;
+a b c d e
+0 NULL NULL NULL NULL
+1 2 3 2 6
+3 4 7 12 14
+4 NULL NULL NULL NULL
+8 8 16 64 32
+DELETE FROM test_gc_read where a = 4;
+CREATE TABLE test_gc_help(a int primary key, b int, c int, d int, e int);
+INSERT INTO test_gc_help(a, b, c, d, e) SELECT * FROM test_gc_read;
+SELECT t1.* FROM test_gc_read t1 JOIN test_gc_help t2 ON t1.c = t2.c ORDER BY t1.a;
+a b c d e
+1 2 3 2 6
+3 4 7 12 14
+8 8 16 64 32
+SELECT t1.* FROM test_gc_read t1 JOIN test_gc_help t2 ON t1.d = t2.d ORDER BY t1.a;
+a b c d e
+1 2 3 2 6
+3 4 7 12 14
+8 8 16 64 32
+SELECT t1.* FROM test_gc_read t1 JOIN test_gc_help t2 ON t1.e = t2.e ORDER BY t1.a;
+a b c d e
+1 2 3 2 6
+3 4 7 12 14
+8 8 16 64 32
+SELECT * FROM test_gc_read t WHERE t.a not in (SELECT t.a FROM test_gc_read t where t.c > 5);
+a b c d e
+0 NULL NULL NULL NULL
+1 2 3 2 6
+SELECT * FROM test_gc_read t WHERE t.c in (SELECT t.c FROM test_gc_read t where t.c > 5);
+a b c d e
+3 4 7 12 14
+8 8 16 64 32
+SELECT tt.b FROM test_gc_read tt WHERE tt.a = (SELECT max(t.a) FROM test_gc_read t WHERE t.c = tt.c) ORDER BY b;
+b
+2
+4
+8
+SELECT c, sum(a) aa, max(d) dd, sum(e) ee FROM test_gc_read GROUP BY c ORDER BY aa;
+c aa dd ee
+NULL 0 NULL NULL
+3 1 2 6
+7 3 12 14
+16 8 64 32
+SELECT a, sum(c), sum(d), sum(e) FROM test_gc_read GROUP BY a ORDER BY a;
+a sum(c) sum(d) sum(e)
+0 NULL NULL NULL
+1 3 2 6
+3 7 12 14
+8 16 64 32
+UPDATE test_gc_read m, test_gc_read n SET m.b = m.b + 10, n.b = n.b + 10;
+SELECT * FROM test_gc_read ORDER BY a;
+a b c d e
+0 NULL NULL NULL NULL
+1 12 13 12 26
+3 14 17 42 34
+8 18 26 144 52
+drop table if exists t;
+create table t(a int);
+insert into t values(8);
+update test_gc_read set a = a+1 where a in (select a from t);
+select * from test_gc_read order by a;
+a b c d e
+0 NULL NULL NULL NULL
+1 12 13 12 26
+3 14 17 42 34
+9 18 27 162 54
+CREATE TABLE test_gc_read_cast(a VARCHAR(255), b VARCHAR(255), c INT AS (JSON_EXTRACT(a, b)), d INT AS (JSON_EXTRACT(a, b)) STORED);
+INSERT INTO test_gc_read_cast (a, b) VALUES ('{"a": "3"}', '$.a');
+SELECT c, d FROM test_gc_read_cast;
+c d
+3 3
+CREATE TABLE test_gc_read_cast_1(a VARCHAR(255), b VARCHAR(255), c ENUM("red", "yellow") AS (JSON_UNQUOTE(JSON_EXTRACT(a, b))));
+INSERT INTO test_gc_read_cast_1 (a, b) VALUES ('{"a": "yellow"}', '$.a');
+SELECT c FROM test_gc_read_cast_1;
+c
+yellow
+CREATE TABLE test_gc_read_cast_2( a JSON, b JSON AS (a->>'$.a'));
+INSERT INTO test_gc_read_cast_2(a) VALUES ('{"a": "{ \\\"key\\\": \\\"\\u6d4b\\\" }"}');
+SELECT b FROM test_gc_read_cast_2;
+b
+{"key": "测"}
+CREATE TABLE test_gc_read_cast_3( a JSON, b JSON AS (a->>'$.a'), c INT AS (b * 3.14) );
+INSERT INTO test_gc_read_cast_3(a) VALUES ('{"a": "5"}');
+SELECT c FROM test_gc_read_cast_3;
+c
+16
+INSERT INTO test_gc_read_cast_1 (a, b) VALUES ('{"a": "invalid"}', '$.a');
+Error 1265 (01000): Data truncated for column 'c' at row 1
+DROP TABLE IF EXISTS test_gc_read_m;
+CREATE TABLE test_gc_read_m (a int primary key, b int, c int as (a+1), d int as (c*2));
+INSERT INTO test_gc_read_m(a) values (1), (2);
+ALTER TABLE test_gc_read_m DROP b;
+SELECT * FROM test_gc_read_m;
+a c d
+1 2 4
+2 3 6
+CREATE TABLE test_gc_read_1(a int primary key, b int, c int as (a+b) not null, d int as (a*b) stored);
+CREATE TABLE test_gc_read_2(a int primary key, b int, c int as (a+b), d int as (a*b) stored not null);
+insert into test_gc_read_1(a, b) values (1, null);
+Error 1048 (23000): Column 'c' cannot be null
+insert into test_gc_read_2(a, b) values (1, null);
+Error 1048 (23000): Column 'd' cannot be null
+drop table if exists th, tr, tl;
+create table th (a int, b int) partition by hash(a) partitions 3;
+create table tr (a int, b int)
+partition by range (a) (
+partition r0 values less than (4),
+partition r1 values less than (7),
+partition r3 values less than maxvalue);
+create table tl (a int, b int, unique index idx(a)) partition by list (a) (
+partition p0 values in (3,5,6,9,17),
+partition p1 values in (1,2,10,11,19,20),
+partition p2 values in (4,12,13,14,18),
+partition p3 values in (7,8,15,16,null));
+insert into th values (0,0),(1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8);
+insert into th values (-1,-1),(-2,-2),(-3,-3),(-4,-4),(-5,-5),(-6,-6),(-7,-7),(-8,-8);
+insert into tr values (-3,-3),(3,3),(4,4),(7,7),(8,8);
+insert into tl values (3,3),(1,1),(4,4),(7,7),(8,8),(null,null);
+select b from th partition (p0) order by a;
+b
+-6
+-3
+0
+3
+6
+select b from tr partition (r0) order by a;
+b
+-3
+3
+select b from tl partition (p0) order by a;
+b
+3
+select b from th partition (p0,P0) order by a;
+b
+-6
+-3
+0
+3
+6
+select b from tr partition (r0,R0,r0) order by a;
+b
+-3
+3
+select b from tl partition (p0,P0,p0) order by a;
+b
+3
+select b from th partition (P2,p0) order by a;
+b
+-8
+-6
+-5
+-3
+-2
+0
+2
+3
+5
+6
+8
+select b from tr partition (r1,R3) order by a;
+b
+4
+7
+8
+select b from tl partition (p0,P3) order by a;
+b
+NULL
+3
+7
+8
+select b from th partition (p0,p4);
+Error 1735 (HY000): Unknown partition 'p4' in table 'th'
+select b from tr partition (r1,r4);
+Error 1735 (HY000): Unknown partition 'r4' in table 'tr'
+select b from tl partition (p0,p4);
+Error 1735 (HY000): Unknown partition 'p4' in table 'tl'
+begin;
+insert into th values (10,10),(11,11);
+select a, b from th where b>10;
+a b
+11 11
+commit;
+select a, b from th where b>10;
+a b
+11 11
+drop table if exists tscalar;
+create table tscalar (c1 int) partition by range (c1 % 30) (
+partition p0 values less than (0),
+partition p1 values less than (10),
+partition p2 values less than (20),
+partition pm values less than (maxvalue));
+insert into tscalar values(0), (10), (40), (50), (55);
+insert into tscalar values(-0), (-10), (-40), (-50), (-55);
+select * from tscalar where c1 in (55, 55);
+c1
+55
+select * from tscalar where c1 in (40, 40);
+c1
+40
+select * from tscalar where c1 in (40);
+c1
+40
+select * from tscalar where c1 in (-40);
+c1
+-40
+select * from tscalar where c1 in (-40, -40);
+c1
+-40
+select * from tscalar where c1 in (-1);
+c1
+prepare stmt from "load data local infile '/tmp/load_data_test.csv' into table test";
+Error 1295 (HY000): This command is not supported in the prepared statement protocol yet
+prepare stmt from "import into test from 'xx' format 'delimited'";
+Error 1295 (HY000): This command is not supported in the prepared statement protocol yet
+drop table if exists t;
+create table t(a int, index idx(a));
+insert into t values(1), (2), (4);
+begin;
+update t set a = 3 where a = 4;
+select * from t ignore index(idx);
+a
+1
+2
+3
+insert into t values(4);
+select * from t use index(idx);
+a
+1
+2
+3
+4
+select * from t use index(idx) order by a desc;
+a
+4
+3
+2
+1
+update t set a = 5 where a = 3;
+select * from t use index(idx);
+a
+1
+2
+4
+5
+commit;
+drop table if exists t;
+create table t(a int, b int, index idx(a));
+insert into t values(3, 3), (1, 1), (2, 2);
+select * from t use index(idx) order by a;
+a b
+1 1
+2 2
+3 3
+drop table if exists t;
+create table t(id bigint, PRIMARY KEY (id));
+insert into t values(9223372036854775807);
+select * from t where id = 9223372036854775807;
+id
+9223372036854775807
+select * from t where id = 9223372036854775807;
+id
+9223372036854775807
+select * from t;
+id
+9223372036854775807
+insert into t values(9223372036854775807);
+Error 1062 (23000): Duplicate entry '9223372036854775807' for key 't.PRIMARY'
+delete from t where id = 9223372036854775807;
+select * from t;
+id
+drop table if exists t;
+create table t(id bigint unsigned primary key);
+insert into t values(9223372036854775808), (9223372036854775809), (1), (2);
+select * from t order by id;
+id
+1
+2
+9223372036854775808
+9223372036854775809
+select * from t where id not in (2);
+id
+9223372036854775808
+9223372036854775809
+1
+drop table if exists t;
+create table t(a bigint unsigned primary key, b int, index idx(b));
+insert into t values(9223372036854775808, 1), (1, 1);
+select * from t use index(idx) where b = 1 and a < 2;
+a b
+1 1
+select * from t use index(idx) where b = 1 order by b, a;
+a b
+1 1
+9223372036854775808 1
+set @@tidb_enable_clustered_index = 1;
+drop table if exists t;
+create table t(k1 int, k2 int, primary key(k1, k2));
+insert into t(k1, k2) value(-100, 1), (-50, 1), (0, 0), (1, 1), (3, 3);
+select k1 from t order by k1;
+k1
+-100
+-50
+0
+1
+3
+select k1 from t order by k1 desc;
+k1
+3
+1
+0
+-50
+-100
+select k1 from t where k1 < -51;
+k1
+-100
+select k1 from t where k1 < -1;
+k1
+-100
+-50
+select k1 from t where k1 <= 0;
+k1
+-100
+-50
+0
+select k1 from t where k1 < 2;
+k1
+-100
+-50
+0
+1
+select k1 from t where k1 < -1 and k1 > -90;
+k1
+-50
+set @@tidb_enable_clustered_index = default;
+drop table if exists t1, t2, t3;
+create table t1(t1.a char);
+create table t2(a char, t2.b int);
+create table t3(s.a char);
+Error 1103 (42000): Incorrect table name 's'
+set @@tidb_enable_clustered_index = 1;
+drop table if exists admin_test;
+create table admin_test (c1 int, c2 int, c3 int default 1, primary key (c1, c2), index (c1), unique key(c2));
+insert admin_test (c1, c2) values (1, 1), (2, 2), (3, 3);
+admin check table admin_test;
+set @@tidb_enable_clustered_index = default;
+drop table if exists t;
+create table t(a bigint);
+prepare stmt1 from 'select * from t limit ?';
+prepare stmt2 from 'select * from t limit ?, ?';
+set @a = -1;
+set @b = 1;
+execute stmt1 using @a;
+Error 1210 (HY000): Incorrect arguments to LIMIT
+execute stmt2 using @b, @a;
+Error 1210 (HY000): Incorrect arguments to LIMIT
+execute stmt2 using @a, @b;
+Error 1210 (HY000): Incorrect arguments to LIMIT
+execute stmt2 using @a, @a;
+Error 1210 (HY000): Incorrect arguments to LIMIT
+drop table if exists t;
+create table t (e enum('Y', 'N'));
+set sql_mode='STRICT_TRANS_TABLES';
+insert into t values (0);
+Error 1265 (01000): Data truncated for column 'e' at row 1
+insert into t values ('abc');
+Error 1265 (01000): Data truncated for column 'e' at row 1
+set sql_mode='';
+insert into t values (0);
+select * from t;
+e
+
+insert into t values ('abc');
+select * from t;
+e
+
+
+insert into t values (null);
+select * from t;
+e
+
+
+NULL
+drop table if exists t;
+create table t (id int auto_increment primary key, c1 enum('a', '', 'c'));
+insert into t(c1) values (0);
+select id, c1+0, c1 from t;
+id c1+0 c1
+1 0 
+alter table t change c1 c1 enum('a', '') not null;
+select id, c1+0, c1 from t;
+id c1+0 c1
+1 0 
+insert into t(c1) values (0);
+select id, c1+0, c1 from t;
+id c1+0 c1
+1 0 
+2 0 
+set sql_mode=default;
+drop table if exists t1;
+create table t1(a int) partition by range (10 div a) (partition p0 values less than (10), partition p1 values less than maxvalue);
+set @@sql_mode='';
+insert into t1 values (NULL), (0), (1);
+set @@sql_mode='STRICT_ALL_TABLES,ERROR_FOR_DIVISION_BY_ZERO';
+insert into t1 values (NULL), (0), (1);
+Error 1365 (22012): Division by 0
+set @@sql_mode=default;
+drop table if exists t1;
+create table t1(
+a int(11) DEFAULT NULL,
+b varchar(10) DEFAULT NULL,
+UNIQUE KEY idx_a (a)) PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
+PARTITION p1 VALUES LESS THAN (20) ENGINE = InnoDB,
+PARTITION p2 VALUES LESS THAN (30) ENGINE = InnoDB,
+PARTITION p3 VALUES LESS THAN (40) ENGINE = InnoDB,
+PARTITION p4 VALUES LESS THAN MAXVALUE ENGINE = InnoDB);
+insert into t1 partition(p0) values(1, 'a'), (2, 'b');
+select * from t1 partition(p0) order by a;
+a b
+1 a
+2 b
+insert into t1 partition(p0, p1) values(3, 'c'), (4, 'd');
+select * from t1 partition(p1);
+a b
+insert into t1 values(1, 'a');
+Error 1062 (23000): Duplicate entry '1' for key 't1.idx_a'
+insert into t1 partition(p0, p_non_exist) values(1, 'a');
+Error 1735 (HY000): Unknown partition 'p_non_exist' in table 't1'
+insert into t1 partition(p0, p1) values(40, 'a');
+Error 1748 (HY000): Found a row not matching the given partition set
+replace into t1 partition(p0) values(1, 'replace');
+replace into t1 partition(p0, p1) values(3, 'replace'), (4, 'replace');
+replace into t1 values(1, 'a');
+select * from t1 partition (p0) order by a;
+a b
+1 a
+2 b
+3 replace
+4 replace
+replace into t1 partition(p0, p_non_exist) values(1, 'a');
+Error 1735 (HY000): Unknown partition 'p_non_exist' in table 't1'
+replace into t1 partition(p0, p1) values(40, 'a');
+Error 1748 (HY000): Found a row not matching the given partition set
+truncate table t1;
+drop table if exists t;
+create table t(a int, b char(10));
+insert into t partition(p0, p1) values(1, 'a');
+Error 1747 (HY000): PARTITION () clause on non partitioned table
+insert into t values(1, 'a'), (2, 'b');
+insert into t1 partition(p0) select * from t;
+select * from t1 partition(p0) order by a;
+a b
+1 a
+2 b
+truncate table t;
+insert into t values(3, 'c'), (4, 'd');
+insert into t1 partition(p0, p1) select * from t;
+select * from t1 partition(p1) order by a;
+a b
+select * from t1 partition(p0) order by a;
+a b
+1 a
+2 b
+3 c
+4 d
+insert into t1 select 1, 'a';
+Error 1062 (23000): Duplicate entry '1' for key 't1.idx_a'
+insert into t1 partition(p0, p_non_exist) select 1, 'a';
+Error 1735 (HY000): Unknown partition 'p_non_exist' in table 't1'
+insert into t1 partition(p0, p1) select 40, 'a';
+Error 1748 (HY000): Found a row not matching the given partition set
+replace into t1 partition(p0) select 1, 'replace';
+truncate table t;
+insert into t values(3, 'replace'), (4, 'replace');
+replace into t1 partition(p0, p1) select * from t;
+replace into t1 select 1, 'a';
+select * from t1 partition (p0) order by a;
+a b
+1 a
+2 b
+3 replace
+4 replace
+replace into t1 partition(p0, p_non_exist) select 1, 'a';
+Error 1735 (HY000): Unknown partition 'p_non_exist' in table 't1'
+replace into t1 partition(p0, p1) select 40, 'a';
+Error 1748 (HY000): Found a row not matching the given partition set
+drop table if exists t1, t2, t3;
+create table t1(
+a int(11),
+b varchar(10) DEFAULT NULL,
+primary key idx_a (a)) PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
+PARTITION p1 VALUES LESS THAN (20) ENGINE = InnoDB,
+PARTITION p2 VALUES LESS THAN (30) ENGINE = InnoDB,
+PARTITION p3 VALUES LESS THAN (40) ENGINE = InnoDB,
+PARTITION p4 VALUES LESS THAN MAXVALUE ENGINE = InnoDB);
+create table t2(
+a int(11) DEFAULT NULL,
+b varchar(10) DEFAULT NULL) PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
+PARTITION p1 VALUES LESS THAN (20) ENGINE = InnoDB,
+PARTITION p2 VALUES LESS THAN (30) ENGINE = InnoDB,
+PARTITION p3 VALUES LESS THAN (40) ENGINE = InnoDB,
+PARTITION p4 VALUES LESS THAN MAXVALUE ENGINE = InnoDB);
+create table t3 (a int(11), b varchar(10) default null);
+insert into t3 values(1, 'a'), (2, 'b'), (11, 'c'), (21, 'd');
+update t3 partition(p0) set a = 40 where a = 2;
+Error 1747 (HY000): PARTITION () clause on non partitioned table
+insert into t1 values(1, 'a'), (2, 'b'), (11, 'c'), (21, 'd');
+update t1 partition(p0, p1) set a = 40;
+Error 1748 (HY000): Found a row not matching the given partition set
+update t1 partition(p0) set a = 40 where a = 2;
+Error 1748 (HY000): Found a row not matching the given partition set
+update t1 partition (p0, p_non_exist) set a = 40;
+Error 1735 (HY000): Unknown partition 'p_non_exist' in table 't1'
+update t1 partition (p0), t3 set t1.a = 40 where t3.a = 2;
+Error 1748 (HY000): Found a row not matching the given partition set
+update t1 partition(p0) set a = 3 where a = 2;
+update t1 partition(p0, p3) set a = 33 where a = 1;
+insert into t2 values(1, 'a'), (2, 'b'), (11, 'c'), (21, 'd');
+update t2 partition(p0, p1) set a = 40;
+Error 1748 (HY000): Found a row not matching the given partition set
+update t2 partition(p0) set a = 40 where a = 2;
+Error 1748 (HY000): Found a row not matching the given partition set
+update t2 partition(p0) set a = 3 where a = 2;
+update t2 partition(p0, p3) set a = 33 where a = 1;
+drop table if exists t4;
+create table t4(a int primary key, b int) partition by hash(a) partitions 2;
+insert into t4(a, b) values(1, 1),(2, 2),(3, 3);
+update t4 partition(p0) set a = 5 where a = 2;
+Error 1748 (HY000): Found a row not matching the given partition set
+drop table if exists t;
+CREATE TABLE t (a DATETIME);
+INSERT INTO t VALUES('1988-04-17 01:59:59');
+SELECT DATE_ADD(a, INTERVAL 1 SECOND) FROM t;
+DATE_ADD(a, INTERVAL 1 SECOND)
+1988-04-17 02:00:00
+select YEAR(0000-00-00), YEAR("0000-00-00");
+YEAR(0000-00-00) YEAR("0000-00-00")
+0 NULL
+Level Code Message
+Warning 1292 Incorrect datetime value: '0000-00-00 00:00:00.000000'
+select MONTH(0000-00-00), MONTH("0000-00-00");
+MONTH(0000-00-00) MONTH("0000-00-00")
+0 NULL
+Level Code Message
+Warning 1292 Incorrect datetime value: '0000-00-00 00:00:00.000000'
+select DAYOFMONTH(0000-00-00), DAYOFMONTH("0000-00-00");
+DAYOFMONTH(0000-00-00) DAYOFMONTH("0000-00-00")
+0 NULL
+Level Code Message
+Warning 1292 Incorrect datetime value: '0000-00-00 00:00:00.000000'
+select QUARTER(0000-00-00), QUARTER("0000-00-00");
+QUARTER(0000-00-00) QUARTER("0000-00-00")
+0 NULL
+Level Code Message
+Warning 1292 Incorrect datetime value: '0000-00-00 00:00:00.000000'
+select EXTRACT(DAY FROM 0000-00-00), EXTRACT(DAY FROM "0000-00-00");
+EXTRACT(DAY FROM 0000-00-00) EXTRACT(DAY FROM "0000-00-00")
+0 NULL
+Level Code Message
+Warning 1292 Incorrect datetime value: '0000-00-00 00:00:00.000000'
+select EXTRACT(MONTH FROM 0000-00-00), EXTRACT(MONTH FROM "0000-00-00");
+EXTRACT(MONTH FROM 0000-00-00) EXTRACT(MONTH FROM "0000-00-00")
+0 NULL
+Level Code Message
+Warning 1292 Incorrect datetime value: '0000-00-00 00:00:00.000000'
+select EXTRACT(YEAR FROM 0000-00-00), EXTRACT(YEAR FROM "0000-00-00");
+EXTRACT(YEAR FROM 0000-00-00) EXTRACT(YEAR FROM "0000-00-00")
+0 NULL
+Level Code Message
+Warning 1292 Incorrect datetime value: '0000-00-00 00:00:00.000000'
+select EXTRACT(WEEK FROM 0000-00-00), EXTRACT(WEEK FROM "0000-00-00");
+EXTRACT(WEEK FROM 0000-00-00) EXTRACT(WEEK FROM "0000-00-00")
+0 NULL
+Level Code Message
+Warning 1292 Incorrect datetime value: '0000-00-00 00:00:00.000000'
+select EXTRACT(QUARTER FROM 0000-00-00), EXTRACT(QUARTER FROM "0000-00-00");
+EXTRACT(QUARTER FROM 0000-00-00) EXTRACT(QUARTER FROM "0000-00-00")
+0 NULL
+Level Code Message
+Warning 1292 Incorrect datetime value: '0000-00-00 00:00:00.000000'
+select DAYOFWEEK(0000-00-00), DAYOFWEEK("0000-00-00");
+DAYOFWEEK(0000-00-00) DAYOFWEEK("0000-00-00")
+NULL NULL
+Level Code Message
+Warning 1292 Incorrect datetime value: '0000-00-00 00:00:00'
+Warning 1292 Incorrect datetime value: '0000-00-00 00:00:00.000000'
+select DAYOFYEAR(0000-00-00), DAYOFYEAR("0000-00-00");
+DAYOFYEAR(0000-00-00) DAYOFYEAR("0000-00-00")
+NULL NULL
+Level Code Message
+Warning 1292 Incorrect datetime value: '0000-00-00 00:00:00'
+Warning 1292 Incorrect datetime value: '0000-00-00 00:00:00.000000'
+drop table if exists t;
+create table t(v1 datetime, v2 datetime(3));
+insert ignore into t values(0,0);
+select YEAR(v1), YEAR(v2) from t;
+YEAR(v1) YEAR(v2)
+0 0
+select MONTH(v1), MONTH(v2) from t;
+MONTH(v1) MONTH(v2)
+0 0
+select DAYOFMONTH(v1), DAYOFMONTH(v2) from t;
+DAYOFMONTH(v1) DAYOFMONTH(v2)
+0 0
+select QUARTER(v1), QUARTER(v2) from t;
+QUARTER(v1) QUARTER(v2)
+0 0
+select EXTRACT(DAY FROM v1), EXTRACT(DAY FROM v2) from t;
+EXTRACT(DAY FROM v1) EXTRACT(DAY FROM v2)
+0 0
+select EXTRACT(MONTH FROM v1), EXTRACT(MONTH FROM v2) from t;
+EXTRACT(MONTH FROM v1) EXTRACT(MONTH FROM v2)
+0 0
+select EXTRACT(YEAR FROM v1), EXTRACT(YEAR FROM v2) from t;
+EXTRACT(YEAR FROM v1) EXTRACT(YEAR FROM v2)
+0 0
+select EXTRACT(WEEK FROM v1), EXTRACT(WEEK FROM v2) from t;
+EXTRACT(WEEK FROM v1) EXTRACT(WEEK FROM v2)
+0 0
+select EXTRACT(QUARTER FROM v1), EXTRACT(QUARTER FROM v2) from t;
+EXTRACT(QUARTER FROM v1) EXTRACT(QUARTER FROM v2)
+0 0
+select DAYOFWEEK(v1), DAYOFWEEK(v2) from t;
+DAYOFWEEK(v1) DAYOFWEEK(v2)
+NULL NULL
+Level Code Message
+Warning 1292 Incorrect datetime value: '0000-00-00 00:00:00'
+Warning 1292 Incorrect datetime value: '0000-00-00 00:00:00.000'
+select DAYOFYEAR(v1), DAYOFYEAR(v2) from t;
+DAYOFYEAR(v1) DAYOFYEAR(v2)
+NULL NULL
+Level Code Message
+Warning 1292 Incorrect datetime value: '0000-00-00 00:00:00'
+Warning 1292 Incorrect datetime value: '0000-00-00 00:00:00.000'
+drop table if exists t;
+set @@sql_mode='STRICT_TRANS_TABLES,NO_ZERO_IN_DATE';
+create table t (a datetime default '2999-00-00 00:00:00');
+Error 1067 (42000): Invalid default value for 'a'
+create table t (a datetime);
+alter table t modify column a datetime default '2999-00-00 00:00:00';
+Error 1067 (42000): Invalid default value for 'a'
+drop table if exists t;
+set @@sql_mode='STRICT_TRANS_TABLES,NO_ZERO_DATE';
+create table t (a datetime default '0000-00-00 00:00:00');
+Error 1067 (42000): Invalid default value for 'a'
+create table t (a datetime);
+alter table t modify column a datetime default '0000-00-00 00:00:00';
+Error 1067 (42000): Invalid default value for 'a'
+drop table if exists t;
+set @@sql_mode='STRICT_TRANS_TABLES';
+create table t (a datetime default '2999-00-00 00:00:00');
+drop table if exists t;
+create table t (a datetime default '0000-00-00 00:00:00');
+drop table if exists t;
+create table t (a datetime);
+alter table t modify column a datetime default '2999-00-00 00:00:00';
+alter table t modify column a datetime default '0000-00-00 00:00:00';
+drop table if exists t;
+set @@sql_mode='STRICT_TRANS_TABLES';
+create table t (a datetime default '2999-02-30 00:00:00');
+Error 1067 (42000): Invalid default value for 'a'
+drop table if exists t;
+set @@sql_mode='STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE';
+create table t (a datetime default '2999-02-30 00:00:00');
+Error 1067 (42000): Invalid default value for 'a'
+drop table if exists t;
+set @@sql_mode='STRICT_TRANS_TABLES,ALLOW_INVALID_DATES';
+create table t (a datetime default '2999-02-30 00:00:00');
+drop table if exists t;
+create table t (a datetime);
+alter table t modify column a datetime default '2999-02-30 00:00:00';
+drop table if exists t;
+set @@sql_mode=default;
+drop table if exists `enum-set`;
+CREATE TABLE `enum-set` (`set` SET('x00','x01','x02','x03','x04','x05','x06','x07','x08','x09','x10','x11','x12','x13','x14','x15','x16','x17','x18','x19','x20','x21','x22','x23','x24','x25','x26','x27','x28','x29','x30','x31','x32','x33','x34','x35','x36','x37','x38','x39','x40','x41','x42','x43','x44','x45','x46','x47','x48','x49','x50','x51','x52','x53','x54','x55','x56','x57','x58','x59','x60','x61','x62','x63')NOT NULL PRIMARY KEY);
+INSERT INTO `enum-set` VALUES ("x00,x59");
+select `set` from `enum-set` use index(PRIMARY);
+set
+x00,x59
+admin check table `enum-set`;
+drop table if exists t;
+create table t(a YEAR, PRIMARY KEY(a));
+insert into t set a = '2151';
+delete from t;
+admin check table t;
+drop table if exists t;
+set @@tidb_enable_clustered_index = 'int_only';
+create table t(a varchar(10), b varchar(10), c varchar(1), index idx(a, b, c));
+insert into t values('a', 'b', 'c');
+insert into t values('a', 'b', 'c');
+select b, _tidb_rowid from t use index(idx) where a = 'a';
+b _tidb_rowid
+b 1
+b 2
+begin;
+select * from t for update;
+a b c
+a b c
+a b c
+select distinct b from t use index(idx) where a = 'a';
+b
+b
+commit;
+drop table if exists t;
+create table t(a varchar(5) primary key);
+insert into t values('a');
+select *, _tidb_rowid from t use index(`primary`) where _tidb_rowid=1;
+a _tidb_rowid
+a 1
+set @@tidb_enable_clustered_index = default;
+drop table if exists t;
+set sql_mode = '';
+select a, b from (select 1 a) ``, (select 2 b) ``;
+Error 1248 (42000): Every derived table must have its own alias
+select a, b from (select 1 a) `x`, (select 2 b) `x`;
+Error 1066 (42000): Not unique table/alias: 'x'
+select a, b from (select 1 a), (select 2 b);
+Error 1248 (42000): Every derived table must have its own alias
+select a from (select 1 a) ``, (select 2 a) ``;
+Error 1248 (42000): Every derived table must have its own alias
+select a from (select 1 a) `x`, (select 2 a) `x`;
+Error 1066 (42000): Not unique table/alias: 'x'
+select x.a from (select 1 a) `x`, (select 2 a) `x`;
+Error 1066 (42000): Not unique table/alias: 'x'
+select a from (select 1 a), (select 2 a);
+Error 1248 (42000): Every derived table must have its own alias
+set sql_mode = 'oracle';
+select a, b from (select 1 a) ``, (select 2 b) ``;
+a b
+1 2
+select a, b from (select 1 a) `x`, (select 2 b) `x`;
+a b
+1 2
+select a, b from (select 1 a), (select 2 b);
+a b
+1 2
+select a from (select 1 a) ``, (select 2 a) ``;
+Error 1052 (23000): Column 'a' in field list is ambiguous
+select a from (select 1 a) `x`, (select 2 a) `x`;
+Error 1052 (23000): Column 'a' in field list is ambiguous
+select x.a from (select 1 a) `x`, (select 2 a) `x`;
+Error 1052 (23000): Column 'a' in field list is ambiguous
+select a from (select 1 a), (select 2 a);
+Error 1052 (23000): Column 'a' in field list is ambiguous
+set sql_mode = default;
+drop table if exists th;
+create table th (a int, b int) partition by hash(a) partitions 3;
+insert into th values (0,0),(1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8);
+insert into th values (-1,-1),(-2,-2),(-3,-3),(-4,-4),(-5,-5),(-6,-6),(-7,-7),(-8,-8);
+select b from th order by a;
+b
+-8
+-7
+-6
+-5
+-4
+-3
+-2
+-1
+0
+1
+2
+3
+4
+5
+6
+7
+8
+select * from th where a=-2;
+a b
+-2 -2
+select * from th where a=5;
+a b
+5 5
+drop table if exists th;
+drop table if exists view_t;
+create table view_t (a int,b int);
+insert into view_t values(1,2);
+create definer='root'@'localhost' view view1 as select * from view_t;
+create definer='root'@'localhost' view view2(c,d) as select * from view_t;
+create definer='root'@'localhost' view view3(c,d) as select a,b from view_t;
+create definer='root'@'localhost' view view4 as select * from (select * from (select * from view_t) tb1) tb;
+select * from view1;
+a b
+1 2
+select * from view2;
+c d
+1 2
+select * from view3;
+c d
+1 2
+select * from view4;
+a b
+1 2
+drop table view_t;
+create table view_t(c int,d int);
+select * from view1;
+Error 1356 (HY000): View 'executor__executor.view1' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them
+select * from view2;
+Error 1356 (HY000): View 'executor__executor.view2' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them
+select * from view3;
+Error 1356 (HY000): View 'executor__executor.view3' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them
+drop table view_t;
+create table view_t(a int,b int,c int);
+insert into view_t values(1,2,3);
+select * from view1;
+a b
+1 2
+select * from view2;
+c d
+1 2
+select * from view3;
+c d
+1 2
+select * from view4;
+a b
+1 2
+alter table view_t drop column a;
+alter table view_t add column a int after b;
+update view_t set a=1;
+select * from view1;
+a b
+1 2
+select * from view2;
+c d
+1 2
+select * from view3;
+c d
+1 2
+select * from view4;
+a b
+1 2
+drop table view_t;
+drop view view1,view2,view3,view4;
+set @@tidb_enable_window_function = 1;
+create table t(a int, b int);
+insert into t values (1,1),(1,2),(2,1),(2,2);
+create definer='root'@'localhost' view v as select a, first_value(a) over(rows between 1 preceding and 1 following), last_value(a) over(rows between 1 preceding and 1 following) from t;
+select * from v;
+a first_value(a) over(rows between 1 preceding and 1 following) last_value(a) over(rows between 1 preceding and 1 following)
+1 1 1
+1 1 2
+2 1 2
+2 2 2
+drop view v;
+set @@tidb_enable_window_function = default;
+drop table if exists t;
+create table t(a varbinary(10));
+insert into t values ('123.12');
+select 1+a from t;
+1+a
+124.12
+select a-1 from t;
+a-1
+122.12
+select -10*a from t;
+-10*a
+-1231.2
+select a/-2 from t;
+a/-2
+-61.56
+drop table if exists t1, t2, t3;
+create table t1(a int, b int);
+create table t2(a int, b varchar(20));
+create table t3(a int, b decimal(30,10));
+insert into t1 values (1,1),(1,1),(2,2),(3,3),(null,null);
+insert into t2 values (1,'1'),(2,'2'),(null,null),(null,'3');
+insert into t3 values (2,2.1),(3,3);
+explain format = 'brief' select * from t3 union select * from t1;
+id estRows task access object operator info
+HashAgg 16000.00 root group by:Column#7, Column#8, funcs:firstrow(Column#7)->Column#7, funcs:firstrow(Column#8)->Column#8
+└─Union 20000.00 root 
+  ├─TableReader 10000.00 root data:TableFullScan
+  │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo
+  └─Projection 10000.00 root executor__executor.t1.a->Column#7, cast(executor__executor.t1.b, decimal(30,10) BINARY)->Column#8
+    └─TableReader 10000.00 root data:TableFullScan
+      └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
+select * from t3 union select * from t1;
+a b
+NULL NULL
+1 1.0000000000
+2 2.0000000000
+2 2.1000000000
+3 3.0000000000
+explain format = 'brief' select * from t2 union all select * from t1;
+id estRows task access object operator info
+Union 20000.00 root 
+├─TableReader 10000.00 root data:TableFullScan
+│ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo
+└─Projection 10000.00 root executor__executor.t1.a->Column#7, cast(executor__executor.t1.b, varchar(20) BINARY CHARACTER SET utf8mb4 COLLATE utf8mb4_bin)->Column#8
+  └─TableReader 10000.00 root data:TableFullScan
+    └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
+select * from t2 union all select * from t1;
+a b
+NULL NULL
+NULL NULL
+NULL 3
+1 1
+1 1
+1 1
+2 2
+2 2
+3 3
+explain format = 'brief' select * from t1 except select * from t3;
+id estRows task access object operator info
+HashJoin 6400.00 root anti semi join, equal:[nulleq(executor__executor.t1.a, executor__executor.t3.a)], other cond:nulleq(cast(executor__executor.t1.b, decimal(10,0) BINARY), executor__executor.t3.b)
+├─TableReader(Build) 10000.00 root data:TableFullScan
+│ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo
+└─HashAgg(Probe) 8000.00 root group by:executor__executor.t1.a, executor__executor.t1.b, funcs:firstrow(executor__executor.t1.a)->executor__executor.t1.a, funcs:firstrow(executor__executor.t1.b)->executor__executor.t1.b
+  └─TableReader 8000.00 root data:HashAgg
+    └─HashAgg 8000.00 cop[tikv] group by:executor__executor.t1.a, executor__executor.t1.b, 
+      └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
+select * from t1 except select * from t3;
+a b
+NULL NULL
+1 1
+2 2
+explain format = 'brief' select * from t1 intersect select * from t2;
+id estRows task access object operator info
+HashJoin 6400.00 root semi join, equal:[nulleq(executor__executor.t1.a, executor__executor.t2.a)], other cond:nulleq(cast(executor__executor.t1.b, double BINARY), cast(executor__executor.t2.b, double BINARY))
+├─TableReader(Build) 10000.00 root data:TableFullScan
+│ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo
+└─HashAgg(Probe) 8000.00 root group by:executor__executor.t1.a, executor__executor.t1.b, funcs:firstrow(executor__executor.t1.a)->executor__executor.t1.a, funcs:firstrow(executor__executor.t1.b)->executor__executor.t1.b
+  └─TableReader 8000.00 root data:HashAgg
+    └─HashAgg 8000.00 cop[tikv] group by:executor__executor.t1.a, executor__executor.t1.b, 
+      └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
+select * from t1 intersect select * from t2;
+a b
+NULL NULL
+1 1
+2 2
+explain format = 'brief' select * from t1 union all select * from t2 union all select * from t3;
+id estRows task access object operator info
+Union 30000.00 root 
+├─Projection 10000.00 root executor__executor.t1.a->Column#10, cast(executor__executor.t1.b, varchar(30) BINARY CHARACTER SET utf8mb4 COLLATE utf8mb4_bin)->Column#11
+│ └─TableReader 10000.00 root data:TableFullScan
+│   └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
+├─Projection 10000.00 root executor__executor.t2.a->Column#10, cast(executor__executor.t2.b, varchar(30) BINARY CHARACTER SET utf8mb4 COLLATE utf8mb4_bin)->Column#11
+│ └─TableReader 10000.00 root data:TableFullScan
+│   └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo
+└─Projection 10000.00 root executor__executor.t3.a->Column#10, cast(executor__executor.t3.b, varchar(30) BINARY CHARACTER SET utf8mb4 COLLATE utf8mb4_bin)->Column#11
+  └─TableReader 10000.00 root data:TableFullScan
+    └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo
+select * from t1 union all select * from t2 union all select * from t3;
+a b
+NULL NULL
+NULL NULL
+NULL 3
+1 1
+1 1
+1 1
+2 2
+2 2
+2 2.1000000000
+3 3
+3 3.0000000000
+explain format = 'brief' select * from t1 union all select * from t2 except select * from t3;
+id estRows task access object operator info
+HashJoin 12800.00 root anti semi join, equal:[nulleq(Column#10, executor__executor.t3.a)], other cond:nulleq(cast(Column#11, double BINARY), cast(executor__executor.t3.b, double BINARY))
+├─TableReader(Build) 10000.00 root data:TableFullScan
+│ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo
+└─HashAgg(Probe) 16000.00 root group by:Column#10, Column#11, funcs:firstrow(Column#10)->Column#10, funcs:firstrow(Column#11)->Column#11
+  └─Union 20000.00 root 
+    ├─Projection 10000.00 root executor__executor.t1.a->Column#10, cast(executor__executor.t1.b, varchar(20) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin)->Column#11
+    │ └─TableReader 10000.00 root data:TableFullScan
+    │   └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
+    └─TableReader 10000.00 root data:TableFullScan
+      └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo
+select * from t1 union all select * from t2 except select * from t3;
+a b
+NULL NULL
+NULL 3
+1 1
+2 2
+explain format = 'brief' select * from t1 intersect select * from t2 intersect select * from t1;
+id estRows task access object operator info
+HashJoin 5120.00 root semi join, equal:[nulleq(executor__executor.t1.a, executor__executor.t1.a) nulleq(executor__executor.t1.b, executor__executor.t1.b)]
+├─TableReader(Build) 10000.00 root data:TableFullScan
+│ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
+└─HashJoin(Probe) 6400.00 root semi join, equal:[nulleq(executor__executor.t1.a, executor__executor.t2.a)], other cond:nulleq(cast(executor__executor.t1.b, double BINARY), cast(executor__executor.t2.b, double BINARY))
+  ├─TableReader(Build) 10000.00 root data:TableFullScan
+  │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo
+  └─HashAgg(Probe) 8000.00 root group by:executor__executor.t1.a, executor__executor.t1.b, funcs:firstrow(executor__executor.t1.a)->executor__executor.t1.a, funcs:firstrow(executor__executor.t1.b)->executor__executor.t1.b
+    └─TableReader 8000.00 root data:HashAgg
+      └─HashAgg 8000.00 cop[tikv] group by:executor__executor.t1.a, executor__executor.t1.b, 
+        └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
+select * from t1 intersect select * from t2 intersect select * from t1;
+a b
+NULL NULL
+1 1
+2 2
+explain format = 'brief' select * from t1 union all select * from t2 intersect select * from t3;
+id estRows task access object operator info
+Union 16400.00 root 
+├─Projection 10000.00 root executor__executor.t1.a->Column#10, cast(executor__executor.t1.b, varchar(20) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin)->Column#11
+│ └─TableReader 10000.00 root data:TableFullScan
+│   └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
+└─HashJoin 6400.00 root semi join, equal:[nulleq(executor__executor.t2.a, executor__executor.t3.a)], other cond:nulleq(cast(executor__executor.t2.b, double BINARY), cast(executor__executor.t3.b, double BINARY))
+  ├─TableReader(Build) 10000.00 root data:TableFullScan
+  │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo
+  └─HashAgg(Probe) 8000.00 root group by:executor__executor.t2.a, executor__executor.t2.b, funcs:firstrow(executor__executor.t2.a)->executor__executor.t2.a, funcs:firstrow(executor__executor.t2.b)->executor__executor.t2.b
+    └─TableReader 8000.00 root data:HashAgg
+      └─HashAgg 8000.00 cop[tikv] group by:executor__executor.t2.a, executor__executor.t2.b, 
+        └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo
+select * from t1 union all select * from t2 intersect select * from t3;
+a b
+NULL NULL
+1 1
+1 1
+2 2
+3 3
+explain format = 'brief' select * from t1 except select * from t2 intersect select * from t3;
+id estRows task access object operator info
+HashJoin 6400.00 root anti semi join, equal:[nulleq(executor__executor.t1.a, executor__executor.t2.a)], other cond:nulleq(cast(executor__executor.t1.b, double BINARY), cast(executor__executor.t2.b, double BINARY))
+├─HashJoin(Build) 6400.00 root semi join, equal:[nulleq(executor__executor.t2.a, executor__executor.t3.a)], other cond:nulleq(cast(executor__executor.t2.b, double BINARY), cast(executor__executor.t3.b, double BINARY))
+│ ├─TableReader(Build) 10000.00 root data:TableFullScan
+│ │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo
+│ └─HashAgg(Probe) 8000.00 root group by:executor__executor.t2.a, executor__executor.t2.b, funcs:firstrow(executor__executor.t2.a)->executor__executor.t2.a, funcs:firstrow(executor__executor.t2.b)->executor__executor.t2.b
+│   └─TableReader 8000.00 root data:HashAgg
+│     └─HashAgg 8000.00 cop[tikv] group by:executor__executor.t2.a, executor__executor.t2.b, 
+│       └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo
+└─HashAgg(Probe) 8000.00 root group by:executor__executor.t1.a, executor__executor.t1.b, funcs:firstrow(executor__executor.t1.a)->executor__executor.t1.a, funcs:firstrow(executor__executor.t1.b)->executor__executor.t1.b
+  └─TableReader 8000.00 root data:HashAgg
+    └─HashAgg 8000.00 cop[tikv] group by:executor__executor.t1.a, executor__executor.t1.b, 
+      └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
+select * from t1 except select * from t2 intersect select * from t3;
+a b
+NULL NULL
+1 1
+2 2
+3 3
+set tidb_cost_model_version=2;
+drop table if exists t;
+create table t (c1 year(4), c2 int, key(c1));
+insert into t values(2001, 1);
+explain format = 'brief' select t1.c1, t2.c1 from t as t1 inner join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL;
+id estRows task access object operator info
+MergeJoin 0.00 root inner join, left key:executor__executor.t.c1, right key:executor__executor.t.c1
+├─TableDual(Build) 0.00 root rows:0
+└─TableDual(Probe) 0.00 root rows:0
+select t1.c1, t2.c1 from t as t1 inner join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL;
+c1 c1
+explain format = 'brief' select * from t as t1 inner join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL;
+id estRows task access object operator info
+MergeJoin 0.00 root inner join, left key:executor__executor.t.c1, right key:executor__executor.t.c1
+├─TableDual(Build) 0.00 root rows:0
+└─TableDual(Probe) 0.00 root rows:0
+select * from t as t1 inner join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL;
+c1 c2 c1 c2
+explain format = 'brief' select count(*) from t as t1 inner join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL;
+id estRows task access object operator info
+StreamAgg 1.00 root funcs:count(1)->Column#7
+└─MergeJoin 0.00 root inner join, left key:executor__executor.t.c1, right key:executor__executor.t.c1
+  ├─TableDual(Build) 0.00 root rows:0
+  └─TableDual(Probe) 0.00 root rows:0
+select count(*) from t as t1 inner join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL;
+count(*)
+0
+explain format = 'brief' select t1.c1, t2.c1 from t as t1 left join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL;
+id estRows task access object operator info
+MergeJoin 0.00 root left outer join, left key:executor__executor.t.c1, right key:executor__executor.t.c1
+├─TableDual(Build) 0.00 root rows:0
+└─TableDual(Probe) 0.00 root rows:0
+select t1.c1, t2.c1 from t as t1 left join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL;
+c1 c1
+explain format = 'brief' select * from t as t1 left join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL;
+id estRows task access object operator info
+MergeJoin 0.00 root left outer join, left key:executor__executor.t.c1, right key:executor__executor.t.c1
+├─TableDual(Build) 0.00 root rows:0
+└─TableDual(Probe) 0.00 root rows:0
+select * from t as t1 left join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL;
+c1 c2 c1 c2
+explain format = 'brief' select count(*) from t as t1 left join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL;
+id estRows task access object operator info
+StreamAgg 1.00 root funcs:count(1)->Column#7
+└─MergeJoin 0.00 root left outer join, left key:executor__executor.t.c1, right key:executor__executor.t.c1
+  ├─TableDual(Build) 0.00 root rows:0
+  └─TableDual(Probe) 0.00 root rows:0
+select count(*) from t as t1 left join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL;
+count(*)
+0
+explain format = 'brief' select * from t as t1 left join t as t2 on t1.c1 = t2.c1 where t1.c1 is not NULL;
+id estRows task access object operator info
+HashJoin 12487.50 root left outer join, equal:[eq(executor__executor.t.c1, executor__executor.t.c1)]
+├─TableReader(Build) 9990.00 root data:Selection
+│ └─Selection 9990.00 cop[tikv] not(isnull(executor__executor.t.c1))
+│   └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo
+└─TableReader(Probe) 9990.00 root data:Selection
+  └─Selection 9990.00 cop[tikv] not(isnull(executor__executor.t.c1))
+    └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
+select * from t as t1 left join t as t2 on t1.c1 = t2.c1 where t1.c1 is not NULL;
+c1 c2 c1 c2
+2001 1 2001 1
+set tidb_cost_model_version=2;
+drop table if exists t1, t2, t3;
+create table t1(a int);
+create table t2 like t1;
+create table t3 like t1;
+insert into t1 values (1),(1),(2),(3),(null);
+insert into t2 values (1),(2),(null),(null);
+insert into t3 values (2),(3);
+explain format='brief' select * from t3 union select * from t1;
+id estRows task access object operator info
+HashAgg 16000.00 root group by:Column#5, funcs:firstrow(Column#5)->Column#5
+└─Union 20000.00 root 
+  ├─TableReader 10000.00 root data:TableFullScan
+  │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo
+  └─TableReader 10000.00 root data:TableFullScan
+    └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
+select * from t3 union select * from t1;
+a
+NULL
+1
+2
+3
+explain format='brief' select * from t2 union all select * from t1;
+id estRows task access object operator info
+Union 20000.00 root 
+├─TableReader 10000.00 root data:TableFullScan
+│ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo
+└─TableReader 10000.00 root data:TableFullScan
+  └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
+select * from t2 union all select * from t1;
+a
+NULL
+NULL
+NULL
+1
+1
+1
+2
+2
+3
+explain format='brief' select * from t1 except select * from t3;
+id estRows task access object operator info
+HashJoin 6400.00 root anti semi join, equal:[nulleq(executor__executor.t1.a, executor__executor.t3.a)]
+├─TableReader(Build) 10000.00 root data:TableFullScan
+│ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo
+└─HashAgg(Probe) 8000.00 root group by:executor__executor.t1.a, funcs:firstrow(executor__executor.t1.a)->executor__executor.t1.a
+  └─TableReader 8000.00 root data:HashAgg
+    └─HashAgg 8000.00 cop[tikv] group by:executor__executor.t1.a, 
+      └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
+select * from t1 except select * from t3;
+a
+NULL
+1
+explain format='brief' select * from t1 intersect select * from t2;
+id estRows task access object operator info
+HashJoin 6400.00 root semi join, equal:[nulleq(executor__executor.t1.a, executor__executor.t2.a)]
+├─TableReader(Build) 10000.00 root data:TableFullScan
+│ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo
+└─HashAgg(Probe) 8000.00 root group by:executor__executor.t1.a, funcs:firstrow(executor__executor.t1.a)->executor__executor.t1.a + └─TableReader 8000.00 root data:HashAgg + └─HashAgg 8000.00 cop[tikv] group by:executor__executor.t1.a, + └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo +select * from t1 intersect select * from t2; +a +NULL +1 +2 +explain format='brief' select * from t1 union all select * from t2 union all select * from t3; +id estRows task access object operator info +Union 30000.00 root +├─TableReader 10000.00 root data:TableFullScan +│ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo +├─TableReader 10000.00 root data:TableFullScan +│ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo +└─TableReader 10000.00 root data:TableFullScan + └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo +select * from t1 union all select * from t2 union all select * from t3; +a +NULL +NULL +NULL +1 +1 +1 +2 +2 +2 +3 +3 +explain format='brief' select * from t1 union all select * from t2 except select * from t3; +id estRows task access object operator info +HashJoin 12800.00 root anti semi join, equal:[nulleq(Column#7, executor__executor.t3.a)] +├─TableReader(Build) 10000.00 root data:TableFullScan +│ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo +└─HashAgg(Probe) 16000.00 root group by:Column#7, funcs:firstrow(Column#7)->Column#7 + └─Union 20000.00 root + ├─TableReader 10000.00 root data:TableFullScan + │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo + └─TableReader 10000.00 root data:TableFullScan + └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo +select * from t1 union all select * from t2 except select * from t3; +a +NULL +1 +explain format='brief' select * from t1 intersect select * from t2 intersect select * from t1; +id estRows task access object operator info +HashJoin 5120.00 root semi join, equal:[nulleq(executor__executor.t1.a, executor__executor.t1.a)] +├─TableReader(Build) 10000.00 root data:TableFullScan +│ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo +└─HashJoin(Probe) 6400.00 root semi join, equal:[nulleq(executor__executor.t1.a, executor__executor.t2.a)] + ├─TableReader(Build) 10000.00 root data:TableFullScan + │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo + └─HashAgg(Probe) 8000.00 root group by:executor__executor.t1.a, funcs:firstrow(executor__executor.t1.a)->executor__executor.t1.a + └─TableReader 8000.00 root data:HashAgg + └─HashAgg 8000.00 cop[tikv] group by:executor__executor.t1.a, + └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo +select * from t1 intersect select * from t2 intersect select * from t1; +a +NULL +1 +2 +explain format='brief' select * from t1 union all select * from t2 intersect select * from t3; +id estRows task access object operator info +Union 16400.00 root +├─TableReader 10000.00 root data:TableFullScan +│ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo +└─HashJoin 6400.00 root semi join, equal:[nulleq(executor__executor.t2.a, executor__executor.t3.a)] + ├─TableReader(Build) 10000.00 root data:TableFullScan + │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo + └─HashAgg(Probe) 8000.00 root group by:executor__executor.t2.a, funcs:firstrow(executor__executor.t2.a)->executor__executor.t2.a + └─TableReader 8000.00 root data:HashAgg + 
└─HashAgg 8000.00 cop[tikv] group by:executor__executor.t2.a, + └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo +select * from t1 union all select * from t2 intersect select * from t3; +a +NULL +1 +1 +2 +2 +3 +explain format='brief' select * from t1 except select * from t2 intersect select * from t3; +id estRows task access object operator info +HashJoin 6400.00 root anti semi join, equal:[nulleq(executor__executor.t1.a, executor__executor.t2.a)] +├─HashJoin(Build) 6400.00 root semi join, equal:[nulleq(executor__executor.t2.a, executor__executor.t3.a)] +│ ├─TableReader(Build) 10000.00 root data:TableFullScan +│ │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo +│ └─HashAgg(Probe) 8000.00 root group by:executor__executor.t2.a, funcs:firstrow(executor__executor.t2.a)->executor__executor.t2.a +│ └─TableReader 8000.00 root data:HashAgg +│ └─HashAgg 8000.00 cop[tikv] group by:executor__executor.t2.a, +│ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo +└─HashAgg(Probe) 8000.00 root group by:executor__executor.t1.a, funcs:firstrow(executor__executor.t1.a)->executor__executor.t1.a + └─TableReader 8000.00 root data:HashAgg + └─HashAgg 8000.00 cop[tikv] group by:executor__executor.t1.a, + └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo +select * from t1 except select * from t2 intersect select * from t3; +a +NULL +1 +3 +explain format='brief' select * from t1 intersect (select * from t2 except (select * from t3)); +id estRows task access object operator info +HashJoin 6400.00 root semi join, equal:[nulleq(executor__executor.t1.a, executor__executor.t2.a)] +├─HashJoin(Build) 6400.00 root anti semi join, equal:[nulleq(executor__executor.t2.a, executor__executor.t3.a)] +│ ├─TableReader(Build) 10000.00 root data:TableFullScan +│ │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo +│ └─HashAgg(Probe) 8000.00 root group by:executor__executor.t2.a, funcs:firstrow(executor__executor.t2.a)->executor__executor.t2.a +│ └─TableReader 8000.00 root data:HashAgg +│ └─HashAgg 8000.00 cop[tikv] group by:executor__executor.t2.a, +│ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo +└─HashAgg(Probe) 8000.00 root group by:executor__executor.t1.a, funcs:firstrow(executor__executor.t1.a)->executor__executor.t1.a + └─TableReader 8000.00 root data:HashAgg + └─HashAgg 8000.00 cop[tikv] group by:executor__executor.t1.a, + └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo +select * from t1 intersect (select * from t2 except (select * from t3)); +a +NULL +1 +explain format='brief' select * from t1 union all (select * from t2 except select * from t3); +id estRows task access object operator info +Union 16400.00 root +├─TableReader 10000.00 root data:TableFullScan +│ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo +└─HashJoin 6400.00 root anti semi join, equal:[nulleq(executor__executor.t2.a, executor__executor.t3.a)] + ├─TableReader(Build) 10000.00 root data:TableFullScan + │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo + └─HashAgg(Probe) 8000.00 root group by:executor__executor.t2.a, funcs:firstrow(executor__executor.t2.a)->executor__executor.t2.a + └─TableReader 8000.00 root data:HashAgg + └─HashAgg 8000.00 cop[tikv] group by:executor__executor.t2.a, + └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo +select * from t1 union all (select * from t2 except select * 
from t3); +a +NULL +NULL +1 +1 +1 +2 +3 +explain format='brief' select * from t1 union (select * from t2 union all select * from t3); +id estRows task access object operator info +HashAgg 24000.00 root group by:Column#8, funcs:firstrow(Column#8)->Column#8 +└─Union 30000.00 root + ├─TableReader 10000.00 root data:TableFullScan + │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo + └─Union 20000.00 root + ├─TableReader 10000.00 root data:TableFullScan + │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo + └─TableReader 10000.00 root data:TableFullScan + └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo +select * from t1 union (select * from t2 union all select * from t3); +a +NULL +1 +2 +3 +explain format='brief' (select * from t1 intersect select * from t1) except (select * from t2 union select * from t3); +id estRows task access object operator info +HashJoin 5120.00 root anti semi join, equal:[nulleq(executor__executor.t1.a, Column#9)] +├─HashAgg(Build) 16000.00 root group by:Column#9, funcs:firstrow(Column#9)->Column#9 +│ └─Union 20000.00 root +│ ├─TableReader 10000.00 root data:TableFullScan +│ │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo +│ └─TableReader 10000.00 root data:TableFullScan +│ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo +└─HashJoin(Probe) 6400.00 root semi join, equal:[nulleq(executor__executor.t1.a, executor__executor.t1.a)] + ├─TableReader(Build) 10000.00 root data:TableFullScan + │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo + └─HashAgg(Probe) 8000.00 root group by:executor__executor.t1.a, funcs:firstrow(executor__executor.t1.a)->executor__executor.t1.a + └─TableReader 8000.00 root data:HashAgg + └─HashAgg 8000.00 cop[tikv] group by:executor__executor.t1.a, + └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo +(select * from t1 intersect select * from t1) except (select * from t2 union select * from t3); +a +drop table if exists issue40279; +CREATE TABLE `issue40279` (`a` char(155) NOT NULL DEFAULT 'on1unvbxp5sko6mbetn3ku26tuiyju7w3wc0olzto9ew7gsrx',`b` mediumint(9) NOT NULL DEFAULT '2525518',PRIMARY KEY (`b`,`a`) /*T![clustered_index] CLUSTERED */); +insert into `issue40279` values (); +( select `issue40279`.`b` as r0 , from_base64( `issue40279`.`a` ) as r1 from `issue40279` ) except ( select `issue40279`.`a` as r0 , elt(2, `issue40279`.`a` , `issue40279`.`a` ) as r1 from `issue40279`); +r0 r1 +2525518 NULL +drop table if exists t2; +CREATE TABLE `t2` ( `a` varchar(20) CHARACTER SET gbk COLLATE gbk_chinese_ci DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; +insert into t2 values(0xCED2); +(select elt(2,t2.a,t2.a) from t2) except (select 0xCED2 from t2); +elt(2,t2.a,t2.a) +drop table if exists t; +create table t(a datetime, b bigint, c bigint); +insert into t values(cast('2023-08-09 00:00:00' as datetime), 20230809, 20231310); +select a > 20230809 from t; +a > 20230809 +0 +select a = 20230809 from t; +a = 20230809 +1 +select a < 20230810 from t; +a < 20230810 +1 +select a < 20231310 from t; +a < 20231310 +0 +select 20230809 < a from t; +20230809 < a +0 +select 20230809 = a from t; +20230809 = a +1 +select 20230810 > a from t; +20230810 > a +1 +select 20231310 > a from t; +20231310 > a +0 +select cast('2023-08-09 00:00:00' as datetime) > 20230809 from t; +cast('2023-08-09 00:00:00' as datetime) > 20230809 +1 +select cast('2023-08-09 00:00:00' as 
datetime) = 20230809 from t; +cast('2023-08-09 00:00:00' as datetime) = 20230809 +0 +select cast('2023-08-09 00:00:00' as datetime) < 20230810 from t; +cast('2023-08-09 00:00:00' as datetime) < 20230810 +0 +select cast('2023-08-09 00:00:00' as datetime) < 20231310 from t; +cast('2023-08-09 00:00:00' as datetime) < 20231310 +0 +select 20230809 < cast('2023-08-09 00:00:00' as datetime) from t; +20230809 < cast('2023-08-09 00:00:00' as datetime) +1 +select 20230809 = cast('2023-08-09 00:00:00' as datetime) from t; +20230809 = cast('2023-08-09 00:00:00' as datetime) +0 +select 20230810 > cast('2023-08-09 00:00:00' as datetime) from t; +20230810 > cast('2023-08-09 00:00:00' as datetime) +0 +select 20231310 > cast('2023-08-09 00:00:00' as datetime) from t; +20231310 > cast('2023-08-09 00:00:00' as datetime) +0 +select a > b from t; +a > b +1 +select a = b from t; +a = b +0 +select a < b + 1 from t; +a < b + 1 +0 +select a < c from t; +a < c +0 +select b < a from t; +b < a +1 +select b = a from t; +b = a +0 +select b > a from t; +b > a +0 +select c > a from t; +c > a +0 +load stats; +Error 1064 (42000): You have an error in your SQL syntax; check the manual that corresponds to your TiDB version for the right syntax to use line 1 column 11 near ";" +load stats ./xxx.json; +Error 1064 (42000): You have an error in your SQL syntax; check the manual that corresponds to your TiDB version for the right syntax to use line 1 column 12 near "./xxx.json;" +drop database if exists test_show; +create database test_show; +use test_show; +show engines; +Engine Support Comment Transactions XA Savepoints +InnoDB DEFAULT Supports transactions, row-level locking, and foreign keys YES YES YES +drop table if exists t; +create table t(a int primary key); +show index in t; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment Visible Expression Clustered Global +t 0 PRIMARY 1 a A 0 NULL NULL BTREE YES NULL YES NO +show index from t; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment Visible Expression Clustered Global +t 0 PRIMARY 1 a A 0 NULL NULL BTREE YES NULL YES NO +show master status; +File Position Binlog_Do_DB Binlog_Ignore_DB Executed_Gtid_Set +tidb-binlog 0 +show create database test_show; +Database Create Database +test_show CREATE DATABASE `test_show` /*!40100 DEFAULT CHARACTER SET utf8mb4 */ +show privileges; +Privilege Context Comment +Alter Tables To alter the table +Alter routine Functions,Procedures To alter or drop stored functions/procedures +Config Server Admin To use SHOW CONFIG and SET CONFIG statements +Create Databases,Tables,Indexes To create new databases and tables +Create routine Databases To use CREATE FUNCTION/PROCEDURE +Create role Server Admin To create new roles +Create temporary tables Databases To use CREATE TEMPORARY TABLE +Create view Tables To create new views +Create user Server Admin To create new users +Delete Tables To delete existing rows +Drop Databases,Tables To drop databases, tables, and views +Drop role Server Admin To drop roles +Event Server Admin To create, alter, drop and execute events +Execute Functions,Procedures To execute stored routines +File File access on server To read and write files on the server +Grant option Databases,Tables,Functions,Procedures To give to other users those privileges you possess +Index Tables To create or drop indexes +Insert Tables To insert data into tables +Lock tables Databases To use 
LOCK TABLES (together with SELECT privilege) +Process Server Admin To view the plain text of currently executing queries +Proxy Server Admin To make proxy user possible +References Databases,Tables To have references on tables +Reload Server Admin To reload or refresh tables, logs and privileges +Replication client Server Admin To ask where the slave or master servers are +Replication slave Server Admin To read binary log events from the master +Select Tables To retrieve rows from table +Show databases Server Admin To see all databases with SHOW DATABASES +Show view Tables To see views with SHOW CREATE VIEW +Shutdown Server Admin To shut down the server +Super Server Admin To use KILL thread, SET GLOBAL, CHANGE MASTER, etc. +Trigger Tables To use triggers +Create tablespace Server Admin To create/alter/drop tablespaces +Update Tables To update existing rows +Usage Server Admin No privileges - allow connect only +BACKUP_ADMIN Server Admin +RESTORE_ADMIN Server Admin +SYSTEM_USER Server Admin +SYSTEM_VARIABLES_ADMIN Server Admin +ROLE_ADMIN Server Admin +CONNECTION_ADMIN Server Admin +PLACEMENT_ADMIN Server Admin +DASHBOARD_CLIENT Server Admin +RESTRICTED_TABLES_ADMIN Server Admin +RESTRICTED_STATUS_ADMIN Server Admin +RESTRICTED_VARIABLES_ADMIN Server Admin +RESTRICTED_USER_ADMIN Server Admin +RESTRICTED_CONNECTION_ADMIN Server Admin +RESTRICTED_REPLICA_WRITER_ADMIN Server Admin +RESOURCE_GROUP_ADMIN Server Admin +RESOURCE_GROUP_USER Server Admin +show table status; +Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment +t InnoDB 10 Compact 0 0 0 0 0 0 NULL 0 NULL NULL utf8mb4_bin +drop database test_show; +use executor__executor; +select \N; +NULL +NULL +select "\N"; +N +N +drop table if exists test; +create table test (`\N` int); +insert into test values (1); +select * from test; +\N +1 +select \N from test; +NULL +NULL +select (\N) from test; +NULL +NULL +select `\N` from test; +\N +1 +select (`\N`) from test; +\N +1 +select '\N' from test; +N +N +select ('\N') from test; +N +N +select nUll; +NULL +NULL +select (null); +NULL +NULL +select null+NULL; +null+NULL +NULL +select 'abc'; +abc +abc +select (('abc')); +abc +abc +select 'abc'+'def'; +'abc'+'def' +0 +select '\n'; + + + +select '\t col'; +col + col +select '\t Col'; +Col + Col +select '\n\t 中文 col'; +中文 col + + 中文 col +select ' \r\n .col'; +.col + + .col +select ' 😆col'; +😆col + 😆col +select 'abc '; +abc +abc +select ' abc 123 '; +abc 123 + abc 123 +select 'a' ' ' 'string'; +a +a string +select 'a' " " "string"; +a +a string +select 'string' 'string'; +string +stringstring +select "ss" "a"; +ss +ssa +select "ss" "a" "b"; +ss +ssab +select "ss" "a" ' ' "b"; +ss +ssa b +select "ss" "a" ' ' "b" ' ' "d"; +ss +ssa b d +drop table if exists a, b; +create table a (k1 int, k2 int, v int); +create table b (a int not null, k1 int, k2 int, v int, primary key(k1, k2) ); +insert into a values (1, 1, 1), (2, 2, 2); +insert into b values (2, 2, 2, 2); +update a left join b on a.k1 = b.k1 and a.k2 = b.k2 set a.v = 20, b.v = 100, a.k1 = a.k1 + 1, b.k1 = b.k1 + 1, a.k2 = a.k2 + 2, b.k2 = b.k2 + 2; +select * from b; +a k1 k2 v +2 3 4 100 +select * from a; +k1 k2 v +2 3 20 +3 4 20 +admin check table a; +admin check table b; +drop table if exists a, b; +create table a (k1 int, k2 int, v int); +create table b (a int not null, k1 int, k2 int, v int, primary key(k1, k2) ); +insert into a values (1, 1, 1), (2, 2, 2); 
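The multi-table UPDATE cases above and below all exercise the same invariant: key columns and value columns are reassigned together through a join, each SET clause reads the pre-update value of its own column, and `admin check table` then verifies that row data and indexes stayed consistent after the keys moved. A minimal sketch of that invariant, using a hypothetical table `u` that is not part of the test schema:

create table u (k int primary key, v int);
insert into u values (1, 10);
-- k and v are rewritten in one statement; the row moves to a new key
update u set k = k + 1, v = v + 100;
select * from u;       -- expect: 2 110
admin check table u;   -- passes only if the old key entry was removed and the new one written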
+insert into b values (2, 2, 2, 2);
+update a left join b on a.k1 = b.k1 and a.k2 = b.k2 set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, b.k1 = b.k1 + 1, b.k2 = b.k2 + 2, a.v = 20, b.v = 100;
+select * from b;
+a k1 k2 v
+2 3 4 100
+select * from a;
+k1 k2 v
+2 3 20
+3 4 20
+admin check table a;
+admin check table b;
+drop table if exists a, b;
+create table a (k1 varchar(100), k2 varchar(100), v varchar(100));
+create table b (a varchar(100) not null, k1 varchar(100), k2 varchar(100), v varchar(100), primary key(k1(1), k2(1)) , key kk1(k1(1), v(1)));
+insert into a values ('11', '11', '11'), ('22', '22', '22');
+insert into b values ('22', '22', '22', '22');
+update a left join b on a.k1 = b.k1 and a.k2 = b.k2 set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, b.k1 = b.k1 + 1, b.k2 = b.k2 + 2, a.v = 20, b.v = 100;
+select * from b;
+a k1 k2 v
+22 23 24 100
+select * from a;
+k1 k2 v
+12 13 20
+23 24 20
+admin check table a;
+admin check table b;
+drop table if exists a, b;
+create table a (k1 varchar(100), k2 varchar(100), v varchar(100));
+create table b (a varchar(100) not null, k1 varchar(100), k2 varchar(100), v varchar(100), primary key(k1(1), k2(1)) , key kk1(k1(1), v(1)));
+insert into a values ('11', '11', '11'), ('22', '22', '22');
+insert into b values ('22', '22', '22', '22');
+update b right join a on a.k1 = b.k1 and a.k2 = b.k2 set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, b.k1 = b.k1 + 1, b.k2 = b.k2 + 2, a.v = 20, b.v = 100;
+select * from b;
+a k1 k2 v
+22 23 24 100
+select * from a;
+k1 k2 v
+12 13 20
+23 24 20
+admin check table a;
+admin check table b;
+drop table if exists a, b;
+create table a (k1 varchar(100), k2 varchar(100), v varchar(100));
+create table b (a varchar(100) not null, k1 varchar(100), k2 varchar(100), v varchar(100), primary key(k1(1), k2(1)) , key kk1(k1(1), v(1)));
+insert into a values ('11', '11', '11'), ('22', '22', '22');
+insert into b values ('22', '22', '22', '22');
+update b join a on a.k1 = b.k1 and a.k2 = b.k2 set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, b.k1 = b.k1 + 1, b.k2 = b.k2 + 2, a.v = 20, b.v = 100;
+select * from b;
+a k1 k2 v
+22 23 24 100
+select * from a;
+k1 k2 v
+11 11 11
+23 24 20
+admin check table a;
+admin check table b;
+drop table if exists a, b;
+create table a (k1 varchar(100), k2 varchar(100), v varchar(100));
+create table b (a varchar(100) not null, k1 varchar(100), k2 varchar(100), v varchar(100), primary key(k1(1), k2(1)) , key kk1(k1(1), v(1)));
+insert into a values ('11', '11', '11'), ('22', '22', '22');
+insert into b values ('22', '22', '22', '22');
+update a set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, a.v = 20 where exists (select 1 from b where a.k1 = b.k1 and a.k2 = b.k2);
+select * from b;
+a k1 k2 v
+22 22 22 22
+select * from a;
+k1 k2 v
+11 11 11
+23 24 20
+admin check table a;
+admin check table b;
+drop table if exists a, b;
+create table a (k1 int, k2 int, v int);
+create table b (a int not null, k1 int, k2 int, v int, primary key(k1, k2) clustered);
+insert into a values (1, 1, 1), (2, 2, 2);
+insert into b values (2, 2, 2, 2);
+update a left join b on a.k1 = b.k1 and a.k2 = b.k2 set a.v = 20, b.v = 100, a.k1 = a.k1 + 1, b.k1 = b.k1 + 1, a.k2 = a.k2 + 2, b.k2 = b.k2 + 2;
+select * from b;
+a k1 k2 v
+2 3 4 100
+select * from a;
+k1 k2 v
+2 3 20
+3 4 20
+admin check table a;
+admin check table b;
+drop table if exists a, b;
+create table a (k1 int, k2 int, v int);
+create table b (a int not null, k1 int, k2 int, v int, primary key(k1, k2) clustered);
+insert into a values (1, 1, 1), (2, 2, 2);
+insert into b values (2, 2, 2, 2);
+update a left join b on a.k1 = b.k1 and a.k2 = b.k2 set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, b.k1 = b.k1 + 1, b.k2 = b.k2 + 2, a.v = 20, b.v = 100;
+select * from b;
+a k1 k2 v
+2 3 4 100
+select * from a;
+k1 k2 v
+2 3 20
+3 4 20
+admin check table a;
+admin check table b;
+drop table if exists a, b;
+create table a (k1 varchar(100), k2 varchar(100), v varchar(100));
+create table b (a varchar(100) not null, k1 varchar(100), k2 varchar(100), v varchar(100), primary key(k1(1), k2(1)) clustered, key kk1(k1(1), v(1)));
+insert into a values ('11', '11', '11'), ('22', '22', '22');
+insert into b values ('22', '22', '22', '22');
+update a left join b on a.k1 = b.k1 and a.k2 = b.k2 set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, b.k1 = b.k1 + 1, b.k2 = b.k2 + 2, a.v = 20, b.v = 100;
+select * from b;
+a k1 k2 v
+22 23 24 100
+select * from a;
+k1 k2 v
+12 13 20
+23 24 20
+admin check table a;
+admin check table b;
+drop table if exists a, b;
+create table a (k1 varchar(100), k2 varchar(100), v varchar(100));
+create table b (a varchar(100) not null, k1 varchar(100), k2 varchar(100), v varchar(100), primary key(k1(1), k2(1)) clustered, key kk1(k1(1), v(1)));
+insert into a values ('11', '11', '11'), ('22', '22', '22');
+insert into b values ('22', '22', '22', '22');
+update b right join a on a.k1 = b.k1 and a.k2 = b.k2 set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, b.k1 = b.k1 + 1, b.k2 = b.k2 + 2, a.v = 20, b.v = 100;
+select * from b;
+a k1 k2 v
+22 23 24 100
+select * from a;
+k1 k2 v
+12 13 20
+23 24 20
+admin check table a;
+admin check table b;
+drop table if exists a, b;
+create table a (k1 varchar(100), k2 varchar(100), v varchar(100));
+create table b (a varchar(100) not null, k1 varchar(100), k2 varchar(100), v varchar(100), primary key(k1(1), k2(1)) clustered, key kk1(k1(1), v(1)));
+insert into a values ('11', '11', '11'), ('22', '22', '22');
+insert into b values ('22', '22', '22', '22');
+update b join a on a.k1 = b.k1 and a.k2 = b.k2 set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, b.k1 = b.k1 + 1, b.k2 = b.k2 + 2, a.v = 20, b.v = 100;
+select * from b;
+a k1 k2 v
+22 23 24 100
+select * from a;
+k1 k2 v
+11 11 11
+23 24 20
+admin check table a;
+admin check table b;
+drop table if exists a, b;
+create table a (k1 varchar(100), k2 varchar(100), v varchar(100));
+create table b (a varchar(100) not null, k1 varchar(100), k2 varchar(100), v varchar(100), primary key(k1(1), k2(1)) clustered, key kk1(k1(1), v(1)));
+insert into a values ('11', '11', '11'), ('22', '22', '22');
+insert into b values ('22', '22', '22', '22');
+update a set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, a.v = 20 where exists (select 1 from b where a.k1 = b.k1 and a.k2 = b.k2);
+select * from b;
+a k1 k2 v
+22 22 22 22
+select * from a;
+k1 k2 v
+11 11 11
+23 24 20
+admin check table a;
+admin check table b;
+set @@tidb_enable_clustered_index=On;
+drop table if exists t;
+create table t (a int, b int, c int, primary key(a,b));
+explain format = 'brief' select t1.a from t t1 left join t t2 on t1.a = t2.a and t1.b = t2.b;
+id estRows task access object operator info
+TableReader 10000.00 root data:TableFullScan
+└─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
+set @@tidb_enable_clustered_index=default;
+drop table if exists t;
+create table t (c1 bit(2));
+insert into t values (0), (1), (2), (3);
+insert into t values (4);
+Error 1406 (22001): Data too long for column 'c1' at row 1
+insert into t values ('a');
+Error 1406 (22001): Data too long for column 'c1' at row 1
+select hex(c1) from t where c1 = 2;
+hex(c1)
+2
+drop table if exists t;
+create table t (c1 bit(31));
+insert into t values (0x7fffffff);
+insert into t values (0x80000000);
+Error 1406 (22001): Data too long for column 'c1' at row 1
+insert into t values (0xffffffff);
+Error 1406 (22001): Data too long for column 'c1' at row 1
+insert into t values ('123');
+insert into t values ('1234');
+insert into t values ('12345);
+Error 1064 (42000): You have an error in your SQL syntax; check the manual that corresponds to your TiDB version for the right syntax to use line 1 column 30 near "'12345);"
+drop table if exists t;
+create table t (c1 bit(62));
+insert into t values ('12345678');
+drop table if exists t;
+create table t (c1 bit(61));
+insert into t values ('12345678');
+Error 1406 (22001): Data too long for column 'c1' at row 1
+drop table if exists t;
+create table t (c1 bit(32));
+insert into t values (0x7fffffff);
+insert into t values (0xffffffff);
+insert into t values (0x1ffffffff);
+Error 1406 (22001): Data too long for column 'c1' at row 1
+insert into t values ('1234');
+insert into t values ('12345');
+Error 1406 (22001): Data too long for column 'c1' at row 1
+drop table if exists t;
+create table t (c1 bit(64));
+insert into t values (0xffffffffffffffff);
+insert into t values ('12345678');
+insert into t values ('123456789');
+Error 1366 (HY000): Incorrect bit value: '123456789' for column 'c1' at row 1
+drop table if exists t;
+create table t (c1 bit(64));
+insert into t values (0xffffffffffffffff);
+insert into t values ('12345678');
+select hex(c1) from t where c1;
+hex(c1)
+FFFFFFFFFFFFFFFF
+3132333435363738
+drop table if exists t, t1;
+create table t (ts timestamp);
+set time_zone = '+00:00';
+insert into t values ('2017-04-27 22:40:42');
+set time_zone = '+10:00';
+select * from t;
+ts
+2017-04-28 08:40:42
+set time_zone = '-6:00';
+select * from t;
+ts
+2017-04-27 16:40:42
+drop table if exists t1;
+CREATE TABLE t1 (
+id bigint(20) NOT NULL AUTO_INCREMENT,
+uid int(11) DEFAULT NULL,
+datetime timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ip varchar(128) DEFAULT NULL,
+PRIMARY KEY (id),
+KEY i_datetime (datetime),
+KEY i_userid (uid)
+);
+INSERT INTO t1 VALUES (123381351,1734,"2014-03-31 08:57:10","127.0.0.1");
+select datetime from t1;
+datetime
+2014-03-31 08:57:10
+select datetime from t1 where datetime='2014-03-31 08:57:10';
+datetime
+2014-03-31 08:57:10
+select * from t1 where datetime='2014-03-31 08:57:10';
+id uid datetime ip
+123381351 1734 2014-03-31 08:57:10 127.0.0.1
+set time_zone = 'Asia/Shanghai';
+drop table if exists t1;
+CREATE TABLE t1 (
+id bigint(20) NOT NULL AUTO_INCREMENT,
+datetime timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
+PRIMARY KEY (id)
+);
+INSERT INTO t1 VALUES (123381351,"2014-03-31 08:57:10");
+select * from t1 where datetime="2014-03-31 08:57:10";
+id datetime
+123381351 2014-03-31 08:57:10
+alter table t1 add key i_datetime (datetime);
+select * from t1 where datetime="2014-03-31 08:57:10";
+id datetime
+123381351 2014-03-31 08:57:10
+select * from t1;
+id datetime
+123381351 2014-03-31 08:57:10
+select datetime from t1 where datetime='2014-03-31 08:57:10';
+datetime
+2014-03-31 08:57:10
+set time_zone=default;
+drop table if exists t2;
+create table t2(a int, b int, c int);
+insert into t2 values (11, 8, (select not b));
+Error 1054 (42S22): Unknown column 'b' in 'field list'
+insert into t2 set a = 11, b = 8, c = (select b));
+Error 1064 (42000): You have an error in your SQL syntax; check the manual that corresponds to your TiDB version for the right syntax to use line 1 column 49 near ");"
+insert into t2 values(1, 1, (select b from t2));
+select * from t2;
+a b c
+1 1 NULL
+insert into t2 set a = 1, b = 1, c = (select b+1 from t2);
+select * from t2;
+a b c
+1 1 NULL
+1 1 2
+delete from t2;
+insert into t2 values(2, 4, a);
+select * from t2;
+a b c
+2 4 2
+insert into t2 set a = 3, b = 5, c = b;
+select * from t2;
+a b c
+2 4 2
+3 5 5
+drop table if exists t;
+create table t(a int, b int);
+insert into t values ( 81, ( select ( SELECT '1' AS `c0` WHERE '1' >= `subq_0`.`c0` ) as `c1` FROM ( SELECT '1' AS `c0` ) AS `subq_0` ) );
+Error 1105 (HY000): Insert's SET operation or VALUES_LIST doesn't support complex subqueries now
+insert into t set a = 81, b = (select ( SELECT '1' AS `c0` WHERE '1' >= `subq_0`.`c0` ) as `c1` FROM ( SELECT '1' AS `c0` ) AS `subq_0` );
+Error 1105 (HY000): Insert's SET operation or VALUES_LIST doesn't support complex subqueries now
+drop table if exists t2;
+drop table if exists t;
+create table t (id bit(16), key id(id));
+insert into t values (65);
+select * from t where id not in (-1,2);
+id
+A
+select * from t where id in (-1, -2);
+Error 1582 (42000): Incorrect parameter count in the call to native function 'in'
+drop table if exists t;
+drop table if exists t1;
+create table t(k1 int, v bit(34) DEFAULT b'111010101111001001100111101111111', primary key(k1) clustered);
+create table t1(k1 int, v bit(34) DEFAULT b'111010101111001001100111101111111', primary key(k1) nonclustered);
+insert into t(k1) select 1;
+insert into t1(k1) select 1;
+set @@tidb_enable_vectorized_expression = 0;
+(select k1, hex(v) from t where false) union(select k1, hex(v) from t for update);
+k1 hex(v)
+1 1D5E4CF7F
+(select k1, hex(v) from t1 where false) union(select k1, hex(v) from t1 for update);
+k1 hex(v)
+1 1D5E4CF7F
+set @@tidb_enable_vectorized_expression = 1;
+(select k1, hex(v) from t where false) union(select k1, hex(v) from t for update);
+k1 hex(v)
+1 1D5E4CF7F
+(select k1, hex(v) from t1 where false) union(select k1, hex(v) from t1 for update);
+k1 hex(v)
+1 1D5E4CF7F
+set @@tidb_enable_vectorized_expression = default;
+drop table if exists t;
+drop view if exists v;
+create table t(a int);
+insert into t values(1), (2), (3);
+create definer='root'@'localhost' view v as select count(*) as c1 from t;
+select * from v;
+c1
+3
+drop view v;
+create definer='root'@'localhost' view v as select * from (select count(*) from t) s;
+select * from v order by 1;
+count(*)
+3
+drop view v;
+create definer='root'@'localhost' view v as select * from (select avg(a) from t group by a) s;
+select * from v order by 1;
+avg(a)
+1.0000
+2.0000
+3.0000
+drop view v;
+create definer='root'@'localhost' view v as select * from (select sum(a) from t group by a) s;
+select * from v order by 1;
+sum(a)
+1
+2
+3
+drop view v;
+create definer='root'@'localhost' view v as select * from (select group_concat(a) from t group by a) s;
+select * from v order by 1;
+group_concat(a)
+1
+2
+3
+drop view v;
+create definer='root'@'localhost' view v as select * from (select count(0) as c1 from t) s;
+select * from v order by 1;
+c1
+3
+drop view v;
+create definer='root'@'localhost' view v as select * from (select count(*) as c1 from t) s;
+select * from v order by 1;
+c1
+3
+drop view v;
+create definer='root'@'localhost' view v as select * from (select group_concat(a) as `concat(a)` from t group by a) s;
+select * from v order by 1;
+concat(a)
+1
+2
+3
+drop view v;
+create definer='root'@'localhost' view v as select * from (select a from t group by a) s;
+select * from v order by 1;
+a
+1
+2
+3
+SELECT `s`.`count(a)` FROM (SELECT COUNT(`a`) FROM `executor__executor`.`t`) AS `s`;
+Error 1054 (42S22): Unknown column 's.count(a)' in 'field list'
+drop view v;
+create definer='root'@'localhost' view v as select * from (select count(a) from t) s;
+select * from v;
+count(a)
+3
+drop table if exists t;
+create table t(c1 int);
+insert into t values(111), (222), (333);
+drop view if exists v;
+create definer='root'@'localhost' view v as (select * from (select row_number() over (order by c1) from t) s);
+select * from v;
+row_number() over (order by c1)
+1
+2
+3
+drop view if exists v;
+create definer='root'@'localhost' view v as (select * from (select c1, row_number() over (order by c1) from t) s);
+select * from v;
+c1 row_number() over (order by c1)
+111 1
+222 2
+333 3
+drop view if exists v;
+create definer='root'@'localhost' view v as (select * from (select c1 or 0 from t) s);
+select * from v;
+c1 or 0
+1
+1
+1
+select `c1 or 0` from v;
+c1 or 0
+1
+1
+1
+drop view v;
+drop table if exists t, t1, t2;
+create table t (a int(11) default null,b int(11) default null,key b (b),key ba (b));
+create table t1 (a int(11) default null,b int(11) default null,key idx_ab (a,b),key idx_a (a),key idx_b (b));
+create table t2 (a int(11) default null,b int(11) default null,key idx_ab (a,b),key idx_a (a),key idx_b (b));
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+DROP TABLE IF EXISTS admin_checksum_partition_test;
+CREATE TABLE admin_checksum_partition_test (a INT) PARTITION BY HASH(a) PARTITIONS 4;
+INSERT INTO admin_checksum_partition_test VALUES (1), (2);
+ADMIN CHECKSUM TABLE admin_checksum_partition_test;
+drop table if exists t;
+create table t (a tinyint not null);
+set sql_mode = 'STRICT_TRANS_TABLES';
+insert t values ();
+Error 1364 (HY000): Field 'a' doesn't have a default value
+insert t values ('1000');
+Error 1264 (22003): Out of range value for column 'a' at row 1
+create table if not exists tdouble (a double(3,2));
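The expectations continuing below pivot on sql_mode: under STRICT_TRANS_TABLES an out-of-range or missing value is a hard error, while with sql_mode='' the same statement is downgraded to a warning and the value is clamped to the nearest bound. A condensed sketch of that pivot, mirroring the tdouble case (illustrative session only, table name `d` is hypothetical):

set sql_mode = 'STRICT_TRANS_TABLES';
create table d (a double(3,2));   -- largest representable value is 9.99
insert into d values (10.23);     -- Error 1264: out of range in strict mode
set sql_mode = '';
insert into d values (10.23);     -- accepted: clamped to 9.99 with warning 1264
select * from d;                  -- 9.99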
+insert tdouble values (10.23);
+Error 1264 (22003): Out of range value for column 'a' at row 1
+set sql_mode = '';
+insert t values ();
+show warnings;
+Level Code Message
+Warning 1364 Field 'a' doesn't have a default value
+insert t values (null);
+Error 1048 (23000): Column 'a' cannot be null
+insert ignore t values (null);
+show warnings;
+Level Code Message
+Warning 1048 Column 'a' cannot be null
+insert t select null;
+show warnings;
+Level Code Message
+Warning 1048 Column 'a' cannot be null
+insert t values (1000);
+select * from t order by a;
+a
+0
+0
+0
+127
+insert tdouble values (10.23);
+select * from tdouble;
+a
+9.99
+set sql_mode = 'STRICT_TRANS_TABLES';
+set @@global.sql_mode = '';
+drop table if exists t2;
+create table t2 (a varchar(3));
+insert t2 values ('abcd');
+select * from t2;
+a
+abc
+insert t2 values ('abcd');
+Error 1406 (22001): Data too long for column 'a' at row 1
+set sql_mode = default;
+set @@global.sql_mode = default;
+use information_schema;
+select count(*)>=4 from schemata;
+count(*)>=4
+1
+create database mytest;
+use information_schema;
+select * from schemata where schema_name = 'mysql';
+CATALOG_NAME SCHEMA_NAME DEFAULT_CHARACTER_SET_NAME DEFAULT_COLLATION_NAME SQL_PATH TIDB_PLACEMENT_POLICY_NAME
+def mysql utf8mb4 utf8mb4_bin NULL NULL
+select * from schemata where schema_name like 'my%';
+CATALOG_NAME SCHEMA_NAME DEFAULT_CHARACTER_SET_NAME DEFAULT_COLLATION_NAME SQL_PATH TIDB_PLACEMENT_POLICY_NAME
+def mysql utf8mb4 utf8mb4_bin NULL NULL
+def mytest utf8mb4 utf8mb4_bin NULL NULL
+select 1 from tables limit 1;
+1
+1
+use executor__executor;
+set @@sql_mode='NO_ZERO_DATE';
+select date_add('2001-01-00', interval -2 hour);
+date_add('2001-01-00', interval -2 hour)
+NULL
+show warnings;
+Level Code Message
+Warning 1292 Incorrect datetime value: '2001-01-00'
+set @@sql_mode=default;
+set @@sql_mode='NO_ZERO_DATE';
+drop table if exists t1;
+SELECT STR_TO_DATE('0000-1-01', '%Y-%m-%d');
+STR_TO_DATE('0000-1-01', '%Y-%m-%d')
+NULL
+show warnings;
+Level Code Message
+Warning 1411 Incorrect datetime value: '0000-1-01' for function str_to_date
+SELECT CAST('4#,8?Q' AS DATE);
+CAST('4#,8?Q' AS DATE)
+NULL
+show warnings;
+Level Code Message
+Warning 8034 Incorrect datetime value: '4#,8?Q'
+CREATE TABLE t1 (c1 INT, c2 TEXT);
+INSERT INTO t1 VALUES (1833458842, '0.3503490908550797');
+SELECT CAST(t1.c2 AS DATE) FROM t1;
+CAST(t1.c2 AS DATE)
+NULL
+show warnings;
+Level Code Message
+Warning 1292 Incorrect datetime value: '0.3503490908550797'
+set @@sql_mode=default;
+drop table if exists t;
+create table t(a decimal(10,2) unsigned);
+insert into t values (-1);
+Error 1264 (22003): Out of range value for column 'a' at row 1
+insert into t values ("-1.1e-1");
+Error 1264 (22003): Out of range value for column 'a' at row 1
+insert into t values (-1.1);
+Error 1264 (22003): Out of range value for column 'a' at row 1
+insert into t values (-0);
+set sql_mode='';
+delete from t;
+insert into t values (-1);
+select a from t limit 1;
+a
+0.00
+set sql_mode=default;
+drop table if exists t;
+create table t(a int);
+do 1 in (select * from t);
+insert into t values(1);
+do 1 in (select * from t);
+drop table if exists t;
+create table t(j JSON);
+insert into t values('2010');
+insert into t values('2011');
+insert into t values('2012');
+insert into t values('2010.000');
+insert into t values(cast(18446744073709551615 as JSON));
+insert into t values(cast(18446744073709551616.000000 as JSON));
+select count(distinct j) from t;
+count(distinct j)
+5
+drop table if exists t;
+create table t(id int(11), j JSON, d DOUBLE);
+insert into t values(0, '2010', 2010);
+insert into t values(1, '2011', 2011);
+insert into t values(2, '2012', 2012);
+insert into t values(3, cast(18446744073709551615 as JSON), 18446744073709551616.000000);
+select /*+inl_hash_join(t2)*/ t1.id, t2.id from t t1 join t t2 on t1.j = t2.d;
+id id
+0 0
+1 1
+2 2
+drop table if exists catalog_sales, store_sales, date_dim;
+create table catalog_sales
+(
+cs_sold_date_sk int ,
+cs_sold_time_sk int ,
+cs_ship_date_sk int ,
+cs_bill_customer_sk int ,
+cs_bill_cdemo_sk int ,
+cs_bill_hdemo_sk int ,
+cs_bill_addr_sk int ,
+cs_ship_customer_sk int ,
+cs_ship_cdemo_sk int ,
+cs_ship_hdemo_sk int ,
+cs_ship_addr_sk int ,
+cs_call_center_sk int ,
+cs_catalog_page_sk int ,
+cs_ship_mode_sk int ,
+cs_warehouse_sk int ,
+cs_item_sk int not null,
+cs_promo_sk int ,
+cs_order_number int not null,
+cs_quantity int ,
+cs_wholesale_cost decimal(7,2) ,
+cs_list_price decimal(7,2) ,
+cs_sales_price decimal(7,2) ,
+cs_ext_discount_amt decimal(7,2) ,
+cs_ext_sales_price decimal(7,2) ,
+cs_ext_wholesale_cost decimal(7,2) ,
+cs_ext_list_price decimal(7,2) ,
+cs_ext_tax decimal(7,2) ,
+cs_coupon_amt decimal(7,2) ,
+cs_ext_ship_cost decimal(7,2) ,
+cs_net_paid decimal(7,2) ,
+cs_net_paid_inc_tax decimal(7,2) ,
+cs_net_paid_inc_ship decimal(7,2) ,
+cs_net_paid_inc_ship_tax decimal(7,2) ,
+cs_net_profit decimal(7,2) ,
+primary key (cs_item_sk, cs_order_number)
+);
+create table store_sales
+(
+ss_sold_date_sk int ,
+ss_sold_time_sk int ,
+ss_item_sk int not null,
+ss_customer_sk int ,
+ss_cdemo_sk int ,
+ss_hdemo_sk int ,
+ss_addr_sk int ,
+ss_store_sk int ,
+ss_promo_sk int ,
+ss_ticket_number int not null,
+ss_quantity int ,
+ss_wholesale_cost decimal(7,2) ,
+ss_list_price decimal(7,2) ,
+ss_sales_price decimal(7,2) ,
+ss_ext_discount_amt decimal(7,2) ,
+ss_ext_sales_price decimal(7,2) ,
+ss_ext_wholesale_cost decimal(7,2) ,
+ss_ext_list_price decimal(7,2) ,
+ss_ext_tax decimal(7,2) ,
+ss_coupon_amt decimal(7,2) ,
+ss_net_paid decimal(7,2) ,
+ss_net_paid_inc_tax decimal(7,2) ,
+ss_net_profit decimal(7,2) ,
+primary key (ss_item_sk, ss_ticket_number)
+);
+create table date_dim
+(
+d_date_sk int not null,
+d_date_id char(16) not null,
+d_date date ,
+d_month_seq int ,
+d_week_seq int ,
+d_quarter_seq int ,
+d_year int ,
+d_dow int ,
+d_moy int ,
+d_dom int ,
+d_qoy int ,
+d_fy_year int ,
+d_fy_quarter_seq int ,
+d_fy_week_seq int ,
+d_day_name char(9) ,
+d_quarter_name char(6) ,
+d_holiday char(1) ,
+d_weekend char(1) ,
+d_following_holiday char(1) ,
+d_first_dom int ,
+d_last_dom int ,
+d_same_day_ly int ,
+d_same_day_lq int ,
+d_current_day char(1) ,
+d_current_week char(1) ,
+d_current_month char(1) ,
+d_current_quarter char(1) ,
+d_current_year char(1) ,
+primary key (d_date_sk)
+);
+plan replayer dump explain with ssci as (
+select ss_customer_sk customer_sk
+,ss_item_sk item_sk
+from store_sales,date_dim
+where ss_sold_date_sk = d_date_sk
+and d_month_seq between 1212 and 1212 + 11
+group by ss_customer_sk
+,ss_item_sk),
+csci as(
+select cs_bill_customer_sk customer_sk
+,cs_item_sk item_sk
+from catalog_sales,date_dim
+where cs_sold_date_sk = d_date_sk
+and d_month_seq between 1212 and 1212 + 11
+group by cs_bill_customer_sk
+,cs_item_sk)
+select sum(case when ssci.customer_sk is not null and csci.customer_sk is null then 1 else 0 end) store_only
+,sum(case when ssci.customer_sk is null and csci.customer_sk is not null then 1 else 0 end) catalog_only
+,sum(case when ssci.customer_sk is not null and csci.customer_sk is not null then 1 else 0 end) store_and_catalog
+from ssci left join csci on (ssci.customer_sk=csci.customer_sk
+and ssci.item_sk = csci.item_sk)
+UNION
+select sum(case when ssci.customer_sk is not null and csci.customer_sk is null then 1 else 0 end) store_only
+,sum(case when ssci.customer_sk is null and csci.customer_sk is not null then 1 else 0 end) catalog_only
+,sum(case when ssci.customer_sk is not null and csci.customer_sk is not null then 1 else 0 end) store_and_catalog
+from ssci right join csci on (ssci.customer_sk=csci.customer_sk
+and ssci.item_sk = csci.item_sk)
+limit 100;
+admin show bdr role;
+BDR_ROLE
+
+admin set bdr role primary;
+admin show bdr role;
+BDR_ROLE
+primary
+admin set bdr role secondary;
+admin show bdr role;
+BDR_ROLE
+secondary
+admin unset bdr role;
+admin show bdr role;
+BDR_ROLE
+
+admin set bdr role test_err;
+Error 1064 (42000): You have an error in your SQL syntax; check the manual that corresponds to your TiDB version for the right syntax to use line 1 column 27 near "test_err;"
+admin show bdr role;
+BDR_ROLE
+
+admin unset bdr role;
+set global tidb_mem_oom_action='CANCEL';
+drop table if exists t, t1;
+create table t(a int, b int, index idx(a));
+create table t1(a int, c int, index idx(a));
+set tidb_mem_quota_query=10;
+select t.a, t1.a from t use index(idx), t1 use index(idx) where t.a = t1.a;
+Error 8175 (HY000): Your query has been cancelled due to exceeding the allowed memory limit for a single SQL query. Please try narrowing your query scope or increase the tidb_mem_quota_query limit and try again.[conn=]
+set global tidb_mem_oom_action=default;
+set tidb_mem_quota_query=default;
+drop table if exists t, t1;
+create table t (a int primary key, b double);
+insert into t values (1,1);
+SET GLOBAL tidb_mem_oom_action='CANCEL';
+set @@tidb_mem_quota_query=1;
+select sum(b) from t group by a;
+Error 8175 (HY000): Your query has been cancelled due to exceeding the allowed memory limit for a single SQL query. Please try narrowing your query scope or increase the tidb_mem_quota_query limit and try again.[conn=]
+drop table if exists t,t1;
+create table t (a bigint);
+create table t1 (a bigint);
+set @@tidb_mem_quota_query=200;
+insert into t1 values (1),(2),(3),(4),(5);
+Error 8175 (HY000): Your query has been cancelled due to exceeding the allowed memory limit for a single SQL query. Please try narrowing your query scope or increase the tidb_mem_quota_query limit and try again.[conn=]
+replace into t1 values (1),(2),(3),(4),(5);
+Error 8175 (HY000): Your query has been cancelled due to exceeding the allowed memory limit for a single SQL query. Please try narrowing your query scope or increase the tidb_mem_quota_query limit and try again.[conn=]
+set @@tidb_mem_quota_query=10000;
+insert into t1 values (1),(2),(3),(4),(5);
+set @@tidb_mem_quota_query=10;
+insert into t select a from t1 order by a desc;
+Error 8175 (HY000): Your query has been cancelled due to exceeding the allowed memory limit for a single SQL query. Please try narrowing your query scope or increase the tidb_mem_quota_query limit and try again.[conn=]
+replace into t select a from t1 order by a desc;
+Error 8175 (HY000): Your query has been cancelled due to exceeding the allowed memory limit for a single SQL query. Please try narrowing your query scope or increase the tidb_mem_quota_query limit and try again.[conn=]
+set @@tidb_mem_quota_query=10000;
+insert into t values (1),(2),(3),(4),(5);
+set @@tidb_mem_quota_query=244;
+delete from t;
+Error 8175 (HY000): Your query has been cancelled due to exceeding the allowed memory limit for a single SQL query. Please try narrowing your query scope or increase the tidb_mem_quota_query limit and try again.[conn=]
+set @@tidb_mem_quota_query=10000;
+delete from t1;
+insert into t1 values(1);
+insert into t values (1),(2),(3),(4),(5);
+set @@tidb_mem_quota_query=244;
+delete t, t1 from t join t1 on t.a = t1.a;
+Error 8175 (HY000): Your query has been cancelled due to exceeding the allowed memory limit for a single SQL query. Please try narrowing your query scope or increase the tidb_mem_quota_query limit and try again.[conn=]
+set @@tidb_mem_quota_query=100000;
+truncate table t;
+insert into t values(1),(2),(3);
+set @@tidb_mem_quota_query=244;
+update t set a = 4;
+Error 8175 (HY000): Your query has been cancelled due to exceeding the allowed memory limit for a single SQL query. Please try narrowing your query scope or increase the tidb_mem_quota_query limit and try again.[conn=]
+SET GLOBAL tidb_mem_oom_action = DEFAULT;
+set @@tidb_mem_quota_query=DEFAULT;
+drop table if exists t;
+create table t(a int);
+insert into t values(1);
+set tidb_track_aggregate_memory_usage = off;
+explain analyze select /*+ HASH_AGG() */ sum(a) from t;
+id estRows actRows task access object execution info operator info memory disk
+HashAgg_9 1.00 1 root funcs:sum(Column#4)->Column#3 N/A N/A
+└─TableReader_10 1.00 1 root data:HashAgg_5 Bytes N/A
+  └─HashAgg_5 1.00 1 cop[tikv] funcs:sum(executor__executor.t.a)->Column#4 N/A N/A
+    └─TableFullScan_8 10000.00 1 cop[tikv] keep order:false, stats:pseudo N/A N/A
+explain analyze select /*+ STREAM_AGG() */ sum(a) from t;
+id estRows actRows task access object execution info operator info memory disk
+StreamAgg_14 1.00 1 root funcs:sum(Column#4)->Column#3 N/A N/A
+└─TableReader_15 1.00 1 root data:StreamAgg_8 Bytes N/A
+  └─StreamAgg_8 1.00 1 cop[tikv] funcs:sum(executor__executor.t.a)->Column#4 N/A N/A
+    └─TableFullScan_13 10000.00 1 cop[tikv] keep order:false, stats:pseudo N/A N/A
+set tidb_track_aggregate_memory_usage = on;
+explain analyze select /*+ HASH_AGG() */ sum(a) from t;
+id estRows actRows task access object execution info operator info memory disk
+HashAgg_9 1.00 1 root funcs:sum(Column#4)->Column#3 KB Bytes
+└─TableReader_10 1.00 1 root data:HashAgg_5 Bytes N/A
+  └─HashAgg_5 1.00 1 cop[tikv] funcs:sum(executor__executor.t.a)->Column#4 N/A N/A
+    └─TableFullScan_8 10000.00 1 cop[tikv] keep order:false, stats:pseudo N/A N/A
+explain analyze select /*+ STREAM_AGG() */ sum(a) from t;
+id estRows actRows task access object execution info operator info memory disk
+StreamAgg_14 1.00 1 root funcs:sum(Column#4)->Column#3 KB N/A
+└─TableReader_15 1.00 1 root data:StreamAgg_8 Bytes N/A
+  └─StreamAgg_8 1.00 1 cop[tikv] funcs:sum(executor__executor.t.a)->Column#4 N/A N/A
+    └─TableFullScan_13 10000.00 1 cop[tikv] keep order:false, stats:pseudo N/A N/A
+set tidb_track_aggregate_memory_usage = default;
+drop table if exists testbind;
+create table testbind(i int, s varchar(20));
+create index index_t on testbind(i,s);
+create global binding for select * from testbind using select * from testbind use index for join(index_t);
+show global bindings where default_db='executor__executor';
+Original_sql Bind_sql Default_db Status Create_time Update_time Charset Collation Source Sql_digest Plan_digest
+select * from `executor__executor` . `testbind` SELECT * FROM `executor__executor`.`testbind` USE INDEX FOR JOIN (`index_t`) executor__executor enabled utf8mb4 utf8mb4_general_ci manual a2fa907992be17801e5976df09b5b3a0d205f4c4aff39a14ab3bc8642026f527 
+create session binding for select * from testbind using select * from testbind use index for join(index_t);
+show session bindings where default_db='executor__executor';
+Original_sql Bind_sql Default_db Status Create_time Update_time Charset Collation Source Sql_digest Plan_digest
+select * from `executor__executor` . `testbind` SELECT * FROM `executor__executor`.`testbind` USE INDEX FOR JOIN (`index_t`) executor__executor enabled utf8mb4 utf8mb4_general_ci manual a2fa907992be17801e5976df09b5b3a0d205f4c4aff39a14ab3bc8642026f527 
+drop session binding for select * from testbind using select * from testbind use index for join(index_t);
+drop global binding for select * from testbind using select * from testbind use index for join(index_t);
+drop table if EXISTS t1;
+create table t1(id int primary key, a int, b int, c int, d int, index t1a(a), index t1b(b));
+insert into t1 values(1,1,1,1,1),(2,2,2,2,2),(3,3,3,3,3),(4,4,4,4,4),(5,5,5,5,5);
+explain analyze select /*+ use_index_merge(t1, primary, t1a) */ * from t1 where id < 2 or a > 4;
+id estRows actRows task access object execution info operator info memory disk
+IndexMerge_8 3334.67 2 root NULL .*time:.*loops:.*index_task:{fetch_handle:.*, merge:.*}.*table_task:{num.*concurrency.*fetch_row.*wait_time.*}.* type: union KB N/A
+├─TableRangeScan_5(Build) 3333.33 1 cop[tikv] table:t1 .*time:.*loops:.*cop_task:.* range:[-inf,2), keep order:false, stats:pseudo Bytes N/A
+├─IndexRangeScan_6(Build) 3333.33 1 cop[tikv] table:t1, index:t1a(a) .*time:.*loops:.*cop_task:.* range:(4,+inf], keep order:false, stats:pseudo N/A N/A
+└─TableRowIDScan_7(Probe) 3334.67 2 cop[tikv] table:t1 .*time:.*loops:.*cop_task:.* keep order:false, stats:pseudo N/A N/A
+set @@tidb_enable_collect_execution_info=0;
+select /*+ use_index_merge(t1, primary, t1a) */ * from t1 where id < 2 or a > 4 order by a;
+id a b c d
+1 1 1 1 1
+5 5 5 5 5
+set @@tidb_enable_collect_execution_info=default;
+drop table if exists t1;
+create table t1 (a int, b int, index(a));
+insert into t1 values (1,2),(2,3),(3,4);
+explain analyze select * from t1 use index(a) where a > 1;
+id estRows actRows task access object execution info operator info memory disk
+IndexLookUp_7 3333.33 2 root NULL .*time:.*loops:.*index_task:.*table_task: {total_time.*num.*concurrency.*}.* NULL KB N/A
+├─IndexRangeScan_5(Build) 3333.33 2 cop[tikv] table:t1, index:a(a) .*time:.*loops:.*cop_task:.* range:(1,+inf], keep order:false, stats:pseudo N/A N/A
+└─TableRowIDScan_6(Probe) 3333.33 2 cop[tikv] table:t1 .*time:.*loops:.*cop_task:.* keep order:false, stats:pseudo N/A N/A
+drop table if exists t1;
+create table t1 (a int, b int);
+insert into t1 values (1,2),(2,3),(3,4);
+explain analyze SELECT /*+ HASH_AGG() */ count(*) FROM t1 WHERE a < 10;
+id estRows actRows task access object execution info operator info memory disk
+HashAgg_11 1.00 1 root NULL .*time:.*loops:.*partial_worker:{wall_time:.*concurrency:.*task_num:.*tot_wait:.*tot_exec:.*tot_time:.*max:.*p95:.*}.*final_worker:{wall_time:.*concurrency:.*task_num:.*tot_wait:.*tot_exec:.*tot_time:.*max:.*p95:.*}.* funcs:count(Column#5)->Column#4 KB Bytes
+└─TableReader_12 1.00 1 root NULL time.*loops.*cop_task.* data:HashAgg_6 Bytes N/A
+  └─HashAgg_6 1.00 1 cop[tikv] NULL tikv_task:.* funcs:count(1)->Column#5 N/A N/A
+    └─Selection_10 3323.33 3 cop[tikv] NULL tikv_task:.* lt(executor__executor.t1.a, 10) N/A N/A
+      └─TableFullScan_9 10000.00 3 cop[tikv] table:t1 tikv_task:.* keep order:false, stats:pseudo N/A N/A
+set global tidb_txn_mode='';
+drop table if exists t, t1;
+create table t (c1 int, c2 int, c3 int);
+insert t values (11, 2, 3);
+insert t values (12, 2, 3);
+insert t values (13, 2, 3);
+create table t1 (c1 int);
+insert t1 values (11);
+begin;
+select * from t where c1=11 for update;
+c1 c2 c3
+11 2 3
+begin;
+update t set c2=211 where c1=11;
+commit;
+commit;
+Error 9007 (HY000): Write conflict, reason=Optimistic [try again later]
+begin;
+select * from t where exists(select null from t1 where t1.c1=t.c1) for update;
+c1 c2 c3
+11 211 3
+begin;
+update t set c2=211 where c1=12;
+commit;
+commit;
+begin;
+select * from t where c1=11 for update;
+c1 c2 c3
+11 211 3
+begin;
+update t set c2=22 where c1=12;
+commit;
+commit;
+set @@autocommit=1;
+select * from t where c1=11 for update;
+c1 c2 c3
+11 211 3
+begin;
+update t set c2=211 where c1=11;
+commit;
+commit;
+begin;
+select * from (select * from t for update) t join t1 for update;
+c1 c2 c3 c1
+11 211 3 11
+12 22 3 11
+13 2 3 11
+begin;
+update t1 set c1 = 13;
+commit;
+commit;
+Error 9007 (HY000): Write conflict, reason=Optimistic [try again later]
+set global tidb_txn_mode=pessimistic;
+drop table if exists t, t1;
+create table t (i int);
+create table t1 (i int);
+insert t values (1);
+insert t1 values (1);
+begin pessimistic;
+select * from t, t1 where t.i = t1.i for update of t;
+i i
+1 1
+begin pessimistic;
+select * from t1 for update;
+i
+1
+select * from t for update nowait;
+Error 3572 (HY000): Statement aborted because lock(s) could not be acquired immediately and NOWAIT is set.
+rollback;
+select * from t for update nowait;
+i
+1
+rollback;
+set session tidb_txn_mode='';
+drop table if exists t;
+create table t(a int);
+insert into t values (1);
+begin;
+select 1 as a union select a from t for update;
+a
+1
+set session tidb_txn_mode='';
+update t set a = a + 1;
+commit;
+Error 9007 (HY000): Write conflict, reason=Optimistic [try again later]
+begin;
+select 1 as a union select a from t limit 5 for update;
+a
+1
+2
+select 1 as a union select a from t order by a for update;
+a
+1
+2
+update t set a = a + 1;
+commit;
+Error 9007 (HY000): Write conflict, reason=Optimistic [try again later]
+set session tidb_txn_mode=pessimistic;
+drop table if exists t;
+create table t (id bigint key,b int);
+split table t by (10),(20),(30);
+TOTAL_SPLIT_REGION SCATTER_FINISH_RATIO
+3 1
+insert into t values (0,0),(10,10),(20,20),(30,30);
+alter table t add index idx1(b);
+admin show ddl jobs 1;
+JOB_ID DB_NAME TABLE_NAME JOB_TYPE SCHEMA_STATE SCHEMA_ID TABLE_ID ROW_COUNT CREATE_TIME START_TIME END_TIME STATE
+ executor__executor t public 4 synced
+insert into t values (1,0),(2,10),(3,20),(4,30);
+alter table t add index idx2(b);
+admin show ddl jobs 1;
+JOB_ID DB_NAME TABLE_NAME JOB_TYPE SCHEMA_STATE SCHEMA_ID TABLE_ID ROW_COUNT CREATE_TIME START_TIME END_TIME STATE
+ executor__executor t public 8 synced
+drop table if exists t;
+create table t(a int, b int as(-a));
+insert into t(a) values(1), (3), (7);
+SET GLOBAL tidb_mem_oom_action='CANCEL';
+set @@tidb_mem_quota_query=1;
+update t set t.a = t.a - 1 where t.a in (select a from t where a < 4);
+Error 8175 (HY000): Your query has been cancelled due to exceeding the allowed memory limit for a single SQL query.
Please try narrowing your query scope or increase the tidb_mem_quota_query limit and try again.[conn=] +set @@tidb_mem_quota_query=1000000000; +select stmt_type from information_schema.statements_summary where digest_text = 'update `t` set `t` . `a` = `t` . `a` - ? where `t` . `a` in ( select `a` from `t` where `a` < ? )'; +stmt_type +Update +set @@tidb_mem_quota_query=default; +set global tidb_mem_oom_action=default; +drop table if exists t; +drop user if exists 'testuser'@'localhost'; +create table t(a int); +create user 'testuser'@'localhost'; +LOCK TABLE executor__executor.t WRITE; +Error 1044 (42000): Access denied for user 'testuser'@'localhost' to database 'executor__executor' +GRANT LOCK TABLES ON executor__executor.* to 'testuser'@'localhost'; +LOCK TABLE executor__executor.t WRITE; +Error 1142 (42000): SELECT command denied to user 'testuser'@'localhost' for table 't' +REVOKE ALL ON executor__executor.* FROM 'testuser'@'localhost'; +GRANT SELECT ON executor__executor.* to 'testuser'@'localhost'; +LOCK TABLE executor__executor.t WRITE; +Error 1044 (42000): Access denied for user 'testuser'@'localhost' to database 'executor__executor' +GRANT LOCK TABLES ON executor__executor.* to 'testuser'@'localhost'; +LOCK TABLE executor__executor.t WRITE; +drop database if exists test2; +create database test2; +create table test2.t2(a int); +LOCK TABLE executor__executor.t WRITE, test2.t2 WRITE; +Error 1044 (42000): Access denied for user 'testuser'@'localhost' to database 'test2' +GRANT LOCK TABLES ON test2.* to 'testuser'@'localhost'; +LOCK TABLE executor__executor.t WRITE, test2.t2 WRITE; +Error 1142 (42000): SELECT command denied to user 'testuser'@'localhost' for table 't2' +GRANT SELECT ON test2.* to 'testuser'@'localhost'; +LOCK TABLE executor__executor.t WRITE, test2.t2 WRITE; +LOCK TABLE executor__executor.t WRITE, test2.t2 WRITE; +Error 8020 (HY000): Table 't' was locked in WRITE by server: session: +unlock tables; +unlock tables; +drop user 'testuser'@'localhost'; diff --git a/tests/integrationtest/r/executor/insert.result b/tests/integrationtest/r/executor/insert.result new file mode 100644 index 0000000000000..a2250b63e7478 --- /dev/null +++ b/tests/integrationtest/r/executor/insert.result @@ -0,0 +1,2180 @@ +set tidb_enable_clustered_index = on; +drop table if exists t; +create table t(a char(20), b int, primary key(a)); +insert into t values('aa', 1), ('bb', 1); +insert into t values('aa', 2); +Error 1062 (23000): Duplicate entry 'aa' for key 't.PRIMARY' +drop table t; +create table t(a char(20), b varchar(30), c varchar(10), primary key(a, b, c)); +insert into t values ('a', 'b', 'c'), ('b', 'a', 'c'); +insert into t values ('a', 'b', 'c'); +Error 1062 (23000): Duplicate entry 'a-b-c' for key 't.PRIMARY' +set tidb_enable_clustered_index = default; +set tidb_enable_clustered_index = on; +drop table if exists t1; +create table t1(c1 decimal(6,4), primary key(c1)); +insert into t1 set c1 = 0.1; +insert into t1 set c1 = 0.1 on duplicate key update c1 = 1; +select * from t1; +c1 +1.0000 +set tidb_enable_clustered_index = default; +drop table if exists t1; +create table t1(c1 year); +insert into t1 set c1 = '2004'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 year); +insert into t1 set c1 = 2004; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 bit); +insert into t1 set c1 = 1; +alter table t1 add index idx(c1); +delete from t1; +admin 
check table t1; +drop table if exists t1; +create table t1(c1 smallint unsigned); +insert into t1 set c1 = 1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 int unsigned); +insert into t1 set c1 = 1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 smallint); +insert into t1 set c1 = -1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 int); +insert into t1 set c1 = -1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 decimal(6,4)); +insert into t1 set c1 = '1.1'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 decimal); +insert into t1 set c1 = 1.1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 numeric); +insert into t1 set c1 = -1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 float); +insert into t1 set c1 = 1.2; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 double); +insert into t1 set c1 = 1.2; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 double); +insert into t1 set c1 = 1.3; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 real); +insert into t1 set c1 = 1.4; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 date); +insert into t1 set c1 = '2020-01-01'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 time); +insert into t1 set c1 = '20:00:00'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 datetime); +insert into t1 set c1 = '2020-01-01 22:22:22'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 timestamp); +insert into t1 set c1 = '2020-01-01 22:22:22'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 year); +insert into t1 set c1 = '2020'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 char(15)); +insert into t1 set c1 = 'test'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 varchar(15)); +insert into t1 set c1 = 'test'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 binary(3)); +insert into t1 set c1 = 'a'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 varbinary(3)); +insert into t1 set c1 = 'b'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 blob); +insert into t1 set c1 = 'test'; +alter table t1 add index idx(c1(3)); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 text); +insert into t1 set c1 = 
'test'; +alter table t1 add index idx(c1(3)); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 enum('a', 'b')); +insert into t1 set c1 = 'a'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 set('a', 'b')); +insert into t1 set c1 = 'a,b'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists c; +create table c(i int,j int,k int,primary key(i,j,k)); +insert into c values(1,2,3); +insert into c values(1,2,4); +update c set i=1,j=2,k=4 where i=1 and j=2 and k=3; +Error 1062 (23000): Duplicate entry '1-2-4' for key 'c.PRIMARY' +drop table if exists t1, t2; +create table t1 (a int(11) ,b varchar(100) ,primary key (a)); +create table t2 (c int(11) ,d varchar(100) ,primary key (c)); +prepare in1 from 'insert into t1 (a,b) select c,null from t2 t on duplicate key update b=t.d'; +execute in1; +drop table if exists t1; +create table t1(a bigint); +insert into t1 values("asfasdfsajhlkhlksdaf"); +Error 1366 (HY000): Incorrect bigint value: 'asfasdfsajhlkhlksdaf' for column 'a' at row 1 +drop table if exists t1; +create table t1(a varchar(10)) charset ascii; +insert into t1 values('我'); +Error 1366 (HY000): Incorrect string value '\xE6\x88\x91' for column 'a' +drop table if exists t1; +create table t1(a char(10) charset utf8); +insert into t1 values('我'); +alter table t1 add column b char(10) charset ascii as ((a)); +select * from t1; +a b +我 ? +drop table if exists t; +create table t (a year); +insert into t values(2156); +Error 1264 (22003): Out of range value for column 'a' at row 1 +DROP TABLE IF EXISTS ts; +CREATE TABLE ts (id int DEFAULT NULL, time1 TIMESTAMP NULL DEFAULT NULL); +SET @@sql_mode=''; +INSERT INTO ts (id, time1) VALUES (1, TIMESTAMP '1018-12-23 00:00:00'); +SHOW WARNINGS; +Level Code Message +Warning 1292 Incorrect timestamp value: '1018-12-23 00:00:00' for column 'time1' at row 1 +SELECT * FROM ts ORDER BY id; +id time1 +1 0000-00-00 00:00:00 +SET @@sql_mode='STRICT_TRANS_TABLES'; +INSERT INTO ts (id, time1) VALUES (2, TIMESTAMP '1018-12-24 00:00:00'); +Error 1292 (22007): Incorrect timestamp value: '1018-12-24 00:00:00' for column 'time1' at row 1 +DROP TABLE ts; +CREATE TABLE t0(c0 SMALLINT AUTO_INCREMENT PRIMARY KEY); +INSERT IGNORE INTO t0(c0) VALUES (194626268); +INSERT IGNORE INTO t0(c0) VALUES ('*'); +SHOW WARNINGS; +Level Code Message +Warning 1366 Incorrect smallint value: '*' for column 'c0' at row 1 +Warning 1690 constant 32768 overflows smallint +Warning 1467 Failed to read auto-increment value from storage engine +SET @@sql_mode=default; +drop table if exists t1; +create table t1(a decimal(15,2)); +insert into t1 values (1111111111111.01); +select * from t1; +a +1111111111111.01 +select cast(a as decimal) from t1; +cast(a as decimal) +9999999999 +drop table if exists t1; +create table t1(a json, b int, unique index idx((cast(a as signed array)))); +insert into t1 values ('[1,11]', 1); +insert into t1 values ('[2, 22]', 2); +select * from t1; +a b +[1, 11] 1 +[2, 22] 2 +insert into t1 values ('[2, 222]', 2); +Error 1062 (23000): Duplicate entry '2' for key 't1.idx' +replace into t1 values ('[1, 10]', 10); +select * from t1; +a b +[2, 22] 2 +[1, 10] 10 +replace into t1 values ('[1, 2]', 1); +select * from t1; +a b +[1, 2] 1 +replace into t1 values ('[1, 11]', 1); +insert into t1 values ('[2, 22]', 2); +select * from t1; +a b +[1, 11] 1 +[2, 22] 2 +insert ignore into t1 values ('[1]', 2); +select * from t1; +a b +[1, 11] 1 
+[2, 22] 2 +insert ignore into t1 values ('[1, 2]', 2); +select * from t1; +a b +[1, 11] 1 +[2, 22] 2 +insert into t1 values ('[2]', 2) on duplicate key update b = 10; +select * from t1; +a b +[1, 11] 1 +[2, 22] 10 +insert into t1 values ('[2, 1]', 2) on duplicate key update a = '[1,2]'; +Error 1062 (23000): Duplicate entry '[1, 2]' for key 't1.idx' +insert into t1 values ('[1,2]', 2) on duplicate key update a = '[1,2]'; +Error 1062 (23000): Duplicate entry '[1, 2]' for key 't1.idx' +insert into t1 values ('[11, 22]', 2) on duplicate key update a = '[1,2]'; +Error 1062 (23000): Duplicate entry '[1, 2]' for key 't1.idx' +set time_zone="+09:00"; +drop table if exists t; +create table t (id int, c1 datetime not null default CURRENT_TIMESTAMP); +set TIMESTAMP = 1234; +insert t (id) values (1); +select * from t; +id c1 +1 1970-01-01 09:20:34 +drop table if exists t; +create table t (dt datetime); +set @@time_zone='+08:00'; +delete from t; +insert into t values ('2020-10-22'); +select * from t; +dt +2020-10-22 00:00:00 +delete from t; +insert into t values ('2020-10-22-16'); +select * from t; +dt +2020-10-22 16:00:00 +delete from t; +insert into t values ('2020-10-22 16-31'); +select * from t; +dt +2020-10-22 16:31:00 +delete from t; +insert into t values ('2020-10-22 16:31-15'); +select * from t; +dt +2020-10-22 16:31:15 +delete from t; +insert into t values ('2020-10-22T16:31:15-10'); +select * from t; +dt +2020-10-23 10:31:15 +delete from t; +insert into t values ('2020.10-22'); +select * from t; +dt +2020-10-22 00:00:00 +delete from t; +insert into t values ('2020-10.22-16'); +select * from t; +dt +2020-10-22 16:00:00 +delete from t; +insert into t values ('2020-10-22.16-31'); +select * from t; +dt +2020-10-22 16:31:00 +delete from t; +insert into t values ('2020-10-22 16.31-15'); +select * from t; +dt +2020-10-22 16:31:15 +delete from t; +insert into t values ('2020-10-22T16.31.15+14'); +select * from t; +dt +2020-10-22 10:31:15 +delete from t; +insert into t values ('2020-10:22'); +select * from t; +dt +2020-10-22 00:00:00 +delete from t; +insert into t values ('2020-10-22:16'); +select * from t; +dt +2020-10-22 16:00:00 +delete from t; +insert into t values ('2020-10-22-16:31'); +select * from t; +dt +2020-10-22 16:31:00 +delete from t; +insert into t values ('2020-10-22 16-31:15'); +select * from t; +dt +2020-10-22 16:31:15 +delete from t; +insert into t values ('2020-10-22T16.31.15+09:30'); +select * from t; +dt +2020-10-22 15:01:15 +delete from t; +insert into t values ('2020.10-22:16'); +select * from t; +dt +2020-10-22 16:00:00 +delete from t; +insert into t values ('2020-10.22-16:31'); +select * from t; +dt +2020-10-22 16:31:00 +delete from t; +insert into t values ('2020-10-22.16-31:15'); +select * from t; +dt +2020-10-22 16:31:15 +delete from t; +insert into t values ('2020-10-22T16:31.15+09:30'); +select * from t; +dt +2020-10-22 15:01:15 +drop table if exists t; +create table t (dt datetime, ts timestamp); +delete from t; +set @@time_zone='+08:00'; +insert into t values ('2020-10-22T16:53:40Z', '2020-10-22T16:53:40Z'); +set @@time_zone='+00:00'; +select * from t; +dt ts +2020-10-23 00:53:40 2020-10-22 16:53:40 +delete from t; +set @@time_zone='-08:00'; +insert into t values ('2020-10-22T16:53:40Z', '2020-10-22T16:53:40Z'); +set @@time_zone='+08:00'; +select * from t; +dt ts +2020-10-22 08:53:40 2020-10-23 00:53:40 +delete from t; +set @@time_zone='-03:00'; +insert into t values ('2020-10-22T16:53:40+03:00', '2020-10-22T16:53:40+03:00'); +set @@time_zone='+08:00'; +select * from 
t; +dt ts +2020-10-22 10:53:40 2020-10-22 21:53:40 +delete from t; +set @@time_zone='+08:00'; +insert into t values ('2020-10-22T16:53:40+08:00', '2020-10-22T16:53:40+08:00'); +set @@time_zone='+08:00'; +select * from t; +dt ts +2020-10-22 16:53:40 2020-10-22 16:53:40 +drop table if exists t; +create table t (ts timestamp); +insert into t values ('2020-10-22T12:00:00Z'), ('2020-10-22T13:00:00Z'), ('2020-10-22T14:00:00Z'); +select count(*) from t where ts > '2020-10-22T12:00:00Z'; +count(*) +2 +set @@time_zone='+08:00'; +drop table if exists t; +create table t (dt datetime(2), ts timestamp(2)); +insert into t values ('2020-10-27T14:39:10.10+00:00', '2020-10-27T14:39:10.10+00:00'); +select * from t; +dt ts +2020-10-27 22:39:10.10 2020-10-27 22:39:10.10 +drop table if exists t; +create table t (dt datetime(1), ts timestamp(1)); +insert into t values ('2020-10-27T14:39:10.3+0200', '2020-10-27T14:39:10.3+0200'); +select * from t; +dt ts +2020-10-27 20:39:10.3 2020-10-27 20:39:10.3 +drop table if exists t; +create table t (dt datetime(6), ts timestamp(6)); +insert into t values ('2020-10-27T14:39:10.3-02', '2020-10-27T14:39:10.3-02'); +select * from t; +dt ts +2020-10-28 00:39:10.300000 2020-10-28 00:39:10.300000 +drop table if exists t; +create table t (dt datetime(2), ts timestamp(2)); +insert into t values ('2020-10-27T14:39:10.10Z', '2020-10-27T14:39:10.10Z'); +select * from t; +dt ts +2020-10-27 22:39:10.10 2020-10-27 22:39:10.10 +set time_zone=default; +set timestamp=default; +drop table if exists t1; +create table t1(a year(4)); +insert into t1 values(0000),(00),("0000"),("000"), ("00"), ("0"), (79), ("79"); +select * from t1; +a +0000 +0000 +0000 +2000 +2000 +2000 +1979 +1979 +drop table if exists t; +create table t(f_year year NOT NULL DEFAULT '0000')ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; +insert into t values(); +select * from t; +f_year +0000 +insert into t values('0000'); +select * from t; +f_year +0000 +0000 +drop table if exists t1, t2, t3, t4; +create table t1(d date); +create table t2(d datetime); +create table t3(d date); +create table t4(d datetime); +set sql_mode='STRICT_TRANS_TABLES,ALLOW_INVALID_DATES'; +insert into t1 values ('0000-00-00'); +insert into t2 values ('0000-00-00'); +insert into t1 values ('2019-00-00'); +insert into t2 values ('2019-00-00'); +insert into t1 values ('2019-01-00'); +insert into t2 values ('2019-01-00'); +insert into t1 values ('2019-00-01'); +insert into t2 values ('2019-00-01'); +insert into t1 values ('2019-02-31'); +insert into t2 values ('2019-02-31'); +select year(d), month(d), day(d) from t1; +year(d) month(d) day(d) +0 0 0 +2019 0 0 +2019 1 0 +2019 0 1 +2019 2 31 +select year(d), month(d), day(d) from t2; +year(d) month(d) day(d) +0 0 0 +2019 0 0 +2019 1 0 +2019 0 1 +2019 2 31 +insert t3 select d from t1; +select year(d), month(d), day(d) from t3; +year(d) month(d) day(d) +0 0 0 +2019 0 0 +2019 1 0 +2019 0 1 +2019 2 31 +insert t4 select d from t2; +select year(d), month(d), day(d) from t4; +year(d) month(d) day(d) +0 0 0 +2019 0 0 +2019 1 0 +2019 0 1 +2019 2 31 +truncate t1;truncate t2;truncate t3;truncate t4; +set sql_mode='ALLOW_INVALID_DATES'; +insert into t1 values ('0000-00-00'); +insert into t2 values ('0000-00-00'); +insert into t1 values ('2019-00-00'); +insert into t2 values ('2019-00-00'); +insert into t1 values ('2019-01-00'); +insert into t2 values ('2019-01-00'); +insert into t1 values ('2019-00-01'); +insert into t2 values ('2019-00-01'); +insert into t1 values ('2019-02-31'); +insert into t2 values 
('2019-02-31'); +select year(d), month(d), day(d) from t1; +year(d) month(d) day(d) +0 0 0 +2019 0 0 +2019 1 0 +2019 0 1 +2019 2 31 +select year(d), month(d), day(d) from t2; +year(d) month(d) day(d) +0 0 0 +2019 0 0 +2019 1 0 +2019 0 1 +2019 2 31 +insert t3 select d from t1; +select year(d), month(d), day(d) from t3; +year(d) month(d) day(d) +0 0 0 +2019 0 0 +2019 1 0 +2019 0 1 +2019 2 31 +insert t4 select d from t2; +select year(d), month(d), day(d) from t4; +year(d) month(d) day(d) +0 0 0 +2019 0 0 +2019 1 0 +2019 0 1 +2019 2 31 +set sql_mode=default; +drop table if exists t1, t2, t3; +create table t1 (a int,b int,primary key(a,b)) partition by range(a) (partition p0 values less than (100),partition p1 values less than (1000)); +insert into t1 set a=1, b=1; +insert into t1 set a=1,b=1 on duplicate key update a=1,b=1; +select * from t1; +a b +1 1 +create table t2 (a int,b int,primary key(a,b)) partition by hash(a) partitions 4; +insert into t2 set a=1,b=1; +insert into t2 set a=1,b=1 on duplicate key update a=1,b=1; +select * from t2; +a b +1 1 +CREATE TABLE t3 (a int, b int, c int, d int, e int, +PRIMARY KEY (a,b), +UNIQUE KEY (b,c,d) +) PARTITION BY RANGE ( b ) ( +PARTITION p0 VALUES LESS THAN (4), +PARTITION p1 VALUES LESS THAN (7), +PARTITION p2 VALUES LESS THAN (11) +); +insert into t3 values (1,2,3,4,5); +insert into t3 values (1,2,3,4,5),(6,2,3,4,6) on duplicate key update e = e + values(e); +select * from t3; +a b c d e +1 2 3 4 16 +drop table if exists t1; +create table t1 (a bit(3)); +insert into t1 values(-1); +Error 1406 (22001): Data too long for column 'a' at row 1 +insert into t1 values(9); +Error 1406 (22001): Data too long for column 'a' at row 1 +create table t64 (a bit(64)); +insert into t64 values(-1); +insert into t64 values(18446744073709551615); +insert into t64 values(18446744073709551616); +Error 1264 (22003): Out of range value for column 'a' at row 1 +drop table if exists bug; +create table bug (a varchar(100)); +insert into bug select ifnull(JSON_UNQUOTE(JSON_EXTRACT('[{"amount":2000,"feeAmount":0,"merchantNo":"20190430140319679394","shareBizCode":"20160311162_SECOND"}]', '$[0].merchantNo')),'') merchant_no union SELECT '20180531557' merchant_no; +select * from bug; +a +20180531557 +20190430140319679394 +drop table if exists t; +create table t (a int, b double); +insert into t values (ifnull('',0)+0, 0); +insert into t values (0, ifnull('',0)+0); +select * from t; +a b +0 0 +0 0 +insert into t values ('', 0); +Error 1366 (HY000): Incorrect int value: '' for column 'a' at row 1 +insert into t values (0, ''); +Error 1366 (HY000): Incorrect double value: '' for column 'b' at row 1 +update t set a = ''; +Error 1292 (22007): Truncated incorrect DOUBLE value: '' +update t set b = ''; +Error 1292 (22007): Truncated incorrect DOUBLE value: '' +update t set a = ifnull('',0)+0; +update t set b = ifnull('',0)+0; +delete from t where a = ''; +select * from t; +a b +drop table if exists t,t1; +create table t(col1 FLOAT, col2 FLOAT(10,2), col3 DOUBLE, col4 DOUBLE(10,2), col5 DECIMAL, col6 DECIMAL(10,2)); +insert into t values (-3.402823466E+68, -34028234.6611, -1.7976931348623157E+308, -17976921.34, -9999999999, -99999999.99); +Error 1264 (22003): Out of range value for column 'col1' at row 1 +insert into t values (-34028234.6611, -3.402823466E+68, -1.7976931348623157E+308, -17976921.34, -9999999999, -99999999.99); +Error 1264 (22003): Out of range value for column 'col2' at row 1 +create table t1(id1 float,id2 float); +insert ignore into t1 
values(999999999999999999999999999999999999999,-999999999999999999999999999999999999999); +select @@warning_count; +@@warning_count +2 +select convert(id1,decimal(65)),convert(id2,decimal(65)) from t1; +convert(id1,decimal(65)) convert(id2,decimal(65)) +340282346638528860000000000000000000000 -340282346638528860000000000000000000000 +set sql_mode = 'ONLY_FULL_GROUP_BY,STRICT_ALL_TABLES,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION'; +drop table if exists t1; +CREATE TABLE t1(c1 TINYTEXT CHARACTER SET utf8mb4); +INSERT INTO t1 (c1) VALUES(REPEAT(X'C385', 128)); +Error 1406 (22001): Data too long for column 'c1' at row 1 +drop table if exists t1; +CREATE TABLE t1(c1 Text CHARACTER SET utf8mb4); +INSERT INTO t1 (c1) VALUES(REPEAT(X'C385', 32768)); +Error 1406 (22001): Data too long for column 'c1' at row 1 +drop table if exists t1; +CREATE TABLE t1(c1 mediumtext); +INSERT INTO t1 (c1) VALUES(REPEAT(X'C385', 8777215)); +Error 1406 (22001): Data too long for column 'c1' at row 1 +set sql_mode = 'ONLY_FULL_GROUP_BY,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION'; +drop table if exists t1; +CREATE TABLE t1(c1 TINYTEXT CHARACTER SET utf8mb4); +INSERT INTO t1 (c1) VALUES(REPEAT(X'C385', 128)); +select length(c1) from t1; +length(c1) +254 +drop table if exists t1; +CREATE TABLE t1(c1 Text CHARACTER SET utf8mb4); +INSERT INTO t1 (c1) VALUES(REPEAT(X'C385', 32768)); +select length(c1) from t1; +length(c1) +65534 +set sql_mode = default; +set @@allow_auto_random_explicit_insert = true; +drop table if exists ar; +create table ar (id bigint key clustered auto_random, name char(10)); +insert into ar(id) values (1); +select id from ar; +id +1 +select last_insert_id(); +last_insert_id() +0 +delete from ar; +insert into ar(id) values (1), (2); +select id from ar; +id +1 +2 +select last_insert_id(); +last_insert_id() +0 +delete from ar; +drop table ar; +set @@allow_auto_random_explicit_insert = default; +drop table if exists t, t1; +create table t (a int primary key, b datetime, d date); +insert into t values (1, '2019-02-11 30:00:00', '2019-01-31'); +Error 1292 (22007): Incorrect datetime value: '2019-02-11 30:00:00' for column 'b' at row 1 +CREATE TABLE t1 (a BINARY(16) PRIMARY KEY); +INSERT INTO t1 VALUES (AES_ENCRYPT('a','a')); +INSERT INTO t1 VALUES (AES_ENCRYPT('a','a')); +Error 1062 (23000): Duplicate entry '{ W]\xA1\x06u\x9D\xBD\xB1\xA3.\xE2\xD9\xA7t' for key 't1.PRIMARY' +INSERT INTO t1 VALUES (AES_ENCRYPT('b','b')); +INSERT INTO t1 VALUES (AES_ENCRYPT('b','b')); +Error 1062 (23000): Duplicate entry '\x0C\x1E\x8DG`\xEB\x93 F&BC\xF0\xB5\xF4\xB7' for key 't1.PRIMARY' +drop table if exists t1; +create table t1 (a bit primary key) engine=innodb; +insert into t1 values (b'0'); +insert into t1 values (b'0'); +Error 1062 (23000): Duplicate entry '\x00' for key 't1.PRIMARY' +drop table if exists t; +create table t(c numeric primary key); +insert ignore into t values(null); +insert into t values(0); +Error 1062 (23000): Duplicate entry '0' for key 't.PRIMARY' +set tidb_enable_clustered_index = on; +drop table if exists t1pk; +create table t1pk(id varchar(200) primary key, v int); +insert into t1pk(id, v) values('abc', 1); +select * from t1pk; +id v +abc 1 +set @@tidb_constraint_check_in_place=true; +insert into t1pk(id, v) values('abc', 2); +Error 1062 (23000): Duplicate entry 'abc' for key 't1pk.PRIMARY' +set @@tidb_constraint_check_in_place=false; +insert into t1pk(id, v) values('abc', 3); +Error 1062 (23000): Duplicate 
entry 'abc' for key 't1pk.PRIMARY' +select v, id from t1pk; +v id +1 abc +select id from t1pk where id = 'abc'; +id +abc +select v, id from t1pk where id = 'abc'; +v id +1 abc +drop table if exists t3pk; +create table t3pk(id1 varchar(200), id2 varchar(200), v int, id3 int, primary key(id1, id2, id3)); +insert into t3pk(id1, id2, id3, v) values('abc', 'xyz', 100, 1); +select * from t3pk; +id1 id2 v id3 +abc xyz 1 100 +set @@tidb_constraint_check_in_place=true; +insert into t3pk(id1, id2, id3, v) values('abc', 'xyz', 100, 2); +Error 1062 (23000): Duplicate entry 'abc-xyz-100' for key 't3pk.PRIMARY' +set @@tidb_constraint_check_in_place=false; +insert into t3pk(id1, id2, id3, v) values('abc', 'xyz', 100, 3); +Error 1062 (23000): Duplicate entry 'abc-xyz-100' for key 't3pk.PRIMARY' +select v, id3, id2, id1 from t3pk; +v id3 id2 id1 +1 100 xyz abc +select id3, id2, id1 from t3pk where id3 = 100 and id2 = 'xyz' and id1 = 'abc'; +id3 id2 id1 +100 xyz abc +select id3, id2, id1, v from t3pk where id3 = 100 and id2 = 'xyz' and id1 = 'abc'; +id3 id2 id1 v +100 xyz abc 1 +insert into t3pk(id1, id2, id3, v) values('abc', 'xyz', 101, 1); +insert into t3pk(id1, id2, id3, v) values('abc', 'zzz', 101, 1); +drop table if exists t1pku; +create table t1pku(id varchar(200) primary key, uk int, v int, unique key ukk(uk)); +insert into t1pku(id, uk, v) values('abc', 1, 2); +select * from t1pku where id = 'abc'; +id uk v +abc 1 2 +insert into t1pku(id, uk, v) values('aaa', 1, 3); +Error 1062 (23000): Duplicate entry '1' for key 't1pku.ukk' +select * from t1pku; +id uk v +abc 1 2 +select * from t3pk where (id1, id2, id3) in (('abc', 'xyz', 100), ('abc', 'xyz', 101), ('abc', 'zzz', 101)); +id1 id2 v id3 +abc xyz 1 100 +abc xyz 1 101 +abc zzz 1 101 +set @@tidb_constraint_check_in_place=default; +set tidb_enable_clustered_index = default; +set tidb_enable_clustered_index = on; +drop table if exists it1pk; +create table it1pk(id varchar(200) primary key, v int); +insert into it1pk(id, v) values('abc', 1); +insert ignore into it1pk(id, v) values('abc', 2); +select * from it1pk where id = 'abc'; +id v +abc 1 +drop table if exists it2pk; +create table it2pk(id1 varchar(200), id2 varchar(200), v int, primary key(id1, id2)); +insert into it2pk(id1, id2, v) values('abc', 'cba', 1); +select * from it2pk where id1 = 'abc' and id2 = 'cba'; +id1 id2 v +abc cba 1 +insert ignore into it2pk(id1, id2, v) values('abc', 'cba', 2); +select * from it2pk where id1 = 'abc' and id2 = 'cba'; +id1 id2 v +abc cba 1 +drop table if exists it1pku; +create table it1pku(id varchar(200) primary key, uk int, v int, unique key ukk(uk)); +insert into it1pku(id, uk, v) values('abc', 1, 2); +select * from it1pku where id = 'abc'; +id uk v +abc 1 2 +insert ignore into it1pku(id, uk, v) values('aaa', 1, 3), ('bbb', 2, 1); +select * from it1pku; +id uk v +abc 1 2 +bbb 2 1 +set tidb_enable_clustered_index = default; +set tidb_enable_clustered_index = on; +drop table if exists dt1pi; +create table dt1pi(id varchar(200) primary key, v int); +insert into dt1pi(id, v) values('abb', 1),('acc', 2); +insert into dt1pi(id, v) values('abb', 2) on duplicate key update v = v + 1; +select * from dt1pi; +id v +abb 2 +acc 2 +insert into dt1pi(id, v) values('abb', 2) on duplicate key update v = v + 1, id = 'xxx'; +select * from dt1pi; +id v +acc 2 +xxx 3 +drop table if exists dt1piu; +create table dt1piu(id varchar(200) primary key, uk int, v int, unique key uuk(uk)); +insert into dt1piu(id, uk, v) values('abb', 1, 10),('acc', 2, 20); +insert into dt1piu(id, uk, v) 
values('xyz', 1, 100) on duplicate key update v = v + 1; +select * from dt1piu; +id uk v +abb 1 11 +acc 2 20 +insert into dt1piu(id, uk, v) values('abb', 1, 2) on duplicate key update v = v + 1, id = 'xxx'; +select * from dt1piu; +id uk v +acc 2 20 +xxx 1 12 +drop table if exists ts1pk; +create table ts1pk(id1 timestamp, id2 timestamp, v int, primary key(id1, id2)); +insert into ts1pk (id1, id2, v) values('2018-01-01 11:11:11', '2018-01-01 11:11:11', 1); +select id1, id2, v from ts1pk; +id1 id2 v +2018-01-01 11:11:11 2018-01-01 11:11:11 1 +insert into ts1pk (id1, id2, v) values('2018-01-01 11:11:11', '2018-01-01 11:11:11', 2) on duplicate key update v = values(v); +select id1, id2, v from ts1pk; +id1 id2 v +2018-01-01 11:11:11 2018-01-01 11:11:11 2 +insert into ts1pk (id1, id2, v) values('2018-01-01 11:11:11', '2018-01-01 11:11:11', 2) on duplicate key update v = values(v), id1 = '2018-01-01 11:11:12'; +select id1, id2, v from ts1pk; +id1 id2 v +2018-01-01 11:11:12 2018-01-01 11:11:11 2 +set tidb_enable_clustered_index = default; +set tidb_enable_clustered_index = on; +drop table if exists pkt1; +CREATE TABLE pkt1 (a varchar(255), b int, index idx(b), primary key(a,b)); +insert into pkt1 values ('aaa',1); +select b from pkt1 where b = 1; +b +1 +drop table if exists pkt2; +CREATE TABLE pkt2 (a varchar(255), b int, unique index idx(b), primary key(a,b)); +insert into pkt2 values ('aaa',1); +select b from pkt2 where b = 1; +b +1 +drop table if exists issue_18232; +create table issue_18232 (a int, b int, c int, d int, primary key (a, b), index idx(c)); +select a from issue_18232 use index (idx); +a +select b from issue_18232 use index (idx); +b +select a,b from issue_18232 use index (idx); +a b +select c from issue_18232 use index (idx); +c +select a,c from issue_18232 use index (idx); +a c +select b,c from issue_18232 use index (idx); +b c +select a,b,c from issue_18232 use index (idx); +a b c +select d from issue_18232 use index (idx); +d +select a,d from issue_18232 use index (idx); +a d +select b,d from issue_18232 use index (idx); +b d +select a,b,d from issue_18232 use index (idx); +a b d +select c,d from issue_18232 use index (idx); +c d +select a,c,d from issue_18232 use index (idx); +a c d +select b,c,d from issue_18232 use index (idx); +b c d +select a,b,c,d from issue_18232 use index (idx); +a b c d +set tidb_enable_clustered_index = default; +drop table if exists t1, t2; +create table t1(a year, primary key(a)); +insert ignore into t1 values(null); +create table t2(a int, key(a)); +insert into t2 values(0); +select /*+ hash_join(t1) */ * from t1 join t2 on t1.a = t2.a; +a a +0000 0 +select /*+ inl_join(t1) */ * from t1 join t2 on t1.a = t2.a; +a a +0000 0 +select /*+ inl_join(t2) */ * from t1 join t2 on t1.a = t2.a; +a a +0000 0 +select /*+ inl_hash_join(t1) */ * from t1 join t2 on t1.a = t2.a; +a a +0000 0 +select /*+ inl_merge_join(t1) */ * from t1 join t2 on t1.a = t2.a; +a a +0000 0 +select /*+ merge_join(t1) */ * from t1 join t2 on t1.a = t2.a; +a a +0000 0 +drop table if exists vctt; +create table vctt (v varchar(4), c char(4)); +insert into vctt values ('ab ', 'ab '); +select * from vctt; +v c +ab ab +delete from vctt; +insert into vctt values ('ab\n\n\n', 'ab\n\n\n'), ('ab\t\t\t', 'ab\t\t\t'), ('ab ', 'ab '), ('ab\r\r\r', 'ab\r\r\r'); +show warnings; +Level Code Message +Warning 1265 Data truncated for column 'v' at row 1 +Warning 1265 Data truncated for column 'v' at row 2 +Warning 1265 Data truncated for column 'v' at row 3 +Warning 1265 Data truncated for column 'v' at 
row 4 +select * from vctt; +v c +ab + + ab + + +ab ab +ab ab +ab ab +select length(v), length(c) from vctt; +length(v) length(c) +4 4 +4 4 +4 2 +4 4 +drop table if exists t1; +create table t1(a int, b varchar(20), primary key(a,b(3)) clustered); +insert into t1 values(1,'aaaaa'); +insert into t1 values(1,'aaaaa'); +Error 1062 (23000): Duplicate entry '1-aaa' for key 't1.PRIMARY' +insert into t1 select 1, 'aaa'; +Error 1062 (23000): Duplicate entry '1-aaa' for key 't1.PRIMARY' +insert into t1 select 1, 'bb'; +insert into t1 select 1, 'bb'; +Error 1062 (23000): Duplicate entry '1-bb' for key 't1.PRIMARY' +drop table if exists bintest; +create table bintest (h enum(0x61, '1', 'b')) character set utf8mb4; +insert into bintest(h) values(0x61); +select * from bintest; +h +a +drop table if exists bintest; +create table bintest (h set(0x61, '1', 'b')) character set utf8mb4; +insert into bintest(h) values(0x61); +select * from bintest; +h +a +drop table if exists temp_test; +create global temporary table temp_test(id int primary key auto_increment) on commit delete rows; +insert into temp_test(id) values(0); +select * from temp_test; +id +begin; +insert into temp_test(id) values(0); +select * from temp_test; +id +1 +commit; +begin; +insert into temp_test(id) values(0); +select * from temp_test; +id +1 +insert into temp_test(id) values(0); +select id from temp_test order by id; +id +1 +2 +commit; +begin; +insert into temp_test(id) values(0), (0); +select id from temp_test order by id; +id +1 +2 +insert into temp_test(id) values(0), (0); +select id from temp_test order by id; +id +1 +2 +3 +4 +commit; +begin; +insert into temp_test(id) values(10); +insert into temp_test(id) values(0); +select id from temp_test order by id; +id +10 +11 +insert into temp_test(id) values(20), (30); +insert into temp_test(id) values(0), (0); +select id from temp_test order by id; +id +10 +11 +20 +30 +31 +32 +commit; +drop table if exists temp_test; +drop table if exists temp_test; +create global temporary table temp_test(id int) on commit delete rows; +insert into temp_test(id) values(0); +select _tidb_rowid from temp_test; +_tidb_rowid +begin; +insert into temp_test(id) values(0); +select _tidb_rowid from temp_test; +_tidb_rowid +1 +commit; +begin; +insert into temp_test(id) values(0); +select _tidb_rowid from temp_test; +_tidb_rowid +1 +insert into temp_test(id) values(0); +select _tidb_rowid from temp_test order by _tidb_rowid; +_tidb_rowid +1 +2 +commit; +begin; +insert into temp_test(id) values(0), (0); +select _tidb_rowid from temp_test order by _tidb_rowid; +_tidb_rowid +1 +2 +insert into temp_test(id) values(0), (0); +select _tidb_rowid from temp_test order by _tidb_rowid; +_tidb_rowid +1 +2 +3 +4 +commit; +drop table if exists temp_test; +drop table if exists t1; +create table t1(c1 date); +insert into t1 values('2020-02-31'); +Error 1292 (22007): Incorrect date value: '2020-02-31' for column 'c1' at row 1 +set @@sql_mode='ALLOW_INVALID_DATES'; +insert into t1 values('2020-02-31'); +select * from t1; +c1 +2020-02-31 +set @@sql_mode='STRICT_TRANS_TABLES'; +insert into t1 values('2020-02-31'); +Error 1292 (22007): Incorrect date value: '2020-02-31' for column 'c1' at row 1 +set sql_mode=default; +drop table if exists t; +create table t (id decimal(10)); +insert into t values('1sdf'); +Error 1366 (HY000): Incorrect decimal value: '1sdf' for column 'id' at row 1 +insert into t values('1edf'); +Error 1366 (HY000): Incorrect decimal value: '1edf' for column 'id' at row 1 +insert into t values('12Ea'); +Error 1366 
(HY000): Incorrect decimal value: '12Ea' for column 'id' at row 1 +insert into t values('1E'); +Error 1366 (HY000): Incorrect decimal value: '1E' for column 'id' at row 1 +insert into t values('1e'); +Error 1366 (HY000): Incorrect decimal value: '1e' for column 'id' at row 1 +insert into t values('1.2A'); +Error 1366 (HY000): Incorrect decimal value: '1.2A' for column 'id' at row 1 +insert into t values('1.2.3.4.5'); +Error 1366 (HY000): Incorrect decimal value: '1.2.3.4.5' for column 'id' at row 1 +insert into t values('1.2.'); +Error 1366 (HY000): Incorrect decimal value: '1.2.' for column 'id' at row 1 +insert into t values('1,999.00'); +Error 1366 (HY000): Incorrect decimal value: '1,999.00' for column 'id' at row 1 +insert into t values('12e-3'); +show warnings; +Level Code Message +Warning 1366 Incorrect decimal value: '12e-3' for column 'id' at row 1 +select id from t; +id +0 +drop table if exists t; +SET sql_mode='NO_ENGINE_SUBSTITUTION'; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a tinyint not null auto_increment primary key, b char(20)); +INSERT INTO t1 VALUES (127,'maxvalue'); +REPLACE INTO t1 VALUES (0,'newmaxvalue'); +Error 1467 (HY000): Failed to read auto-increment value from storage engine +set sql_mode=default; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1(a INT) ENGINE = InnoDB; +INSERT IGNORE into t1(SELECT SLEEP(NULL)); +SHOW WARNINGS; +Level Code Message +Warning 1210 Incorrect arguments to sleep +INSERT IGNORE into t1(SELECT SLEEP(-1)); +SHOW WARNINGS; +Level Code Message +Warning 1210 Incorrect arguments to sleep +INSERT IGNORE into t1(SELECT SLEEP(1)); +SELECT * FROM t1; +a +0 +0 +0 +DROP TABLE t1; +drop table if exists t1; +create table t1(c1 float); +insert into t1 values(999.99); +select cast(t1.c1 as decimal(4, 1)) from t1; +cast(t1.c1 as decimal(4, 1)) +999.9 +select cast(t1.c1 as decimal(5, 1)) from t1; +cast(t1.c1 as decimal(5, 1)) +1000.0 +drop table if exists t1; +create table t1(c1 decimal(6, 4)); +insert into t1 values(99.9999); +select cast(t1.c1 as decimal(5, 3)) from t1; +cast(t1.c1 as decimal(5, 3)) +99.999 +select cast(t1.c1 as decimal(6, 3)) from t1; +cast(t1.c1 as decimal(6, 3)) +100.000 +drop table if exists t1; +create table t1(id int, a int); +set @@SQL_MODE='STRICT_TRANS_TABLES'; +insert into t1 values(1, '1e100'); +Error 1264 (22003): Out of range value for column 'a' at row 1 +insert into t1 values(2, '-1e100'); +Error 1264 (22003): Out of range value for column 'a' at row 1 +select id, a from t1; +id a +set @@SQL_MODE=''; +insert into t1 values(1, '1e100'); +show warnings; +Level Code Message +Warning 1264 Out of range value for column 'a' at row 1 +insert into t1 values(2, '-1e100'); +show warnings; +Level Code Message +Warning 1264 Out of range value for column 'a' at row 1 +select id, a from t1 order by id asc; +id a +1 2147483647 +2 -2147483648 +set sql_mode=default; +drop table if exists tf; +create table tf(a float(1, 0) unsigned); +insert into tf values('-100'); +Error 1264 (22003): Out of range value for column 'a' at row 1 +set @@sql_mode=''; +insert into tf values('-100'); +select * from tf; +a +0 +set @@sql_mode=default; +drop table if exists tt1; +create table tt1 (c1 decimal(64)); +insert into tt1 values(89000000000000000000000000000000000000000000000000000000000000000000000000000000000000000); +Error 1264 (22003): Out of range value for column 'c1' at row 1 +insert into tt1 values(89123456789012345678901234567890123456789012345678901234567890123456789012345678900000000); +Error 1264 (22003): Out of range value for column 'c1' at 
row 1 +insert ignore into tt1 values(89123456789012345678901234567890123456789012345678901234567890123456789012345678900000000); +show warnings; +Level Code Message +Warning 1264 Out of range value for column 'c1' at row 1 +Warning 1292 Truncated incorrect DECIMAL value: '789012345678901234567890123456789012345678901234567890123456789012345678900000000' +select c1 from tt1; +c1 +9999999999999999999999999999999999999999999999999999999999999999 +update tt1 set c1 = 89123456789012345678901234567890123456789012345678901234567890123456789012345678900000000; +Error 1264 (22003): Out of range value for column 'c1' at row 1 +drop table if exists tt1; +insert into tt1 values(4556414e723532); +Error 1367 (22007): Illegal double '4556414e723532' value found during parsing +select 888888888888888888888888888888888888888888888888888888888888888888888888888888888888; +888888888888888888888888888888888888888888888888888888888888888888888888888888888888 +99999999999999999999999999999999999999999999999999999999999999999 +show warnings; +Level Code Message +Warning 1292 Truncated incorrect DECIMAL value: '888888888888888888888888888888888888888888888888888888888888888888888888888888888' +drop table if exists t; +create table t (id smallint auto_increment primary key); +alter table t add column c1 int default 1; +insert ignore into t(id) values (194626268); +affected rows: 1 +info: +select * from t; +id c1 +32767 1 +insert ignore into t(id) values ('*') on duplicate key update c1 = 2; +affected rows: 2 +info: +select * from t; +id c1 +32767 2 +drop table if exists t; +create table t (i int not null primary key, j int unique key); +insert into t values (1, 1), (2, 2); +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 0 +insert ignore into t values(1, 1) on duplicate key update i = 2; +affected rows: 0 +info: +select * from t; +i j +1 1 +2 2 +insert ignore into t values(1, 1) on duplicate key update j = 2; +affected rows: 0 +info: +select * from t; +i j +1 1 +2 2 +drop table if exists t2; +create table t2(`col_25` set('Alice','Bob','Charlie','David') NOT NULL,`col_26` date NOT NULL DEFAULT '2016-04-15', PRIMARY KEY (`col_26`) clustered, UNIQUE KEY `idx_9` (`col_25`,`col_26`),UNIQUE KEY `idx_10` (`col_25`)); +insert into t2(col_25, col_26) values('Bob', '1989-03-23'),('Alice', '2023-11-24'), ('Charlie', '2023-12-05'); +insert ignore into t2 (col_25,col_26) values ( 'Bob','1977-11-23' ) on duplicate key update col_25 = 'Alice', col_26 = '2036-12-13'; +show warnings; +Level Code Message +Warning 1062 Duplicate entry 'Alice' for key 't2.idx_10' +select * from t2; +col_25 col_26 +Alice 2023-11-24 +Bob 1989-03-23 +Charlie 2023-12-05 +drop table if exists t4; +create table t4(id int primary key clustered, k int, v int, unique key uk1(k)); +insert into t4 values (1, 10, 100), (3, 30, 300); +insert ignore into t4 (id, k, v) values(1, 0, 0) on duplicate key update id = 2, k = 30; +show warnings; +Level Code Message +Warning 1062 Duplicate entry '30' for key 't4.uk1' +select * from t4; +id k v +1 10 100 +3 30 300 +drop table if exists t5; +create table t5(k1 varchar(100), k2 varchar(100), uk1 int, v int, primary key(k1, k2) clustered, unique key ukk1(uk1), unique key ukk2(v)); +insert into t5(k1, k2, uk1, v) values('1', '1', 1, '100'), ('1', '3', 2, '200'); +update ignore t5 set k2 = '2', uk1 = 2 where k1 = '1' and k2 = '1'; +show warnings; +Level Code Message +Warning 1062 Duplicate entry '2' for key 't5.ukk1' +select * from t5; +k1 k2 uk1 v +1 1 1 100 +1 3 2 200 +drop table if exists t6; +create table t6 (a 
int, b int, c int, primary key(a, b) clustered, unique key idx_14(b), unique key idx_15(b), unique key idx_16(a, b)); +insert into t6 select 10, 10, 20; +insert ignore into t6 set a = 20, b = 10 on duplicate key update a = 100; +select * from t6; +a b c +100 10 20 +insert ignore into t6 set a = 200, b= 10 on duplicate key update c = 1000; +select * from t6; +a b c +100 10 1000 +drop table if exists insert_autoinc_test; +create table insert_autoinc_test (id int primary key auto_increment, c1 int); +insert into insert_autoinc_test(c1) values (1), (2); +begin; +select * from insert_autoinc_test; +id c1 +1 1 +2 2 +commit; +begin; +insert into insert_autoinc_test(id, c1) values (5,5); +insert into insert_autoinc_test(c1) values (6); +commit; +begin; +select * from insert_autoinc_test; +id c1 +1 1 +2 2 +5 5 +6 6 +commit; +begin; +insert into insert_autoinc_test(id, c1) values (3,3); +commit; +begin; +select * from insert_autoinc_test; +id c1 +1 1 +2 2 +3 3 +5 5 +6 6 +commit; +begin; +insert into insert_autoinc_test(c1) values (7); +commit; +begin; +select * from insert_autoinc_test; +id c1 +1 1 +2 2 +3 3 +5 5 +6 6 +7 7 +commit; +drop table if exists insert_autoinc_test; +create table insert_autoinc_test (id int primary key auto_increment, c1 int); +insert into insert_autoinc_test(id, c1) values (0.3, 1); +select * from insert_autoinc_test; +id c1 +1 1 +insert into insert_autoinc_test(id, c1) values (-0.3, 2); +select * from insert_autoinc_test; +id c1 +1 1 +2 2 +insert into insert_autoinc_test(id, c1) values (-3.3, 3); +select * from insert_autoinc_test; +id c1 +-3 3 +1 1 +2 2 +insert into insert_autoinc_test(id, c1) values (4.3, 4); +select * from insert_autoinc_test; +id c1 +-3 3 +1 1 +2 2 +4 4 +insert into insert_autoinc_test(c1) values (5); +select * from insert_autoinc_test; +id c1 +-3 3 +1 1 +2 2 +4 4 +5 5 +insert into insert_autoinc_test(id, c1) values (null, 6); +select * from insert_autoinc_test; +id c1 +-3 3 +1 1 +2 2 +4 4 +5 5 +6 6 +drop table if exists insert_autoinc_test; +create table insert_autoinc_test (id int primary key auto_increment, c1 int); +insert into insert_autoinc_test(id, c1) values (5, 1); +select * from insert_autoinc_test; +id c1 +5 1 +insert into insert_autoinc_test(id, c1) values (0, 2); +select * from insert_autoinc_test; +id c1 +5 1 +6 2 +insert into insert_autoinc_test(id, c1) values (0, 3); +select * from insert_autoinc_test; +id c1 +5 1 +6 2 +7 3 +set SQL_MODE=NO_AUTO_VALUE_ON_ZERO; +insert into insert_autoinc_test(id, c1) values (0, 4); +select * from insert_autoinc_test; +id c1 +0 4 +5 1 +6 2 +7 3 +insert into insert_autoinc_test(id, c1) values (0, 5); +Error 1062 (23000): Duplicate entry '0' for key 'insert_autoinc_test.PRIMARY' +insert into insert_autoinc_test(c1) values (6); +select * from insert_autoinc_test; +id c1 +0 4 +5 1 +6 2 +7 3 +8 6 +insert into insert_autoinc_test(id, c1) values (null, 7); +select * from insert_autoinc_test; +id c1 +0 4 +5 1 +6 2 +7 3 +8 6 +9 7 +set SQL_MODE=''; +insert into insert_autoinc_test(id, c1) values (0, 8); +select * from insert_autoinc_test; +id c1 +0 4 +5 1 +6 2 +7 3 +8 6 +9 7 +10 8 +insert into insert_autoinc_test(id, c1) values (null, 9); +select * from insert_autoinc_test; +id c1 +0 4 +5 1 +6 2 +7 3 +8 6 +9 7 +10 8 +11 9 +set sql_mode = default; +drop table if exists insert_test; +create table insert_test (id int PRIMARY KEY AUTO_INCREMENT, c1 int, c2 int, c3 int default 1); +insert insert_test (c1) values (1),(2),(NULL); +affected rows: 3 +info: Records: 3 Duplicates: 0 Warnings: 0 +begin; +insert insert_test 
(c1) values (); +Error 1136 (21S01): Column count doesn't match value count at row 1 +rollback; +begin; +insert insert_test (c1, c2) values (1,2),(1); +Error 1136 (21S01): Column count doesn't match value count at row 2 +rollback; +begin; +insert insert_test (xxx) values (3); +Error 1054 (42S22): Unknown column 'xxx' in 'field list' +rollback; +begin; +insert insert_test_xxx (c1) values (); +Error 1146 (42S02): Table 'executor__insert.insert_test_xxx' doesn't exist +rollback; +insert insert_test set c1 = 3; +affected rows: 1 +info: +begin; +insert insert_test set c1 = 4, c1 = 5; +Error 1110 (42000): Column 'c1' specified twice +rollback; +begin; +insert insert_test set xxx = 6; +Error 1054 (42S22): Unknown column 'xxx' in 'field list' +rollback; +drop table if exists insert_test_1, insert_test_2; +create table insert_test_1 (id int, c1 int); +insert insert_test_1 select id, c1 from insert_test; +affected rows: 4 +info: Records: 4 Duplicates: 0 Warnings: 0 +create table insert_test_2 (id int, c1 int); +insert insert_test_1 select id, c1 from insert_test union select id * 10, c1 * 10 from insert_test; +affected rows: 8 +info: Records: 8 Duplicates: 0 Warnings: 0 +begin; +insert insert_test_1 select c1 from insert_test; +Error 1136 (21S01): Column count doesn't match value count at row 1 +rollback; +begin; +insert insert_test_1 values(default, default, default, default, default); +Error 1136 (21S01): Column count doesn't match value count at row 1 +rollback; +select * from insert_test where id = 1; +id c1 c2 c3 +1 1 NULL 1 +insert into insert_test (id, c3) values (1, 2) on duplicate key update id=values(id), c2=10; +affected rows: 2 +info: +select * from insert_test where id = 1; +id c1 c2 c3 +1 1 10 1 +insert into insert_test (id, c2) values (1, 1) on duplicate key update insert_test.c2=10; +affected rows: 0 +info: +insert into insert_test (id, c2) values(1, 1) on duplicate key update t.c2 = 10; +Error 1054 (42S22): Unknown column 't.c2' in 'field list' +INSERT INTO insert_test (id, c3) VALUES (1, 2) ON DUPLICATE KEY UPDATE c3=values(c3)+c3+3; +affected rows: 2 +info: +select * from insert_test where id = 1; +id c1 c2 c3 +1 1 10 6 +INSERT IGNORE INTO insert_test (id, c3) VALUES (1, 2) ON DUPLICATE KEY UPDATE c3=values(c3)+c3+3; +affected rows: 2 +info: +select * from insert_test where id = 1; +id c1 c2 c3 +1 1 10 11 +drop table if exists insert_err; +create table insert_err (id int, c1 varchar(8)); +insert insert_err values (1, 'abcdabcdabcd'); +Error 1406 (22001): Data too long for column 'c1' at row 1 +insert insert_err values (1, '你好,世界'); +create table TEST1 (ID INT NOT NULL, VALUE INT DEFAULT NULL, PRIMARY KEY (ID)); +INSERT INTO TEST1(id,value) VALUE(3,3) on DUPLICATE KEY UPDATE VALUE=4; +affected rows: 1 +info: +drop table if exists t; +create table t (id int); +insert into t values(1); +update t t1 set id = (select count(*) + 1 from t t2 where t1.id = t2.id); +select * from t; +id +2 +drop table if exists t; +create table t(c decimal(5, 5)); +insert into t value(0); +insert into t value(1); +Error 1264 (22003): Out of range value for column 'c' at row 1 +drop table if exists t; +create table t(c binary(255)); +insert into t value(1); +select length(c) from t; +length(c) +255 +drop table if exists t; +create table t(c varbinary(255)); +insert into t value(1); +select length(c) from t; +length(c) +1 +drop table if exists t; +create table t(c int); +set @@time_zone = '+08:00'; +insert into t value(Unix_timestamp('2002-10-27 01:00')); +select * from t; +c +1035651600 +set @@time_zone = 
default; +drop table if exists t1; +create table t1 (b char(0)); +insert into t1 values (""); +DROP TABLE IF EXISTS t; +CREATE TABLE t(a DECIMAL(4,2)); +INSERT INTO t VALUES (1.000001); +SHOW WARNINGS; +Level Code Message +Warning 1366 Incorrect decimal value: '1.000001' for column 'a' at row 1 +INSERT INTO t VALUES (1.000000); +SHOW WARNINGS; +Level Code Message +DROP TABLE IF EXISTS t; +CREATE TABLE t(a datetime); +INSERT INTO t VALUES('2017-00-00'); +Error 1292 (22007): Incorrect datetime value: '2017-00-00' for column 'a' at row 1 +set sql_mode = ''; +INSERT INTO t VALUES('2017-00-00'); +SELECT * FROM t; +a +2017-00-00 00:00:00 +set sql_mode = 'strict_all_tables'; +SELECT * FROM t; +a +2017-00-00 00:00:00 +set sql_mode = default; +drop table if exists test; +CREATE TABLE test(id int(10) UNSIGNED NOT NULL AUTO_INCREMENT, p int(10) UNSIGNED NOT NULL, PRIMARY KEY(p), KEY(id)); +insert into test(p) value(1); +select * from test; +id p +1 1 +select * from test use index (id) where id = 1; +id p +1 1 +insert into test values(NULL, 2); +select * from test use index (id) where id = 2; +id p +2 2 +insert into test values(2, 3); +select * from test use index (id) where id = 2; +id p +2 2 +2 3 +drop table if exists t; +create table t(a bigint unsigned); +set @@sql_mode = 'strict_all_tables'; +insert into t value (-1); +Error 1264 (22003): Out of range value for column 'a' at row 1 +set @@sql_mode = ''; +insert into t value (-1); +show warnings; +Level Code Message +Warning 1264 Out of range value for column 'a' at row 1 +insert into t select -1; +show warnings; +Level Code Message +Warning 1690 constant -1 overflows bigint +insert into t select cast(-1 as unsigned); +insert into t value (-1.111); +show warnings; +Level Code Message +Warning 1264 Out of range value for column 'a' at row 1 +insert into t value ('-1.111'); +show warnings; +Level Code Message +Warning 1264 Out of range value for column 'a' at row 1 +update t set a = -1 limit 1; +show warnings; +Level Code Message +Warning 1690 constant -1 overflows bigint +select * from t; +a +0 +0 +18446744073709551615 +0 +0 +set @@sql_mode = default; +drop table if exists t; +create table t(a time(6)); +insert into t value('20070219173709.055870'), ('20070219173709.055'), ('20070219173709.055870123'); +select * from t; +a +17:37:09.055870 +17:37:09.055000 +17:37:09.055870 +truncate table t; +insert into t value(20070219173709.055870), (20070219173709.055), (20070219173709.055870123); +select * from t; +a +17:37:09.055870 +17:37:09.055000 +17:37:09.055870 +insert into t value(-20070219173709.055870); +Error 1292 (22007): Incorrect time value: '-20070219173709.055870' for column 'a' at row 1 +drop table if exists t; +set @@sql_mode=''; +create table t(a float unsigned, b double unsigned); +insert into t value(-1.1, -1.1), (-2.1, -2.1), (0, 0), (1.1, 1.1); +show warnings; +Level Code Message +Warning 1264 Out of range value for column 'a' at row 1 +Warning 1264 Out of range value for column 'b' at row 1 +Warning 1264 Out of range value for column 'a' at row 2 +Warning 1264 Out of range value for column 'b' at row 2 +select * from t; +a b +0 0 +0 0 +0 0 +1.1 1.1 +set @@sql_mode=default; +drop table if exists t; +create table t(a int default 1, b int default 2); +insert into t values(default, default); +select * from t; +a b +1 2 +truncate table t; +insert into t values(default(b), default(a)); +select * from t; +a b +2 1 +truncate table t; +insert into t (b) values(default); +select * from t; +a b +1 2 +truncate table t; +insert into t (b) 
values(default(a)); +select * from t; +a b +1 1 +drop view if exists v; +create view v as select * from t; +insert into v values(1,2); +Error 1105 (HY000): insert into view v is not supported now +replace into v values(1,2); +Error 1105 (HY000): replace into view v is not supported now +drop view v; +drop sequence if exists seq; +create sequence seq; +insert into seq values(); +Error 1105 (HY000): insert into sequence seq is not supported now +replace into seq values(); +Error 1105 (HY000): replace into sequence seq is not supported now +drop sequence seq; +drop table if exists t; +create table t(name varchar(255), b int, c int, primary key(name(2))); +insert into t(name, b) values("cha", 3); +insert into t(name, b) values("chb", 3); +Error 1062 (23000): Duplicate entry 'ch' for key 't.PRIMARY' +insert into t(name, b) values("测试", 3); +insert into t(name, b) values("测试", 3); +Error 1062 (23000): Duplicate entry 'æµ' for key 't.PRIMARY' +drop table if exists t; +create table t (i int unique key); +insert into t values (1),(2); +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 0 +select * from t; +i +1 +2 +insert into t values (1), (2) on duplicate key update i = values(i); +affected rows: 0 +info: Records: 2 Duplicates: 0 Warnings: 0 +select * from t; +i +1 +2 +insert into t values (2), (3) on duplicate key update i = 3; +affected rows: 2 +info: Records: 2 Duplicates: 1 Warnings: 0 +select * from t; +i +1 +3 +drop table if exists t; +create table t (i int primary key, j int unique key); +insert into t values (-1, 1); +affected rows: 1 +info: +select * from t; +i j +-1 1 +insert into t values (1, 1) on duplicate key update j = values(j); +affected rows: 0 +info: +select * from t; +i j +-1 1 +drop table if exists test; +create table test (i int primary key, j int unique); +begin; +insert into test values (1,1); +insert into test values (2,1) on duplicate key update i = -i, j = -j; +commit; +select * from test; +i j +-1 -1 +delete from test; +insert into test values (1, 1); +begin; +delete from test where i = 1; +insert into test values (2, 1) on duplicate key update i = -i, j = -j; +commit; +select * from test; +i j +2 1 +delete from test; +insert into test values (1, 1); +begin; +update test set i = 2, j = 2 where i = 1; +insert into test values (1, 3) on duplicate key update i = -i, j = -j; +insert into test values (2, 4) on duplicate key update i = -i, j = -j; +commit; +select * from test order by i; +i j +-2 -2 +1 3 +delete from test; +begin; +insert into test values (1, 3), (1, 3) on duplicate key update i = values(i), j = values(j); +commit; +select * from test order by i; +i j +1 3 +create table tmp (id int auto_increment, code int, primary key(id, code)); +create table m (id int primary key auto_increment, code int unique); +insert tmp (code) values (1); +insert tmp (code) values (1); +set tidb_init_chunk_size=1; +insert m (code) select code from tmp on duplicate key update code = values(code); +select * from m; +id code +1 1 +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (f1 INT AUTO_INCREMENT PRIMARY KEY, +f2 VARCHAR(5) NOT NULL UNIQUE); +INSERT t1 (f2) VALUES ('test') ON DUPLICATE KEY UPDATE f1 = LAST_INSERT_ID(f1); +affected rows: 1 +info: +SELECT LAST_INSERT_ID(); +LAST_INSERT_ID() +1 +INSERT t1 (f2) VALUES ('test') ON DUPLICATE KEY UPDATE f1 = LAST_INSERT_ID(f1); +affected rows: 0 +info: +SELECT LAST_INSERT_ID(); +LAST_INSERT_ID() +1 +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (f1 INT AUTO_INCREMENT UNIQUE, +f2 VARCHAR(5) NOT NULL UNIQUE); +INSERT t1 (f2) VALUES ('test') ON 
DUPLICATE KEY UPDATE f1 = LAST_INSERT_ID(f1); +affected rows: 1 +info: +SELECT LAST_INSERT_ID(); +LAST_INSERT_ID() +1 +INSERT t1 (f2) VALUES ('test') ON DUPLICATE KEY UPDATE f1 = LAST_INSERT_ID(f1); +affected rows: 0 +info: +SELECT LAST_INSERT_ID(); +LAST_INSERT_ID() +1 +INSERT t1 (f2) VALUES ('test') ON DUPLICATE KEY UPDATE f1 = 2; +affected rows: 2 +info: +SELECT LAST_INSERT_ID(); +LAST_INSERT_ID() +1 +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (f1 INT); +INSERT t1 VALUES (1) ON DUPLICATE KEY UPDATE f1 = 1; +affected rows: 1 +info: +SELECT * FROM t1; +f1 +1 +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (f1 INT PRIMARY KEY, f2 INT NOT NULL UNIQUE); +INSERT t1 VALUES (1, 1); +affected rows: 1 +info: +INSERT t1 VALUES (1, 1), (1, 1) ON DUPLICATE KEY UPDATE f1 = 2, f2 = 2; +affected rows: 3 +info: Records: 2 Duplicates: 1 Warnings: 0 +SELECT * FROM t1 order by f1; +f1 f2 +1 1 +2 2 +INSERT t1 VALUES (1, 1) ON DUPLICATE KEY UPDATE f2 = null; +Error 1048 (23000): Column 'f2' cannot be null +INSERT IGNORE t1 VALUES (1, 1) ON DUPLICATE KEY UPDATE f2 = null; +affected rows: 2 +info: +show warnings; +Level Code Message +Warning 1048 Column 'f2' cannot be null +SELECT * FROM t1 order by f1; +f1 f2 +1 0 +2 2 +SET sql_mode=''; +INSERT t1 VALUES (1, 1) ON DUPLICATE KEY UPDATE f2 = null; +Error 1048 (23000): Column 'f2' cannot be null +SELECT * FROM t1 order by f1; +f1 f2 +1 0 +2 2 +set sql_mode=default; +set tidb_init_chunk_size=default; +drop table if exists t1, t2; +create table t1(a1 bigint primary key, b1 bigint); +create table t2(a2 bigint primary key, b2 bigint); +insert into t1 values(1, 100); +affected rows: 1 +info: +insert into t2 values(1, 200); +affected rows: 1 +info: +insert into t1 select a2, b2 from t2 on duplicate key update b1 = a2; +affected rows: 2 +info: Records: 1 Duplicates: 1 Warnings: 0 +select * from t1; +a1 b1 +1 1 +insert into t1 select a2, b2 from t2 on duplicate key update b1 = b2; +affected rows: 2 +info: Records: 1 Duplicates: 1 Warnings: 0 +select * from t1; +a1 b1 +1 200 +insert into t1 select a2, b2 from t2 on duplicate key update a1 = a2; +affected rows: 0 +info: Records: 1 Duplicates: 0 Warnings: 0 +select * from t1; +a1 b1 +1 200 +insert into t1 select a2, b2 from t2 on duplicate key update b1 = 300; +affected rows: 2 +info: Records: 1 Duplicates: 1 Warnings: 0 +select * from t1; +a1 b1 +1 300 +insert into t1 values(1, 1) on duplicate key update b1 = 400; +affected rows: 2 +info: +select * from t1; +a1 b1 +1 400 +insert into t1 select 1, 500 from t2 on duplicate key update b1 = 400; +affected rows: 0 +info: Records: 1 Duplicates: 0 Warnings: 0 +select * from t1; +a1 b1 +1 400 +drop table if exists t1, t2; +create table t1(a bigint primary key, b bigint); +create table t2(a bigint primary key, b bigint); +insert into t1 select * from t2 on duplicate key update c = t2.b; +Error 1054 (42S22): Unknown column 'c' in 'field list' +drop table if exists t1, t2; +create table t1(a bigint primary key, b bigint); +create table t2(a bigint primary key, b bigint); +insert into t1 select * from t2 on duplicate key update a = b; +Error 1052 (23000): Column 'b' in field list is ambiguous +drop table if exists t1, t2; +create table t1(a bigint primary key, b bigint); +create table t2(a bigint primary key, b bigint); +insert into t1 select * from t2 on duplicate key update c = b; +Error 1054 (42S22): Unknown column 'c' in 'field list' +drop table if exists t1, t2; +create table t1(a1 bigint primary key, b1 bigint); +create table t2(a2 bigint primary key, b2 bigint); +insert into t1 
select * from t2 on duplicate key update a1 = values(b2); +Error 1054 (42S22): Unknown column 'b2' in 'field list' +drop table if exists t1, t2; +create table t1(a1 bigint primary key, b1 bigint); +create table t2(a2 bigint primary key, b2 bigint); +insert into t1 values(1, 100); +affected rows: 1 +info: +insert into t2 values(1, 200); +affected rows: 1 +info: +insert into t1 select * from t2 on duplicate key update b1 = values(b1) + b2; +affected rows: 2 +info: Records: 1 Duplicates: 1 Warnings: 0 +select * from t1; +a1 b1 +1 400 +insert into t1 select * from t2 on duplicate key update b1 = values(b1) + b2; +affected rows: 0 +info: Records: 1 Duplicates: 0 Warnings: 0 +select * from t1; +a1 b1 +1 400 +drop table if exists t; +create table t(k1 bigint, k2 bigint, val bigint, primary key(k1, k2)); +insert into t (val, k1, k2) values (3, 1, 2); +affected rows: 1 +info: +select * from t; +k1 k2 val +1 2 3 +insert into t (val, k1, k2) select c, a, b from (select 1 as a, 2 as b, 4 as c) tmp on duplicate key update val = tmp.c; +affected rows: 2 +info: Records: 1 Duplicates: 1 Warnings: 0 +select * from t; +k1 k2 val +1 2 4 +drop table if exists t; +create table t(k1 double, k2 double, v double, primary key(k1, k2)); +insert into t (v, k1, k2) select c, a, b from (select "3" c, "1" a, "2" b) tmp on duplicate key update v=c; +affected rows: 1 +info: Records: 1 Duplicates: 0 Warnings: 0 +select * from t; +k1 k2 v +1 2 3 +insert into t (v, k1, k2) select c, a, b from (select "3" c, "1" a, "2" b) tmp on duplicate key update v=c; +affected rows: 0 +info: Records: 1 Duplicates: 0 Warnings: 0 +select * from t; +k1 k2 v +1 2 3 +drop table if exists t1, t2; +create table t1(id int, a int, b int); +insert into t1 values (1, 1, 1); +affected rows: 1 +info: +insert into t1 values (2, 2, 1); +affected rows: 1 +info: +insert into t1 values (3, 3, 1); +affected rows: 1 +info: +create table t2(a int primary key, b int, unique(b)); +insert into t2 select a, b from t1 order by id on duplicate key update a=t1.a, b=t1.b; +affected rows: 5 +info: Records: 3 Duplicates: 2 Warnings: 0 +select * from t2 order by a; +a b +3 1 +drop table if exists t1, t2; +create table t1(id int, a int, b int); +insert into t1 values (1, 1, 1); +affected rows: 1 +info: +insert into t1 values (2, 1, 2); +affected rows: 1 +info: +insert into t1 values (3, 3, 1); +affected rows: 1 +info: +create table t2(a int primary key, b int, unique(b)); +insert into t2 select a, b from t1 order by id on duplicate key update a=t1.a, b=t1.b; +affected rows: 4 +info: Records: 3 Duplicates: 1 Warnings: 0 +select * from t2 order by a; +a b +1 2 +3 1 +drop table if exists t1, t2; +create table t1(id int, a int, b int, c int); +insert into t1 values (1, 1, 1, 1); +affected rows: 1 +info: +insert into t1 values (2, 2, 1, 2); +affected rows: 1 +info: +insert into t1 values (3, 3, 2, 2); +affected rows: 1 +info: +insert into t1 values (4, 4, 2, 2); +affected rows: 1 +info: +create table t2(a int primary key, b int, c int, unique(b), unique(c)); +insert into t2 select a, b, c from t1 order by id on duplicate key update b=t2.b, c=t2.c; +affected rows: 2 +info: Records: 4 Duplicates: 0 Warnings: 0 +select * from t2 order by a; +a b c +1 1 1 +3 2 2 +drop table if exists t1; +create table t1(a int primary key, b int); +insert into t1 values(1,1),(2,2),(3,3),(4,4),(5,5); +affected rows: 5 +info: Records: 5 Duplicates: 0 Warnings: 0 +insert into t1 values(4,14),(5,15),(6,16),(7,17),(8,18) on duplicate key update b=b+10; +affected rows: 7 +info: Records: 5 Duplicates: 
2 Warnings: 0 +drop table if exists a, b; +create table a(x int primary key); +create table b(x int, y int); +insert into a values(1); +affected rows: 1 +info: +insert into b values(1, 2); +affected rows: 1 +info: +insert into a select x from b ON DUPLICATE KEY UPDATE a.x=b.y; +affected rows: 2 +info: Records: 1 Duplicates: 1 Warnings: 0 +select * from a; +x +2 +## Test issue 28078. +## Use different types of columns so that there is likely to be an error if the types mismatch. +drop table if exists a, b; +create table a(id int, a1 timestamp, a2 varchar(10), a3 float, unique(id)); +create table b(id int, b1 time, b2 varchar(10), b3 int); +insert into a values (1, '2022-01-04 07:02:04', 'a', 1.1), (2, '2022-01-04 07:02:05', 'b', 2.2); +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 0 +insert into b values (2, '12:34:56', 'c', 10), (3, '01:23:45', 'd', 20); +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 0 +insert into a (id) select id from b on duplicate key update a.a2 = b.b2, a.a3 = 3.3; +affected rows: 3 +info: Records: 2 Duplicates: 1 Warnings: 0 +select * from a; +id a1 a2 a3 +1 2022-01-04 07:02:04 a 1.1 +2 2022-01-04 07:02:05 c 3.3 +3 NULL NULL NULL +insert into a (id) select 4 from b where b3 = 20 on duplicate key update a.a3 = b.b3; +affected rows: 1 +info: Records: 1 Duplicates: 0 Warnings: 0 +select * from a; +id a1 a2 a3 +1 2022-01-04 07:02:04 a 1.1 +2 2022-01-04 07:02:05 c 3.3 +3 NULL NULL NULL +4 NULL NULL NULL +insert into a (a2, a3) select 'x', 1.2 from b on duplicate key update a.a2 = b.b3; +affected rows: 2 +info: Records: 2 Duplicates: 0 Warnings: 0 +select * from a; +id a1 a2 a3 +1 2022-01-04 07:02:04 a 1.1 +2 2022-01-04 07:02:05 c 3.3 +3 NULL NULL NULL +4 NULL NULL NULL +NULL NULL x 1.2 +NULL NULL x 1.2 +## Reproduce the insert on duplicate key update bug under the new row format.
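+## With the new row format enabled, the second insert below hits the duplicate +## key and runs "c1 = 1"; reading the row back through the primary index must +## return the updated 1.0000, not the originally inserted 0.1000.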
+drop table if exists t1; +create table t1(c1 decimal(6,4), primary key(c1)); +insert into t1 set c1 = 0.1; +insert into t1 set c1 = 0.1 on duplicate key update c1 = 1; +select * from t1 use index(primary); +c1 +1.0000 +drop table if exists t; +create table t (d int); +insert into t values (cast('18446744073709551616' as unsigned)); +Error 1690 (22003): BIGINT UNSIGNED value is out of range in '18446744073709551616' +set sql_mode=''; +insert into t values (cast('18446744073709551616' as unsigned)); +show warnings; +Level Code Message +Warning 1264 Out of range value for column 'd' at row 1 +Warning 1292 Truncated incorrect INTEGER value: '18446744073709551616' +set sql_mode=DEFAULT; +drop table if exists parent, child; +create table parent (id int primary key, ref int, key(ref)); +create table child (id int primary key, ref int, foreign key (ref) references parent(ref)); +insert into parent values (1, 1), (2, 2); +insert into child values (1, 1); +insert into child values (1, 2) on duplicate key update ref = 2; +insert into child values (1, 3) on duplicate key update ref = 3; +Error 1452 (23000): Cannot add or update a child row: a foreign key constraint fails (`executor__insert`.`child`, CONSTRAINT `fk_1` FOREIGN KEY (`ref`) REFERENCES `parent` (`ref`)) +insert ignore into child values (1, 3) on duplicate key update ref = 3; +show warnings; +Level Code Message +Warning 1452 Cannot add or update a child row: a foreign key constraint fails (`executor__insert`.`child`, CONSTRAINT `fk_1` FOREIGN KEY (`ref`) REFERENCES `parent` (`ref`)) +insert into parent values (2, 3) on duplicate key update ref = 3; +Error 1451 (23000): Cannot delete or update a parent row: a foreign key constraint fails (`executor__insert`.`child`, CONSTRAINT `fk_1` FOREIGN KEY (`ref`) REFERENCES `parent` (`ref`)) +insert ignore into parent values (2, 3) on duplicate key update ref = 3; +drop table if exists t1, t2; +create table t1 (id int primary key, col1 varchar(10) not null default ''); +create table t2 (id int primary key, col1 varchar(10)); +insert into t2 values (1, null); +insert ignore into t1 values(5, null); +set session sql_mode = ''; +insert into t1 values(1, null); +Error 1048 (23000): Column 'col1' cannot be null +insert into t1 set id = 1, col1 = null; +Error 1048 (23000): Column 'col1' cannot be null +insert t1 VALUES (5, 5) ON DUPLICATE KEY UPDATE col1 = null; +Error 1048 (23000): Column 'col1' cannot be null +insert t1 VALUES (5, 5), (6, null) ON DUPLICATE KEY UPDATE col1 = null; +select * from t1; +id col1 +5 +6 +insert into t1 select * from t2; +show warnings; +Level Code Message +Warning 1048 Column 'col1' cannot be null +insert into t1 values(2, null), (3, 3), (4, 4); +show warnings; +Level Code Message +Warning 1048 Column 'col1' cannot be null +update t1 set col1 = null where id = 3; +show warnings; +Level Code Message +Warning 1048 Column 'col1' cannot be null +insert ignore t1 VALUES (4, 4) ON DUPLICATE KEY UPDATE col1 = null; +select * from t1; +id col1 +1 +2 +3 +4 +5 +6 diff --git a/tests/integrationtest/t/executor/executor.test b/tests/integrationtest/t/executor/executor.test new file mode 100644 index 0000000000000..fae9c4c4b9b2a --- /dev/null +++ b/tests/integrationtest/t/executor/executor.test @@ -0,0 +1,2713 @@ +# TestSelectWithoutFrom +select 1 + 2*3; +select _utf8"string"; +select 1 order by 1; +SELECT 'a' as f1 having f1 = 'a'; +SELECT (SELECT * FROM (SELECT 'a') t) AS f1 HAVING (f1 = 'a' OR TRUE); +SELECT (SELECT * FROM (SELECT 'a') t) + 1 AS f1 HAVING (f1 = 'a' OR TRUE); + +# TestOrderBy +create table t (c1 int, c2 
int, c3 varchar(20)); +insert into t values (1, 2, 'abc'), (2, 1, 'bcd'); +## Fix issue https://github.com/pingcap/tidb/issues/337 +select c1 as a, c1 as b from t order by c1; +select c1 as a, t.c1 as a from t order by a desc; +select c1 as c2 from t order by c2; +select sum(c1) from t order by sum(c1); +select c1 as c2 from t order by c2 + 1; +## Order by position. +select * from t order by 1; +select * from t order by 2; +## Order by binary. +select c1, c3 from t order by binary c1 desc; +select c1, c2 from t order by binary c3; + +# TestNeighbouringProj +create table t1(a int, b int); +create table t2(a int, b int); +insert into t1 value(1, 1), (2, 2); +insert into t2 value(1, 1), (2, 2); +select sum(c) from (select t1.a as a, t1.a as c, length(t1.b) from t1 union select a, b, b from t2) t; +drop table if exists t; +create table t(a bigint, b bigint, c bigint); +insert into t values(1, 1, 1), (2, 2, 2), (3, 3, 3); +select cast(count(a) as signed), a as another, a from t group by a order by cast(count(a) as signed), a limit 10; + +# TestIndexReverseOrder +drop table if exists t; +create table t (a int primary key auto_increment, b int, index idx (b)); +insert t (b) values (0), (1), (2), (3), (4), (5), (6), (7), (8), (9); +select b from t order by b desc; +select b from t where b <3 or (b >=6 and b < 8) order by b desc; +drop table if exists t; +create table t (a int, b int, index idx (b, a)); +insert t values (0, 2), (1, 2), (2, 2), (0, 1), (1, 1), (2, 1), (0, 0), (1, 0), (2, 0); +select b, a from t order by b, a desc; + +# TestTableReverseOrder +drop table if exists t; +create table t (a int primary key auto_increment, b int); +insert t (b) values (1), (2), (3), (4), (5), (6), (7), (8), (9); +select b from t order by a desc; +select a from t where a <3 or (a >=6 and a < 8) order by a desc; + +# TestUnsignedPKColumn +drop table if exists t; +create table t (a int unsigned primary key, b int, c int, key idx_ba (b, c, a)); +insert t values (1, 1, 1); +select * from t; +update t set c=2 where a=1; +select * from t where b=1; + +# TestMultiUpdate +CREATE TABLE test_mu (a int primary key, b int, c int); +INSERT INTO test_mu VALUES (1, 2, 3), (4, 5, 6), (7, 8, 9); +INSERT INTO test_mu VALUES (1, 2, 3) ON DUPLICATE KEY UPDATE b = 3, c = b; +SELECT * FROM test_mu ORDER BY a; +INSERT INTO test_mu VALUES (1, 2, 3) ON DUPLICATE KEY UPDATE c = 2, b = c+5; +SELECT * FROM test_mu ORDER BY a; +UPDATE test_mu SET b = 0, c = b WHERE a = 4; +SELECT * FROM test_mu ORDER BY a; +UPDATE test_mu SET c = 8, b = c WHERE a = 4; +SELECT * FROM test_mu ORDER BY a; +UPDATE test_mu SET c = b, b = c WHERE a = 7; +SELECT * FROM test_mu ORDER BY a; + +# TestGeneratedColumnPointGet +drop table if exists tu; +CREATE TABLE tu(a int, b int, c int GENERATED ALWAYS AS (a + b) VIRTUAL, d int as (a * b) stored, e int GENERATED ALWAYS as (b * 2) VIRTUAL, PRIMARY KEY (a), UNIQUE KEY ukc (c), unique key ukd(d), key ke(e)); +insert into tu(a, b) values(1, 2); +insert into tu(a, b) values(5, 6); +select * from tu for update; +select * from tu where a = 1; +select * from tu where a in (1, 2); +select * from tu where c in (1, 2, 3); +select * from tu where c = 3; +select d, e from tu where c = 3; +select * from tu where d in (1, 2, 3); +select * from tu where d = 2; +select c, d from tu where d = 2; +select d, e from tu where e = 4; +select * from tu where e = 4; +update tu set a = a + 1, b = b + 1 where c = 11; +select * from tu for update; +select * from tu where a = 6; +select * from tu where c in (5, 6, 13); +select b, c, e, d 
from tu where c = 13; +select a, e, d from tu where c in (5, 6, 13); +drop table if exists tu; + +# TestUnionAutoSignedCast +drop table if exists t1,t2; +create table t1 (id int, i int, b bigint, d double, dd decimal); +create table t2 (id int, i int unsigned, b bigint unsigned, d double unsigned, dd decimal unsigned); +insert into t1 values(1, -1, -1, -1.1, -1); +insert into t2 values(2, 1, 1, 1.1, 1); +select * from t1 union select * from t2 order by id; +select id, i, b, d, dd from t2 union select id, i, b, d, dd from t1 order by id; +select id, i from t2 union select id, cast(i as unsigned int) from t1 order by id; +select dd from t2 union all select dd from t2; +drop table if exists t3,t4; +create table t3 (id int, v int); +create table t4 (id int, v double unsigned); +insert into t3 values (1, -1); +insert into t4 values (2, 1); +select id, v from t3 union select id, v from t4 order by id; +select id, v from t4 union select id, v from t3 order by id; +drop table if exists t5,t6,t7; +create table t5 (id int, v bigint unsigned); +create table t6 (id int, v decimal); +create table t7 (id int, v bigint); +insert into t5 values (1, 1); +insert into t6 values (2, -1); +insert into t7 values (3, -1); +select id, v from t5 union select id, v from t6 order by id; +select id, v from t5 union select id, v from t7 union select id, v from t6 order by id; + +# TestDeletePartition +drop table if exists t1; +create table t1 (a int) partition by range (a) ( + partition p0 values less than (10), + partition p1 values less than (20), + partition p2 values less than (30), + partition p3 values less than (40), + partition p4 values less than MAXVALUE + ); +insert into t1 values (1),(11),(21),(31); +delete from t1 partition (p4); +select * from t1 order by a; +delete from t1 partition (p0) where a > 10; +select * from t1 order by a; +delete from t1 partition (p0,p1,p2); +select * from t1; + +# TestAlterTableComment +drop table if exists t_1; +create table t_1 (c1 int, c2 int, c3 int default 1, index (c1)) comment = 'test table'; +alter table `t_1` comment 'this is table comment'; +select table_comment from information_schema.tables where table_name = 't_1'; +alter table `t_1` comment 'table t comment'; +select table_comment from information_schema.tables where table_name = 't_1'; + +# TestExecutorEnum +drop table if exists t; +create table t (c enum('a', 'b', 'c')); +insert into t values ('a'), (2), ('c'); +select * from t where c = 'a'; +select c + 1 from t where c = 2; +delete from t; +insert into t values (); +insert into t values (null), ('1'); +select c + 1 from t where c = 1; +delete from t; +insert into t values(1), (2), (3); +select * from t where c; + +# TestExecutorSet +drop table if exists t; +create table t (c set('a', 'b', 'c')); +insert into t values ('a'), (2), ('c'), ('a,b'), ('b,a'); +select * from t where c = 'a'; +select * from t where c = 'a,b'; +select c + 1 from t where c = 2; +delete from t; +insert into t values (); +insert into t values (null), ('1'); +select c + 1 from t where c = 1; +delete from t; +insert into t values(3); +select * from t where c; + +# TestSubQueryInValues +drop table if exists t; +create table t (id int, name varchar(20)); +drop table if exists t1; +create table t1 (gid int); +insert into t1 (gid) value (1); +insert into t (id, name) value ((select gid from t1) ,'asd'); +select * from t; + +# TestEnhancedRangeAccess +drop table if exists t; +create table t (a int primary key, b int); +insert into t values(1, 2), (2, 1); +select * from t where (a = 1 and b = 
2) or (a = 2 and b = 1); +select * from t where (a = 1 and b = 1) or (a = 2 and b = 2); + +# TestTableScanWithPointRanges +drop table if exists t; +create table t(id int, PRIMARY KEY (id)); +insert into t values(1), (5), (10); +select * from t where id in(1, 2, 10); + +# TestCheckTable +drop table if exists admin_test; +create table admin_test (c1 int, c2 int, c3 int default 1, index (c1), unique key(c2)); +insert admin_test (c1, c2) values (1, 1), (2, 2), (NULL, NULL); +admin check table admin_test; + +# TestExecutorLimit +drop table if exists t; +create table t(a bigint, b bigint); +insert into t values(1, 1), (2, 2), (3, 30), (4, 40), (5, 5), (6, 6); +select * from t order by a limit 1, 1; +select * from t order by a limit 1, 2; +select * from t order by a limit 1, 3; +select * from t order by a limit 1, 4; +select a from t where a > 0 limit 1, 1; +select a from t where a > 0 limit 1, 2; +select b from t where a > 0 limit 1, 3; +select b from t where a > 0 limit 1, 4; +set @@tidb_init_chunk_size=2; +select * from t where a > 0 limit 2, 1; +select * from t where a > 0 limit 2, 2; +select * from t where a > 0 limit 2, 3; +select * from t where a > 0 limit 2, 4; +select a from t order by a limit 2, 1; +select b from t order by a limit 2, 2; +select a from t order by a limit 2, 3; +select b from t order by a limit 2, 4; +set @@tidb_init_chunk_size = default; + +# TestIndexScan +drop table if exists t; +create table t (a int unique); +insert t values (-1), (2), (3), (5), (6), (7), (8), (9); +select a from t where a < 0 or (a >= 2.1 and a < 5.1) or ( a > 5.9 and a <= 7.9) or a > '8.1'; +drop table if exists t; +create table t (a int unique); +insert t values (0); +select NULL from t ; +drop table if exists t; +create table t (a int unique, b int); +insert t values (5, 0); +insert t values (4, 0); +insert t values (3, 0); +insert t values (2, 0); +insert t values (1, 0); +insert t values (0, 0); +select * from t order by a limit 3; +drop table if exists t; +create table t (a int unique, b int); +insert t values (0, 1); +insert t values (1, 2); +insert t values (2, 1); +insert t values (3, 2); +insert t values (4, 1); +insert t values (5, 2); +select * from t where a < 5 and b = 1 limit 2; +drop table if exists tab1; +CREATE TABLE tab1(pk INTEGER PRIMARY KEY, col0 INTEGER, col1 FLOAT, col3 INTEGER, col4 FLOAT); +CREATE INDEX idx_tab1_0 on tab1 (col0); +CREATE INDEX idx_tab1_1 on tab1 (col1); +CREATE INDEX idx_tab1_3 on tab1 (col3); +CREATE INDEX idx_tab1_4 on tab1 (col4); +INSERT INTO tab1 VALUES(1,37,20.85,30,10.69); +SELECT pk FROM tab1 WHERE ((col3 <= 6 OR col3 < 29 AND (col0 < 41)) OR col3 > 42) AND col1 >= 96.1 AND col3 = 30 AND col3 > 17 AND (col0 BETWEEN 36 AND 42); +drop table if exists tab1; +CREATE TABLE tab1(pk INTEGER PRIMARY KEY, a INTEGER, b INTEGER); +CREATE INDEX idx_tab1_0 on tab1 (a); +INSERT INTO tab1 VALUES(1,1,1); +INSERT INTO tab1 VALUES(2,2,1); +INSERT INTO tab1 VALUES(3,1,2); +INSERT INTO tab1 VALUES(4,2,2); +SELECT * FROM tab1 WHERE pk <= 3 AND a = 1; +SELECT * FROM tab1 WHERE pk <= 4 AND a = 1 AND b = 2; +CREATE INDEX idx_tab1_1 on tab1 (b, a); +SELECT pk FROM tab1 WHERE b > 1; +drop table if exists t; +CREATE TABLE t (a varchar(3), index(a)); +insert t values('aaa'), ('aab'); +select * from t where a >= 'aaaa' and a < 'aabb'; +drop table if exists t; +CREATE TABLE t (a int primary key, b int, c int, index(c)); +insert t values(1, 1, 1), (2, 2, 2), (4, 4, 4), (3, 3, 3), (5, 5, 5); +select a from t where c >= 2 order by b desc limit 1; +drop table if exists t; +create 
table t(a varchar(50) primary key, b int, c int, index idx(b)); +insert into t values('aa', 1, 1); +select * from t use index(idx) where a > 'a'; +drop table if exists t; +CREATE TABLE `t` (a int, KEY (a)); +SELECT * FROM (SELECT * FROM (SELECT a as d FROM t WHERE a IN ('100')) AS x WHERE x.d < "123" ) tmp_count; + +# TestUpdateJoin +drop table if exists t1; +drop table if exists t2; +drop table if exists t3; +drop table if exists t4; +drop table if exists t5; +create table t1(k int, v int); +create table t2(k int, v int); +create table t3(id int auto_increment, k int, v int, primary key(id)); +create table t4(k int, v int); +create table t5(v int, k int, primary key(k)); +insert into t1 values (1, 1); +insert into t4 values (3, 3); +drop table if exists t6; +drop table if exists t7; +create table t6 (id int, v longtext); +create table t7 (x int, id int, v longtext, primary key(id)); +update t1 set v = 0 where k = 1; +select k, v from t1 where k = 1; +update t1 left join t3 on t1.k = t3.k set t1.v = 1; +select k, v from t1; +select id, k, v from t3; +update t1 left join t2 on t1.k = t2.k set t1.v = t2.v, t2.v = 3; +select k, v from t1; +select k, v from t2; +update t1 left join t2 on t1.k = t2.k set t2.v = 3, t1.v = t2.v; +select k, v from t1; +select k, v from t2; +update t2 right join t1 on t2.k = t1.k set t2.v = 4, t1.v = 0; +select k, v from t1; +select k, v from t2; +update t1 left join t2 on t1.k = t2.k right join t4 on t4.k = t2.k set t1.v = 4, t2.v = 4, t4.v = 4; +select k, v from t1; +select k, v from t2; +select k, v from t4; +insert t2 values (1, 10); +update t1 left join t2 on t1.k = t2.k set t2.v = 11; +select k, v from t2; +update t1 t11 left join t2 on t11.k = t2.k left join t1 t12 on t2.v = t12.k set t12.v = 233, t11.v = 111; +select k, v from t1; +select k, v from t2; +delete from t1; +delete from t2; +insert into t1 values (null, null); +update t1 left join t2 on t1.k = t2.k set t1.v = 1; +select k, v from t1; +insert t5 values(0, 0); +update t1 left join t5 on t1.k = t5.k set t1.v = 2; +select k, v from t1; +select k, v from t5; +insert into t6 values (1, NULL); +insert into t7 values (5, 1, 'a'); +update t6, t7 set t6.v = t7.v where t6.id = t7.id and t7.x = 5; +select v from t6; +drop table if exists t1, t2; +create table t1(id int primary key, v int, gv int GENERATED ALWAYS AS (v * 2) STORED); +create table t2(id int, v int); +update t1 tt1 inner join (select count(t1.id) a, t1.id from t1 left join t2 on t1.id = t2.id group by t1.id) x on tt1.id = x.id set tt1.v = tt1.v + x.a; + +# TestScanControlSelection +drop table if exists t; +create table t(a int primary key, b int, c int, index idx_b(b)); +insert into t values (1, 1, 1), (2, 1, 1), (3, 1, 2), (4, 2, 3); +select (select count(1) k from t s where s.b = t1.c) from t t1; + +# TestSimpleDAG +drop table if exists t; +create table t(a int primary key, b int, c int); +insert into t values (1, 1, 1), (2, 1, 1), (3, 1, 2), (4, 2, 3); +select a from t; +select * from t where a = 4; +select a from t limit 1; +select a from t order by a desc; +select a from t order by a desc limit 1; +select a from t order by b desc limit 1; +select a from t where a < 3; +select a from t where b > 1; +select a from t where b > 1 and a < 3; +select count(*) from t where b > 1 and a < 3; +select count(*) from t; +select count(*), c from t group by c order by c; +select sum(c) as s from t group by b order by s; +select avg(a) as s from t group by b order by s; +select sum(distinct c) from t group by b; +create index i on t(c,b); +select a from 
t where c = 1; +select a from t where c = 1 and a < 2; +select a from t where c = 1 order by a limit 1; +select count(*) from t where c = 1; +create index i1 on t(b); +select c from t where b = 2; +select * from t where b = 2; +select count(*) from t where b = 1; +select * from t where b = 1 and a > 1 limit 1; +drop table if exists t; +create table t (id int, c1 datetime); +insert into t values (1, '2015-06-07 12:12:12'); +select id from t where c1 = '2015-06-07 12:12:12'; +drop table if exists t0; +CREATE TABLE t0(c0 INT); +INSERT INTO t0 VALUES (100000); +SELECT * FROM t0 WHERE NOT SPACE(t0.c0); + +# TestAlterDefaultValue +drop table if exists t; +create table t(a int, primary key(a)); +insert into t(a) values(1); +alter table t add column b int default 1; +alter table t alter b set default 2; +select b from t where a = 1; + +# TestGenerateColumnReplace +## For issue 17256 +drop table if exists t1; +create table t1 (a int, b int as (a + 1) virtual not null, unique index idx(b)); +REPLACE INTO `t1` (`a`) VALUES (2); +REPLACE INTO `t1` (`a`) VALUES (2); +select * from t1; +insert into `t1` (`a`) VALUES (2) on duplicate key update a = 3; +select * from t1; + +# TestIssue19372 +drop table if exists t1; +create table t1 (c_int int, c_str varchar(40), key(c_str)); +drop table if exists t2; +create table t2 like t1; +insert into t1 values (1, 'a'), (2, 'b'), (3, 'c'); +insert into t2 select * from t1; +select (select t2.c_str from t2 where t2.c_str <= t1.c_str and t2.c_int in (1, 2) order by t2.c_str limit 1) x from t1 order by c_int; + +# TestDeleteWithMulTbl +## Delete rows from multiple tables through a left join. +## The result of the left join is (3, null, null). +## Because the rows in t2 are not matched, no row will be deleted from t2. +## But the row in t1 is matched, so it should be deleted. +drop table if exists t1, t2; +create table t1 (c1 int); +create table t2 (c1 int primary key, c2 int); +insert into t1 values(3); +insert into t2 values(2, 2); +insert into t2 values(0, 0); +delete from t1, t2 using t1 left join t2 on t1.c1 = t2.c2; +select * from t1 order by c1; +select * from t2 order by c1; +## Rows in both t1 and t2 are matched, so they will be deleted even if the values are null. +## NOTE: The null values are not generated by the join.
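+## Here both tables contain only a null row: the plain join matches the pair and +## "t1.c1 is null" holds, so the row in each table is deleted and both selects +## below return an empty result.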
+drop table if exists t1, t2; +create table t1 (c1 int); +create table t2 (c2 int); +insert into t1 values(null); +insert into t2 values(null); +delete from t1, t2 using t1 join t2 where t1.c1 is null; +select * from t1; +select * from t2; + +# TestIssue13758 +drop table if exists t1, t2; +create table t1 (pk int(11) primary key, a int(11) not null, b int(11), key idx_b(b), key idx_a(a)); +insert into `t1` values (1,1,0),(2,7,6),(3,2,null),(4,1,null),(5,4,5); +create table t2 (a int); +insert into t2 values (1),(null); +select (select a from t1 use index(idx_a) where b >= t2.a order by a limit 1) as field from t2; + +# TestIssue20237 +drop table if exists t, s; +create table t(a date, b float); +create table s(b float); +insert into t values(NULL,-37), ("2011-11-04",105), ("2013-03-02",-22), ("2006-07-02",-56), (NULL,124), (NULL,111), ("2018-03-03",-5); +insert into s values(-37),(105),(-22),(-56),(124),(105),(111),(-5); +select count(distinct t.a, t.b) from t join s on t.b= s.b; + +# TestToPBExpr +drop table if exists t; +create table t (a decimal(10,6), b decimal, index idx_b (b)); +set sql_mode = ''; +insert t values (1.1, 1.1); +insert t values (2.4, 2.4); +insert t values (3.3, 2.7); +select * from t where a < 2.399999; +select * from t where a > 1.5; +select * from t where a <= 1.1; +select * from t where b >= 3; +select * from t where not (b = 1); +select * from t where b&1 = a|1; +select * from t where b != 2 and b <=> 3; +select * from t where b in (3); +select * from t where b not in (1, 2); +drop table if exists t; +create table t (a varchar(255), b int); +insert t values ('abc123', 1); +insert t values ('ab123', 2); +select * from t where a like 'ab%'; +select * from t where a like 'ab_12'; +drop table if exists t; +create table t (a int primary key); +insert t values (1); +insert t values (2); +select * from t where not (a = 1); +select * from t where not(not (a = 1)); +select * from t where not(a != 1 and a != 2); +set @@sql_mode = default; + +# TestDatumXAPI +drop table if exists t; +create table t (a decimal(10,6), b decimal, index idx_b (b)); +set sql_mode = ''; +insert t values (1.1, 1.1); +insert t values (2.2, 2.2); +insert t values (3.3, 2.7); +select * from t where a > 1.5; +select * from t where b > 1.5; +drop table if exists t; +create table t (a time(3), b time, index idx_a (a)); +insert t values ('11:11:11', '11:11:11'); +insert t values ('11:11:12', '11:11:12'); +insert t values ('11:11:13', '11:11:13'); +select * from t where a > '11:11:11.5'; +select * from t where b > '11:11:11.5'; +set @@sql_mode = default; + +# TestTableDual +Select 1; +Select 1 from dual; +Select count(*) from dual; +Select 1 from dual where 1; +drop table if exists t; +create table t(a int primary key); +select t1.* from t t1, t t2 where t1.a=t2.a and 1=0; + +# TestRow +drop table if exists t; +create table t (c int, d int); +insert t values (1, 1); +insert t values (1, 3); +insert t values (2, 1); +insert t values (2, 3); +select * from t where (c, d) < (2,2); +select * from t where (1,2,3) > (3,2,1); +select * from t where row(1,2,3) > (3,2,1); +select * from t where (c, d) = (select * from t where (c,d) = (1,1)); +select * from t where (c, d) = (select * from t k where (t.c,t.d) = (c,d)); +select (1, 2, 3) < (2, 3, 4); +select (2, 3, 4) <= (2, 3, 3); +select (2, 3, 4) <= (2, 3, 4); +select (2, 3, 4) <= (2, 1, 4); +select (2, 3, 4) >= (2, 3, 4); +select (2, 3, 4) = (2, 3, 4); +select (2, 3, 4) != (2, 3, 4); +select row(1, 1) in (row(1, 1)); +select row(1, 0) in (row(1, 1)); +select 
row(1, 1) in (select 1, 1); +select row(1, 1) > row(1, 0); +select row(1, 1) > (select 1, 0); +select 1 > (select 1); +select (select 1); +drop table if exists t1; +create table t1 (a int, b int); +insert t1 values (1,2),(1,null); +drop table if exists t2; +create table t2 (c int, d int); +insert t2 values (0,0); +select * from t2 where (1,2) in (select * from t1); +select * from t2 where (1,2) not in (select * from t1); +select * from t2 where (1,1) not in (select * from t1); +select * from t2 where (1,null) in (select * from t1); +select * from t2 where (null,null) in (select * from t1); +delete from t1 where a=1 and b=2; +select (1,1) in (select * from t2) from t1; +select (1,1) not in (select * from t2) from t1; +select (1,1) in (select 1,1 from t2) from t1; +select (1,1) not in (select 1,1 from t2) from t1; +## MySQL 5.7 returns 1 for these 2 queries, which is wrong. +select (1,null) not in (select 1,1 from t2) from t1; +select (t1.a,null) not in (select 1,1 from t2) from t1; +select (1,null) in (select * from t1); +select (1,null) not in (select * from t1); + +# TestStrToDateBuiltin +select str_to_date('20190101','%Y%m%d%!') from dual; +select str_to_date('20190101','%Y%m%d%f') from dual; +select str_to_date('20190101','%Y%m%d%H%i%s') from dual; +select str_to_date('18/10/22','%y/%m/%d') from dual; +select str_to_date('a18/10/22','%y/%m/%d') from dual; +select str_to_date('69/10/22','%y/%m/%d') from dual; +select str_to_date('70/10/22','%y/%m/%d') from dual; +select str_to_date('8/10/22','%y/%m/%d') from dual; +select str_to_date('8/10/22','%Y/%m/%d') from dual; +select str_to_date('18/10/22','%Y/%m/%d') from dual; +select str_to_date('a18/10/22','%Y/%m/%d') from dual; +select str_to_date('69/10/22','%Y/%m/%d') from dual; +select str_to_date('70/10/22','%Y/%m/%d') from dual; +select str_to_date('018/10/22','%Y/%m/%d') from dual; +select str_to_date('2018/10/22','%Y/%m/%d') from dual; +select str_to_date('018/10/22','%y/%m/%d') from dual; +select str_to_date('18/10/22','%y0/%m/%d') from dual; +select str_to_date('18/10/22','%Y0/%m/%d') from dual; +select str_to_date('18a/10/22','%y/%m/%d') from dual; +select str_to_date('18a/10/22','%Y/%m/%d') from dual; +select str_to_date('20188/10/22','%Y/%m/%d') from dual; +select str_to_date('2018510522','%Y5%m5%d') from dual; +select str_to_date('2018^10^22','%Y^%m^%d') from dual; +select str_to_date('2018@10@22','%Y@%m@%d') from dual; +select str_to_date('2018%10%22','%Y%%m%%d') from dual; +select str_to_date('2018(10(22','%Y(%m(%d') from dual; +select str_to_date('2018\10\22','%Y\%m\%d') from dual; +select str_to_date('2018=10=22','%Y=%m=%d') from dual; +select str_to_date('2018+10+22','%Y+%m+%d') from dual; +select str_to_date('2018_10_22','%Y_%m_%d') from dual; +select str_to_date('69510522','%y5%m5%d') from dual; +select str_to_date('69^10^22','%y^%m^%d') from dual; +select str_to_date('18@10@22','%y@%m@%d') from dual; +select str_to_date('18%10%22','%y%%m%%d') from dual; +select str_to_date('18(10(22','%y(%m(%d') from dual; +select str_to_date('18\10\22','%y\%m\%d') from dual; +select str_to_date('18+10+22','%y+%m+%d') from dual; +select str_to_date('18=10=22','%y=%m=%d') from dual; +select str_to_date('18_10_22','%y_%m_%d') from dual; +SELECT STR_TO_DATE('2020-07-04 11:22:33 PM', '%Y-%m-%d %r'); +SELECT STR_TO_DATE('2020-07-04 12:22:33 AM', '%Y-%m-%d %r'); +SELECT STR_TO_DATE('2020-07-04 12:22:33', '%Y-%m-%d %T'); +SELECT STR_TO_DATE('2020-07-04 00:22:33', '%Y-%m-%d %T'); + +# TestReadPartitionedTable +drop table if exists pt; +create 
table pt (a int, b int, index i_b(b)) partition by range (a) (partition p1 values less than (2), partition p2 values less than (4), partition p3 values less than (6)); +insert into pt values(0, 0); +insert into pt values(1, 1); +insert into pt values(2, 2); +insert into pt values(3, 3); +insert into pt values(4, 4); +insert into pt values(5, 5); +## Table reader +select * from pt order by a; +## Index reader +select b from pt where b = 3; +## Index lookup +select a from pt where b = 3; + +# TestIssue10435 +drop table if exists t1; +create table t1(i int, j int, k int); +insert into t1 VALUES (1,1,1),(2,2,2),(3,3,3),(4,4,4); +INSERT INTO t1 SELECT 10*i,j,5*j FROM t1 UNION SELECT 20*i,j,5*j FROM t1 UNION SELECT 30*i,j,5*j FROM t1; +set @@session.tidb_enable_window_function=1; +SELECT SUM(i) OVER W FROM t1 WINDOW w AS (PARTITION BY j ORDER BY i) ORDER BY 1+SUM(i) OVER w; +set @@session.tidb_enable_window_function=default; + +# TestIndexJoinTableDualPanic +drop table if exists a; +create table a (f1 int, f2 varchar(32), primary key (f1)); +insert into a (f1,f2) values (1,'a'), (2,'b'), (3,'c'); +## TODO here: index join causes a data race on the txn. +select /*+ inl_merge_join(a) */ a.* from a inner join (select 1 as k1,'k2-1' as k2) as k on a.f1=k.k1; + +# TestSortLeftJoinWithNullColumnInRightChildPanic +drop table if exists t1, t2; +create table t1(a int); +create table t2(a int); +insert into t1(a) select 1; +select b.n from t1 left join (select a as a, null as n from t2) b on b.a = t1.a order by t1.a; + +# TestIssue39211 +drop table if exists t; +drop table if exists s; +CREATE TABLE `t` ( `a` int(11) DEFAULT NULL, `b` int(11) DEFAULT NULL); +CREATE TABLE `s` ( `a` int(11) DEFAULT NULL, `b` int(11) DEFAULT NULL); +insert into t values(1,1),(2,2); +insert into t select * from t; +insert into t select * from t; +insert into t select * from t; +insert into t select * from t; +insert into t select * from t; +insert into t select * from t; +insert into t select * from t; +insert into t select * from t; +insert into s values(3,3),(4,4),(1,null),(2,null),(null,null); +insert into s select * from s; +insert into s select * from s; +insert into s select * from s; +insert into s select * from s; +insert into s select * from s; +set @@tidb_max_chunk_size=32; +set @@tidb_enable_null_aware_anti_join=true; +select * from t where (a,b) not in (select a, b from s); +set @@tidb_max_chunk_size=default; +set @@tidb_enable_null_aware_anti_join=default; + +# TestPessimisticSelectForUpdate +drop table if exists t; +create table t(id int primary key, a int); +insert into t values(1, 1); +begin PESSIMISTIC; +select a from t where id=1 for update; +update t set a=a+1 where id=1; +commit; +select a from t where id=1; + +# TestSelectLimit +drop table if exists select_limit; +create table select_limit(id int not null default 1, name varchar(255), PRIMARY KEY(id)); +insert INTO select_limit VALUES (1, "hello"); +insert into select_limit values (2, "hello"); +insert INTO select_limit VALUES (3, "hello"); +insert INTO select_limit VALUES (4, "hello"); +select * from select_limit limit 1; +select id from (select * from select_limit limit 1) k where id != 1; +select * from select_limit limit 18446744073709551615 offset 0; +select * from select_limit limit 18446744073709551615 offset 1; +select * from select_limit limit 18446744073709551615 offset 3; +--error 1064 +select * from select_limit limit 18446744073709551616 offset 3; + +# TestSelectOrderBy +drop table if exists select_order_test; +create table select_order_test(id 
int not null default 1, name varchar(255), PRIMARY KEY(id)); +insert INTO select_order_test VALUES (1, "hello"); +insert into select_order_test values (2, "hello"); +select * from select_order_test where id = 1 order by id limit 1 offset 0; +select id from select_order_test order by id desc limit 1 ; +select id from select_order_test order by id + 1 desc limit 1 ; +select * from select_order_test order by name, id limit 1 offset 0; +select id as c1, name from select_order_test order by 2, id limit 1 offset 0; +select * from select_order_test order by name, id limit 100 offset 0; +select * from select_order_test order by name, id limit 1 offset 100; +select id from select_order_test order by name, id limit 18446744073709551615; +select id, name from select_order_test where id = 1 group by id, name limit 1 offset 0; +insert INTO select_order_test VALUES (3, "zz"); +insert INTO select_order_test VALUES (4, "zz"); +insert INTO select_order_test VALUES (5, "zz"); +insert INTO select_order_test VALUES (6, "zz"); +insert INTO select_order_test VALUES (7, "zz"); +insert INTO select_order_test VALUES (8, "zz"); +insert INTO select_order_test VALUES (9, "zz"); +insert INTO select_order_test VALUES (10, "zz"); +insert INTO select_order_test VALUES (10086, "hi"); +insert INTO select_order_test VALUES (11, "hh"); +insert INTO select_order_test VALUES (12, "hh"); +insert INTO select_order_test VALUES (13, "hh"); +insert INTO select_order_test VALUES (14, "hh"); +insert INTO select_order_test VALUES (15, "hh"); +insert INTO select_order_test VALUES (16, "hh"); +insert INTO select_order_test VALUES (17, "hh"); +insert INTO select_order_test VALUES (18, "hh"); +insert INTO select_order_test VALUES (19, "hh"); +insert INTO select_order_test VALUES (20, "hh"); +insert INTO select_order_test VALUES (21, "zz"); +insert INTO select_order_test VALUES (22, "zz"); +insert INTO select_order_test VALUES (23, "zz"); +insert INTO select_order_test VALUES (24, "zz"); +insert INTO select_order_test VALUES (25, "zz"); +insert INTO select_order_test VALUES (26, "zz"); +insert INTO select_order_test VALUES (27, "zz"); +insert INTO select_order_test VALUES (28, "zz"); +insert INTO select_order_test VALUES (29, "zz"); +insert INTO select_order_test VALUES (30, "zz"); +insert INTO select_order_test VALUES (1501, "aa"); +select * from select_order_test order by name, id limit 1 offset 3; +drop table if exists select_order_test; +drop table if exists t; +create table t (c int, d int); +insert t values (1, 1); +insert t values (1, 2); +insert t values (1, 3); +select 1-d as d from t order by d; +select 1-d as d from t order by d + 1; +select t.d from t order by d; +drop table if exists t; +create table t (a int, b int, c int); +insert t values (1, 2, 3); +select b from (select a,b from t order by a,c) t; +select b from (select a,b from t order by a,c limit 1) t; +drop table if exists t; +create table t(a int, b int, index idx(a)); +insert into t values(1, 1), (2, 2); +select * from t where 1 order by b; +select * from t where a between 1 and 2 order by a desc; +drop table if exists t; +create table t(a int primary key, b int, c int, index idx(b)); +insert into t values(1, 3, 1); +insert into t values(2, 2, 2); +insert into t values(3, 1, 3); +select * from t use index(idx) order by a desc limit 1; +drop table if exists t; +create table t(a int, b int, key b (b)); +set @@tidb_index_lookup_size = 3; +insert into t values(0, 10); +insert into t values(1, 9); +insert into t values(2, 8); +insert into t values(3, 7); +insert into t 
values(4, 6); +insert into t values(5, 5); +insert into t values(6, 4); +insert into t values(7, 3); +insert into t values(8, 2); +insert into t values(9, 1); +select a from t use index(b) order by b; +set @@tidb_index_lookup_size = default; + +# TestSelectErrorRow +--error 1146 +select row(1, 1) from test; +--error 1146 +select * from test group by row(1, 1); +--error 1146 +select * from test order by row(1, 1); +--error 1146 +select * from test having row(1, 1); +--error 1146 +select (select 1, 1) from test; +--error 1146 +select * from test group by (select 1, 1); +--error 1146 +select * from test order by (select 1, 1); +--error 1146 +select * from test having (select 1, 1); + +# TestIn +drop table if exists t; +create table t (c1 int primary key, c2 int, key c (c2)); +insert t values(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12), (13, 13), (14, 14), (15, 15), (16, 16), (17, 17), (18, 18), (19, 19), (20, 20), (21, 21), (22, 22), (23, 23), (24, 24), (25, 25), (26, 26), (27, 27), (28, 28), (29, 29), (30, 30), (31, 31), (32, 32), (33, 33), (34, 34), (35, 35), (36, 36), (37, 37), (38, 38), (39, 39), (40, 40), (41, 41), (42, 42), (43, 43), (44, 44), (45, 45), (46, 46), (47, 47), (48, 48), (49, 49), (50, 50), (51, 51), (52, 52), (53, 53), (54, 54), (55, 55), (56, 56), (57, 57), (58, 58), (59, 59), (60, 60), (61, 61), (62, 62), (63, 63), (64, 64), (65, 65), (66, 66), (67, 67), (68, 68), (69, 69), (70, 70), (71, 71), (72, 72), (73, 73), (74, 74), (75, 75), (76, 76), (77, 77), (78, 78), (79, 79), (80, 80), (81, 81), (82, 82), (83, 83), (84, 84), (85, 85), (86, 86), (87, 87), (88, 88), (89, 89), (90, 90), (91, 91), (92, 92), (93, 93), (94, 94), (95, 95), (96, 96), (97, 97), (98, 98), (99, 99), (100, 100), (101, 101), (102, 102), (103, 103), (104, 104), (105, 105), (106, 106), (107, 107), (108, 108), (109, 109), (110, 110), (111, 111), (112, 112), (113, 113), (114, 114), (115, 115), (116, 116), (117, 117), (118, 118), (119, 119), (120, 120), (121, 121), (122, 122), (123, 123), (124, 124), (125, 125), (126, 126), (127, 127), (128, 128), (129, 129), (130, 130), (131, 131), (132, 132), (133, 133), (134, 134), (135, 135), (136, 136), (137, 137), (138, 138), (139, 139), (140, 140), (141, 141), (142, 142), (143, 143), (144, 144), (145, 145), (146, 146), (147, 147), (148, 148), (149, 149), (150, 150), (151, 151), (152, 152), (153, 153), (154, 154), (155, 155), (156, 156), (157, 157), (158, 158), (159, 159), (160, 160), (161, 161), (162, 162), (163, 163), (164, 164), (165, 165), (166, 166), (167, 167), (168, 168), (169, 169), (170, 170), (171, 171), (172, 172), (173, 173), (174, 174), (175, 175), (176, 176), (177, 177), (178, 178), (179, 179), (180, 180), (181, 181), (182, 182), (183, 183), (184, 184), (185, 185), (186, 186), (187, 187), (188, 188), (189, 189), (190, 190), (191, 191), (192, 192), (193, 193), (194, 194), (195, 195), (196, 196), (197, 197), (198, 198), (199, 199), (200, 200); +select c2 from t where c1 in ('7', '10', '112', '111', '98', '106', '100', '9', '18', '17') order by c2; +select c2 from t where c1 in ('7a'); + +# TestTablePKisHandleScan +drop table if exists t; +create table t (a int PRIMARY KEY AUTO_INCREMENT); +insert t values (),(); +insert t values (-100),(0); +select * from t; +select * from t where a = 1; +select * from t where a != 1; +select * from t where a >= '1.1'; +select * from t where a < '1.1'; +select * from t where a > '-100.1' and a < 2; +select * from t where a is null; +select * from t where a is 
true; +select * from t where a is false; +select * from t where a in (1, 2); +select * from t where a between 1 and 2; + +# TestDefaultNull +drop table if exists t; +create table t (a int primary key auto_increment, b int default 1, c int); +insert t values (); +select * from t; +update t set b = NULL where a = 1; +select * from t; +update t set c = 1; +select * from t ; +delete from t where a = 1; +insert t (a) values (1); +select * from t; + +# TestJSON +drop table if exists test_json; +create table test_json (id int, a json); +insert into test_json (id, a) values (1, '{"a":[1,"2",{"aa":"bb"},4],"b":true}'); +insert into test_json (id, a) values (2, "null"); +insert into test_json (id, a) values (3, null); +insert into test_json (id, a) values (4, 'true'); +insert into test_json (id, a) values (5, '3'); +insert into test_json (id, a) values (5, '4.0'); +insert into test_json (id, a) values (6, '"string"'); +select tj.a from test_json tj order by tj.id; +select json_type(a) from test_json tj order by tj.id; +select a from test_json tj where a = 3; +select a from test_json tj where a = 4.0; +select a from test_json tj where a = true; +select a from test_json tj where a = "string"; +select cast(true as JSON); +select cast(false as JSON); +select a->>'$.a[2].aa' as x, a->'$.b' as y from test_json having x is not null order by id; +select a->'$.a[2].aa' as x, a->>'$.b' as y from test_json having x is not null order by id; +-- error 1101 +create table test_bad_json(a json default '{}'); +-- error 1101 +create table test_bad_json(a blob default 'hello'); +-- error 1101 +create table test_bad_json(a text default 'world'); +-- error 3152 +create table test_bad_json(id int, a json, key (a)); +select CAST('3' AS JSON), CAST('{}' AS JSON), CAST(null AS JSON); +select a, count(1) from test_json group by a order by a; +drop table if exists test_json; +create table test_json ( a decimal(60,2) as (JSON_EXTRACT(b,'$.c')), b json ); +insert into test_json (b) values + ('{"c": "1267.1"}'), + ('{"c": "1267.01"}'), + ('{"c": "1267.1234"}'), + ('{"c": "1267.3456"}'), + ('{"c": "1234567890123456789012345678901234567890123456789012345"}'), + ('{"c": "1234567890123456789012345678901234567890123456789012345.12345"}'); +select a from test_json; + +# TestGeneratedColumnWrite +drop table if exists test_gc_write, test_gc_write_1; +-- error 3109 +CREATE TABLE test_gc_write (a int primary key auto_increment, b int, c int as (a+8) virtual); +CREATE TABLE test_gc_write (a int primary key auto_increment, b int, c int as (b+8) virtual); +CREATE TABLE test_gc_write_1 (a int primary key, b int, c int); +-- error 3105 +insert into test_gc_write (a, b, c) values (1, 1, 1); +-- error 3105 +insert into test_gc_write values (1, 1, 1); +-- error 3105 +insert into test_gc_write select 1, 1, 1; +-- error 3105 +insert into test_gc_write (a, b) values (1, 1) on duplicate key update c = 1; +-- error 3105 +insert into test_gc_write set a = 1, b = 1, c = 1; +-- error 3105 +update test_gc_write set c = 1; +-- error 3105 +update test_gc_write, test_gc_write_1 set test_gc_write.c = 1; +insert into test_gc_write (a, b) values (1, 1); +insert into test_gc_write set a = 2, b = 2; +insert into test_gc_write (b) select c from test_gc_write; +update test_gc_write set b = 2 where a = 2; +update test_gc_write t1, test_gc_write_1 t2 set t1.b = 3, t2.b = 4; +-- error 1136 +insert into test_gc_write values (1, 1); +-- error 1136 +insert into test_gc_write select 1, 1; +-- error 1136 +insert into test_gc_write (c) select a, b from test_gc_write; +-- 
error 3105 +insert into test_gc_write (b, c) select a, b from test_gc_write; + +# TestGeneratedColumnRead +drop table if exists test_gc_read; +CREATE TABLE test_gc_read(a int primary key, b int, c int as (a+b), d int as (a*b) stored, e int as (c*2)); +SELECT generation_expression FROM information_schema.columns WHERE table_name = 'test_gc_read' AND column_name = 'd'; +INSERT INTO test_gc_read (a, b) VALUES (0,null),(1,2),(3,4); +SELECT * FROM test_gc_read ORDER BY a; +INSERT INTO test_gc_read SET a = 5, b = 10; +SELECT * FROM test_gc_read ORDER BY a; +REPLACE INTO test_gc_read (a, b) VALUES (5, 6); +SELECT * FROM test_gc_read ORDER BY a; +INSERT INTO test_gc_read (a, b) VALUES (5, 8) ON DUPLICATE KEY UPDATE b = 9; +SELECT * FROM test_gc_read ORDER BY a; +SELECT c, d FROM test_gc_read; +SELECT e FROM test_gc_read; +INSERT INTO test_gc_read (a, b) VALUES (5, 8) ON DUPLICATE KEY UPDATE a = 6, b = a; +SELECT * FROM test_gc_read ORDER BY a; +INSERT INTO test_gc_read (a, b) VALUES (6, 8) ON DUPLICATE KEY UPDATE b = 8, a = b; +SELECT * FROM test_gc_read ORDER BY a; +SELECT * FROM test_gc_read WHERE c = 7; +SELECT * FROM test_gc_read WHERE d = 64; +SELECT * FROM test_gc_read WHERE e = 6; +UPDATE test_gc_read SET a = a + 100 WHERE c = 7; +SELECT * FROM test_gc_read WHERE c = 107; +UPDATE test_gc_read m SET m.a = m.a + 100 WHERE c = 107; +SELECT * FROM test_gc_read WHERE c = 207; +UPDATE test_gc_read SET a = a - 200 WHERE d = 812; +SELECT * FROM test_gc_read WHERE d = 12; +INSERT INTO test_gc_read set a = 4, b = d + 1; +SELECT * FROM test_gc_read ORDER BY a; +DELETE FROM test_gc_read where a = 4; +CREATE TABLE test_gc_help(a int primary key, b int, c int, d int, e int); +INSERT INTO test_gc_help(a, b, c, d, e) SELECT * FROM test_gc_read; +SELECT t1.* FROM test_gc_read t1 JOIN test_gc_help t2 ON t1.c = t2.c ORDER BY t1.a; +SELECT t1.* FROM test_gc_read t1 JOIN test_gc_help t2 ON t1.d = t2.d ORDER BY t1.a; +SELECT t1.* FROM test_gc_read t1 JOIN test_gc_help t2 ON t1.e = t2.e ORDER BY t1.a; +--sorted_result +SELECT * FROM test_gc_read t WHERE t.a not in (SELECT t.a FROM test_gc_read t where t.c > 5); +--sorted_result +SELECT * FROM test_gc_read t WHERE t.c in (SELECT t.c FROM test_gc_read t where t.c > 5); +SELECT tt.b FROM test_gc_read tt WHERE tt.a = (SELECT max(t.a) FROM test_gc_read t WHERE t.c = tt.c) ORDER BY b; +SELECT c, sum(a) aa, max(d) dd, sum(e) ee FROM test_gc_read GROUP BY c ORDER BY aa; +SELECT a, sum(c), sum(d), sum(e) FROM test_gc_read GROUP BY a ORDER BY a; +UPDATE test_gc_read m, test_gc_read n SET m.b = m.b + 10, n.b = n.b + 10; +SELECT * FROM test_gc_read ORDER BY a; +drop table if exists t; +create table t(a int); +insert into t values(8); +update test_gc_read set a = a+1 where a in (select a from t); +select * from test_gc_read order by a; +CREATE TABLE test_gc_read_cast(a VARCHAR(255), b VARCHAR(255), c INT AS (JSON_EXTRACT(a, b)), d INT AS (JSON_EXTRACT(a, b)) STORED); +INSERT INTO test_gc_read_cast (a, b) VALUES ('{"a": "3"}', '$.a'); +SELECT c, d FROM test_gc_read_cast; +CREATE TABLE test_gc_read_cast_1(a VARCHAR(255), b VARCHAR(255), c ENUM("red", "yellow") AS (JSON_UNQUOTE(JSON_EXTRACT(a, b)))); +INSERT INTO test_gc_read_cast_1 (a, b) VALUES ('{"a": "yellow"}', '$.a'); +SELECT c FROM test_gc_read_cast_1; +CREATE TABLE test_gc_read_cast_2( a JSON, b JSON AS (a->>'$.a')); +INSERT INTO test_gc_read_cast_2(a) VALUES ('{"a": "{ \\\"key\\\": \\\"\\u6d4b\\\" }"}'); +SELECT b FROM test_gc_read_cast_2; +CREATE TABLE test_gc_read_cast_3( a JSON, b JSON AS (a->>'$.a'), c INT AS (b * 
+INSERT INTO test_gc_read_cast_3(a) VALUES ('{"a": "5"}');
+SELECT c FROM test_gc_read_cast_3;
+--error 1265
+INSERT INTO test_gc_read_cast_1 (a, b) VALUES ('{"a": "invalid"}', '$.a');
+DROP TABLE IF EXISTS test_gc_read_m;
+CREATE TABLE test_gc_read_m (a int primary key, b int, c int as (a+1), d int as (c*2));
+INSERT INTO test_gc_read_m(a) values (1), (2);
+ALTER TABLE test_gc_read_m DROP b;
+SELECT * FROM test_gc_read_m;
+CREATE TABLE test_gc_read_1(a int primary key, b int, c int as (a+b) not null, d int as (a*b) stored);
+CREATE TABLE test_gc_read_2(a int primary key, b int, c int as (a+b), d int as (a*b) stored not null);
+-- error 1048
+insert into test_gc_read_1(a, b) values (1, null);
+-- error 1048
+insert into test_gc_read_2(a, b) values (1, null);
+
+# TestSelectPartition
+drop table if exists th, tr, tl;
+create table th (a int, b int) partition by hash(a) partitions 3;
+create table tr (a int, b int)
+  partition by range (a) (
+  partition r0 values less than (4),
+  partition r1 values less than (7),
+  partition r3 values less than maxvalue);
+create table tl (a int, b int, unique index idx(a)) partition by list (a) (
+  partition p0 values in (3,5,6,9,17),
+  partition p1 values in (1,2,10,11,19,20),
+  partition p2 values in (4,12,13,14,18),
+  partition p3 values in (7,8,15,16,null));
+insert into th values (0,0),(1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8);
+insert into th values (-1,-1),(-2,-2),(-3,-3),(-4,-4),(-5,-5),(-6,-6),(-7,-7),(-8,-8);
+insert into tr values (-3,-3),(3,3),(4,4),(7,7),(8,8);
+insert into tl values (3,3),(1,1),(4,4),(7,7),(8,8),(null,null);
+select b from th partition (p0) order by a;
+select b from tr partition (r0) order by a;
+select b from tl partition (p0) order by a;
+select b from th partition (p0,P0) order by a;
+select b from tr partition (r0,R0,r0) order by a;
+select b from tl partition (p0,P0,p0) order by a;
+select b from th partition (P2,p0) order by a;
+select b from tr partition (r1,R3) order by a;
+select b from tl partition (p0,P3) order by a;
+-- error 1735
+select b from th partition (p0,p4);
+-- error 1735
+select b from tr partition (r1,r4);
+-- error 1735
+select b from tl partition (p0,p4);
+begin;
+insert into th values (10,10),(11,11);
+select a, b from th where b>10;
+commit;
+select a, b from th where b>10;
+drop table if exists tscalar;
+create table tscalar (c1 int) partition by range (c1 % 30) (
+  partition p0 values less than (0),
+  partition p1 values less than (10),
+  partition p2 values less than (20),
+  partition pm values less than (maxvalue));
+insert into tscalar values(0), (10), (40), (50), (55);
+insert into tscalar values(-0), (-10), (-40), (-50), (-55);
+select * from tscalar where c1 in (55, 55);
+select * from tscalar where c1 in (40, 40);
+select * from tscalar where c1 in (40);
+select * from tscalar where c1 in (-40);
+select * from tscalar where c1 in (-40, -40);
+select * from tscalar where c1 in (-1);
+
+# TestPrepareLoadData
+-- error 1295
+prepare stmt from "load data local infile '/tmp/load_data_test.csv' into table test";
+
+# TestPrepareImportInto
+-- error 1295
+prepare stmt from "import into test from 'xx' format 'delimited'";
+
+# TestHandleTransfer
+drop table if exists t;
+create table t(a int, index idx(a));
+insert into t values(1), (2), (4);
+begin;
+update t set a = 3 where a = 4;
+select * from t ignore index(idx);
+insert into t values(4);
+select * from t use index(idx);
+select * from t use index(idx) order by a desc;
+update t set a = 5 where a = 3;
+select * from t use index(idx);
+commit;
+drop table if exists t;
+create table t(a int, b int, index idx(a));
+insert into t values(3, 3), (1, 1), (2, 2);
+select * from t use index(idx) order by a;
+
+# TestMaxInt64Handle
+drop table if exists t;
+create table t(id bigint, PRIMARY KEY (id));
+insert into t values(9223372036854775807);
+select * from t where id = 9223372036854775807;
+select * from t where id = 9223372036854775807;
+select * from t;
+--error 1062
+insert into t values(9223372036854775807);
+delete from t where id = 9223372036854775807;
+select * from t;
+
+# TestUnsignedPk
+drop table if exists t;
+create table t(id bigint unsigned primary key);
+insert into t values(9223372036854775808), (9223372036854775809), (1), (2);
+select * from t order by id;
+select * from t where id not in (2);
+drop table if exists t;
+create table t(a bigint unsigned primary key, b int, index idx(b));
+insert into t values(9223372036854775808, 1), (1, 1);
+select * from t use index(idx) where b = 1 and a < 2;
+select * from t use index(idx) where b = 1 order by b, a;
+
+# TestSignedCommonHandle
+set @@tidb_enable_clustered_index = 1;
+drop table if exists t;
+create table t(k1 int, k2 int, primary key(k1, k2));
+insert into t(k1, k2) value(-100, 1), (-50, 1), (0, 0), (1, 1), (3, 3);
+select k1 from t order by k1;
+select k1 from t order by k1 desc;
+select k1 from t where k1 < -51;
+select k1 from t where k1 < -1;
+select k1 from t where k1 <= 0;
+select k1 from t where k1 < 2;
+select k1 from t where k1 < -1 and k1 > -90;
+set @@tidb_enable_clustered_index = default;
+
+# TestContainDotColumn
+drop table if exists t1, t2, t3;
+create table t1(t1.a char);
+create table t2(a char, t2.b int);
+-- error 1103
+create table t3(s.a char);
+
+# TestCheckTableClusterIndex
+set @@tidb_enable_clustered_index = 1;
+drop table if exists admin_test;
+create table admin_test (c1 int, c2 int, c3 int default 1, primary key (c1, c2), index (c1), unique key(c2));
+insert admin_test (c1, c2) values (1, 1), (2, 2), (3, 3);
+admin check table admin_test;
+set @@tidb_enable_clustered_index = default;
+
+# TestIncorrectLimitArg
+drop table if exists t;
+create table t(a bigint);
+prepare stmt1 from 'select * from t limit ?';
+prepare stmt2 from 'select * from t limit ?, ?';
+set @a = -1;
+set @b = 1;
+-- error 1210
+execute stmt1 using @a;
+-- error 1210
+execute stmt2 using @b, @a;
+-- error 1210
+execute stmt2 using @a, @b;
+-- error 1210
+execute stmt2 using @a, @a;
+
+# TestEmptyEnum
+drop table if exists t;
+create table t (e enum('Y', 'N'));
+set sql_mode='STRICT_TRANS_TABLES';
+--error 1265
+insert into t values (0);
+--error 1265
+insert into t values ('abc');
+set sql_mode='';
+insert into t values (0);
+select * from t;
+insert into t values ('abc');
+select * from t;
+insert into t values (null);
+select * from t;
+drop table if exists t;
+create table t (id int auto_increment primary key, c1 enum('a', '', 'c'));
+insert into t(c1) values (0);
+select id, c1+0, c1 from t;
+alter table t change c1 c1 enum('a', '') not null;
+select id, c1+0, c1 from t;
+insert into t(c1) values (0);
+select id, c1+0, c1 from t;
+set sql_mode=default;
+
+# TestDIVZeroInPartitionExpr
+drop table if exists t1;
+create table t1(a int) partition by range (10 div a) (partition p0 values less than (10), partition p1 values less than maxvalue);
+set @@sql_mode='';
+insert into t1 values (NULL), (0), (1);
+set @@sql_mode='STRICT_ALL_TABLES,ERROR_FOR_DIVISION_BY_ZERO';
+-- error 1365
+insert into t1 values (NULL), (0), (1);
+set @@sql_mode=default;
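+## With ERROR_FOR_DIVISION_BY_ZERO plus strict mode, evaluating the partition expression
+## `10 div 0` for the row a = 0 raises error 1365 (division by zero) instead of yielding NULL,
+## which is why the same insert succeeds with sql_mode='' but fails here.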
+
+# TestInsertIntoGivenPartitionSet
+drop table if exists t1;
+create table t1(
+  a int(11) DEFAULT NULL,
+  b varchar(10) DEFAULT NULL,
+  UNIQUE KEY idx_a (a)) PARTITION BY RANGE (a)
+  (PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
+  PARTITION p1 VALUES LESS THAN (20) ENGINE = InnoDB,
+  PARTITION p2 VALUES LESS THAN (30) ENGINE = InnoDB,
+  PARTITION p3 VALUES LESS THAN (40) ENGINE = InnoDB,
+  PARTITION p4 VALUES LESS THAN MAXVALUE ENGINE = InnoDB);
+insert into t1 partition(p0) values(1, 'a'), (2, 'b');
+select * from t1 partition(p0) order by a;
+insert into t1 partition(p0, p1) values(3, 'c'), (4, 'd');
+select * from t1 partition(p1);
+-- error 1062
+insert into t1 values(1, 'a');
+-- error 1735
+insert into t1 partition(p0, p_non_exist) values(1, 'a');
+-- error 1748
+insert into t1 partition(p0, p1) values(40, 'a');
+replace into t1 partition(p0) values(1, 'replace');
+replace into t1 partition(p0, p1) values(3, 'replace'), (4, 'replace');
+replace into t1 values(1, 'a');
+select * from t1 partition (p0) order by a;
+-- error 1735
+replace into t1 partition(p0, p_non_exist) values(1, 'a');
+-- error 1748
+replace into t1 partition(p0, p1) values(40, 'a');
+truncate table t1;
+drop table if exists t;
+create table t(a int, b char(10));
+-- error 1747
+insert into t partition(p0, p1) values(1, 'a');
+insert into t values(1, 'a'), (2, 'b');
+insert into t1 partition(p0) select * from t;
+select * from t1 partition(p0) order by a;
+truncate table t;
+insert into t values(3, 'c'), (4, 'd');
+insert into t1 partition(p0, p1) select * from t;
+select * from t1 partition(p1) order by a;
+select * from t1 partition(p0) order by a;
+-- error 1062
+insert into t1 select 1, 'a';
+-- error 1735
+insert into t1 partition(p0, p_non_exist) select 1, 'a';
+-- error 1748
+insert into t1 partition(p0, p1) select 40, 'a';
+replace into t1 partition(p0) select 1, 'replace';
+truncate table t;
+insert into t values(3, 'replace'), (4, 'replace');
+replace into t1 partition(p0, p1) select * from t;
+replace into t1 select 1, 'a';
+select * from t1 partition (p0) order by a;
+-- error 1735
+replace into t1 partition(p0, p_non_exist) select 1, 'a';
+-- error 1748
+replace into t1 partition(p0, p1) select 40, 'a';
+
+# TestUpdateGivenPartitionSet
+drop table if exists t1, t2, t3;
+create table t1(
+  a int(11),
+  b varchar(10) DEFAULT NULL,
+  primary key idx_a (a)) PARTITION BY RANGE (a)
+  (PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
+  PARTITION p1 VALUES LESS THAN (20) ENGINE = InnoDB,
+  PARTITION p2 VALUES LESS THAN (30) ENGINE = InnoDB,
+  PARTITION p3 VALUES LESS THAN (40) ENGINE = InnoDB,
+  PARTITION p4 VALUES LESS THAN MAXVALUE ENGINE = InnoDB);
+create table t2(
+  a int(11) DEFAULT NULL,
+  b varchar(10) DEFAULT NULL) PARTITION BY RANGE (a)
+  (PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
+  PARTITION p1 VALUES LESS THAN (20) ENGINE = InnoDB,
+  PARTITION p2 VALUES LESS THAN (30) ENGINE = InnoDB,
+  PARTITION p3 VALUES LESS THAN (40) ENGINE = InnoDB,
+  PARTITION p4 VALUES LESS THAN MAXVALUE ENGINE = InnoDB);
+create table t3 (a int(11), b varchar(10) default null);
+insert into t3 values(1, 'a'), (2, 'b'), (11, 'c'), (21, 'd');
+-- error 1747
+update t3 partition(p0) set a = 40 where a = 2;
+insert into t1 values(1, 'a'), (2, 'b'), (11, 'c'), (21, 'd');
+-- error 1748
+update t1 partition(p0, p1) set a = 40;
+-- error 1748
+update t1 partition(p0) set a = 40 where a = 2;
+-- error 1735
+update t1 partition (p0, p_non_exist) set a = 40;
+-- error 1748
+update t1 partition (p0), t3 set t1.a = 40 where t3.a = 2;
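+## Error codes exercised here: 1735 = unknown partition name, 1747 = PARTITION () clause used on a
+## non-partitioned table, and 1748 = the row does not fit into any partition listed in PARTITION ().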
+update t1 partition(p0) set a = 3 where a = 2;
+update t1 partition(p0, p3) set a = 33 where a = 1;
+insert into t2 values(1, 'a'), (2, 'b'), (11, 'c'), (21, 'd');
+-- error 1748
+update t2 partition(p0, p1) set a = 40;
+-- error 1748
+update t2 partition(p0) set a = 40 where a = 2;
+update t2 partition(p0) set a = 3 where a = 2;
+update t2 partition(p0, p3) set a = 33 where a = 1;
+drop table if exists t4;
+create table t4(a int primary key, b int) partition by hash(a) partitions 2;
+insert into t4(a, b) values(1, 1),(2, 2),(3, 3);
+-- error 1748
+update t4 partition(p0) set a = 5 where a = 2;
+
+# TestIssue19667
+drop table if exists t;
+CREATE TABLE t (a DATETIME);
+INSERT INTO t VALUES('1988-04-17 01:59:59');
+SELECT DATE_ADD(a, INTERVAL 1 SECOND) FROM t;
+
+# TestZeroDateTimeCompatibility
+--enable_warnings
+select YEAR(0000-00-00), YEAR("0000-00-00");
+select MONTH(0000-00-00), MONTH("0000-00-00");
+select DAYOFMONTH(0000-00-00), DAYOFMONTH("0000-00-00");
+select QUARTER(0000-00-00), QUARTER("0000-00-00");
+select EXTRACT(DAY FROM 0000-00-00), EXTRACT(DAY FROM "0000-00-00");
+select EXTRACT(MONTH FROM 0000-00-00), EXTRACT(MONTH FROM "0000-00-00");
+select EXTRACT(YEAR FROM 0000-00-00), EXTRACT(YEAR FROM "0000-00-00");
+select EXTRACT(WEEK FROM 0000-00-00), EXTRACT(WEEK FROM "0000-00-00");
+select EXTRACT(QUARTER FROM 0000-00-00), EXTRACT(QUARTER FROM "0000-00-00");
+select DAYOFWEEK(0000-00-00), DAYOFWEEK("0000-00-00");
+select DAYOFYEAR(0000-00-00), DAYOFYEAR("0000-00-00");
+--disable_warnings
+drop table if exists t;
+create table t(v1 datetime, v2 datetime(3));
+insert ignore into t values(0,0);
+--enable_warnings
+select YEAR(v1), YEAR(v2) from t;
+select MONTH(v1), MONTH(v2) from t;
+select DAYOFMONTH(v1), DAYOFMONTH(v2) from t;
+select QUARTER(v1), QUARTER(v2) from t;
+select EXTRACT(DAY FROM v1), EXTRACT(DAY FROM v2) from t;
+select EXTRACT(MONTH FROM v1), EXTRACT(MONTH FROM v2) from t;
+select EXTRACT(YEAR FROM v1), EXTRACT(YEAR FROM v2) from t;
+select EXTRACT(WEEK FROM v1), EXTRACT(WEEK FROM v2) from t;
+select EXTRACT(QUARTER FROM v1), EXTRACT(QUARTER FROM v2) from t;
+select DAYOFWEEK(v1), DAYOFWEEK(v2) from t;
+select DAYOFYEAR(v1), DAYOFYEAR(v2) from t;
+--disable_warnings
+
+# TestInvalidDateValueInCreateTable
+drop table if exists t;
+set @@sql_mode='STRICT_TRANS_TABLES,NO_ZERO_IN_DATE';
+-- error 1067
+create table t (a datetime default '2999-00-00 00:00:00');
+create table t (a datetime);
+-- error 1067
+alter table t modify column a datetime default '2999-00-00 00:00:00';
+drop table if exists t;
+set @@sql_mode='STRICT_TRANS_TABLES,NO_ZERO_DATE';
+-- error 1067
+create table t (a datetime default '0000-00-00 00:00:00');
+create table t (a datetime);
+-- error 1067
+alter table t modify column a datetime default '0000-00-00 00:00:00';
+drop table if exists t;
+set @@sql_mode='STRICT_TRANS_TABLES';
+create table t (a datetime default '2999-00-00 00:00:00');
+drop table if exists t;
+create table t (a datetime default '0000-00-00 00:00:00');
+drop table if exists t;
+create table t (a datetime);
+alter table t modify column a datetime default '2999-00-00 00:00:00';
+alter table t modify column a datetime default '0000-00-00 00:00:00';
+drop table if exists t;
+set @@sql_mode='STRICT_TRANS_TABLES';
+-- error 1067
+create table t (a datetime default '2999-02-30 00:00:00');
+drop table if exists t;
+set @@sql_mode='STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE';
+-- error 1067
+create table t (a datetime default '2999-02-30 00:00:00');
+drop table if exists t;
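+## ALLOW_INVALID_DATES relaxes day/month validation (it only requires month 1-12 and day 1-31),
+## so the '2999-02-30' default that was rejected above is accepted below.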
+set @@sql_mode='STRICT_TRANS_TABLES,ALLOW_INVALID_DATES';
+create table t (a datetime default '2999-02-30 00:00:00');
+drop table if exists t;
+create table t (a datetime);
+alter table t modify column a datetime default '2999-02-30 00:00:00';
+drop table if exists t;
+set @@sql_mode=default;
+
+# TestEncodingSet
+drop table if exists `enum-set`;
+CREATE TABLE `enum-set` (`set` SET('x00','x01','x02','x03','x04','x05','x06','x07','x08','x09','x10','x11','x12','x13','x14','x15','x16','x17','x18','x19','x20','x21','x22','x23','x24','x25','x26','x27','x28','x29','x30','x31','x32','x33','x34','x35','x36','x37','x38','x39','x40','x41','x42','x43','x44','x45','x46','x47','x48','x49','x50','x51','x52','x53','x54','x55','x56','x57','x58','x59','x60','x61','x62','x63')NOT NULL PRIMARY KEY);
+INSERT INTO `enum-set` VALUES ("x00,x59");
+select `set` from `enum-set` use index(PRIMARY);
+admin check table `enum-set`;
+
+# TestYearTypeDeleteIndex
+drop table if exists t;
+create table t(a YEAR, PRIMARY KEY(a));
+insert into t set a = '2151';
+delete from t;
+admin check table t;
+
+# TestRowID
+drop table if exists t;
+set @@tidb_enable_clustered_index = 'int_only';
+create table t(a varchar(10), b varchar(10), c varchar(1), index idx(a, b, c));
+insert into t values('a', 'b', 'c');
+insert into t values('a', 'b', 'c');
+select b, _tidb_rowid from t use index(idx) where a = 'a';
+begin;
+select * from t for update;
+select distinct b from t use index(idx) where a = 'a';
+commit;
+drop table if exists t;
+create table t(a varchar(5) primary key);
+insert into t values('a');
+select *, _tidb_rowid from t use index(`primary`) where _tidb_rowid=1;
+set @@tidb_enable_clustered_index = default;
+
+# TestSubqueryTableAlias
+drop table if exists t;
+set sql_mode = '';
+-- error 1248
+select a, b from (select 1 a) ``, (select 2 b) ``;
+-- error 1066
+select a, b from (select 1 a) `x`, (select 2 b) `x`;
+-- error 1248
+select a, b from (select 1 a), (select 2 b);
+-- error 1248
+select a from (select 1 a) ``, (select 2 a) ``;
+-- error 1066
+select a from (select 1 a) `x`, (select 2 a) `x`;
+-- error 1066
+select x.a from (select 1 a) `x`, (select 2 a) `x`;
+-- error 1248
+select a from (select 1 a), (select 2 a);
+set sql_mode = 'oracle';
+select a, b from (select 1 a) ``, (select 2 b) ``;
+select a, b from (select 1 a) `x`, (select 2 b) `x`;
+select a, b from (select 1 a), (select 2 b);
+-- error 1052
+select a from (select 1 a) ``, (select 2 a) ``;
+-- error 1052
+select a from (select 1 a) `x`, (select 2 a) `x`;
+-- error 1052
+select x.a from (select 1 a) `x`, (select 2 a) `x`;
+-- error 1052
+select a from (select 1 a), (select 2 a);
+set sql_mode = default;
+
+# TestSelectHashPartitionTable
+drop table if exists th;
+create table th (a int, b int) partition by hash(a) partitions 3;
+insert into th values (0,0),(1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8);
+insert into th values (-1,-1),(-2,-2),(-3,-3),(-4,-4),(-5,-5),(-6,-6),(-7,-7),(-8,-8);
+select b from th order by a;
+select * from th where a=-2;
+select * from th where a=5;
+drop table if exists th;
+
+# TestSelectView
+drop table if exists view_t;
+create table view_t (a int,b int);
+insert into view_t values(1,2);
+create definer='root'@'localhost' view view1 as select * from view_t;
+create definer='root'@'localhost' view view2(c,d) as select * from view_t;
+create definer='root'@'localhost' view view3(c,d) as select a,b from view_t;
+create definer='root'@'localhost' view view4 as select * from (select * from (select * from view_t) tb1) tb;
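+## Views store their definition, not data; the selects below show that a view keeps working only
+## while the base table still provides the columns its definition needs, and error 1356 is
+## reported once the definition can no longer be resolved.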
+select * from view1;
+select * from view2;
+select * from view3;
+select * from view4;
+drop table view_t;
+create table view_t(c int,d int);
+-- error 1356
+select * from view1;
+-- error 1356
+select * from view2;
+-- error 1356
+select * from view3;
+drop table view_t;
+create table view_t(a int,b int,c int);
+insert into view_t values(1,2,3);
+select * from view1;
+select * from view2;
+select * from view3;
+select * from view4;
+alter table view_t drop column a;
+alter table view_t add column a int after b;
+update view_t set a=1;
+select * from view1;
+select * from view2;
+select * from view3;
+select * from view4;
+drop table view_t;
+drop view view1,view2,view3,view4;
+set @@tidb_enable_window_function = 1;
+create table t(a int, b int);
+insert into t values (1,1),(1,2),(2,1),(2,2);
+create definer='root'@'localhost' view v as select a, first_value(a) over(rows between 1 preceding and 1 following), last_value(a) over(rows between 1 preceding and 1 following) from t;
+select * from v;
+drop view v;
+set @@tidb_enable_window_function = default;
+
+# TestBinaryStrNumericOperator
+drop table if exists t;
+create table t(a varbinary(10));
+insert into t values ('123.12');
+--enable_warnings
+select 1+a from t;
+select a-1 from t;
+select -10*a from t;
+select a/-2 from t;
+--disable_warnings
+
+# TestSetOperationOnDiffColType
+drop table if exists t1, t2, t3;
+create table t1(a int, b int);
+create table t2(a int, b varchar(20));
+create table t3(a int, b decimal(30,10));
+insert into t1 values (1,1),(1,1),(2,2),(3,3),(null,null);
+insert into t2 values (1,'1'),(2,'2'),(null,null),(null,'3');
+insert into t3 values (2,2.1),(3,3);
+explain format = 'brief' select * from t3 union select * from t1;
+--sorted_result
+select * from t3 union select * from t1;
+explain format = 'brief' select * from t2 union all select * from t1;
+--sorted_result
+select * from t2 union all select * from t1;
+explain format = 'brief' select * from t1 except select * from t3;
+--sorted_result
+select * from t1 except select * from t3;
+explain format = 'brief' select * from t1 intersect select * from t2;
+--sorted_result
+select * from t1 intersect select * from t2;
+explain format = 'brief' select * from t1 union all select * from t2 union all select * from t3;
+--sorted_result
+select * from t1 union all select * from t2 union all select * from t3;
+explain format = 'brief' select * from t1 union all select * from t2 except select * from t3;
+--sorted_result
+select * from t1 union all select * from t2 except select * from t3;
+explain format = 'brief' select * from t1 intersect select * from t2 intersect select * from t1;
+--sorted_result
+select * from t1 intersect select * from t2 intersect select * from t1;
+explain format = 'brief' select * from t1 union all select * from t2 intersect select * from t3;
+--sorted_result
+select * from t1 union all select * from t2 intersect select * from t3;
+explain format = 'brief' select * from t1 except select * from t2 intersect select * from t3;
+--sorted_result
+select * from t1 except select * from t2 intersect select * from t3;
+
+# TestIndexScanWithYearCol
+# issue-23038: wrong key range of index scan for year column
+set tidb_cost_model_version=2;
+drop table if exists t;
+create table t (c1 year(4), c2 int, key(c1));
+insert into t values(2001, 1);
+explain format = 'brief' select t1.c1, t2.c1 from t as t1 inner join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL;
+--sorted_result
+select t1.c1, t2.c1 from t as t1 inner join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL;
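+## Note that `!= NULL` is never true (any comparison with NULL yields NULL), so these queries are
+## expected to return empty results; what matters here is the key range shown in the explain output.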
+explain format = 'brief' select * from t as t1 inner join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL;
+--sorted_result
+select * from t as t1 inner join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL;
+explain format = 'brief' select count(*) from t as t1 inner join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL;
+--sorted_result
+select count(*) from t as t1 inner join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL;
+explain format = 'brief' select t1.c1, t2.c1 from t as t1 left join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL;
+--sorted_result
+select t1.c1, t2.c1 from t as t1 left join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL;
+explain format = 'brief' select * from t as t1 left join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL;
+--sorted_result
+select * from t as t1 left join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL;
+explain format = 'brief' select count(*) from t as t1 left join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL;
+--sorted_result
+select count(*) from t as t1 left join t as t2 on t1.c1 = t2.c1 where t1.c1 != NULL;
+explain format = 'brief' select * from t as t1 left join t as t2 on t1.c1 = t2.c1 where t1.c1 is not NULL;
+--sorted_result
+select * from t as t1 left join t as t2 on t1.c1 = t2.c1 where t1.c1 is not NULL;
+
+# TestSetOperation
+set tidb_cost_model_version=2;
+drop table if exists t1, t2, t3;
+create table t1(a int);
+create table t2 like t1;
+create table t3 like t1;
+insert into t1 values (1),(1),(2),(3),(null);
+insert into t2 values (1),(2),(null),(null);
+insert into t3 values (2),(3);
+explain format='brief' select * from t3 union select * from t1;
+--sorted_result
+select * from t3 union select * from t1;
+explain format='brief' select * from t2 union all select * from t1;
+--sorted_result
+select * from t2 union all select * from t1;
+explain format='brief' select * from t1 except select * from t3;
+--sorted_result
+select * from t1 except select * from t3;
+explain format='brief' select * from t1 intersect select * from t2;
+--sorted_result
+select * from t1 intersect select * from t2;
+explain format='brief' select * from t1 union all select * from t2 union all select * from t3;
+--sorted_result
+select * from t1 union all select * from t2 union all select * from t3;
+explain format='brief' select * from t1 union all select * from t2 except select * from t3;
+--sorted_result
+select * from t1 union all select * from t2 except select * from t3;
+explain format='brief' select * from t1 intersect select * from t2 intersect select * from t1;
+--sorted_result
+select * from t1 intersect select * from t2 intersect select * from t1;
+explain format='brief' select * from t1 union all select * from t2 intersect select * from t3;
+--sorted_result
+select * from t1 union all select * from t2 intersect select * from t3;
+explain format='brief' select * from t1 except select * from t2 intersect select * from t3;
+--sorted_result
+select * from t1 except select * from t2 intersect select * from t3;
+explain format='brief' select * from t1 intersect (select * from t2 except (select * from t3));
+--sorted_result
+select * from t1 intersect (select * from t2 except (select * from t3));
+explain format='brief' select * from t1 union all (select * from t2 except select * from t3);
+--sorted_result
+select * from t1 union all (select * from t2 except select * from t3);
+explain format='brief' select * from t1 union (select * from t2 union all select * from t3);
+--sorted_result
+select * from t1 union (select * from t2 union all select * from t3);
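+## In MySQL-compatible precedence, INTERSECT binds tighter than UNION and EXCEPT, so a query like
+## `t1 union all t2 intersect t3` is evaluated as `t1 union all (t2 intersect t3)` unless
+## parentheses say otherwise, which the mixed cases above and below exercise.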
+explain format='brief' (select * from t1 intersect select * from t1) except (select * from t2 union select * from t3);
+--sorted_result
+(select * from t1 intersect select * from t1) except (select * from t2 union select * from t3);
+
+# https://github.com/pingcap/tidb/issues/40279
+drop table if exists issue40279;
+CREATE TABLE `issue40279` (`a` char(155) NOT NULL DEFAULT 'on1unvbxp5sko6mbetn3ku26tuiyju7w3wc0olzto9ew7gsrx',`b` mediumint(9) NOT NULL DEFAULT '2525518',PRIMARY KEY (`b`,`a`) /*T![clustered_index] CLUSTERED */);
+insert into `issue40279` values ();
+( select `issue40279`.`b` as r0 , from_base64( `issue40279`.`a` ) as r1 from `issue40279` ) except ( select `issue40279`.`a` as r0 , elt(2, `issue40279`.`a` , `issue40279`.`a` ) as r1 from `issue40279`);
+drop table if exists t2;
+CREATE TABLE `t2` ( `a` varchar(20) CHARACTER SET gbk COLLATE gbk_chinese_ci DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
+insert into t2 values(0xCED2);
+(select elt(2,t2.a,t2.a) from t2) except (select 0xCED2 from t2);
+
+# TestCompareIssue38361
+drop table if exists t;
+create table t(a datetime, b bigint, c bigint);
+insert into t values(cast('2023-08-09 00:00:00' as datetime), 20230809, 20231310);
+select a > 20230809 from t;
+select a = 20230809 from t;
+select a < 20230810 from t;
+# 20231310 can't be converted to a valid datetime, so the two sides are compared as the real (numeric)
+# type instead, and the datetime is converted to a number like 'YYYYMMDDHHMMSS', which is bigger than 20231310.
+select a < 20231310 from t;
+select 20230809 < a from t;
+select 20230809 = a from t;
+select 20230810 > a from t;
+select 20231310 > a from t;
+# A constant datetime compared with a numeric constant should also be compared as the real data type.
+select cast('2023-08-09 00:00:00' as datetime) > 20230809 from t;
+select cast('2023-08-09 00:00:00' as datetime) = 20230809 from t;
+select cast('2023-08-09 00:00:00' as datetime) < 20230810 from t;
+select cast('2023-08-09 00:00:00' as datetime) < 20231310 from t;
+select 20230809 < cast('2023-08-09 00:00:00' as datetime) from t;
+select 20230809 = cast('2023-08-09 00:00:00' as datetime) from t;
+select 20230810 > cast('2023-08-09 00:00:00' as datetime) from t;
+select 20231310 > cast('2023-08-09 00:00:00' as datetime) from t;
+# A datetime column compared with a numeric column should be compared as the real data type.
+select a > b from t;
+select a = b from t;
+select a < b + 1 from t;
+select a < c from t;
+select b < a from t;
+select b = a from t;
+select b > a from t;
+select c > a from t;
+
+# TestLoadStats
+-- error 1064
+load stats;
+-- error 1064
+load stats ./xxx.json;
+
+# TestShow
+drop database if exists test_show;
+create database test_show;
+use test_show;
+show engines;
+drop table if exists t;
+create table t(a int primary key);
+show index in t;
+show index from t;
+--replace_column 2 0
+show master status;
+show create database test_show;
+show privileges;
+--replace_column 12 0
+show table status;
+
+drop database test_show;
+use executor__executor;
+
+# TestSelectBackslashN
+# Issue 3685.
+select \N;
+select "\N";
+
+drop table if exists test;
+create table test (`\N` int);
+insert into test values (1);
+select * from test;
+select \N from test;
+select (\N) from test;
+select `\N` from test;
+select (`\N`) from test;
+select '\N' from test;
+select ('\N') from test;
+
+# TestSelectNull
+# Issue #4053.
+select nUll;
+select (null);
+select null+NULL;
+
+# TestSelectStringLiteral
+# Issue #3686.
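+## Adjacent string literals are concatenated by the parser, e.g. `select 'a' ' ' 'string';` returns
+## 'a string'; several cases below exercise this together with the literal-derived column-name
+## trimming rules noted inline.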
+select 'abc';
+select (('abc'));
+select 'abc'+'def';
+## Below checks whether leading invalid chars are trimmed.
+select '\n';
+## Lowercased letter is a valid char.
+select '\t   col';
+## Uppercased letter is a valid char.
+select '\t   Col';
+## Chinese char is a valid char.
+select '\n\t   中文 col';
+## Punctuation is a valid char.
+select ' \r\n  .col';
+## Emoji is a valid char.
+select ' 😆col';
+## Below checks whether trailing invalid chars are preserved.
+select 'abc   ';
+select '  abc   123   ';
+## Issue #4239.
+select 'a' ' ' 'string';
+select 'a' " " "string";
+select 'string' 'string';
+select "ss" "a";
+select "ss" "a" "b";
+select "ss" "a" ' ' "b";
+select "ss" "a" ' ' "b" ' ' "d";
+
+# TestUpdateClustered
+drop table if exists a, b;
+create table a (k1 int, k2 int, v int);
+create table b (a int not null, k1 int, k2 int, v int, primary key(k1, k2) );
+insert into a values (1, 1, 1), (2, 2, 2);
+insert into b values (2, 2, 2, 2);
+update a left join b on a.k1 = b.k1 and a.k2 = b.k2 set a.v = 20, b.v = 100, a.k1 = a.k1 + 1, b.k1 = b.k1 + 1, a.k2 = a.k2 + 2, b.k2 = b.k2 + 2;
+select * from b;
+select * from a;
+admin check table a;
+admin check table b;
+drop table if exists a, b;
+create table a (k1 int, k2 int, v int);
+create table b (a int not null, k1 int, k2 int, v int, primary key(k1, k2) );
+insert into a values (1, 1, 1), (2, 2, 2);
+insert into b values (2, 2, 2, 2);
+update a left join b on a.k1 = b.k1 and a.k2 = b.k2 set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, b.k1 = b.k1 + 1, b.k2 = b.k2 + 2, a.v = 20, b.v = 100;
+select * from b;
+select * from a;
+admin check table a;
+admin check table b;
+drop table if exists a, b;
+create table a (k1 varchar(100), k2 varchar(100), v varchar(100));
+create table b (a varchar(100) not null, k1 varchar(100), k2 varchar(100), v varchar(100), primary key(k1(1), k2(1)) , key kk1(k1(1), v(1)));
+insert into a values ('11', '11', '11'), ('22', '22', '22');
+insert into b values ('22', '22', '22', '22');
+update a left join b on a.k1 = b.k1 and a.k2 = b.k2 set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, b.k1 = b.k1 + 1, b.k2 = b.k2 + 2, a.v = 20, b.v = 100;
+select * from b;
+select * from a;
+admin check table a;
+admin check table b;
+drop table if exists a, b;
+create table a (k1 varchar(100), k2 varchar(100), v varchar(100));
+create table b (a varchar(100) not null, k1 varchar(100), k2 varchar(100), v varchar(100), primary key(k1(1), k2(1)) , key kk1(k1(1), v(1)));
+insert into a values ('11', '11', '11'), ('22', '22', '22');
+insert into b values ('22', '22', '22', '22');
+update b right join a on a.k1 = b.k1 and a.k2 = b.k2 set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, b.k1 = b.k1 + 1, b.k2 = b.k2 + 2, a.v = 20, b.v = 100;
+select * from b;
+select * from a;
+admin check table a;
+admin check table b;
+drop table if exists a, b;
+create table a (k1 varchar(100), k2 varchar(100), v varchar(100));
+create table b (a varchar(100) not null, k1 varchar(100), k2 varchar(100), v varchar(100), primary key(k1(1), k2(1)) , key kk1(k1(1), v(1)));
+insert into a values ('11', '11', '11'), ('22', '22', '22');
+insert into b values ('22', '22', '22', '22');
+update b join a on a.k1 = b.k1 and a.k2 = b.k2 set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, b.k1 = b.k1 + 1, b.k2 = b.k2 + 2, a.v = 20, b.v = 100;
+select * from b;
+select * from a;
+admin check table a;
+admin check table b;
+drop table if exists a, b;
+create table a (k1 varchar(100), k2 varchar(100), v varchar(100));
+create table b (a varchar(100) not null, k1 varchar(100), k2 varchar(100), v varchar(100), primary key(k1(1), k2(1)) , key kk1(k1(1), v(1)));
+insert into a values ('11', '11', '11'), ('22', '22', '22');
+insert into b values ('22', '22', '22', '22');
+update a set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, a.v = 20 where exists (select 1 from b where a.k1 = b.k1 and a.k2 = b.k2);
+select * from b;
+select * from a;
+admin check table a;
+admin check table b;
+drop table if exists a, b;
+create table a (k1 int, k2 int, v int);
+create table b (a int not null, k1 int, k2 int, v int, primary key(k1, k2) clustered);
+insert into a values (1, 1, 1), (2, 2, 2);
+insert into b values (2, 2, 2, 2);
+update a left join b on a.k1 = b.k1 and a.k2 = b.k2 set a.v = 20, b.v = 100, a.k1 = a.k1 + 1, b.k1 = b.k1 + 1, a.k2 = a.k2 + 2, b.k2 = b.k2 + 2;
+select * from b;
+select * from a;
+admin check table a;
+admin check table b;
+drop table if exists a, b;
+create table a (k1 int, k2 int, v int);
+create table b (a int not null, k1 int, k2 int, v int, primary key(k1, k2) clustered);
+insert into a values (1, 1, 1), (2, 2, 2);
+insert into b values (2, 2, 2, 2);
+update a left join b on a.k1 = b.k1 and a.k2 = b.k2 set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, b.k1 = b.k1 + 1, b.k2 = b.k2 + 2, a.v = 20, b.v = 100;
+select * from b;
+select * from a;
+admin check table a;
+admin check table b;
+drop table if exists a, b;
+create table a (k1 varchar(100), k2 varchar(100), v varchar(100));
+create table b (a varchar(100) not null, k1 varchar(100), k2 varchar(100), v varchar(100), primary key(k1(1), k2(1)) clustered, key kk1(k1(1), v(1)));
+insert into a values ('11', '11', '11'), ('22', '22', '22');
+insert into b values ('22', '22', '22', '22');
+update a left join b on a.k1 = b.k1 and a.k2 = b.k2 set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, b.k1 = b.k1 + 1, b.k2 = b.k2 + 2, a.v = 20, b.v = 100;
+select * from b;
+select * from a;
+admin check table a;
+admin check table b;
+drop table if exists a, b;
+create table a (k1 varchar(100), k2 varchar(100), v varchar(100));
+create table b (a varchar(100) not null, k1 varchar(100), k2 varchar(100), v varchar(100), primary key(k1(1), k2(1)) clustered, key kk1(k1(1), v(1)));
+insert into a values ('11', '11', '11'), ('22', '22', '22');
+insert into b values ('22', '22', '22', '22');
+update b right join a on a.k1 = b.k1 and a.k2 = b.k2 set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, b.k1 = b.k1 + 1, b.k2 = b.k2 + 2, a.v = 20, b.v = 100;
+select * from b;
+select * from a;
+admin check table a;
+admin check table b;
+drop table if exists a, b;
+create table a (k1 varchar(100), k2 varchar(100), v varchar(100));
+create table b (a varchar(100) not null, k1 varchar(100), k2 varchar(100), v varchar(100), primary key(k1(1), k2(1)) clustered, key kk1(k1(1), v(1)));
+insert into a values ('11', '11', '11'), ('22', '22', '22');
+insert into b values ('22', '22', '22', '22');
+update b join a on a.k1 = b.k1 and a.k2 = b.k2 set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, b.k1 = b.k1 + 1, b.k2 = b.k2 + 2, a.v = 20, b.v = 100;
+select * from b;
+select * from a;
+admin check table a;
+admin check table b;
+drop table if exists a, b;
+create table a (k1 varchar(100), k2 varchar(100), v varchar(100));
+create table b (a varchar(100) not null, k1 varchar(100), k2 varchar(100), v varchar(100), primary key(k1(1), k2(1)) clustered, key kk1(k1(1), v(1)));
+insert into a values ('11', '11', '11'), ('22', '22', '22');
+insert into b values ('22', '22', '22', '22');
+update a set a.k1 = a.k1 + 1, a.k2 = a.k2 + 2, a.v = 20 where exists (select 1 from b where a.k1 = b.k1 and a.k2 = b.k2);
+select * from b;
+select * from a;
+admin check table a;
+admin check table b;
+
+# TestClusterIndexOuterJoinElimination
+set @@tidb_enable_clustered_index=On;
+drop table if exists t;
+create table t (a int, b int, c int, primary key(a,b));
+explain format = 'brief' select t1.a from t t1 left join t t2 on t1.a = t2.a and t1.b = t2.b;
+set @@tidb_enable_clustered_index=default;
+
+# TestExecutorBit
+drop table if exists t;
+create table t (c1 bit(2));
+insert into t values (0), (1), (2), (3);
+-- error 1406
+insert into t values (4);
+-- error 1406
+insert into t values ('a');
+select hex(c1) from t where c1 = 2;
+drop table if exists t;
+create table t (c1 bit(31));
+insert into t values (0x7fffffff);
+-- error 1406
+insert into t values (0x80000000);
+-- error 1406
+insert into t values (0xffffffff);
+insert into t values ('123');
+insert into t values ('1234');
+-- error 1064
+insert into t values ('12345);
+drop table if exists t;
+create table t (c1 bit(62));
+insert into t values ('12345678');
+drop table if exists t;
+create table t (c1 bit(61));
+-- error 1406
+insert into t values ('12345678');
+drop table if exists t;
+create table t (c1 bit(32));
+insert into t values (0x7fffffff);
+insert into t values (0xffffffff);
+-- error 1406
+insert into t values (0x1ffffffff);
+insert into t values ('1234');
+-- error 1406
+insert into t values ('12345');
+drop table if exists t;
+create table t (c1 bit(64));
+insert into t values (0xffffffffffffffff);
+insert into t values ('12345678');
+-- error 1366
+insert into t values ('123456789');
+drop table if exists t;
+create table t (c1 bit(64));
+insert into t values (0xffffffffffffffff);
+insert into t values ('12345678');
+select hex(c1) from t where c1;
+
+# TestTimestampTimeZone
+drop table if exists t, t1;
+create table t (ts timestamp);
+set time_zone = '+00:00';
+insert into t values ('2017-04-27 22:40:42');
+set time_zone = '+10:00';
+select * from t;
+set time_zone = '-6:00';
+select * from t;
+
+## For issue https://github.com/pingcap/tidb/issues/3467
+drop table if exists t1;
+CREATE TABLE t1 (
+  id bigint(20) NOT NULL AUTO_INCREMENT,
+  uid int(11) DEFAULT NULL,
+  datetime timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
+  ip varchar(128) DEFAULT NULL,
+PRIMARY KEY (id),
+  KEY i_datetime (datetime),
+  KEY i_userid (uid)
+);
+INSERT INTO t1 VALUES (123381351,1734,"2014-03-31 08:57:10","127.0.0.1");
+select datetime from t1;
+select datetime from t1 where datetime='2014-03-31 08:57:10';
+select * from t1 where datetime='2014-03-31 08:57:10';
+
+## For issue https://github.com/pingcap/tidb/issues/3485
+set time_zone = 'Asia/Shanghai';
+drop table if exists t1;
+CREATE TABLE t1 (
+  id bigint(20) NOT NULL AUTO_INCREMENT,
+  datetime timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
+  PRIMARY KEY (id)
+);
+INSERT INTO t1 VALUES (123381351,"2014-03-31 08:57:10");
+select * from t1 where datetime="2014-03-31 08:57:10";
+alter table t1 add key i_datetime (datetime);
+select * from t1 where datetime="2014-03-31 08:57:10";
+select * from t1;
+select datetime from t1 where datetime='2014-03-31 08:57:10';
+set time_zone=default;
+
+# TestInsertValuesWithSubQuery
+# this is from jira issue #5856
+drop table if exists t2;
+create table t2(a int, b int, c int);
+-- error 1054
+insert into t2 values (11, 8, (select not b));
+-- error 1064
+insert into t2 set a = 11, b = 8, c = (select b));
+insert into t2 values(1, 1, (select b from t2));
+select * from t2;
+insert into t2 set a = 1, b = 1, c = (select b+1 from t2);
+select * from t2;
+delete from t2;
+insert into t2 values(2, 4, a);
+select * from t2;
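+## A bare column reference in the VALUES/SET list reads the value already assigned to that column in
+## the current row (or its default), so `values(2, 4, a)` is expected to store c = 2 and
+## `set ..., c = b` below to store c = 5.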
+insert into t2 set a = 3, b = 5, c = b;
+select * from t2;
+
+## issue #30626
+drop table if exists t;
+create table t(a int, b int);
+## TODO: the insert should succeed and (81, 1) should end up in the table.
+-- error 1105
+insert into t values ( 81, ( select ( SELECT '1' AS `c0` WHERE '1' >= `subq_0`.`c0` ) as `c1` FROM ( SELECT '1' AS `c0` ) AS `subq_0` ) );
+-- error 1105
+insert into t set a = 81, b = (select ( SELECT '1' AS `c0` WHERE '1' >= `subq_0`.`c0` ) as `c1` FROM ( SELECT '1' AS `c0` ) AS `subq_0` );
+drop table if exists t2;
+
+# TestBitColumnIn
+# fix issue https://github.com/pingcap/tidb/issues/32871
+drop table if exists t;
+create table t (id bit(16), key id(id));
+insert into t values (65);
+select * from t where id not in (-1,2);
+-- error 1582
+select * from t where id in (-1, -2);
+
+# TestProjectionBitType
+drop table if exists t;
+drop table if exists t1;
+create table t(k1 int, v bit(34) DEFAULT b'111010101111001001100111101111111', primary key(k1) clustered);
+create table t1(k1 int, v bit(34) DEFAULT b'111010101111001001100111101111111', primary key(k1) nonclustered);
+insert into t(k1) select 1;
+insert into t1(k1) select 1;
+set @@tidb_enable_vectorized_expression = 0;
+(select k1, hex(v) from t where false) union(select k1, hex(v) from t for update);
+(select k1, hex(v) from t1 where false) union(select k1, hex(v) from t1 for update);
+set @@tidb_enable_vectorized_expression = 1;
+(select k1, hex(v) from t where false) union(select k1, hex(v) from t for update);
+(select k1, hex(v) from t1 where false) union(select k1, hex(v) from t1 for update);
+
+set @@tidb_enable_vectorized_expression = default;
+
+# TestIssue24933
+drop table if exists t;
+drop view if exists v;
+create table t(a int);
+insert into t values(1), (2), (3);
+create definer='root'@'localhost' view v as select count(*) as c1 from t;
+select * from v;
+drop view v;
+create definer='root'@'localhost' view v as select * from (select count(*) from t) s;
+select * from v order by 1;
+drop view v;
+create definer='root'@'localhost' view v as select * from (select avg(a) from t group by a) s;
+select * from v order by 1;
+drop view v;
+create definer='root'@'localhost' view v as select * from (select sum(a) from t group by a) s;
+select * from v order by 1;
+drop view v;
+create definer='root'@'localhost' view v as select * from (select group_concat(a) from t group by a) s;
+select * from v order by 1;
+drop view v;
+create definer='root'@'localhost' view v as select * from (select count(0) as c1 from t) s;
+select * from v order by 1;
+drop view v;
+create definer='root'@'localhost' view v as select * from (select count(*) as c1 from t) s;
+select * from v order by 1;
+drop view v;
+create definer='root'@'localhost' view v as select * from (select group_concat(a) as `concat(a)` from t group by a) s;
+select * from v order by 1;
+drop view v;
+create definer='root'@'localhost' view v as select * from (select a from t group by a) s;
+select * from v order by 1;
+-- error 1054
+SELECT `s`.`count(a)` FROM (SELECT COUNT(`a`) FROM `executor__executor`.`t`) AS `s`;
+drop view v;
+create definer='root'@'localhost' view v as select * from (select count(a) from t) s;
+select * from v;
+drop table if exists t;
+create table t(c1 int);
+insert into t values(111), (222), (333);
+drop view if exists v;
+create definer='root'@'localhost' view v as (select * from (select row_number() over (order by c1) from t) s);
+select * from v;
+drop view if exists v;
+create definer='root'@'localhost' view v as (select * from (select c1, row_number() over (order by c1) from t) s);
+select * from v;
+drop view if exists v;
+create definer='root'@'localhost' view v as (select * from (select c1 or 0 from t) s);
+select * from v;
+select `c1 or 0` from v;
+drop view v;
+
+# TestCTEWithIndexLookupJoinDeadLock
+drop table if exists t, t1, t2;
+create table t (a int(11) default null,b int(11) default null,key b (b),key ba (b));
+create table t1 (a int(11) default null,b int(11) default null,key idx_ab (a,b),key idx_a (a),key idx_b (b));
+create table t2 (a int(11) default null,b int(11) default null,key idx_ab (a,b),key idx_a (a),key idx_b (b));
+
+## It's easy to reproduce this problem within 30 executions of the IndexLookUpJoin, hence the repeated queries below.
+--disable_result_log
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+with cte as (with cte1 as (select * from t2 use index(idx_ab) where a > 1 and b > 1) select * from cte1) select /*+use_index(t1 idx_ab)*/ * from cte join t1 on t1.a=cte.a;
+--enable_result_log
+
+# TestAdminChecksumOfPartitionedTable
+DROP TABLE IF EXISTS admin_checksum_partition_test;
+CREATE TABLE admin_checksum_partition_test (a INT) PARTITION BY HASH(a) PARTITIONS 4;
+INSERT INTO admin_checksum_partition_test VALUES (1), (2);
+## The result is different with TiKV and unistore
+--disable_result_log
+ADMIN CHECKSUM TABLE admin_checksum_partition_test;
+--enable_result_log
+
+# TestSQLMode
+drop table if exists t;
+create table t (a tinyint not null);
+set sql_mode = 'STRICT_TRANS_TABLES';
+-- error 1364
+insert t values ();
+-- error 1264
+insert t values ('1000');
+create table if not exists tdouble (a double(3,2));
+-- error 1264
+insert tdouble values (10.23);
+set sql_mode = '';
+insert t values ();
+show warnings;
+-- error 1048
+insert t values (null);
+insert ignore t values (null);
+show warnings;
+insert t select null;
+show warnings;
+insert t values (1000);
+select * from t order by a;
+insert tdouble values (10.23);
+select * from tdouble;
+set sql_mode = 'STRICT_TRANS_TABLES';
+set @@global.sql_mode = '';
+
+connect (conn1, localhost, root,, executor__executor);
+drop table if exists t2;
+create table t2 (a varchar(3));
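+## sql_mode is session-scoped: conn1 was opened after the global mode was cleared, so its oversized
+## value is truncated with a warning, while the default connection (still strict) reports error 1406.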
+insert t2 values ('abcd');
+select * from t2;
+connection default;
+disconnect conn1;
+
+-- error 1406
+insert t2 values ('abcd');
+set sql_mode = default;
+set @@global.sql_mode = default;
+
+# TestTableScan
+use information_schema;
+## There must be these tables: information_schema, mysql, performance_schema and test.
+select count(*)>=4 from schemata;
+create database mytest;
+use information_schema;
+select * from schemata where schema_name = 'mysql';
+select * from schemata where schema_name like 'my%';
+select 1 from tables limit 1;
+use executor__executor;
+
+# TestAddDateBuiltinWithWarnings
+set @@sql_mode='NO_ZERO_DATE';
+select date_add('2001-01-00', interval -2 hour);
+show warnings;
+set @@sql_mode=default;
+
+# TestStrToDateBuiltinWithWarnings
+set @@sql_mode='NO_ZERO_DATE';
+drop table if exists t1;
+SELECT STR_TO_DATE('0000-1-01', '%Y-%m-%d');
+show warnings;
+SELECT CAST('4#,8?Q' AS DATE);
+show warnings;
+CREATE TABLE t1 (c1 INT, c2 TEXT);
+INSERT INTO t1 VALUES (1833458842, '0.3503490908550797');
+SELECT CAST(t1.c2 AS DATE) FROM t1;
+show warnings;
+set @@sql_mode=default;
+
+# TestUnsignedDecimalOverflow
+drop table if exists t;
+create table t(a decimal(10,2) unsigned);
+-- error 1264
+insert into t values (-1);
+-- error 1264
+insert into t values ("-1.1e-1");
+-- error 1264
+insert into t values (-1.1);
+insert into t values (-0);
+set sql_mode='';
+delete from t;
+insert into t values (-1);
+select a from t limit 1;
+set sql_mode=default;
+
+# TestDoSubquery
+drop table if exists t;
+create table t(a int);
+do 1 in (select * from t);
+insert into t values(1);
+do 1 in (select * from t);
+
+# TestCountDistinctJSON
+drop table if exists t;
+create table t(j JSON);
+insert into t values('2010');
+insert into t values('2011');
+insert into t values('2012');
+insert into t values('2010.000');
+insert into t values(cast(18446744073709551615 as JSON));
+insert into t values(cast(18446744073709551616.000000 as JSON));
+select count(distinct j) from t;
+
+# TestHashJoinJSON
+drop table if exists t;
+create table t(id int(11), j JSON, d DOUBLE);
+insert into t values(0, '2010', 2010);
+insert into t values(1, '2011', 2011);
+insert into t values(2, '2012', 2012);
+insert into t values(3, cast(18446744073709551615 as JSON), 18446744073709551616.000000);
+select /*+inl_hash_join(t2)*/ t1.id, t2.id from t t1 join t t2 on t1.j = t2.d;
+
+# TestPlanReplayerDumpTPCDS
+drop table if exists catalog_sales, store_sales, date_dim;
+create table catalog_sales
+(
+  cs_sold_date_sk int ,
+  cs_sold_time_sk int ,
+  cs_ship_date_sk int ,
+  cs_bill_customer_sk int ,
+  cs_bill_cdemo_sk int ,
+  cs_bill_hdemo_sk int ,
+  cs_bill_addr_sk int ,
+  cs_ship_customer_sk int ,
+  cs_ship_cdemo_sk int ,
+  cs_ship_hdemo_sk int ,
+  cs_ship_addr_sk int ,
+  cs_call_center_sk int ,
+  cs_catalog_page_sk int ,
+  cs_ship_mode_sk int ,
+  cs_warehouse_sk int ,
+  cs_item_sk int not null,
+  cs_promo_sk int ,
+  cs_order_number int not null,
+  cs_quantity int ,
+  cs_wholesale_cost decimal(7,2) ,
+  cs_list_price decimal(7,2) ,
+  cs_sales_price decimal(7,2) ,
+  cs_ext_discount_amt decimal(7,2) ,
+  cs_ext_sales_price decimal(7,2) ,
+  cs_ext_wholesale_cost decimal(7,2) ,
+  cs_ext_list_price decimal(7,2) ,
+  cs_ext_tax decimal(7,2) ,
+  cs_coupon_amt decimal(7,2) ,
+  cs_ext_ship_cost decimal(7,2) ,
+  cs_net_paid decimal(7,2) ,
+  cs_net_paid_inc_tax decimal(7,2) ,
+  cs_net_paid_inc_ship decimal(7,2) ,
+  cs_net_paid_inc_ship_tax decimal(7,2) ,
+  cs_net_profit decimal(7,2) ,
+  primary key (cs_item_sk, cs_order_number)
+);
+create table store_sales
+(
+  ss_sold_date_sk int ,
+  ss_sold_time_sk int ,
+  ss_item_sk int not null,
+  ss_customer_sk int ,
+  ss_cdemo_sk int ,
+  ss_hdemo_sk int ,
+  ss_addr_sk int ,
+  ss_store_sk int ,
+  ss_promo_sk int ,
+  ss_ticket_number int not null,
+  ss_quantity int ,
+  ss_wholesale_cost decimal(7,2) ,
+  ss_list_price decimal(7,2) ,
+  ss_sales_price decimal(7,2) ,
+  ss_ext_discount_amt decimal(7,2) ,
+  ss_ext_sales_price decimal(7,2) ,
+  ss_ext_wholesale_cost decimal(7,2) ,
+  ss_ext_list_price decimal(7,2) ,
+  ss_ext_tax decimal(7,2) ,
+  ss_coupon_amt decimal(7,2) ,
+  ss_net_paid decimal(7,2) ,
+  ss_net_paid_inc_tax decimal(7,2) ,
+  ss_net_profit decimal(7,2) ,
+  primary key (ss_item_sk, ss_ticket_number)
+);
+create table date_dim
+(
+  d_date_sk int not null,
+  d_date_id char(16) not null,
+  d_date date ,
+  d_month_seq int ,
+  d_week_seq int ,
+  d_quarter_seq int ,
+  d_year int ,
+  d_dow int ,
+  d_moy int ,
+  d_dom int ,
+  d_qoy int ,
+  d_fy_year int ,
+  d_fy_quarter_seq int ,
+  d_fy_week_seq int ,
+  d_day_name char(9) ,
+  d_quarter_name char(6) ,
+  d_holiday char(1) ,
+  d_weekend char(1) ,
+  d_following_holiday char(1) ,
+  d_first_dom int ,
+  d_last_dom int ,
+  d_same_day_ly int ,
+  d_same_day_lq int ,
+  d_current_day char(1) ,
+  d_current_week char(1) ,
+  d_current_month char(1) ,
+  d_current_quarter char(1) ,
+  d_current_year char(1) ,
+  primary key (d_date_sk)
+);
+--disable_result_log
+plan replayer dump explain with ssci as (
+select ss_customer_sk customer_sk
+  ,ss_item_sk item_sk
+from store_sales,date_dim
+where ss_sold_date_sk = d_date_sk
+  and d_month_seq between 1212 and 1212 + 11
+group by ss_customer_sk
+  ,ss_item_sk),
+csci as(
+  select cs_bill_customer_sk customer_sk
+  ,cs_item_sk item_sk
+from catalog_sales,date_dim
+where cs_sold_date_sk = d_date_sk
+  and d_month_seq between 1212 and 1212 + 11
+group by cs_bill_customer_sk
+  ,cs_item_sk)
+  select sum(case when ssci.customer_sk is not null and csci.customer_sk is null then 1 else 0 end) store_only
+  ,sum(case when ssci.customer_sk is null and csci.customer_sk is not null then 1 else 0 end) catalog_only
+  ,sum(case when ssci.customer_sk is not null and csci.customer_sk is not null then 1 else 0 end) store_and_catalog
+from ssci left join csci on (ssci.customer_sk=csci.customer_sk
+  and ssci.item_sk = csci.item_sk)
+UNION
+  select sum(case when ssci.customer_sk is not null and csci.customer_sk is null then 1 else 0 end) store_only
+  ,sum(case when ssci.customer_sk is null and csci.customer_sk is not null then 1 else 0 end) catalog_only
+  ,sum(case when ssci.customer_sk is not null and csci.customer_sk is not null then 1 else 0 end) store_and_catalog
+from ssci right join csci on (ssci.customer_sk=csci.customer_sk
+  and ssci.item_sk = csci.item_sk)
+limit 100;
+--enable_result_log
+
+# TestBDRRole
+admin show bdr role;
+admin set bdr role primary;
+admin show bdr role;
+admin set bdr role secondary;
+admin show bdr role;
+admin unset bdr role;
+admin show bdr role;
+--error 1064
+admin set bdr role test_err;
+admin show bdr role;
+admin unset bdr role;
+
+# TestCompileOutOfMemoryQuota
+# Test for issue: https://github.com/pingcap/tidb/issues/38322
+set global tidb_mem_oom_action='CANCEL';
+drop table if exists t, t1;
+create table t(a int, b int, index idx(a));
+create table t1(a int, c int, index idx(a));
+set tidb_mem_quota_query=10;
+-- replace_regex /conn=[-0-9]+/conn=/
+-- error 8175
+select t.a, t1.a from t use index(idx), t1 use index(idx) where t.a = t1.a;
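+## With a 10-byte quota, even building the executors for this index join exceeds the limit, so the
+## statement is cancelled with error 8175 (the memory-quota CANCEL action).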
+set global tidb_mem_oom_action=default;
+set tidb_mem_quota_query=default;
+
+# TestOOMPanicAction
+drop table if exists t, t1;
+create table t (a int primary key, b double);
+insert into t values (1,1);
+SET GLOBAL tidb_mem_oom_action='CANCEL';
+set @@tidb_mem_quota_query=1;
+-- replace_regex /conn=[-0-9]+/conn=/
+-- error 8175
+select sum(b) from t group by a;
+
+## Test insert from select oom panic.
+drop table if exists t,t1;
+create table t (a bigint);
+create table t1 (a bigint);
+set @@tidb_mem_quota_query=200;
+-- replace_regex /conn=[-0-9]+/conn=/
+-- error 8175
+insert into t1 values (1),(2),(3),(4),(5);
+-- replace_regex /conn=[-0-9]+/conn=/
+-- error 8175
+replace into t1 values (1),(2),(3),(4),(5);
+set @@tidb_mem_quota_query=10000;
+insert into t1 values (1),(2),(3),(4),(5);
+set @@tidb_mem_quota_query=10;
+-- replace_regex /conn=[-0-9]+/conn=/
+-- error 8175
+insert into t select a from t1 order by a desc;
+-- replace_regex /conn=[-0-9]+/conn=/
+-- error 8175
+replace into t select a from t1 order by a desc;
+set @@tidb_mem_quota_query=10000;
+insert into t values (1),(2),(3),(4),(5);
+## Set the memory quota to 244 to make this SQL panic during the DeleteExec
+## instead of the TableReaderExec.
+set @@tidb_mem_quota_query=244;
+-- replace_regex /conn=[-0-9]+/conn=/
+-- error 8175
+delete from t;
+set @@tidb_mem_quota_query=10000;
+delete from t1;
+insert into t1 values(1);
+insert into t values (1),(2),(3),(4),(5);
+set @@tidb_mem_quota_query=244;
+-- replace_regex /conn=[-0-9]+/conn=/
+-- error 8175
+delete t, t1 from t join t1 on t.a = t1.a;
+set @@tidb_mem_quota_query=100000;
+truncate table t;
+insert into t values(1),(2),(3);
+## Set the memory quota to make the SQL panic during UpdateExec instead
+## of TableReader.
+set @@tidb_mem_quota_query=244;
+-- replace_regex /conn=[-0-9]+/conn=/
+-- error 8175
+update t set a = 4;
+
+SET GLOBAL tidb_mem_oom_action = DEFAULT;
+set @@tidb_mem_quota_query=DEFAULT;
+
+# TestTrackAggMemoryUsage
+drop table if exists t;
+create table t(a int);
+insert into t values(1);
+set tidb_track_aggregate_memory_usage = off;
+
+--replace_column 5 6
+--replace_regex /[0-9]+ Bytes/ Bytes/ /[.0-9]+ KB/ KB/
+explain analyze select /*+ HASH_AGG() */ sum(a) from t;
+
+--replace_column 5 6
+--replace_regex /[0-9]+ Bytes/ Bytes/ /[.0-9]+ KB/ KB/
+explain analyze select /*+ STREAM_AGG() */ sum(a) from t;
+
+set tidb_track_aggregate_memory_usage = on;
+
+--replace_column 5 6
+--replace_regex /[0-9]+ Bytes/ Bytes/ /[.0-9]+ KB/ KB/
+explain analyze select /*+ HASH_AGG() */ sum(a) from t;
+
+--replace_column 5 6
+--replace_regex /[0-9]+ Bytes/ Bytes/ /[.0-9]+ KB/ KB/
+explain analyze select /*+ STREAM_AGG() */ sum(a) from t;
+
+set tidb_track_aggregate_memory_usage = default;
+
+# TestBind
+drop table if exists testbind;
+create table testbind(i int, s varchar(20));
+create index index_t on testbind(i,s);
+create global binding for select * from testbind using select * from testbind use index for join(index_t);
+--replace_column 5 6
+show global bindings where default_db='executor__executor';
+create session binding for select * from testbind using select * from testbind use index for join(index_t);
+--replace_column 5 6
+show session bindings where default_db='executor__executor';
+
+drop session binding for select * from testbind using select * from testbind use index for join(index_t);
+drop global binding for select * from testbind using select * from testbind use index for join(index_t);
+
+# TestIndexMergeRuntimeStats
+drop table if EXISTS t1;
+create table t1(id int primary key, a int, b int, c int, d int, index t1a(a), index t1b(b));
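+## The use_index_merge hint below reads row IDs from the primary key range (id < 2) and from index
+## t1a (a > 4), then unions the two handle sets before the table lookup; explain analyze surfaces
+## the per-stage runtime stats this test checks.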
t1(id int primary key, a int, b int, c int, d int, index t1a(a), index t1b(b)); +insert into t1 values(1,1,1,1,1),(2,2,2,2,2),(3,3,3,3,3),(4,4,4,4,4),(5,5,5,5,5); +--replace_regex /.*time:.*loops:.*cop_task:.*/.*time:.*loops:.*cop_task:.*/ /.*time:.*loops:.*index_task:{fetch_handle:.*, merge:.*}.*table_task:{num.*concurrency.*fetch_row.*wait_time.*}.*/.*time:.*loops:.*index_task:{fetch_handle:.*, merge:.*}.*table_task:{num.*concurrency.*fetch_row.*wait_time.*}.*/ /[0-9]+ Bytes/ Bytes/ /[.0-9]+ KB/ KB/ +explain analyze select /*+ use_index_merge(t1, primary, t1a) */ * from t1 where id < 2 or a > 4; +set @@tidb_enable_collect_execution_info=0; +select /*+ use_index_merge(t1, primary, t1a) */ * from t1 where id < 2 or a > 4 order by a; +set @@tidb_enable_collect_execution_info=default; + +# TestIndexLookupRuntimeStats +drop table if exists t1; +create table t1 (a int, b int, index(a)); +insert into t1 values (1,2),(2,3),(3,4); +--replace_regex /.*time:.*loops:.*index_task:.*table_task: {total_time.*num.*concurrency.*}.*/.*time:.*loops:.*index_task:.*table_task: {total_time.*num.*concurrency.*}.*/ /.*time:.*loops:.*cop_task:.*/.*time:.*loops:.*cop_task:.*/ /[.0-9]+ KB/ KB/ /[0-9]+ Bytes/ Bytes/ +explain analyze select * from t1 use index(a) where a > 1; + +# TestHashAggRuntimeStats +drop table if exists t1; +create table t1 (a int, b int); +insert into t1 values (1,2),(2,3),(3,4); +--replace_regex /.*time:.*loops:.*partial_worker:{wall_time:.*concurrency:.*task_num:.*tot_wait:.*tot_exec:.*tot_time:.*max:.*p95:.*}.*final_worker:{wall_time:.*concurrency:.*task_num:.*tot_wait:.*tot_exec:.*tot_time:.*max:.*p95:.*}.*/.*time:.*loops:.*partial_worker:{wall_time:.*concurrency:.*task_num:.*tot_wait:.*tot_exec:.*tot_time:.*max:.*p95:.*}.*final_worker:{wall_time:.*concurrency:.*task_num:.*tot_wait:.*tot_exec:.*tot_time:.*max:.*p95:.*}.*/ /time:.*loops:.*cop_task.*/time.*loops.*cop_task.*/ /tikv_task:.*/tikv_task:.*/ /[.0-9]+ KB/ KB/ /[.0-9]+ Bytes/ Bytes/ +explain analyze SELECT /*+ HASH_AGG() */ count(*) FROM t1 WHERE a < 10; + +# TestSelectForUpdate +set global tidb_txn_mode=''; +drop table if exists t, t1; +create table t (c1 int, c2 int, c3 int); +insert t values (11, 2, 3); +insert t values (12, 2, 3); +insert t values (13, 2, 3); +create table t1 (c1 int); +insert t1 values (11); + +connect (conn1, localhost, root,, executor__executor); +begin; +select * from t where c1=11 for update; + +connect (conn2, localhost, root,, executor__executor); +begin; +update t set c2=211 where c1=11; +commit; + +connection conn1; +--replace_regex /txnStartTS.*reason/ reason/ +--error 9007 +commit; + +begin; +select * from t where exists(select null from t1 where t1.c1=t.c1) for update; + +connection conn2; +begin; +update t set c2=211 where c1=12; +commit; + +connection conn1; +commit; + +begin; +select * from t where c1=11 for update; + +connection conn2; +begin; +update t set c2=22 where c1=12; +commit; + +connection conn1; +commit; + +set @@autocommit=1; +select * from t where c1=11 for update; + +connection conn2; +begin; +update t set c2=211 where c1=11; +commit; + +connection conn1; +commit; + +begin; +--sorted_result +select * from (select * from t for update) t join t1 for update; + +connection conn2; +begin; +update t1 set c1 = 13; +commit; + +connection conn1; +--replace_regex /txnStartTS.*reason/ reason/ +--error 9007 +commit; + +disconnect conn1; +disconnect conn2; +set global tidb_txn_mode=pessimistic; + +# TestSelectForUpdateOf +drop table if exists t, t1; +create table t (i int); +create table t1 (i 
int); +insert t values (1); +insert t1 values (1); +begin pessimistic; +select * from t, t1 where t.i = t1.i for update of t; + +connect (conn1, localhost, root,, executor__executor); +begin pessimistic; +select * from t1 for update; +--error 3572 +select * from t for update nowait; + +connection default; +rollback; + +connection conn1; +select * from t for update nowait; +rollback; +disconnect conn1; + +# TestForSelectScopeInUnion +set session tidb_txn_mode=''; +# A union B for update: the "for update" option belongs to the union statement, so +# it should work on both A and B. +drop table if exists t; +create table t(a int); +insert into t values (1); +begin; +select 1 as a union select a from t for update; + +connect (conn1, localhost, root,, executor__executor); +set session tidb_txn_mode=''; +update t set a = a + 1; + +connection default; +## As tk1 uses select 'for update', it should detect the conflict and fail. +--replace_regex /txnStartTS.*reason/ reason/ +--error 9007 +commit; + +begin; +--sorted_result +select 1 as a union select a from t limit 5 for update; +select 1 as a union select a from t order by a for update; + +connection conn1; +update t set a = a + 1; + +connection default; +--replace_regex /txnStartTS.*reason/ reason/ +--error 9007 +commit; + +disconnect conn1; +set session tidb_txn_mode=pessimistic; + +# TestAdminShowDDLJobsRowCount +# https://github.com/pingcap/tidb/issues/25968 +drop table if exists t; +create table t (id bigint key,b int); +split table t by (10),(20),(30); +insert into t values (0,0),(10,10),(20,20),(30,30); +alter table t add index idx1(b); +--replace_column 1 4 6 7 9 10 11 +admin show ddl jobs 1; + +insert into t values (1,0),(2,10),(3,20),(4,30); +alter table t add index idx2(b); +--replace_column 1 4 6 7 9 10 11 +admin show ddl jobs 1; + +# TestSummaryFailedUpdate +drop table if exists t; +create table t(a int, b int as(-a)); +insert into t(a) values(1), (3), (7); +SET GLOBAL tidb_mem_oom_action='CANCEL'; +set @@tidb_mem_quota_query=1; +--replace_regex /conn=[-0-9]+/conn=/ +--error 8175 +update t set t.a = t.a - 1 where t.a in (select a from t where a < 4); +set @@tidb_mem_quota_query=1000000000; +select stmt_type from information_schema.statements_summary where digest_text = 'update `t` set `t` . `a` = `t` . `a` - ? where `t` . `a` in ( select `a` from `t` where `a` < ? 
)'; + +set @@tidb_mem_quota_query=default; +set global tidb_mem_oom_action=default; + + +# TestTableLockPrivilege +drop table if exists t; +drop user if exists 'testuser'@'localhost'; +create table t(a int); +create user 'testuser'@'localhost'; + +connect (conn1, localhost, testuser,,); +--error 1044 +LOCK TABLE executor__executor.t WRITE; + +connection default; +GRANT LOCK TABLES ON executor__executor.* to 'testuser'@'localhost'; + +connection conn1; +--error 1142 +LOCK TABLE executor__executor.t WRITE; + +connection default; +REVOKE ALL ON executor__executor.* FROM 'testuser'@'localhost'; +GRANT SELECT ON executor__executor.* to 'testuser'@'localhost'; + +connection conn1; +--error 1044 +LOCK TABLE executor__executor.t WRITE; + +connection default; +GRANT LOCK TABLES ON executor__executor.* to 'testuser'@'localhost'; + +connection conn1; +LOCK TABLE executor__executor.t WRITE; + +connection default; +drop database if exists test2; +create database test2; +create table test2.t2(a int); + +connection conn1; +--error 1044 +LOCK TABLE executor__executor.t WRITE, test2.t2 WRITE; + +connection default; +GRANT LOCK TABLES ON test2.* to 'testuser'@'localhost'; + +connection conn1; +--error 1142 +LOCK TABLE executor__executor.t WRITE, test2.t2 WRITE; + +connection default; +GRANT SELECT ON test2.* to 'testuser'@'localhost'; + +connection conn1; +LOCK TABLE executor__executor.t WRITE, test2.t2 WRITE; + +connection default; +--replace_regex /server: .*session: .*/server: session: / +--error 8020 +LOCK TABLE executor__executor.t WRITE, test2.t2 WRITE; + +connection conn1; +unlock tables; + +disconnect conn1; +unlock tables; +drop user 'testuser'@'localhost'; diff --git a/tests/integrationtest/t/executor/insert.test b/tests/integrationtest/t/executor/insert.test new file mode 100644 index 0000000000000..07cf8bcd82c8a --- /dev/null +++ b/tests/integrationtest/t/executor/insert.test @@ -0,0 +1,1646 @@ +# TestClusterIndexInsertOnDuplicateKey +set tidb_enable_clustered_index = on; +drop table if exists t; +create table t(a char(20), b int, primary key(a)); +insert into t values('aa', 1), ('bb', 1); +-- error 1062 +insert into t values('aa', 2); +drop table t; +create table t(a char(20), b varchar(30), c varchar(10), primary key(a, b, c)); +insert into t values ('a', 'b', 'c'), ('b', 'a', 'c'); +-- error 1062 +insert into t values ('a', 'b', 'c'); +set tidb_enable_clustered_index = default; + +# TestPaddingCommonHandle +set tidb_enable_clustered_index = on; +drop table if exists t1; +create table t1(c1 decimal(6,4), primary key(c1)); +insert into t1 set c1 = 0.1; +insert into t1 set c1 = 0.1 on duplicate key update c1 = 1; +select * from t1; +set tidb_enable_clustered_index = default; + +# TestInsertReorgDelete +drop table if exists t1; +create table t1(c1 year); +insert into t1 set c1 = '2004'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 year); +insert into t1 set c1 = 2004; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 bit); +insert into t1 set c1 = 1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 smallint unsigned); +insert into t1 set c1 = 1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 int unsigned); +insert into t1 set c1 = 1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; 
+drop table if exists t1; +create table t1(c1 smallint); +insert into t1 set c1 = -1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 int); +insert into t1 set c1 = -1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 decimal(6,4)); +insert into t1 set c1 = '1.1'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 decimal); +insert into t1 set c1 = 1.1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 numeric); +insert into t1 set c1 = -1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 float); +insert into t1 set c1 = 1.2; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 double); +insert into t1 set c1 = 1.2; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 double); +insert into t1 set c1 = 1.3; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 real); +insert into t1 set c1 = 1.4; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 date); +insert into t1 set c1 = '2020-01-01'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 time); +insert into t1 set c1 = '20:00:00'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 datetime); +insert into t1 set c1 = '2020-01-01 22:22:22'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 timestamp); +insert into t1 set c1 = '2020-01-01 22:22:22'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 year); +insert into t1 set c1 = '2020'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 char(15)); +insert into t1 set c1 = 'test'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 varchar(15)); +insert into t1 set c1 = 'test'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 binary(3)); +insert into t1 set c1 = 'a'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 varbinary(3)); +insert into t1 set c1 = 'b'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 blob); +insert into t1 set c1 = 'test'; +alter table t1 add index idx(c1(3)); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 text); +insert into t1 set c1 = 'test'; +alter table t1 add index idx(c1(3)); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 enum('a', 'b')); +insert into t1 set c1 = 'a'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 set('a', 'b')); +insert into t1 set c1 = 'a,b'; 
+alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +# TestUpdateDuplicateKey +drop table if exists c; +create table c(i int,j int,k int,primary key(i,j,k)); +insert into c values(1,2,3); +insert into c values(1,2,4); +-- error 1062 +update c set i=1,j=2,k=4 where i=1 and j=2 and k=3; + +# TestIssue37187 +drop table if exists t1, t2; +create table t1 (a int(11) ,b varchar(100) ,primary key (a)); +create table t2 (c int(11) ,d varchar(100) ,primary key (c)); +prepare in1 from 'insert into t1 (a,b) select c,null from t2 t on duplicate key update b=t.d'; +execute in1; + +# TestInsertWrongValueForField +drop table if exists t1; +create table t1(a bigint); +-- error 1366 +insert into t1 values("asfasdfsajhlkhlksdaf"); +drop table if exists t1; +create table t1(a varchar(10)) charset ascii; +-- error 1366 +insert into t1 values('我'); +drop table if exists t1; +create table t1(a char(10) charset utf8); +insert into t1 values('我'); +alter table t1 add column b char(10) charset ascii as ((a)); +select * from t1; +drop table if exists t; +create table t (a year); +-- error 1264 +insert into t values(2156); +DROP TABLE IF EXISTS ts; +CREATE TABLE ts (id int DEFAULT NULL, time1 TIMESTAMP NULL DEFAULT NULL); +SET @@sql_mode=''; +INSERT INTO ts (id, time1) VALUES (1, TIMESTAMP '1018-12-23 00:00:00'); +SHOW WARNINGS; +SELECT * FROM ts ORDER BY id; +SET @@sql_mode='STRICT_TRANS_TABLES'; +-- error 1292 +INSERT INTO ts (id, time1) VALUES (2, TIMESTAMP '1018-12-24 00:00:00'); +DROP TABLE ts; +CREATE TABLE t0(c0 SMALLINT AUTO_INCREMENT PRIMARY KEY); +INSERT IGNORE INTO t0(c0) VALUES (194626268); +INSERT IGNORE INTO t0(c0) VALUES ('*'); +SHOW WARNINGS; +SET @@sql_mode=default; + +# TestInsertValueForCastDecimalField +drop table if exists t1; +create table t1(a decimal(15,2)); +insert into t1 values (1111111111111.01); +select * from t1; +select cast(a as decimal) from t1; + +# TestInsertForMultiValuedIndex +drop table if exists t1; +create table t1(a json, b int, unique index idx((cast(a as signed array)))); +insert into t1 values ('[1,11]', 1); +insert into t1 values ('[2, 22]', 2); +select * from t1; +-- error 1062 +insert into t1 values ('[2, 222]', 2); +replace into t1 values ('[1, 10]', 10); +select * from t1; +replace into t1 values ('[1, 2]', 1); +select * from t1; +replace into t1 values ('[1, 11]', 1); +insert into t1 values ('[2, 22]', 2); +select * from t1; +insert ignore into t1 values ('[1]', 2); +select * from t1; +insert ignore into t1 values ('[1, 2]', 2); +select * from t1; +insert into t1 values ('[2]', 2) on duplicate key update b = 10; +select * from t1; +-- error 1062 +insert into t1 values ('[2, 1]', 2) on duplicate key update a = '[1,2]'; +-- error 1062 +insert into t1 values ('[1,2]', 2) on duplicate key update a = '[1,2]'; +-- error 1062 +insert into t1 values ('[11, 22]', 2) on duplicate key update a = '[1,2]'; + +# TestInsertDateTimeWithTimeZone +set time_zone="+09:00"; +drop table if exists t; +create table t (id int, c1 datetime not null default CURRENT_TIMESTAMP); +set TIMESTAMP = 1234; +insert t (id) values (1); +select * from t; +drop table if exists t; +create table t (dt datetime); +set @@time_zone='+08:00'; +delete from t; +insert into t values ('2020-10-22'); +select * from t; +delete from t; +insert into t values ('2020-10-22-16'); +select * from t; +delete from t; +insert into t values ('2020-10-22 16-31'); +select * from t; +delete from t; +insert into t values ('2020-10-22 16:31-15'); +select * from t; +delete from t; +insert into t values 
('2020-10-22T16:31:15-10'); +select * from t; +delete from t; +insert into t values ('2020.10-22'); +select * from t; +delete from t; +insert into t values ('2020-10.22-16'); +select * from t; +delete from t; +insert into t values ('2020-10-22.16-31'); +select * from t; +delete from t; +insert into t values ('2020-10-22 16.31-15'); +select * from t; +delete from t; +insert into t values ('2020-10-22T16.31.15+14'); +select * from t; +delete from t; +insert into t values ('2020-10:22'); +select * from t; +delete from t; +insert into t values ('2020-10-22:16'); +select * from t; +delete from t; +insert into t values ('2020-10-22-16:31'); +select * from t; +delete from t; +insert into t values ('2020-10-22 16-31:15'); +select * from t; +delete from t; +insert into t values ('2020-10-22T16.31.15+09:30'); +select * from t; +delete from t; +insert into t values ('2020.10-22:16'); +select * from t; +delete from t; +insert into t values ('2020-10.22-16:31'); +select * from t; +delete from t; +insert into t values ('2020-10-22.16-31:15'); +select * from t; +delete from t; +insert into t values ('2020-10-22T16:31.15+09:30'); +select * from t; +drop table if exists t; +create table t (dt datetime, ts timestamp); +delete from t; +set @@time_zone='+08:00'; +insert into t values ('2020-10-22T16:53:40Z', '2020-10-22T16:53:40Z'); +set @@time_zone='+00:00'; +select * from t; +delete from t; +set @@time_zone='-08:00'; +insert into t values ('2020-10-22T16:53:40Z', '2020-10-22T16:53:40Z'); +set @@time_zone='+08:00'; +select * from t; +delete from t; +set @@time_zone='-03:00'; +insert into t values ('2020-10-22T16:53:40+03:00', '2020-10-22T16:53:40+03:00'); +set @@time_zone='+08:00'; +select * from t; +delete from t; +set @@time_zone='+08:00'; +insert into t values ('2020-10-22T16:53:40+08:00', '2020-10-22T16:53:40+08:00'); +set @@time_zone='+08:00'; +select * from t; +drop table if exists t; +create table t (ts timestamp); +insert into t values ('2020-10-22T12:00:00Z'), ('2020-10-22T13:00:00Z'), ('2020-10-22T14:00:00Z'); +select count(*) from t where ts > '2020-10-22T12:00:00Z'; +set @@time_zone='+08:00'; +drop table if exists t; +create table t (dt datetime(2), ts timestamp(2)); +insert into t values ('2020-10-27T14:39:10.10+00:00', '2020-10-27T14:39:10.10+00:00'); +select * from t; +drop table if exists t; +create table t (dt datetime(1), ts timestamp(1)); +insert into t values ('2020-10-27T14:39:10.3+0200', '2020-10-27T14:39:10.3+0200'); +select * from t; +drop table if exists t; +create table t (dt datetime(6), ts timestamp(6)); +insert into t values ('2020-10-27T14:39:10.3-02', '2020-10-27T14:39:10.3-02'); +select * from t; +drop table if exists t; +create table t (dt datetime(2), ts timestamp(2)); +insert into t values ('2020-10-27T14:39:10.10Z', '2020-10-27T14:39:10.10Z'); +select * from t; +set time_zone=default; +set timestamp=default; + +# TestInsertZeroYear +drop table if exists t1; +create table t1(a year(4)); +insert into t1 values(0000),(00),("0000"),("000"), ("00"), ("0"), (79), ("79"); +select * from t1; +drop table if exists t; +create table t(f_year year NOT NULL DEFAULT '0000')ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; +insert into t values(); +select * from t; +insert into t values('0000'); +select * from t; + +# TestAllowInvalidDates +drop table if exists t1, t2, t3, t4; +create table t1(d date); +create table t2(d datetime); +create table t3(d date); +create table t4(d datetime); +set sql_mode='STRICT_TRANS_TABLES,ALLOW_INVALID_DATES'; +insert into t1 values 
('0000-00-00'); +insert into t2 values ('0000-00-00'); +insert into t1 values ('2019-00-00'); +insert into t2 values ('2019-00-00'); +insert into t1 values ('2019-01-00'); +insert into t2 values ('2019-01-00'); +insert into t1 values ('2019-00-01'); +insert into t2 values ('2019-00-01'); +insert into t1 values ('2019-02-31'); +insert into t2 values ('2019-02-31'); +select year(d), month(d), day(d) from t1; +select year(d), month(d), day(d) from t2; +insert t3 select d from t1; +select year(d), month(d), day(d) from t3; +insert t4 select d from t2; +select year(d), month(d), day(d) from t4; + +truncate t1;truncate t2;truncate t3;truncate t4; +set sql_mode='ALLOW_INVALID_DATES'; +insert into t1 values ('0000-00-00'); +insert into t2 values ('0000-00-00'); +insert into t1 values ('2019-00-00'); +insert into t2 values ('2019-00-00'); +insert into t1 values ('2019-01-00'); +insert into t2 values ('2019-01-00'); +insert into t1 values ('2019-00-01'); +insert into t2 values ('2019-00-01'); +insert into t1 values ('2019-02-31'); +insert into t2 values ('2019-02-31'); +select year(d), month(d), day(d) from t1; +select year(d), month(d), day(d) from t2; +insert t3 select d from t1; +select year(d), month(d), day(d) from t3; +insert t4 select d from t2; +select year(d), month(d), day(d) from t4; +set sql_mode=default; + +# TestPartitionInsertOnDuplicate +drop table if exists t1, t2, t3; +create table t1 (a int,b int,primary key(a,b)) partition by range(a) (partition p0 values less than (100),partition p1 values less than (1000)); +insert into t1 set a=1, b=1; +insert into t1 set a=1,b=1 on duplicate key update a=1,b=1; +select * from t1; +create table t2 (a int,b int,primary key(a,b)) partition by hash(a) partitions 4; +insert into t2 set a=1,b=1; +insert into t2 set a=1,b=1 on duplicate key update a=1,b=1; +select * from t2; +CREATE TABLE t3 (a int, b int, c int, d int, e int, + PRIMARY KEY (a,b), + UNIQUE KEY (b,c,d) +) PARTITION BY RANGE ( b ) ( + PARTITION p0 VALUES LESS THAN (4), + PARTITION p1 VALUES LESS THAN (7), + PARTITION p2 VALUES LESS THAN (11) +); +insert into t3 values (1,2,3,4,5); +insert into t3 values (1,2,3,4,5),(6,2,3,4,6) on duplicate key update e = e + values(e); +select * from t3; + +# TestBit +drop table if exists t1; +create table t1 (a bit(3)); +-- error 1406 +insert into t1 values(-1); +-- error 1406 +insert into t1 values(9); +create table t64 (a bit(64)); +insert into t64 values(-1); +insert into t64 values(18446744073709551615); +-- error 1264 +insert into t64 values(18446744073709551616); + +# TestJiraIssue5366 +drop table if exists bug; +create table bug (a varchar(100)); +insert into bug select ifnull(JSON_UNQUOTE(JSON_EXTRACT('[{"amount":2000,"feeAmount":0,"merchantNo":"20190430140319679394","shareBizCode":"20160311162_SECOND"}]', '$[0].merchantNo')),'') merchant_no union SELECT '20180531557' merchant_no; +--sorted_result +select * from bug; + +# TestDMLCast +drop table if exists t; +create table t (a int, b double); +insert into t values (ifnull('',0)+0, 0); +insert into t values (0, ifnull('',0)+0); +select * from t; +-- error 1366 +insert into t values ('', 0); +-- error 1366 +insert into t values (0, ''); +-- error 1292 +update t set a = ''; +-- error 1292 +update t set b = ''; +update t set a = ifnull('',0)+0; +update t set b = ifnull('',0)+0; +delete from t where a = ''; +select * from t; + +# TestInsertFloatOverflow +drop table if exists t,t1; +create table t(col1 FLOAT, col2 FLOAT(10,2), col3 DOUBLE, col4 DOUBLE(10,2), col5 DECIMAL, col6 DECIMAL(10,2)); +-- 
error 1264 +insert into t values (-3.402823466E+68, -34028234.6611, -1.7976931348623157E+308, -17976921.34, -9999999999, -99999999.99); +-- error 1264 +insert into t values (-34028234.6611, -3.402823466E+68, -1.7976931348623157E+308, -17976921.34, -9999999999, -99999999.99); +create table t1(id1 float,id2 float); +insert ignore into t1 values(999999999999999999999999999999999999999,-999999999999999999999999999999999999999); +select @@warning_count; +select convert(id1,decimal(65)),convert(id2,decimal(65)) from t1; + +# TestTextTooLongError +# Fix https://github.com/pingcap/tidb/issues/32601 +set sql_mode = 'ONLY_FULL_GROUP_BY,STRICT_ALL_TABLES,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION'; +# The default value of max_allowed_packet is big enough, so tinytext and text can be tested correctly. +drop table if exists t1; +CREATE TABLE t1(c1 TINYTEXT CHARACTER SET utf8mb4); +-- error 1406 +INSERT INTO t1 (c1) VALUES(REPEAT(X'C385', 128)); +drop table if exists t1; +CREATE TABLE t1(c1 Text CHARACTER SET utf8mb4); +-- error 1406 +INSERT INTO t1 (c1) VALUES(REPEAT(X'C385', 32768)); +drop table if exists t1; +CREATE TABLE t1(c1 mediumtext); +-- error 1406 +INSERT INTO t1 (c1) VALUES(REPEAT(X'C385', 8777215)); +# For longtext, the default max_allowed_packet cannot accommodate a 4GB packet, so skip that case. +# With a non-strict sql_mode, no error is raised; the value is truncated instead. +set sql_mode = 'ONLY_FULL_GROUP_BY,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION'; +drop table if exists t1; +CREATE TABLE t1(c1 TINYTEXT CHARACTER SET utf8mb4); +INSERT INTO t1 (c1) VALUES(REPEAT(X'C385', 128)); +select length(c1) from t1; +drop table if exists t1; +CREATE TABLE t1(c1 Text CHARACTER SET utf8mb4); +INSERT INTO t1 (c1) VALUES(REPEAT(X'C385', 32768)); +select length(c1) from t1; +# For mediumtext or larger, the TiKV entry-size limit gives: ERROR 8025 (HY000): entry too large, the max entry size is 6291456, the size of data is 16777247; no need to test. 
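## Editorial sketch (not part of the original PR): with the non-strict sql_mode
## still in effect at this point, an oversized value is truncated to the column
## limit and reported as a warning rather than error 1406. The table name
## t_sketch is hypothetical and chosen to avoid clashing with the test tables.
select @@global.max_allowed_packet;
drop table if exists t_sketch;
create table t_sketch(c1 tinytext character set utf8mb4);
insert into t_sketch (c1) values (repeat('a', 300));
show warnings;
select length(c1) from t_sketch;
drop table t_sketch;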
+set sql_mode = default; + +# TestAutoRandomIDExplicit +set @@allow_auto_random_explicit_insert = true; +drop table if exists ar; +create table ar (id bigint key clustered auto_random, name char(10)); +insert into ar(id) values (1); +select id from ar; +select last_insert_id(); +delete from ar; +insert into ar(id) values (1), (2); +select id from ar; +select last_insert_id(); +delete from ar; +drop table ar; +set @@allow_auto_random_explicit_insert = default; + +# TestInsertErrorMsg +drop table if exists t, t1; +create table t (a int primary key, b datetime, d date); +-- error 1292 +insert into t values (1, '2019-02-11 30:00:00', '2019-01-31'); +CREATE TABLE t1 (a BINARY(16) PRIMARY KEY); +INSERT INTO t1 VALUES (AES_ENCRYPT('a','a')); +-- error 1062 +INSERT INTO t1 VALUES (AES_ENCRYPT('a','a')); +INSERT INTO t1 VALUES (AES_ENCRYPT('b','b')); +-- error 1062 +INSERT INTO t1 VALUES (AES_ENCRYPT('b','b')); +drop table if exists t1; +create table t1 (a bit primary key) engine=innodb; +insert into t1 values (b'0'); +-- error 1062 +insert into t1 values (b'0'); + +# TestIssue16366 +drop table if exists t; +create table t(c numeric primary key); +insert ignore into t values(null); +-- error 1062 +insert into t values(0); + +# TestClusterPrimaryTablePlainInsert +set tidb_enable_clustered_index = on; +drop table if exists t1pk; +create table t1pk(id varchar(200) primary key, v int); +insert into t1pk(id, v) values('abc', 1); +select * from t1pk; +set @@tidb_constraint_check_in_place=true; +-- error 1062 +insert into t1pk(id, v) values('abc', 2); +set @@tidb_constraint_check_in_place=false; +-- error 1062 +insert into t1pk(id, v) values('abc', 3); +select v, id from t1pk; +select id from t1pk where id = 'abc'; +select v, id from t1pk where id = 'abc'; +drop table if exists t3pk; +create table t3pk(id1 varchar(200), id2 varchar(200), v int, id3 int, primary key(id1, id2, id3)); +insert into t3pk(id1, id2, id3, v) values('abc', 'xyz', 100, 1); +select * from t3pk; +set @@tidb_constraint_check_in_place=true; +-- error 1062 +insert into t3pk(id1, id2, id3, v) values('abc', 'xyz', 100, 2); +set @@tidb_constraint_check_in_place=false; +-- error 1062 +insert into t3pk(id1, id2, id3, v) values('abc', 'xyz', 100, 3); +select v, id3, id2, id1 from t3pk; +select id3, id2, id1 from t3pk where id3 = 100 and id2 = 'xyz' and id1 = 'abc'; +select id3, id2, id1, v from t3pk where id3 = 100 and id2 = 'xyz' and id1 = 'abc'; +insert into t3pk(id1, id2, id3, v) values('abc', 'xyz', 101, 1); +insert into t3pk(id1, id2, id3, v) values('abc', 'zzz', 101, 1); +drop table if exists t1pku; +create table t1pku(id varchar(200) primary key, uk int, v int, unique key ukk(uk)); +insert into t1pku(id, uk, v) values('abc', 1, 2); +select * from t1pku where id = 'abc'; +-- error 1062 +insert into t1pku(id, uk, v) values('aaa', 1, 3); +select * from t1pku; +select * from t3pk where (id1, id2, id3) in (('abc', 'xyz', 100), ('abc', 'xyz', 101), ('abc', 'zzz', 101)); +set @@tidb_constraint_check_in_place=default; +set tidb_enable_clustered_index = default; + +# TestClusterPrimaryTableInsertIgnore +set tidb_enable_clustered_index = on; +drop table if exists it1pk; +create table it1pk(id varchar(200) primary key, v int); +insert into it1pk(id, v) values('abc', 1); +insert ignore into it1pk(id, v) values('abc', 2); +select * from it1pk where id = 'abc'; +drop table if exists it2pk; +create table it2pk(id1 varchar(200), id2 varchar(200), v int, primary key(id1, id2)); +insert into it2pk(id1, id2, v) values('abc', 'cba', 1); +select * from 
it2pk where id1 = 'abc' and id2 = 'cba'; +insert ignore into it2pk(id1, id2, v) values('abc', 'cba', 2); +select * from it2pk where id1 = 'abc' and id2 = 'cba'; +drop table if exists it1pku; +create table it1pku(id varchar(200) primary key, uk int, v int, unique key ukk(uk)); +insert into it1pku(id, uk, v) values('abc', 1, 2); +select * from it1pku where id = 'abc'; +insert ignore into it1pku(id, uk, v) values('aaa', 1, 3), ('bbb', 2, 1); +select * from it1pku; +set tidb_enable_clustered_index = default; + +# TestClusterPrimaryTableInsertDuplicate +set tidb_enable_clustered_index = on; +drop table if exists dt1pi; +create table dt1pi(id varchar(200) primary key, v int); +insert into dt1pi(id, v) values('abb', 1),('acc', 2); +insert into dt1pi(id, v) values('abb', 2) on duplicate key update v = v + 1; +select * from dt1pi; +insert into dt1pi(id, v) values('abb', 2) on duplicate key update v = v + 1, id = 'xxx'; +select * from dt1pi; +drop table if exists dt1piu; +create table dt1piu(id varchar(200) primary key, uk int, v int, unique key uuk(uk)); +insert into dt1piu(id, uk, v) values('abb', 1, 10),('acc', 2, 20); +insert into dt1piu(id, uk, v) values('xyz', 1, 100) on duplicate key update v = v + 1; +select * from dt1piu; +insert into dt1piu(id, uk, v) values('abb', 1, 2) on duplicate key update v = v + 1, id = 'xxx'; +select * from dt1piu; +drop table if exists ts1pk; +create table ts1pk(id1 timestamp, id2 timestamp, v int, primary key(id1, id2)); +insert into ts1pk (id1, id2, v) values('2018-01-01 11:11:11', '2018-01-01 11:11:11', 1); +select id1, id2, v from ts1pk; +insert into ts1pk (id1, id2, v) values('2018-01-01 11:11:11', '2018-01-01 11:11:11', 2) on duplicate key update v = values(v); +select id1, id2, v from ts1pk; +insert into ts1pk (id1, id2, v) values('2018-01-01 11:11:11', '2018-01-01 11:11:11', 2) on duplicate key update v = values(v), id1 = '2018-01-01 11:11:12'; +select id1, id2, v from ts1pk; +set tidb_enable_clustered_index = default; + +# TestClusterPrimaryKeyForIndexScan +set tidb_enable_clustered_index = on; +drop table if exists pkt1; +CREATE TABLE pkt1 (a varchar(255), b int, index idx(b), primary key(a,b)); +insert into pkt1 values ('aaa',1); +select b from pkt1 where b = 1; +drop table if exists pkt2; +CREATE TABLE pkt2 (a varchar(255), b int, unique index idx(b), primary key(a,b)); +insert into pkt2 values ('aaa',1); +select b from pkt2 where b = 1; +drop table if exists issue_18232; +create table issue_18232 (a int, b int, c int, d int, primary key (a, b), index idx(c)); +select a from issue_18232 use index (idx); +select b from issue_18232 use index (idx); +select a,b from issue_18232 use index (idx); +select c from issue_18232 use index (idx); +select a,c from issue_18232 use index (idx); +select b,c from issue_18232 use index (idx); +select a,b,c from issue_18232 use index (idx); +select d from issue_18232 use index (idx); +select a,d from issue_18232 use index (idx); +select b,d from issue_18232 use index (idx); +select a,b,d from issue_18232 use index (idx); +select c,d from issue_18232 use index (idx); +select a,c,d from issue_18232 use index (idx); +select b,c,d from issue_18232 use index (idx); +select a,b,c,d from issue_18232 use index (idx); +set tidb_enable_clustered_index = default; + +# TestIssue20768 +drop table if exists t1, t2; +create table t1(a year, primary key(a)); +insert ignore into t1 values(null); +create table t2(a int, key(a)); +insert into t2 values(0); +select /*+ hash_join(t1) */ * from t1 join t2 on t1.a = t2.a; +select /*+ 
inl_join(t1) */ * from t1 join t2 on t1.a = t2.a; +select /*+ inl_join(t2) */ * from t1 join t2 on t1.a = t2.a; +select /*+ inl_hash_join(t1) */ * from t1 join t2 on t1.a = t2.a; +select /*+ inl_merge_join(t1) */ * from t1 join t2 on t1.a = t2.a; +select /*+ merge_join(t1) */ * from t1 join t2 on t1.a = t2.a; + +# TestIssue10402 +drop table if exists vctt; +create table vctt (v varchar(4), c char(4)); +insert into vctt values ('ab ', 'ab '); +select * from vctt; +delete from vctt; +insert into vctt values ('ab\n\n\n', 'ab\n\n\n'), ('ab\t\t\t', 'ab\t\t\t'), ('ab ', 'ab '), ('ab\r\r\r', 'ab\r\r\r'); +show warnings; +select * from vctt; +select length(v), length(c) from vctt; + +# TestDuplicatedEntryErr +# See https://github.com/pingcap/tidb/issues/24582 +drop table if exists t1; +create table t1(a int, b varchar(20), primary key(a,b(3)) clustered); +insert into t1 values(1,'aaaaa'); +-- error 1062 +insert into t1 values(1,'aaaaa'); +-- error 1062 +insert into t1 select 1, 'aaa'; +insert into t1 select 1, 'bb'; +-- error 1062 +insert into t1 select 1, 'bb'; + +# TestBinaryLiteralInsertToEnum +drop table if exists bintest; +create table bintest (h enum(0x61, '1', 'b')) character set utf8mb4; +insert into bintest(h) values(0x61); +select * from bintest; + +# TestBinaryLiteralInsertToSet +drop table if exists bintest; +create table bintest (h set(0x61, '1', 'b')) character set utf8mb4; +insert into bintest(h) values(0x61); +select * from bintest; + +# TestGlobalTempTableAutoInc +drop table if exists temp_test; +create global temporary table temp_test(id int primary key auto_increment) on commit delete rows; + +## Data is cleared after transaction auto commits. +insert into temp_test(id) values(0); +select * from temp_test; + +## Data is not cleared inside a transaction. +begin; +insert into temp_test(id) values(0); +select * from temp_test; +commit; + +## AutoID allocator is cleared. +begin; +insert into temp_test(id) values(0); +select * from temp_test; +## Test whether auto-inc is incremental +insert into temp_test(id) values(0); +select id from temp_test order by id; +commit; + +## multi-value insert +begin; +insert into temp_test(id) values(0), (0); +select id from temp_test order by id; +insert into temp_test(id) values(0), (0); +select id from temp_test order by id; +commit; + +## rebase +begin; +insert into temp_test(id) values(10); +insert into temp_test(id) values(0); +select id from temp_test order by id; +insert into temp_test(id) values(20), (30); +insert into temp_test(id) values(0), (0); +select id from temp_test order by id; +commit; +drop table if exists temp_test; + +# TestGlobalTempTableRowID +drop table if exists temp_test; +create global temporary table temp_test(id int) on commit delete rows; + +## Data is cleared after transaction auto commits. +insert into temp_test(id) values(0); +select _tidb_rowid from temp_test; + +## Data is not cleared inside a transaction. +begin; +insert into temp_test(id) values(0); +select _tidb_rowid from temp_test; +commit; + +## AutoID allocator is cleared. 
+begin; +insert into temp_test(id) values(0); +select _tidb_rowid from temp_test; +## Test whether row id is incremental +insert into temp_test(id) values(0); +select _tidb_rowid from temp_test order by _tidb_rowid; +commit; + +## multi-value insert +begin; +insert into temp_test(id) values(0), (0); +select _tidb_rowid from temp_test order by _tidb_rowid; +insert into temp_test(id) values(0), (0); +select _tidb_rowid from temp_test order by _tidb_rowid; +commit; +drop table if exists temp_test; + +# TestIssue26762 +drop table if exists t1; +create table t1(c1 date); +-- error 1292 +insert into t1 values('2020-02-31'); +set @@sql_mode='ALLOW_INVALID_DATES'; +insert into t1 values('2020-02-31'); +select * from t1; +set @@sql_mode='STRICT_TRANS_TABLES'; +-- error 1292 +insert into t1 values('2020-02-31'); +set sql_mode=default; + +# TestStringtoDecimal +drop table if exists t; +create table t (id decimal(10)); +-- error 1366 +insert into t values('1sdf'); +-- error 1366 +insert into t values('1edf'); +-- error 1366 +insert into t values('12Ea'); +-- error 1366 +insert into t values('1E'); +-- error 1366 +insert into t values('1e'); +-- error 1366 +insert into t values('1.2A'); +-- error 1366 +insert into t values('1.2.3.4.5'); +-- error 1366 +insert into t values('1.2.'); +-- error 1366 +insert into t values('1,999.00'); +## TODO: MySQL8.0 reports Note 1265 Data truncated for column 'id' at row 1 +insert into t values('12e-3'); +show warnings; +select id from t; +drop table if exists t; + +# TestReplaceAllocatingAutoID +# https://github.com/pingcap/tidb/issues/29483 +SET sql_mode='NO_ENGINE_SUBSTITUTION'; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a tinyint not null auto_increment primary key, b char(20)); +INSERT INTO t1 VALUES (127,'maxvalue'); +## Note that this error is different from MySQL's duplicated primary key error +-- error 1467 +REPLACE INTO t1 VALUES (0,'newmaxvalue'); +set sql_mode=default; + +# TestInsertIntoSelectError +DROP TABLE IF EXISTS t1; +CREATE TABLE t1(a INT) ENGINE = InnoDB; +INSERT IGNORE into t1(SELECT SLEEP(NULL)); +SHOW WARNINGS; +INSERT IGNORE into t1(SELECT SLEEP(-1)); +SHOW WARNINGS; +INSERT IGNORE into t1(SELECT SLEEP(1)); +SELECT * FROM t1; +DROP TABLE t1; + +# TestIssue32213 +drop table if exists t1; +create table t1(c1 float); +insert into t1 values(999.99); +select cast(t1.c1 as decimal(4, 1)) from t1; +select cast(t1.c1 as decimal(5, 1)) from t1; +drop table if exists t1; +create table t1(c1 decimal(6, 4)); +insert into t1 values(99.9999); +select cast(t1.c1 as decimal(5, 3)) from t1; +select cast(t1.c1 as decimal(6, 3)) from t1; + +# TestInsertBigScientificNotation +# https://github.com/pingcap/tidb/issues/47787 +drop table if exists t1; +create table t1(id int, a int); +set @@SQL_MODE='STRICT_TRANS_TABLES'; +-- error 1264 +insert into t1 values(1, '1e100'); +-- error 1264 +insert into t1 values(2, '-1e100'); +select id, a from t1; +set @@SQL_MODE=''; +insert into t1 values(1, '1e100'); +show warnings; +insert into t1 values(2, '-1e100'); +show warnings; +select id, a from t1 order by id asc; +set sql_mode=default; + +# TestUnsignedDecimalFloatInsertNegative +# https://github.com/pingcap/tidb/issues/47945 +drop table if exists tf; +create table tf(a float(1, 0) unsigned); +-- error 1264 +insert into tf values('-100'); +set @@sql_mode=''; +insert into tf values('-100'); +select * from tf; +set @@sql_mode=default; + +# TestIssue17745 +drop table if exists tt1; +create table tt1 (c1 decimal(64)); +-- error 1264 +insert into tt1 
values(89000000000000000000000000000000000000000000000000000000000000000000000000000000000000000); +-- error 1264 +insert into tt1 values(89123456789012345678901234567890123456789012345678901234567890123456789012345678900000000); +insert ignore into tt1 values(89123456789012345678901234567890123456789012345678901234567890123456789012345678900000000); +show warnings; +select c1 from tt1; +-- error 1264 +update tt1 set c1 = 89123456789012345678901234567890123456789012345678901234567890123456789012345678900000000; +drop table if exists tt1; +-- error 1367 +insert into tt1 values(4556414e723532); +select 888888888888888888888888888888888888888888888888888888888888888888888888888888888888; +show warnings; + +# TestIssue38950 +drop table if exists t; +create table t (id smallint auto_increment primary key); +alter table t add column c1 int default 1; +--enable_info +insert ignore into t(id) values (194626268); +--disable_info +select * from t; +--enable_info +insert ignore into t(id) values ('*') on duplicate key update c1 = 2; +--disable_info +select * from t; + +# TestInsertIgnoreOnDup +drop table if exists t; +create table t (i int not null primary key, j int unique key); +--enable_info +insert into t values (1, 1), (2, 2); +insert ignore into t values(1, 1) on duplicate key update i = 2; +--disable_info +select * from t; +--enable_info +insert ignore into t values(1, 1) on duplicate key update j = 2; +--disable_info +select * from t; + +drop table if exists t2; +create table t2(`col_25` set('Alice','Bob','Charlie','David') NOT NULL,`col_26` date NOT NULL DEFAULT '2016-04-15', PRIMARY KEY (`col_26`) clustered, UNIQUE KEY `idx_9` (`col_25`,`col_26`),UNIQUE KEY `idx_10` (`col_25`)); +insert into t2(col_25, col_26) values('Bob', '1989-03-23'),('Alice', '2023-11-24'), ('Charlie', '2023-12-05'); +insert ignore into t2 (col_25,col_26) values ( 'Bob','1977-11-23' ) on duplicate key update col_25 = 'Alice', col_26 = '2036-12-13'; +show warnings; +select * from t2; + +drop table if exists t4; +create table t4(id int primary key clustered, k int, v int, unique key uk1(k)); +insert into t4 values (1, 10, 100), (3, 30, 300); +insert ignore into t4 (id, k, v) values(1, 0, 0) on duplicate key update id = 2, k = 30; +show warnings; +select * from t4; + +drop table if exists t5; +create table t5(k1 varchar(100), k2 varchar(100), uk1 int, v int, primary key(k1, k2) clustered, unique key ukk1(uk1), unique key ukk2(v)); +insert into t5(k1, k2, uk1, v) values('1', '1', 1, '100'), ('1', '3', 2, '200'); +update ignore t5 set k2 = '2', uk1 = 2 where k1 = '1' and k2 = '1'; +show warnings; +select * from t5; + +drop table if exists t6; +create table t6 (a int, b int, c int, primary key(a, b) clustered, unique key idx_14(b), unique key idx_15(b), unique key idx_16(a, b)); +insert into t6 select 10, 10, 20; +insert ignore into t6 set a = 20, b = 10 on duplicate key update a = 100; +select * from t6; +insert ignore into t6 set a = 200, b= 10 on duplicate key update c = 1000; +select * from t6; + +# TestInsertAutoInc +drop table if exists insert_autoinc_test; +create table insert_autoinc_test (id int primary key auto_increment, c1 int); +insert into insert_autoinc_test(c1) values (1), (2); +begin; +select * from insert_autoinc_test; +commit; +begin; +insert into insert_autoinc_test(id, c1) values (5,5); +insert into insert_autoinc_test(c1) values (6); +commit; +begin; +select * from insert_autoinc_test; +commit; +begin; +insert into insert_autoinc_test(id, c1) values (3,3); +commit; +begin; +select * from 
insert_autoinc_test; +commit; +begin; +insert into insert_autoinc_test(c1) values (7); +commit; +begin; +select * from insert_autoinc_test; +commit; +drop table if exists insert_autoinc_test; + +## issue-962 +create table insert_autoinc_test (id int primary key auto_increment, c1 int); +insert into insert_autoinc_test(id, c1) values (0.3, 1); +select * from insert_autoinc_test; +insert into insert_autoinc_test(id, c1) values (-0.3, 2); +select * from insert_autoinc_test; +insert into insert_autoinc_test(id, c1) values (-3.3, 3); +select * from insert_autoinc_test; +insert into insert_autoinc_test(id, c1) values (4.3, 4); +select * from insert_autoinc_test; +insert into insert_autoinc_test(c1) values (5); +select * from insert_autoinc_test; +insert into insert_autoinc_test(id, c1) values (null, 6); +select * from insert_autoinc_test; +drop table if exists insert_autoinc_test; + +## SQL_MODE=NO_AUTO_VALUE_ON_ZERO +create table insert_autoinc_test (id int primary key auto_increment, c1 int); +insert into insert_autoinc_test(id, c1) values (5, 1); +select * from insert_autoinc_test; +insert into insert_autoinc_test(id, c1) values (0, 2); +select * from insert_autoinc_test; +insert into insert_autoinc_test(id, c1) values (0, 3); +select * from insert_autoinc_test; +set SQL_MODE=NO_AUTO_VALUE_ON_ZERO; +insert into insert_autoinc_test(id, c1) values (0, 4); +select * from insert_autoinc_test; +-- error 1062 +insert into insert_autoinc_test(id, c1) values (0, 5); +insert into insert_autoinc_test(c1) values (6); +select * from insert_autoinc_test; +insert into insert_autoinc_test(id, c1) values (null, 7); +select * from insert_autoinc_test; +set SQL_MODE=''; +insert into insert_autoinc_test(id, c1) values (0, 8); +select * from insert_autoinc_test; +insert into insert_autoinc_test(id, c1) values (null, 9); +select * from insert_autoinc_test; +set sql_mode = default; + +# TestInsert +drop table if exists insert_test; +create table insert_test (id int PRIMARY KEY AUTO_INCREMENT, c1 int, c2 int, c3 int default 1); +--enable_info +insert insert_test (c1) values (1),(2),(NULL); +--disable_info +begin; +-- error 1136 +insert insert_test (c1) values (); +rollback; +begin; +-- error 1136 +insert insert_test (c1, c2) values (1,2),(1); +rollback; +begin; +-- error 1054 +insert insert_test (xxx) values (3); +rollback; +begin; +-- error 1146 +insert insert_test_xxx (c1) values (); +rollback; +--enable_info +insert insert_test set c1 = 3; +--disable_info +begin; +-- error 1110 +insert insert_test set c1 = 4, c1 = 5; +rollback; +begin; +-- error 1054 +insert insert_test set xxx = 6; +rollback; + +drop table if exists insert_test_1, insert_test_2; +create table insert_test_1 (id int, c1 int); +--enable_info +insert insert_test_1 select id, c1 from insert_test; +--disable_info +create table insert_test_2 (id int, c1 int); +--enable_info +insert insert_test_1 select id, c1 from insert_test union select id * 10, c1 * 10 from insert_test; +--disable_info +begin; +-- error 1136 +insert insert_test_1 select c1 from insert_test; +rollback; +begin; +-- error 1136 +insert insert_test_1 values(default, default, default, default, default); +rollback; +select * from insert_test where id = 1; +--enable_info +insert into insert_test (id, c3) values (1, 2) on duplicate key update id=values(id), c2=10; +--disable_info +select * from insert_test where id = 1; +--enable_info +insert into insert_test (id, c2) values (1, 1) on duplicate key update insert_test.c2=10; +--disable_info +-- error 1054 +insert into insert_test (id, c2) 
values(1, 1) on duplicate key update t.c2 = 10; +--enable_info +INSERT INTO insert_test (id, c3) VALUES (1, 2) ON DUPLICATE KEY UPDATE c3=values(c3)+c3+3; +--disable_info +select * from insert_test where id = 1; +--enable_info +INSERT IGNORE INTO insert_test (id, c3) VALUES (1, 2) ON DUPLICATE KEY UPDATE c3=values(c3)+c3+3; +--disable_info +select * from insert_test where id = 1; + +drop table if exists insert_err; +create table insert_err (id int, c1 varchar(8)); +-- error 1406 +insert insert_err values (1, 'abcdabcdabcd'); +insert insert_err values (1, '你好,世界'); +create table TEST1 (ID INT NOT NULL, VALUE INT DEFAULT NULL, PRIMARY KEY (ID)); +--enable_info +INSERT INTO TEST1(id,value) VALUE(3,3) on DUPLICATE KEY UPDATE VALUE=4; +--disable_info + +drop table if exists t; +create table t (id int); +insert into t values(1); +update t t1 set id = (select count(*) + 1 from t t2 where t1.id = t2.id); +select * from t; + +## issue 3235 +drop table if exists t; +create table t(c decimal(5, 5)); +insert into t value(0); +-- error 1264 +insert into t value(1); + +drop table if exists t; +create table t(c binary(255)); +insert into t value(1); +select length(c) from t; + +drop table if exists t; +create table t(c varbinary(255)); +insert into t value(1); +select length(c) from t; + +## issue 3509 +drop table if exists t; +create table t(c int); +set @@time_zone = '+08:00'; +insert into t value(Unix_timestamp('2002-10-27 01:00')); +select * from t; +set @@time_zone = default; + +## issue 3832 +drop table if exists t1; +create table t1 (b char(0)); +insert into t1 values (""); + +## issue 3895 +DROP TABLE IF EXISTS t; +CREATE TABLE t(a DECIMAL(4,2)); +INSERT INTO t VALUES (1.000001); +SHOW WARNINGS; +INSERT INTO t VALUES (1.000000); +SHOW WARNINGS; + +## issue 4653 +DROP TABLE IF EXISTS t; +CREATE TABLE t(a datetime); +-- error 1292 +INSERT INTO t VALUES('2017-00-00'); +set sql_mode = ''; +INSERT INTO t VALUES('2017-00-00'); +SELECT * FROM t; +set sql_mode = 'strict_all_tables'; +SELECT * FROM t; +set sql_mode = default; + +drop table if exists test; +CREATE TABLE test(id int(10) UNSIGNED NOT NULL AUTO_INCREMENT, p int(10) UNSIGNED NOT NULL, PRIMARY KEY(p), KEY(id)); +insert into test(p) value(1); +select * from test; +select * from test use index (id) where id = 1; +insert into test values(NULL, 2); +select * from test use index (id) where id = 2; +insert into test values(2, 3); +select * from test use index (id) where id = 2; + +## issue 6360 +drop table if exists t; +create table t(a bigint unsigned); +set @@sql_mode = 'strict_all_tables'; +-- error 1264 +insert into t value (-1); +set @@sql_mode = ''; +insert into t value (-1); +show warnings; +insert into t select -1; +show warnings; +insert into t select cast(-1 as unsigned); +insert into t value (-1.111); +show warnings; +insert into t value ('-1.111'); +show warnings; +update t set a = -1 limit 1; +show warnings; +select * from t; +set @@sql_mode = default; + +## issue 6424 & issue 20207 +drop table if exists t; +create table t(a time(6)); +insert into t value('20070219173709.055870'), ('20070219173709.055'), ('20070219173709.055870123'); +select * from t; +truncate table t; +insert into t value(20070219173709.055870), (20070219173709.055), (20070219173709.055870123); +select * from t; +-- error 1292 +insert into t value(-20070219173709.055870); + +drop table if exists t; +set @@sql_mode=''; +create table t(a float unsigned, b double unsigned); +insert into t value(-1.1, -1.1), (-2.1, -2.1), (0, 0), (1.1, 1.1); +show warnings; +select * from 
t; +set @@sql_mode=default; + +## issue 7061 +drop table if exists t; +create table t(a int default 1, b int default 2); +insert into t values(default, default); +select * from t; +truncate table t; +insert into t values(default(b), default(a)); +select * from t; +truncate table t; +insert into t (b) values(default); +select * from t; +truncate table t; +insert into t (b) values(default(a)); +select * from t; + +drop view if exists v; +create view v as select * from t; +-- error 1105 +insert into v values(1,2); +-- error 1105 +replace into v values(1,2); +drop view v; + +drop sequence if exists seq; +create sequence seq; +-- error 1105 +insert into seq values(); +-- error 1105 +replace into seq values(); +drop sequence seq; + +## issue 22851 +drop table if exists t; +create table t(name varchar(255), b int, c int, primary key(name(2))); +insert into t(name, b) values("cha", 3); +-- error 1062 +insert into t(name, b) values("chb", 3); +insert into t(name, b) values("测试", 3); +-- error 1062 +insert into t(name, b) values("测试", 3); + +# TestInsertOnDup +drop table if exists t; +create table t (i int unique key); +--enable_info +insert into t values (1),(2); +--disable_info +select * from t; +--enable_info +insert into t values (1), (2) on duplicate key update i = values(i); +--disable_info +select * from t; +--enable_info +insert into t values (2), (3) on duplicate key update i = 3; +--disable_info +select * from t; + +drop table if exists t; +create table t (i int primary key, j int unique key); +--enable_info +insert into t values (-1, 1); +--disable_info +select * from t; +--enable_info +insert into t values (1, 1) on duplicate key update j = values(j); +--disable_info +select * from t; + +drop table if exists test; +create table test (i int primary key, j int unique); +begin; +insert into test values (1,1); +insert into test values (2,1) on duplicate key update i = -i, j = -j; +commit; +select * from test; +delete from test; +insert into test values (1, 1); +begin; +delete from test where i = 1; +insert into test values (2, 1) on duplicate key update i = -i, j = -j; +commit; +select * from test; +delete from test; +insert into test values (1, 1); +begin; +update test set i = 2, j = 2 where i = 1; +insert into test values (1, 3) on duplicate key update i = -i, j = -j; +insert into test values (2, 4) on duplicate key update i = -i, j = -j; +commit; +select * from test order by i; +delete from test; +begin; +insert into test values (1, 3), (1, 3) on duplicate key update i = values(i), j = values(j); +commit; +select * from test order by i; +create table tmp (id int auto_increment, code int, primary key(id, code)); +create table m (id int primary key auto_increment, code int unique); +insert tmp (code) values (1); +insert tmp (code) values (1); +set tidb_init_chunk_size=1; +insert m (code) select code from tmp on duplicate key update code = values(code); +select * from m; + +## The following two cases guarantee that last_insert_id is set to the value +## assigned by the on-duplicate-key update (see the editorial sketch just below). 
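## Editorial sketch (not from the original PR): LAST_INSERT_ID(expr) both
## returns expr and stores it as the session's last_insert_id, which is why
## assigning f1 = LAST_INSERT_ID(f1) in the cases below surfaces the id of
## the row that triggered the duplicate-key update.
SELECT LAST_INSERT_ID(42);
SELECT LAST_INSERT_ID();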
+DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (f1 INT AUTO_INCREMENT PRIMARY KEY, +f2 VARCHAR(5) NOT NULL UNIQUE); +--enable_info +INSERT t1 (f2) VALUES ('test') ON DUPLICATE KEY UPDATE f1 = LAST_INSERT_ID(f1); +--disable_info +SELECT LAST_INSERT_ID(); +--enable_info +INSERT t1 (f2) VALUES ('test') ON DUPLICATE KEY UPDATE f1 = LAST_INSERT_ID(f1); +--disable_info +SELECT LAST_INSERT_ID(); + +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (f1 INT AUTO_INCREMENT UNIQUE, +f2 VARCHAR(5) NOT NULL UNIQUE); +--enable_info +INSERT t1 (f2) VALUES ('test') ON DUPLICATE KEY UPDATE f1 = LAST_INSERT_ID(f1); +--disable_info +SELECT LAST_INSERT_ID(); +--enable_info +INSERT t1 (f2) VALUES ('test') ON DUPLICATE KEY UPDATE f1 = LAST_INSERT_ID(f1); +--disable_info +SELECT LAST_INSERT_ID(); +--enable_info +INSERT t1 (f2) VALUES ('test') ON DUPLICATE KEY UPDATE f1 = 2; +--disable_info +SELECT LAST_INSERT_ID(); + +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (f1 INT); +--enable_info +INSERT t1 VALUES (1) ON DUPLICATE KEY UPDATE f1 = 1; +--disable_info +SELECT * FROM t1; + +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (f1 INT PRIMARY KEY, f2 INT NOT NULL UNIQUE); +--enable_info +INSERT t1 VALUES (1, 1); +INSERT t1 VALUES (1, 1), (1, 1) ON DUPLICATE KEY UPDATE f1 = 2, f2 = 2; +--disable_info +SELECT * FROM t1 order by f1; +-- error 1048 +INSERT t1 VALUES (1, 1) ON DUPLICATE KEY UPDATE f2 = null; +--enable_info +INSERT IGNORE t1 VALUES (1, 1) ON DUPLICATE KEY UPDATE f2 = null; +--disable_info +show warnings; +SELECT * FROM t1 order by f1; + +SET sql_mode=''; +-- error 1048 +INSERT t1 VALUES (1, 1) ON DUPLICATE KEY UPDATE f2 = null; +SELECT * FROM t1 order by f1; +set sql_mode=default; + +set tidb_init_chunk_size=default; + + +# TestInsertOnDuplicateKey +drop table if exists t1, t2; +create table t1(a1 bigint primary key, b1 bigint); +create table t2(a2 bigint primary key, b2 bigint); +--enable_info +insert into t1 values(1, 100); +insert into t2 values(1, 200); +insert into t1 select a2, b2 from t2 on duplicate key update b1 = a2; +--disable_info +select * from t1; +--enable_info +insert into t1 select a2, b2 from t2 on duplicate key update b1 = b2; +--disable_info +select * from t1; +--enable_info +insert into t1 select a2, b2 from t2 on duplicate key update a1 = a2; +--disable_info +select * from t1; +--enable_info +insert into t1 select a2, b2 from t2 on duplicate key update b1 = 300; +--disable_info +select * from t1; +--enable_info +insert into t1 values(1, 1) on duplicate key update b1 = 400; +--disable_info +select * from t1; +--enable_info +insert into t1 select 1, 500 from t2 on duplicate key update b1 = 400; +--disable_info +select * from t1; + +drop table if exists t1, t2; +create table t1(a bigint primary key, b bigint); +create table t2(a bigint primary key, b bigint); +-- error 1054 +insert into t1 select * from t2 on duplicate key update c = t2.b; + +drop table if exists t1, t2; +create table t1(a bigint primary key, b bigint); +create table t2(a bigint primary key, b bigint); +-- error 1052 +insert into t1 select * from t2 on duplicate key update a = b; + +drop table if exists t1, t2; +create table t1(a bigint primary key, b bigint); +create table t2(a bigint primary key, b bigint); +-- error 1054 +insert into t1 select * from t2 on duplicate key update c = b; + +drop table if exists t1, t2; +create table t1(a1 bigint primary key, b1 bigint); +create table t2(a2 bigint primary key, b2 bigint); +-- error 1054 +insert into t1 select * from t2 on duplicate key update a1 = values(b2); + +drop table if exists t1, t2; 
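## Editorial aside (sketch, not part of the original PR): VALUES(col) in an
## ON DUPLICATE KEY UPDATE clause refers to the value that would have been
## inserted into col, and it may only name columns of the target table, which
## is why the error cases above fail with 1052/1054. The table name
## tgt_sketch is hypothetical.
drop table if exists tgt_sketch;
create table tgt_sketch(a bigint primary key, b bigint);
insert into tgt_sketch values (1, 10);
insert into tgt_sketch values (1, 20) on duplicate key update b = values(b) + 100;
select * from tgt_sketch;
drop table tgt_sketch;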
+create table t1(a1 bigint primary key, b1 bigint); +create table t2(a2 bigint primary key, b2 bigint); +--enable_info +insert into t1 values(1, 100); +insert into t2 values(1, 200); +insert into t1 select * from t2 on duplicate key update b1 = values(b1) + b2; +--disable_info +select * from t1; +--enable_info +insert into t1 select * from t2 on duplicate key update b1 = values(b1) + b2; +--disable_info +select * from t1; + +drop table if exists t; +create table t(k1 bigint, k2 bigint, val bigint, primary key(k1, k2)); +--enable_info +insert into t (val, k1, k2) values (3, 1, 2); +--disable_info +select * from t; +--enable_info +insert into t (val, k1, k2) select c, a, b from (select 1 as a, 2 as b, 4 as c) tmp on duplicate key update val = tmp.c; +--disable_info +select * from t; + +drop table if exists t; +create table t(k1 double, k2 double, v double, primary key(k1, k2)); +--enable_info +insert into t (v, k1, k2) select c, a, b from (select "3" c, "1" a, "2" b) tmp on duplicate key update v=c; +--disable_info +select * from t; +--enable_info +insert into t (v, k1, k2) select c, a, b from (select "3" c, "1" a, "2" b) tmp on duplicate key update v=c; +--disable_info +select * from t; + +drop table if exists t1, t2; +create table t1(id int, a int, b int); +--enable_info +insert into t1 values (1, 1, 1); +insert into t1 values (2, 2, 1); +insert into t1 values (3, 3, 1); +--disable_info +create table t2(a int primary key, b int, unique(b)); +--enable_info +insert into t2 select a, b from t1 order by id on duplicate key update a=t1.a, b=t1.b; +--disable_info +select * from t2 order by a; + +drop table if exists t1, t2; +create table t1(id int, a int, b int); +--enable_info +insert into t1 values (1, 1, 1); +insert into t1 values (2, 1, 2); +insert into t1 values (3, 3, 1); +--disable_info +create table t2(a int primary key, b int, unique(b)); +--enable_info +insert into t2 select a, b from t1 order by id on duplicate key update a=t1.a, b=t1.b; +--disable_info +select * from t2 order by a; + +drop table if exists t1, t2; +create table t1(id int, a int, b int, c int); +--enable_info +insert into t1 values (1, 1, 1, 1); +insert into t1 values (2, 2, 1, 2); +insert into t1 values (3, 3, 2, 2); +insert into t1 values (4, 4, 2, 2); +--disable_info +create table t2(a int primary key, b int, c int, unique(b), unique(c)); +--enable_info +insert into t2 select a, b, c from t1 order by id on duplicate key update b=t2.b, c=t2.c; +--disable_info +select * from t2 order by a; + +drop table if exists t1; +create table t1(a int primary key, b int); +--enable_info +insert into t1 values(1,1),(2,2),(3,3),(4,4),(5,5); +insert into t1 values(4,14),(5,15),(6,16),(7,17),(8,18) on duplicate key update b=b+10; +--disable_info + +drop table if exists a, b; +create table a(x int primary key); +create table b(x int, y int); +--enable_info +insert into a values(1); +insert into b values(1, 2); +insert into a select x from b ON DUPLICATE KEY UPDATE a.x=b.y; +--disable_info +select * from a; + +--echo ## Test issue 28078. +--echo ## Use different types of columns so that an error is likely if the types mismatch. 
+drop table if exists a, b; +create table a(id int, a1 timestamp, a2 varchar(10), a3 float, unique(id)); +create table b(id int, b1 time, b2 varchar(10), b3 int); +--enable_info +insert into a values (1, '2022-01-04 07:02:04', 'a', 1.1), (2, '2022-01-04 07:02:05', 'b', 2.2); +insert into b values (2, '12:34:56', 'c', 10), (3, '01:23:45', 'd', 20); +insert into a (id) select id from b on duplicate key update a.a2 = b.b2, a.a3 = 3.3; +--disable_info +select * from a; +--enable_info +insert into a (id) select 4 from b where b3 = 20 on duplicate key update a.a3 = b.b3; +--disable_info +select * from a; +--enable_info +insert into a (a2, a3) select 'x', 1.2 from b on duplicate key update a.a2 = b.b3; +--disable_info +select * from a; + +--echo ## Reproduce the insert-on-duplicate-key-update bug under the new row format. +drop table if exists t1; +create table t1(c1 decimal(6,4), primary key(c1)); +insert into t1 set c1 = 0.1; +insert into t1 set c1 = 0.1 on duplicate key update c1 = 1; +select * from t1 use index(primary); + +# TestNonStrictInsertOverflowValue +drop table if exists t; +create table t (d int); +-- error 1690 +insert into t values (cast('18446744073709551616' as unsigned)); +set sql_mode=''; +--enable_warnings +insert into t values (cast('18446744073709551616' as unsigned)); +--disable_warnings +set sql_mode=DEFAULT; + +# TestInsertIgnoreOnDupWithFK +drop table if exists parent, child; +create table parent (id int primary key, ref int, key(ref)); +create table child (id int primary key, ref int, foreign key (ref) references parent(ref)); +insert into parent values (1, 1), (2, 2); +insert into child values (1, 1); + +insert into child values (1, 2) on duplicate key update ref = 2; +-- error 1452 +insert into child values (1, 3) on duplicate key update ref = 3; +--enable_warnings +insert ignore into child values (1, 3) on duplicate key update ref = 3; +--disable_warnings + +-- error 1451 +insert into parent values (2, 3) on duplicate key update ref = 3; +--enable_warnings +insert ignore into parent values (2, 3) on duplicate key update ref = 3; +--disable_warnings + +# TestIssue55457 +drop table if exists t1, t2; +create table t1 (id int primary key, col1 varchar(10) not null default ''); +create table t2 (id int primary key, col1 varchar(10)); +insert into t2 values (1, null); +insert ignore into t1 values(5, null); +set session sql_mode = ''; +-- error 1048 +insert into t1 values(1, null); +-- error 1048 +insert into t1 set id = 1, col1 = null; +-- error 1048 +insert t1 VALUES (5, 5) ON DUPLICATE KEY UPDATE col1 = null; +insert t1 VALUES (5, 5), (6, null) ON DUPLICATE KEY UPDATE col1 = null; +select * from t1; +insert into t1 select * from t2; +show warnings; +insert into t1 values(2, null), (3, 3), (4, 4); +show warnings; +update t1 set col1 = null where id = 3; +show warnings; +insert ignore t1 VALUES (4, 4) ON DUPLICATE KEY UPDATE col1 = null; +select * from t1; +set session sql_mode = default;
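## Editorial sketch (not part of the original PR): the rule TestIssue55457
## exercises is that an explicit NULL for a NOT NULL column is always an error
## for a single-row INSERT, while in non-strict mode a multi-row INSERT
## converts the NULL to the column's implicit default and raises a warning.
## The table name t_sketch is hypothetical.
set session sql_mode = '';
drop table if exists t_sketch;
create table t_sketch (id int primary key, col1 varchar(10) not null default '');
-- error 1048
insert into t_sketch values (1, null);
insert into t_sketch values (2, null), (3, 'x');
show warnings;
select * from t_sketch;
drop table t_sketch;
set session sql_mode = default;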