diff --git a/DEPS.bzl b/DEPS.bzl
index 30885e3d7e560..472ed1723af67 100644
--- a/DEPS.bzl
+++ b/DEPS.bzl
@@ -2973,8 +2973,19 @@ def go_deps():
         name = "com_github_pingcap_kvproto",
         build_file_proto_mode = "disable_global",
         importpath = "github.com/pingcap/kvproto",
+<<<<<<< HEAD
         sum = "h1:ZWFeZNN+6poqqEQ3XU6M/Gw6oiNexbDD3yqIZ05GxlM=",
         version = "v0.0.0-20240112060601-a0e3fbb1eeee",
+=======
+        sha256 = "d470ef683433f2c5bc7a1e610da44d516908d326a0341c07208af76a30f0d8a6",
+        strip_prefix = "github.com/pingcap/kvproto@v0.0.0-20241113043844-e1fa7ea8c302",
+        urls = [
+            "http://bazel-cache.pingcap.net:8080/gomod/github.com/pingcap/kvproto/com_github_pingcap_kvproto-v0.0.0-20241113043844-e1fa7ea8c302.zip",
+            "http://ats.apps.svc/gomod/github.com/pingcap/kvproto/com_github_pingcap_kvproto-v0.0.0-20241113043844-e1fa7ea8c302.zip",
+            "https://cache.hawkingrei.com/gomod/github.com/pingcap/kvproto/com_github_pingcap_kvproto-v0.0.0-20241113043844-e1fa7ea8c302.zip",
+            "https://storage.googleapis.com/pingcapmirror/gomod/github.com/pingcap/kvproto/com_github_pingcap_kvproto-v0.0.0-20241113043844-e1fa7ea8c302.zip",
+        ],
+>>>>>>> e234164d7cd (*: bump client-go with some regression tests (#57282))
     )
     go_repository(
         name = "com_github_pingcap_log",
@@ -3603,8 +3614,19 @@ def go_deps():
         name = "com_github_tikv_client_go_v2",
         build_file_proto_mode = "disable_global",
         importpath = "github.com/tikv/client-go/v2",
+<<<<<<< HEAD
         sum = "h1:lKLA4jW6wj/A15+sb901WXvGd4xvdGuGDOndtyVTV/8=",
         version = "v2.0.4-0.20240910032334-87841020c53e",
+=======
+        sha256 = "4bc779621156c4ee6f46b57235da9c34c8ec0ee6d3be5f52e33da4c47098eeed",
+        strip_prefix = "github.com/tikv/client-go/v2@v2.0.8-0.20241120024459-05d115b3e88b",
+        urls = [
+            "http://bazel-cache.pingcap.net:8080/gomod/github.com/tikv/client-go/v2/com_github_tikv_client_go_v2-v2.0.8-0.20241120024459-05d115b3e88b.zip",
+            "http://ats.apps.svc/gomod/github.com/tikv/client-go/v2/com_github_tikv_client_go_v2-v2.0.8-0.20241120024459-05d115b3e88b.zip",
+            "https://cache.hawkingrei.com/gomod/github.com/tikv/client-go/v2/com_github_tikv_client_go_v2-v2.0.8-0.20241120024459-05d115b3e88b.zip",
+            "https://storage.googleapis.com/pingcapmirror/gomod/github.com/tikv/client-go/v2/com_github_tikv_client_go_v2-v2.0.8-0.20241120024459-05d115b3e88b.zip",
+        ],
+>>>>>>> e234164d7cd (*: bump client-go with some regression tests (#57282))
     )
     go_repository(
         name = "com_github_tikv_pd_client",
diff --git a/errno/errcode.go b/errno/errcode.go
index a72c6b2ec5e64..32635dda56d5b 100644
--- a/errno/errcode.go
+++ b/errno/errcode.go
@@ -1042,6 +1042,31 @@ const (
     ErrSetTTLEnableForNonTTLTable   = 8150
     ErrTempTableNotAllowedWithTTL   = 8151
     ErrUnsupportedTTLReferencedByFK = 8152
+<<<<<<< HEAD:errno/errcode.go
+=======
+    ErrUnsupportedPrimaryKeyTypeWithTTL = 8153
+    ErrLoadDataFromServerDisk           = 8154
+    ErrLoadParquetFromLocal             = 8155
+    ErrLoadDataEmptyPath                = 8156
+    ErrLoadDataUnsupportedFormat        = 8157
+    ErrLoadDataInvalidURI               = 8158
+    ErrLoadDataCantAccess               = 8159
+    ErrLoadDataCantRead                 = 8160
+    ErrLoadDataWrongFormatConfig        = 8162
+    ErrUnknownOption                    = 8163
+    ErrInvalidOptionVal                 = 8164
+    ErrDuplicateOption                  = 8165
+    ErrLoadDataUnsupportedOption        = 8166
+    ErrLoadDataJobNotFound              = 8170
+    ErrLoadDataInvalidOperation         = 8171
+    ErrLoadDataLocalUnsupportedOption   = 8172
+    ErrLoadDataPreCheckFailed           = 8173
+    ErrBRJobNotFound                    = 8174
+    ErrMemoryExceedForQuery             = 8175
+    ErrMemoryExceedForInstance          = 8176
+    ErrDeleteNotFoundColumn             = 8177
+    ErrKeyTooLarge                      = 8178
+>>>>>>> e234164d7cd (*: bump client-go with some regression tests (#57282)):pkg/errno/errcode.go
 
     // Error codes used by TiDB ddl package
     ErrUnsupportedDDLOperation = 8200
diff --git a/errno/errname.go b/errno/errname.go
index f8063dcda2560..a33a197390628 100644
--- a/errno/errname.go
+++ b/errno/errname.go
@@ -936,6 +936,7 @@ var MySQLErrName = map[uint16]*mysql.ErrMessage{
     ErrSequenceInvalidTableStructure: mysql.Message("Sequence '%-.64s.%-.64s' table structure is invalid (%s)", nil),
 
     // TiDB errors.
+<<<<<<< HEAD:errno/errname.go
     ErrMemExceedThreshold: mysql.Message("%s holds %dB memory, exceeds threshold %dB.%s", nil),
     ErrForUpdateCantRetry: mysql.Message("[%d] can not retry select for update statement", nil),
     ErrAdminCheckTable: mysql.Message("TiDB admin check table failed.", nil),
@@ -1037,6 +1038,132 @@ var MySQLErrName = map[uint16]*mysql.ErrMessage{
     ErrSetTTLEnableForNonTTLTable: mysql.Message("Cannot set TTL_ENABLE on a table without TTL config", nil),
     ErrTempTableNotAllowedWithTTL: mysql.Message("Set TTL for temporary table is not allowed", nil),
     ErrUnsupportedTTLReferencedByFK: mysql.Message("Set TTL for a table referenced by foreign key is not allowed", nil),
+=======
+    ErrMemExceedThreshold: mysql.Message("%s holds %dB memory, exceeds threshold %dB.%s", nil),
+    ErrForUpdateCantRetry: mysql.Message("[%d] can not retry select for update statement", nil),
+    ErrAdminCheckTable: mysql.Message("TiDB admin check table failed.", nil),
+    ErrOptOnTemporaryTable: mysql.Message("`%s` is unsupported on temporary tables.", nil),
+    ErrDropTableOnTemporaryTable: mysql.Message("`drop global temporary table` can only drop global temporary table", nil),
+    ErrTxnTooLarge: mysql.Message("Transaction is too large, size: %d", nil),
+    ErrWriteConflictInTiDB: mysql.Message("Write conflict, txnStartTS %d is stale", nil),
+    ErrInvalidPluginID: mysql.Message("Wrong plugin id: %s, valid plugin id is [name]-[version], and version should not contain '-'", nil),
+    ErrInvalidPluginManifest: mysql.Message("Cannot read plugin %s's manifest", nil),
+    ErrInvalidPluginName: mysql.Message("Plugin load with %s but got wrong name %s", nil),
+    ErrInvalidPluginVersion: mysql.Message("Plugin load with %s but got %s", nil),
+    ErrDuplicatePlugin: mysql.Message("Plugin [%s] is redeclared", nil),
+    ErrInvalidPluginSysVarName: mysql.Message("Plugin %s's sysVar %s must start with its plugin name %s", nil),
+    ErrRequireVersionCheckFail: mysql.Message("Plugin %s require %s be %v but got %v", nil),
+    ErrUnsupportedReloadPlugin: mysql.Message("Plugin %s isn't loaded so cannot be reloaded", nil),
+    ErrUnsupportedReloadPluginVar: mysql.Message("Reload plugin with different sysVar is unsupported %v", nil),
+    ErrTableLocked: mysql.Message("Table '%s' was locked in %s by %v", nil),
+    ErrNotExist: mysql.Message("Error: key not exist", nil),
+    ErrTxnRetryable: mysql.Message("Error: KV error safe to retry %s ", []int{0}),
+    ErrCannotSetNilValue: mysql.Message("can not set nil value", nil),
+    ErrInvalidTxn: mysql.Message("invalid transaction", nil),
+    ErrEntryTooLarge: mysql.Message("entry too large, the max entry size is %d, the size of data is %d", nil),
+    ErrNotImplemented: mysql.Message("not implemented", nil),
+    ErrInfoSchemaExpired: mysql.Message("Information schema is out of date: schema failed to update in 1 lease, please make sure TiDB can connect to TiKV", nil),
+    ErrInfoSchemaChanged: mysql.Message("Information schema is changed during the execution of the statement(for example, table definition may be updated by other DDL ran in parallel). If you see this error often, try increasing `tidb_max_delta_schema_count`", nil),
+    ErrBadNumber: mysql.Message("Bad Number", nil),
+    ErrCastAsSignedOverflow: mysql.Message("Cast to signed converted positive out-of-range integer to its negative complement", nil),
+    ErrCastNegIntAsUnsigned: mysql.Message("Cast to unsigned converted negative integer to it's positive complement", nil),
+    ErrInvalidYearFormat: mysql.Message("invalid year format", nil),
+    ErrInvalidYear: mysql.Message("invalid year", nil),
+    ErrIncorrectDatetimeValue: mysql.Message("Incorrect datetime value: '%s'", []int{0}),
+    ErrInvalidTimeFormat: mysql.Message("invalid time format: '%v'", []int{0}),
+    ErrInvalidWeekModeFormat: mysql.Message("invalid week mode format: '%v'", nil),
+    ErrFieldGetDefaultFailed: mysql.Message("Field '%s' get default value fail", nil),
+    ErrIndexOutBound: mysql.Message("Index column %s offset out of bound, offset: %d, row: %v", []int{2}),
+    ErrUnsupportedOp: mysql.Message("operation not supported", nil),
+    ErrRowNotFound: mysql.Message("can not find the row: %s", []int{0}),
+    ErrTableStateCantNone: mysql.Message("table %s can't be in none state", nil),
+    ErrColumnStateCantNone: mysql.Message("column %s can't be in none state", nil),
+    ErrColumnStateNonPublic: mysql.Message("can not use non-public column", nil),
+    ErrIndexStateCantNone: mysql.Message("index %s can't be in none state", nil),
+    ErrInvalidRecordKey: mysql.Message("invalid record key", nil),
+    ErrUnsupportedValueForVar: mysql.Message("variable '%s' does not yet support value: %s", nil),
+    ErrUnsupportedIsolationLevel: mysql.Message("The isolation level '%s' is not supported. Set tidb_skip_isolation_level_check=1 to skip this error", nil),
+    ErrInvalidDDLWorker: mysql.Message("Invalid DDL worker", nil),
+    ErrUnsupportedDDLOperation: mysql.Message("Unsupported %s", nil),
+    ErrNotOwner: mysql.Message("TiDB server is not a DDL owner", nil),
+    ErrCantDecodeRecord: mysql.Message("Cannot decode %s value, because %v", nil),
+    ErrInvalidDDLJob: mysql.Message("Invalid DDL job", nil),
+    ErrInvalidDDLJobFlag: mysql.Message("Invalid DDL job flag", nil),
+    ErrWaitReorgTimeout: mysql.Message("Timeout waiting for data reorganization", nil),
+    ErrInvalidStoreVersion: mysql.Message("Invalid storage current version: %d", nil),
+    ErrUnknownTypeLength: mysql.Message("Unknown length for type %d", nil),
+    ErrUnknownFractionLength: mysql.Message("Unknown length for type %d and fraction %d", nil),
+    ErrInvalidDDLJobVersion: mysql.Message("Version %d of DDL job is greater than current one: %d", nil),
+    ErrInvalidSplitRegionRanges: mysql.Message("Failed to split region ranges: %s", nil),
+    ErrReorgPanic: mysql.Message("Reorg worker panic", nil),
+    ErrInvalidDDLState: mysql.Message("Invalid %s state: %v", nil),
+    ErrCancelledDDLJob: mysql.Message("Cancelled DDL job", nil),
+    ErrRepairTable: mysql.Message("Failed to repair table: %s", nil),
+    ErrLoadPrivilege: mysql.Message("Load privilege table fail: %s", nil),
+    ErrInvalidPrivilegeType: mysql.Message("unknown privilege type %s", nil),
+    ErrUnknownFieldType: mysql.Message("unknown field type", nil),
+    ErrInvalidSequence: mysql.Message("invalid sequence", nil),
+    ErrInvalidType: mysql.Message("invalid type", nil),
+    ErrCantGetValidID: mysql.Message("Cannot get a valid auto-ID when retrying the statement", nil),
+    ErrCantSetToNull: mysql.Message("cannot set variable to null", nil),
+    ErrSnapshotTooOld: mysql.Message("snapshot is older than GC safe point %s", nil),
+    ErrInvalidTableID: mysql.Message("invalid TableID", nil),
+    ErrInvalidAutoRandom: mysql.Message("Invalid auto random: %s", nil),
+    ErrInvalidHashKeyFlag: mysql.Message("invalid encoded hash key flag", nil),
+    ErrInvalidListIndex: mysql.Message("invalid list index", nil),
+    ErrInvalidListMetaData: mysql.Message("invalid list meta data", nil),
+    ErrWriteOnSnapshot: mysql.Message("write on snapshot", nil),
+    ErrInvalidKey: mysql.Message("invalid key", nil),
+    ErrInvalidIndexKey: mysql.Message("invalid index key", nil),
+    ErrDataInconsistent: mysql.Message("data inconsistency in table: %s, index: %s, handle: %s, index-values:%#v != record-values:%#v", []int{2, 3, 4}),
+    ErrDDLReorgElementNotExist: mysql.Message("DDL reorg element does not exist", nil),
+    ErrDDLJobNotFound: mysql.Message("DDL Job:%v not found", nil),
+    ErrCancelFinishedDDLJob: mysql.Message("This job:%v is finished, so can't be cancelled", nil),
+    ErrCannotCancelDDLJob: mysql.Message("This job:%v is almost finished, can't be cancelled now", nil),
+    ErrUnknownAllocatorType: mysql.Message("Invalid allocator type", nil),
+    ErrAutoRandReadFailed: mysql.Message("Failed to read auto-random value from storage engine", nil),
+    ErrInvalidIncrementAndOffset: mysql.Message("Invalid auto_increment settings: auto_increment_increment: %d, auto_increment_offset: %d, both of them must be in range [1..65535]", nil),
+    ErrDataInconsistentMismatchCount: mysql.Message("data inconsistency in table: %s, index: %s, index-count:%d != record-count:%d", nil),
+    ErrDataInconsistentMismatchIndex: mysql.Message("data inconsistency in table: %s, index: %s, col: %s, handle: %#v, index-values:%#v != record-values:%#v, compare err:%#v", []int{3, 4, 5, 6}),
+    ErrInconsistentRowValue: mysql.Message("writing inconsistent data in table: %s, expected-values:{%s} != record-values:{%s}", []int{1, 2}),
+    ErrInconsistentHandle: mysql.Message("writing inconsistent data in table: %s, index: %s, index-handle:%#v != record-handle:%#v, index: %#v, record: %#v", []int{2, 3, 4, 5}),
+    ErrInconsistentIndexedValue: mysql.Message("writing inconsistent data in table: %s, index: %s, col: %s, indexed-value:{%s} != record-value:{%s}", []int{3, 4}),
+    ErrAssertionFailed: mysql.Message("assertion failed: key: %s, assertion: %s, start_ts: %v, existing start ts: %v, existing commit ts: %v", []int{0}),
+    ErrInstanceScope: mysql.Message("modifying %s will require SET GLOBAL in a future version of TiDB", nil),
+    ErrNonTransactionalJobFailure: mysql.Message("non-transactional job failed, job id: %d, total jobs: %d. job range: [%s, %s], job sql: %s, err: %v", []int{2, 3, 4}),
+    ErrSettingNoopVariable: mysql.Message("setting %s has no effect in TiDB", nil),
+    ErrGettingNoopVariable: mysql.Message("variable %s has no effect in TiDB", nil),
+    ErrCannotMigrateSession: mysql.Message("cannot migrate the current session: %s", nil),
+    ErrLazyUniquenessCheckFailure: mysql.Message("transaction aborted because lazy uniqueness check is enabled and an error occurred: %s", nil),
+    ErrUnsupportedColumnInTTLConfig: mysql.Message("Field '%-.192s' is of a not supported type for TTL config, expect DATETIME, DATE or TIMESTAMP", nil),
+    ErrTTLColumnCannotDrop: mysql.Message("Cannot drop column '%-.192s': needed in TTL config", nil),
+    ErrSetTTLOptionForNonTTLTable: mysql.Message("Cannot set %s on a table without TTL config", nil),
+    ErrTempTableNotAllowedWithTTL: mysql.Message("Set TTL for temporary table is not allowed", nil),
+    ErrUnsupportedTTLReferencedByFK: mysql.Message("Set TTL for a table referenced by foreign key is not allowed", nil),
+    ErrUnsupportedPrimaryKeyTypeWithTTL: mysql.Message("Unsupported clustered primary key type FLOAT/DOUBLE for TTL", nil),
+    ErrLoadDataFromServerDisk: mysql.Message("Don't support load data from tidb-server's disk. Or if you want to load local data via client, the path of INFILE '%s' needs to specify the clause of LOCAL first", nil),
+    ErrLoadParquetFromLocal: mysql.Message("Do not support loading parquet files from local. Please try to load the parquet files from the cloud storage", nil),
+    ErrLoadDataEmptyPath: mysql.Message("The value of INFILE must not be empty when LOAD DATA from LOCAL", nil),
+    ErrLoadDataUnsupportedFormat: mysql.Message("The FORMAT '%s' is not supported", nil),
+    ErrLoadDataInvalidURI: mysql.Message("The URI of %s is invalid. Reason: %s. Please provide a valid URI, such as 's3://import/test.csv?access-key={your_access_key_id ID}&secret-access-key={your_secret_access_key}&session-token={your_session_token}'", nil),
+    ErrLoadDataCantAccess: mysql.Message("Access to the %s has been denied. Reason: %s. Please check the URI, access key and secret access key are correct", nil),
+    ErrLoadDataCantRead: mysql.Message("Failed to read source files. Reason: %s. %s", nil),
+    ErrLoadDataWrongFormatConfig: mysql.Message("", nil),
+    ErrUnknownOption: mysql.Message("Unknown option %s", nil),
+    ErrInvalidOptionVal: mysql.Message("Invalid option value for %s", nil),
+    ErrDuplicateOption: mysql.Message("Option %s specified more than once", nil),
+    ErrLoadDataUnsupportedOption: mysql.Message("Unsupported option %s for %s", nil),
+    ErrLoadDataJobNotFound: mysql.Message("Job ID %d doesn't exist", nil),
+    ErrLoadDataInvalidOperation: mysql.Message("The current job status cannot perform the operation. %s", nil),
+    ErrLoadDataLocalUnsupportedOption: mysql.Message("Unsupported option for LOAD DATA LOCAL INFILE: %s", nil),
+    ErrLoadDataPreCheckFailed: mysql.Message("PreCheck failed: %s", nil),
+    ErrMemoryExceedForQuery: mysql.Message("Your query has been cancelled due to exceeding the allowed memory limit for a single SQL query. Please try narrowing your query scope or increase the tidb_mem_quota_query limit and try again.[conn=%d]", nil),
+    ErrMemoryExceedForInstance: mysql.Message("Your query has been cancelled due to exceeding the allowed memory limit for the tidb-server instance and this query is currently using the most memory. Please try narrowing your query scope or increase the tidb_server_memory_limit and try again.[conn=%d]", nil),
+    ErrDeleteNotFoundColumn: mysql.Message("Delete can not find column %s for table %s", nil),
+    ErrKeyTooLarge: mysql.Message("key is too large, the size of given key is %d", nil),
+
+    ErrHTTPServiceError: mysql.Message("HTTP request failed with status %s", nil),
+>>>>>>> e234164d7cd (*: bump client-go with some regression tests (#57282)):pkg/errno/errname.go
 
     ErrWarnOptimizerHintInvalidInteger: mysql.Message("integer value is out of range in '%s'", nil),
     ErrWarnOptimizerHintUnsupportedHint: mysql.Message("Optimizer hint %s is not supported by TiDB and is ignored", nil),
diff --git a/errors.toml b/errors.toml
index ed5b374dc7c84..33312d7436a62 100644
--- a/errors.toml
+++ b/errors.toml
@@ -1811,6 +1811,11 @@ error = '''
 not implemented
 '''
 
+["kv:8178"]
+error = '''
+key is too large, the size of given key is %d
+'''
+
 ["kv:9007"]
 error = '''
 Write conflict, txnStartTS=%d, conflictStartTS=%d, conflictCommitTS=%d, key=%s%s%s%s, reason=%s [try again later]
'''
diff --git a/executor/adapter.go b/executor/adapter.go
index 3360f9b8898d1..5a153484f5b5f 100644
--- a/executor/adapter.go
+++ b/executor/adapter.go
@@ -593,7 +593,7 @@ func (a *ExecStmt) handleStmtForeignKeyTrigger(ctx context.Context, e Executor)
     if stmtCtx.ForeignKeyTriggerCtx.HasFKCascades {
         // If the ExecStmt has foreign key cascade to be executed, we need call `StmtCommit` to commit the ExecStmt itself
         // change first.
-        // Since `UnionScanExec` use `SnapshotIter` and `SnapshotGetter` to read txn mem-buffer, if we don't do `StmtCommit`,
+        // Since `UnionScanExec` use `SnapshotIter` and `SnapshotGetter` to read txn mem-buffer, if we don't do `StmtCommit`,
         // then the fk cascade executor can't read the mem-buffer changed by the ExecStmt.
         a.Ctx.StmtCommit()
     }
diff --git a/executor/union_scan.go b/executor/union_scan.go
index a23cd8b8c7873..95f6e2f41b411 100644
--- a/executor/union_scan.go
+++ b/executor/union_scan.go
@@ -189,7 +189,14 @@ func (us *UnionScanExec) Close() error {
     us.cursor4SnapshotRows = 0
     us.addedRows = us.addedRows[:0]
     us.snapshotRows = us.snapshotRows[:0]
+<<<<<<< HEAD:executor/union_scan.go
     return us.children[0].Close()
+=======
+    if us.addedRowsIter != nil {
+        us.addedRowsIter.Close()
+    }
+    return exec.Close(us.Children(0))
+>>>>>>> e234164d7cd (*: bump client-go with some regression tests (#57282)):pkg/executor/union_scan.go
 }
 
 // getOneRow gets one result row from dirty table or child.
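The errors.toml entry above ([kv:8178]) pairs with the `ErrKeyTooLarge` terror that this diff defines in kv/error.go further down. As a hedged illustration (not part of the patch), the sketch below shows how such an error is typically constructed and raised with TiDB's dbterror helpers; it assumes this branch's pre-`pkg/` import paths, and `checkKeySize`/`maxKeySize` are made-up names for the example, not the actual limit logic in client-go's MemBuffer:

    // Illustrative only: raising kv:8178 via a terror like the one defined in kv/error.go.
    package main

    import (
        "fmt"

        "github.com/pingcap/tidb/parser/mysql"
        "github.com/pingcap/tidb/util/dbterror"
    )

    // Mirrors `ErrKeyTooLarge = dbterror.ClassKV.NewStd(mysql.ErrKeyTooLarge)` from kv/error.go.
    var errKeyTooLarge = dbterror.ClassKV.NewStd(mysql.ErrKeyTooLarge)

    // checkKeySize is a hypothetical guard; maxKeySize is an assumed bound.
    func checkKeySize(key []byte, maxKeySize int) error {
        if len(key) > maxKeySize {
            // Renders as: [kv:8178]key is too large, the size of given key is <len>
            return errKeyTooLarge.GenWithStackByArgs(len(key))
        }
        return nil
    }

    func main() {
        fmt.Println(checkKeySize(make([]byte, 1<<20), 4096))
    }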
diff --git a/go.mod b/go.mod
index ad06e06b21beb..7aaf42d1cef68 100644
--- a/go.mod
+++ b/go.mod
@@ -67,6 +67,7 @@ require (
     github.com/opentracing/basictracer-go v1.0.0
     github.com/opentracing/opentracing-go v1.2.0
     github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2
+<<<<<<< HEAD
     github.com/pingcap/badger v1.5.1-0.20220314162537-ab58fbf40580
     github.com/pingcap/errors v0.11.5-0.20220729040631-518f63d66278
     github.com/pingcap/failpoint v0.0.0-20240527053858-9b3b6e34194a
@@ -82,6 +83,25 @@ require (
     github.com/prometheus/common v0.37.0
     github.com/prometheus/prometheus v0.0.0-20190525122359-d20e84d0fb64
     github.com/shirou/gopsutil/v3 v3.22.9
+=======
+    github.com/pingcap/badger v1.5.1-0.20241015064302-38533b6cbf8d
+    github.com/pingcap/errors v0.11.5-0.20240318064555-6bd07397691f
+    github.com/pingcap/failpoint v0.0.0-20240528011301-b51a646c7c86
+    github.com/pingcap/fn v1.0.0
+    github.com/pingcap/kvproto v0.0.0-20241113043844-e1fa7ea8c302
+    github.com/pingcap/log v1.1.1-0.20240314023424-862ccc32f18d
+    github.com/pingcap/sysutil v1.0.1-0.20240311050922-ae81ee01f3a5
+    github.com/pingcap/tidb/pkg/parser v0.0.0-20211011031125-9b13dc409c5e
+    github.com/pingcap/tipb v0.0.0-20241022082558-0607513e7fa4
+    github.com/prometheus/client_golang v1.20.5
+    github.com/prometheus/client_model v0.6.1
+    github.com/prometheus/common v0.57.0
+    github.com/prometheus/prometheus v0.50.1
+    github.com/qri-io/jsonschema v0.2.1
+    github.com/robfig/cron/v3 v3.0.1
+    github.com/sasha-s/go-deadlock v0.3.5
+    github.com/shirou/gopsutil/v3 v3.24.5
+>>>>>>> e234164d7cd (*: bump client-go with some regression tests (#57282))
     github.com/shurcooL/httpgzip v0.0.0-20190720172056-320755c1c1b0
     github.com/soheilhy/cmux v0.1.5
     github.com/spf13/cobra v1.6.1
@@ -90,10 +110,18 @@ require (
     github.com/stretchr/testify v1.8.4
     github.com/tdakkota/asciicheck v0.1.1
     github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2
+<<<<<<< HEAD
     github.com/tikv/client-go/v2 v2.0.4-0.20240910032334-87841020c53e
     github.com/tikv/pd/client v0.0.0-20230904040343-947701a32c05
     github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144
     github.com/twmb/murmur3 v1.1.3
+=======
+    github.com/tidwall/btree v1.7.0
+    github.com/tikv/client-go/v2 v2.0.8-0.20241120024459-05d115b3e88b
+    github.com/tikv/pd/client v0.0.0-20241111073742-238d4d79ea31
+    github.com/timakin/bodyclose v0.0.0-20240125160201-f835fa56326a
+    github.com/twmb/murmur3 v1.1.6
+>>>>>>> e234164d7cd (*: bump client-go with some regression tests (#57282))
     github.com/uber/jaeger-client-go v2.22.1+incompatible
     github.com/vbauerster/mpb/v7 v7.5.3
     github.com/wangjohn/quickselect v0.0.0-20161129230411-ed8402a42d5f
diff --git a/go.sum b/go.sum
index b8a66ea5021b8..b5cbbfa6cf865 100644
--- a/go.sum
+++ b/go.sum
@@ -783,11 +783,16 @@ github.com/pingcap/fn v0.0.0-20200306044125-d5540d389059/go.mod h1:fMRU1BA1y+r89
 github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989 h1:surzm05a8C9dN8dIUmo4Be2+pMRb6f55i+UIYrluu2E=
 github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989/go.mod h1:O17XtbryoCJhkKGbT62+L2OlrniwqiGLSqrmdHCMzZw=
 github.com/pingcap/kvproto v0.0.0-20191211054548-3c6b38ea5107/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w=
+<<<<<<< HEAD
 github.com/pingcap/kvproto v0.0.0-20230726063044-73d6d7f3756b/go.mod h1:OYtxs0786qojVTmkVeufx93xe+jUgm56GUYRIKnmaGI=
 github.com/pingcap/kvproto v0.0.0-20240112060601-a0e3fbb1eeee h1:ZWFeZNN+6poqqEQ3XU6M/Gw6oiNexbDD3yqIZ05GxlM=
 github.com/pingcap/kvproto v0.0.0-20240112060601-a0e3fbb1eeee/go.mod h1:OYtxs0786qojVTmkVeufx93xe+jUgm56GUYRIKnmaGI=
 github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8=
 github.com/pingcap/log v0.0.0-20200511115504-543df19646ad/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8=
+=======
+github.com/pingcap/kvproto v0.0.0-20241113043844-e1fa7ea8c302 h1:ynwwqr0rLliSOJcx0wHMu4T/NiPXHlK48mk2DCrBKCI=
+github.com/pingcap/kvproto v0.0.0-20241113043844-e1fa7ea8c302/go.mod h1:rXxWk2UnwfUhLXha1jxRWPADw9eMZGWEWCg92Tgmb/8=
+>>>>>>> e234164d7cd (*: bump client-go with some regression tests (#57282))
 github.com/pingcap/log v0.0.0-20210625125904-98ed8e2eb1c7/go.mod h1:8AanEdAHATuRurdGxZXBz0At+9avep+ub7U1AGYLIMM=
 github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3/go.mod h1:DWQW5jICDR7UJh4HtxXSM20Churx4CQL0fwL/SoOSA4=
 github.com/pingcap/log v1.1.1-0.20221116035753-734d527bc87c h1:crhkw6DD+07Bg1wYhW5Piw+kYNKZqFQqfC2puUf6gMI=
@@ -948,12 +953,25 @@ github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3 h1:f+jULpR
 github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY=
 github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2 h1:mbAskLJ0oJfDRtkanvQPiooDH8HvJ2FBh+iKT/OmiQQ=
 github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2/go.mod h1:2PfKggNGDuadAa0LElHrByyrz4JPZ9fFx6Gs7nx7ZZU=
+<<<<<<< HEAD
 github.com/tikv/client-go/v2 v2.0.4-0.20240910032334-87841020c53e h1:lKLA4jW6wj/A15+sb901WXvGd4xvdGuGDOndtyVTV/8=
 github.com/tikv/client-go/v2 v2.0.4-0.20240910032334-87841020c53e/go.mod h1:mmVCLP2OqWvQJPOIevQPZvGphzh/oq9vv8J5LDfpadQ=
 github.com/tikv/pd/client v0.0.0-20230904040343-947701a32c05 h1:e4hLUKfgfPeJPZwOfU+/I/03G0sn6IZqVcbX/5o+hvM=
 github.com/tikv/pd/client v0.0.0-20230904040343-947701a32c05/go.mod h1:MLIl+d2WbOF4A3U88WKtyXrQQW417wZDDvBcq2IW9bQ=
 github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144 h1:kl4KhGNsJIbDHS9/4U9yQo1UcPQM0kOMJHn29EoH/Ro=
 github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk=
+=======
+github.com/tiancaiamao/gp v0.0.0-20221230034425-4025bc8a4d4a h1:J/YdBZ46WKpXsxsW93SG+q0F8KI+yFrcIDT4c/RNoc4=
+github.com/tiancaiamao/gp v0.0.0-20221230034425-4025bc8a4d4a/go.mod h1:h4xBhSNtOeEosLJ4P7JyKXX7Cabg7AVkWCK5gV2vOrM=
+github.com/tidwall/btree v1.7.0 h1:L1fkJH/AuEh5zBnnBbmTwQ5Lt+bRJ5A8EWecslvo9iI=
+github.com/tidwall/btree v1.7.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY=
+github.com/tikv/client-go/v2 v2.0.8-0.20241120024459-05d115b3e88b h1:/hmt2FCt34rCVBX9dswiSdHOkppP67VWaESryTxDKc8=
+github.com/tikv/client-go/v2 v2.0.8-0.20241120024459-05d115b3e88b/go.mod h1:NI2GfVlB9n7DsIGCxrKcD4psrcuFNEV8m1BgyzK1Amc=
+github.com/tikv/pd/client v0.0.0-20241111073742-238d4d79ea31 h1:oAYc4m5Eu1OY9ogJ103VO47AYPHvhtzbUPD8L8B67Qk=
+github.com/tikv/pd/client v0.0.0-20241111073742-238d4d79ea31/go.mod h1:W5a0sDadwUpI9k8p7M77d3jo253ZHdmua+u4Ho4Xw8U=
+github.com/timakin/bodyclose v0.0.0-20240125160201-f835fa56326a h1:A6uKudFIfAEpoPdaal3aSqGxBzLyU8TqyXImLwo6dIo=
+github.com/timakin/bodyclose v0.0.0-20240125160201-f835fa56326a/go.mod h1:mkjARE7Yr8qU23YcGMSALbIxTQ9r9QBVahQOBRfU460=
+>>>>>>> e234164d7cd (*: bump client-go with some regression tests (#57282))
 github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs=
 github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw=
 github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk=
diff --git a/kv/error.go b/kv/error.go
index d8c344324fe80..0ec235088d1d4 100644
--- a/kv/error.go
+++ b/kv/error.go
@@ -47,7 +47,14 @@ var (
     ErrTxnTooLarge = dbterror.ClassKV.NewStd(mysql.ErrTxnTooLarge)
     // ErrEntryTooLarge is the error when a key value entry is too large.
     ErrEntryTooLarge = dbterror.ClassKV.NewStd(mysql.ErrEntryTooLarge)
+<<<<<<< HEAD:kv/error.go
     // ErrKeyExists returns when key is already exist.
+=======
+    // ErrKeyTooLarge is the error when a key is too large to be handled by MemBuffer.
+    ErrKeyTooLarge = dbterror.ClassKV.NewStd(mysql.ErrKeyTooLarge)
+    // ErrKeyExists returns when key is already exist. Caller should try to use
+    // GenKeyExistsErr to generate this error for correct format.
+>>>>>>> e234164d7cd (*: bump client-go with some regression tests (#57282)):pkg/kv/error.go
     ErrKeyExists = dbterror.ClassKV.NewStd(mysql.ErrDupEntry)
     // ErrNotImplemented returns when a function is not implemented yet.
     ErrNotImplemented = dbterror.ClassKV.NewStd(mysql.ErrNotImplemented)
diff --git a/kv/key.go b/kv/key.go
index 3c243a06ed2eb..71b11ce106dcd 100644
--- a/kv/key.go
+++ b/kv/key.go
@@ -96,12 +96,23 @@ func (k Key) String() string {
 type KeyRange struct {
     StartKey Key
     EndKey   Key
+}
+
+<<<<<<< HEAD:kv/key.go
+=======
+// KeyRangeSliceMemUsage return the memory usage of []KeyRange
+func KeyRangeSliceMemUsage(k []KeyRange) int64 {
+    const sizeofKeyRange = int64(unsafe.Sizeof(*(*KeyRange)(nil)))
+
+    res := sizeofKeyRange * int64(cap(k))
+    for _, m := range k {
+        res += int64(cap(m.StartKey)) + int64(cap(m.EndKey))
+    }
-
-    XXXNoUnkeyedLiteral struct{}
-    XXXunrecognized     []byte
-    XXXsizecache        int32
+    return res
 }
+>>>>>>> e234164d7cd (*: bump client-go with some regression tests (#57282)):pkg/kv/key.go
 
 // IsPoint checks if the key range represents a point.
 func (r *KeyRange) IsPoint() bool {
     if len(r.StartKey) != len(r.EndKey) {
diff --git a/kv/key_test.go b/kv/key_test.go
index 25402d9c50725..5a7c50b1fdc66 100644
--- a/kv/key_test.go
+++ b/kv/key_test.go
@@ -224,6 +224,18 @@ func TestKeyRangeDefinition(t *testing.T) {
     // And same default value.
     require.Equal(t, (*coprocessor.KeyRange)(unsafe.Pointer(&r1)), &r2)
     require.Equal(t, &r1, (*KeyRange)(unsafe.Pointer(&r2)))
+<<<<<<< HEAD:kv/key_test.go
+=======
+
+    s := []KeyRange{{
+        StartKey: []byte("s1"),
+        EndKey:   []byte("e1"),
+    }, {
+        StartKey: []byte("s2"),
+        EndKey:   []byte("e2"),
+    }}
+    require.Equal(t, int64(104), KeyRangeSliceMemUsage(s))
+>>>>>>> e234164d7cd (*: bump client-go with some regression tests (#57282)):pkg/kv/key_test.go
 }
 
 func BenchmarkIsPoint(b *testing.B) {
diff --git a/pkg/executor/executor_pkg_test.go b/pkg/executor/executor_pkg_test.go
new file mode 100644
index 0000000000000..ec0c15fa7ea57
--- /dev/null
+++ b/pkg/executor/executor_pkg_test.go
@@ -0,0 +1,505 @@
+// Copyright 2016 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package executor
+
+import (
+    "fmt"
+    "runtime"
+    "strconv"
+    "strings"
+    "testing"
+    "time"
+    "unsafe"
+
+    "github.com/hashicorp/go-version"
+    "github.com/pingcap/tidb/pkg/domain"
+    "github.com/pingcap/tidb/pkg/errctx"
+    "github.com/pingcap/tidb/pkg/executor/aggfuncs"
+    "github.com/pingcap/tidb/pkg/executor/join"
+    "github.com/pingcap/tidb/pkg/kv"
+    "github.com/pingcap/tidb/pkg/parser/ast"
+    "github.com/pingcap/tidb/pkg/parser/mysql"
+    "github.com/pingcap/tidb/pkg/sessionctx/variable"
+    "github.com/pingcap/tidb/pkg/tablecodec"
+    "github.com/pingcap/tidb/pkg/types"
+    "github.com/pingcap/tidb/pkg/util/collate"
+    "github.com/pingcap/tidb/pkg/util/memory"
+    "github.com/pingcap/tidb/pkg/util/mock"
+    "github.com/pingcap/tidb/pkg/util/ranger"
+    "github.com/pingcap/tidb/pkg/util/tableutil"
+    "github.com/stretchr/testify/require"
+)
+
+// Note: it's a tricky way to export the `inspectionSummaryRules` and `inspectionRules` for unit test but invisible for normal code
+var (
+    InspectionSummaryRules = inspectionSummaryRules
+    InspectionRules        = inspectionRules
+)
+
+func TestBuildKvRangesForIndexJoinWithoutCwc(t *testing.T) {
+    indexRanges := make([]*ranger.Range, 0, 6)
+    indexRanges = append(indexRanges, generateIndexRange(1, 1, 1, 1, 1))
+    indexRanges = append(indexRanges, generateIndexRange(1, 1, 2, 1, 1))
+    indexRanges = append(indexRanges, generateIndexRange(1, 1, 2, 1, 2))
+    indexRanges = append(indexRanges, generateIndexRange(1, 1, 3, 1, 1))
+    indexRanges = append(indexRanges, generateIndexRange(2, 1, 1, 1, 1))
+    indexRanges = append(indexRanges, generateIndexRange(2, 1, 2, 1, 1))
+
+    joinKeyRows := make([]*join.IndexJoinLookUpContent, 0, 5)
+    joinKeyRows = append(joinKeyRows, &join.IndexJoinLookUpContent{Keys: generateDatumSlice(1, 1)})
+    joinKeyRows = append(joinKeyRows, &join.IndexJoinLookUpContent{Keys: generateDatumSlice(1, 2)})
+    joinKeyRows = append(joinKeyRows, &join.IndexJoinLookUpContent{Keys: generateDatumSlice(2, 1)})
+    joinKeyRows = append(joinKeyRows, &join.IndexJoinLookUpContent{Keys: generateDatumSlice(2, 2)})
+    joinKeyRows = append(joinKeyRows, &join.IndexJoinLookUpContent{Keys: generateDatumSlice(2, 3)})
+
+    keyOff2IdxOff := []int{1, 3}
+    ctx := mock.NewContext()
+    kvRanges, err := buildKvRangesForIndexJoin(ctx.GetDistSQLCtx(), ctx.GetRangerCtx(), 0, 0, joinKeyRows, indexRanges, keyOff2IdxOff, nil, nil, nil)
+    require.NoError(t, err)
+    // Check the kvRanges is in order.
+    for i, kvRange := range kvRanges {
+        require.True(t, kvRange.StartKey.Cmp(kvRange.EndKey) < 0)
+        if i > 0 {
+            require.True(t, kvRange.StartKey.Cmp(kvRanges[i-1].EndKey) >= 0)
+        }
+    }
+}
+
+func TestBuildKvRangesForIndexJoinWithoutCwcAndWithMemoryTracker(t *testing.T) {
+    indexRanges := make([]*ranger.Range, 0, 6)
+    indexRanges = append(indexRanges, generateIndexRange(1, 1, 1, 1, 1))
+    indexRanges = append(indexRanges, generateIndexRange(1, 1, 2, 1, 1))
+    indexRanges = append(indexRanges, generateIndexRange(1, 1, 2, 1, 2))
+    indexRanges = append(indexRanges, generateIndexRange(1, 1, 3, 1, 1))
+    indexRanges = append(indexRanges, generateIndexRange(2, 1, 1, 1, 1))
+    indexRanges = append(indexRanges, generateIndexRange(2, 1, 2, 1, 1))
+
+    bytesConsumed1 := int64(0)
+    {
+        joinKeyRows := make([]*join.IndexJoinLookUpContent, 0, 10)
+        for i := int64(0); i < 10; i++ {
+            joinKeyRows = append(joinKeyRows, &join.IndexJoinLookUpContent{Keys: generateDatumSlice(1, i)})
+        }
+
+        keyOff2IdxOff := []int{1, 3}
+        ctx := mock.NewContext()
+        memTracker := memory.NewTracker(memory.LabelForIndexWorker, -1)
+        kvRanges, err := buildKvRangesForIndexJoin(ctx.GetDistSQLCtx(), ctx.GetRangerCtx(), 0, 0, joinKeyRows, indexRanges, keyOff2IdxOff, nil, memTracker, nil)
+        require.NoError(t, err)
+        // Check the kvRanges is in order.
+        for i, kvRange := range kvRanges {
+            require.True(t, kvRange.StartKey.Cmp(kvRange.EndKey) < 0)
+            if i > 0 {
+                require.True(t, kvRange.StartKey.Cmp(kvRanges[i-1].EndKey) >= 0)
+            }
+        }
+        bytesConsumed1 = memTracker.BytesConsumed()
+    }
+
+    bytesConsumed2 := int64(0)
+    {
+        joinKeyRows := make([]*join.IndexJoinLookUpContent, 0, 20)
+        for i := int64(0); i < 20; i++ {
+            joinKeyRows = append(joinKeyRows, &join.IndexJoinLookUpContent{Keys: generateDatumSlice(1, i)})
+        }
+
+        keyOff2IdxOff := []int{1, 3}
+        ctx := mock.NewContext()
+        memTracker := memory.NewTracker(memory.LabelForIndexWorker, -1)
+        kvRanges, err := buildKvRangesForIndexJoin(ctx.GetDistSQLCtx(), ctx.GetRangerCtx(), 0, 0, joinKeyRows, indexRanges, keyOff2IdxOff, nil, memTracker, nil)
+        require.NoError(t, err)
+        // Check the kvRanges is in order.
+        for i, kvRange := range kvRanges {
+            require.True(t, kvRange.StartKey.Cmp(kvRange.EndKey) < 0)
+            if i > 0 {
+                require.True(t, kvRange.StartKey.Cmp(kvRanges[i-1].EndKey) >= 0)
+            }
+        }
+        bytesConsumed2 = memTracker.BytesConsumed()
+    }
+
+    require.Equal(t, 2*bytesConsumed1, bytesConsumed2)
+    require.Equal(t, int64(23640), bytesConsumed1)
+}
+
+func generateIndexRange(vals ...int64) *ranger.Range {
+    lowDatums := generateDatumSlice(vals...)
+    highDatums := make([]types.Datum, len(vals))
+    copy(highDatums, lowDatums)
+    return &ranger.Range{LowVal: lowDatums, HighVal: highDatums, Collators: collate.GetBinaryCollatorSlice(len(lowDatums))}
+}
+
+func generateDatumSlice(vals ...int64) []types.Datum {
+    datums := make([]types.Datum, len(vals))
+    for i, val := range vals {
+        datums[i].SetInt64(val)
+    }
+    return datums
+}
+
+func TestSlowQueryRuntimeStats(t *testing.T) {
+    stats := &slowQueryRuntimeStats{
+        totalFileNum: 2,
+        readFileNum:  2,
+        readFile:     time.Second,
+        initialize:   time.Millisecond,
+        readFileSize: 1024 * 1024 * 1024,
+        parseLog:     int64(time.Millisecond * 100),
+        concurrent:   15,
+    }
+    require.Equal(t, "initialize: 1ms, read_file: 1s, parse_log: {time:100ms, concurrency:15}, total_file: 2, read_file: 2, read_size: 1024 MB", stats.String())
+    require.Equal(t, stats.Clone().String(), stats.String())
+    stats.Merge(stats.Clone())
+    require.Equal(t, "initialize: 2ms, read_file: 2s, parse_log: {time:200ms, concurrency:15}, total_file: 4, read_file: 4, read_size: 2 GB", stats.String())
+}
+
+// Test whether the actual buckets in Golang Map is same with the estimated number.
+// The test relies on the implement of Golang Map. ref https://github.com/golang/go/blob/go1.13/src/runtime/map.go#L114
+func TestAggPartialResultMapperB(t *testing.T) {
+    // skip err, since we guarantee the success of execution
+    go113, _ := version.NewVersion(`1.13`)
+    // go version format is `gox.y.z foobar`, we only need x.y.z part
+    // The following is pretty hacky, but it only in test which is ok to do so.
+    actualVer, err := version.NewVersion(runtime.Version()[2:6])
+    if err != nil {
+        t.Fatalf("Cannot get actual go version with error %v\n", err)
+    }
+    if actualVer.LessThan(go113) {
+        t.Fatalf("Unsupported version and should never use any version less than go1.13\n")
+    }
+    type testCase struct {
+        rowNum          int
+        expectedB       int
+        expectedGrowing bool
+    }
+    var cases []testCase
+    // https://github.com/golang/go/issues/63438
+    // in 1.21, the load factor of map is 6 rather than 6.5 and the go team refused to backport to 1.21.
+    // https://github.com/golang/go/issues/65706
+    // in 1.23, it has problem.
+    if strings.Contains(runtime.Version(), `go1.21`) {
+        cases = []testCase{
+            {
+                rowNum:          0,
+                expectedB:       0,
+                expectedGrowing: false,
+            },
+            {
+                rowNum:          95,
+                expectedB:       4,
+                expectedGrowing: false,
+            },
+            {
+                rowNum:          10000, // 6 * (1 << 11) is 12288
+                expectedB:       11,
+                expectedGrowing: false,
+            },
+            {
+                rowNum:          1000000, // 6 * (1 << 18) is 1572864
+                expectedB:       18,
+                expectedGrowing: false,
+            },
+            {
+                rowNum:          786432, // 6 * (1 << 17)
+                expectedB:       17,
+                expectedGrowing: false,
+            },
+            {
+                rowNum:          786433, // 6 * (1 << 17) + 1
+                expectedB:       18,
+                expectedGrowing: true,
+            },
+            {
+                rowNum:          393216, // 6 * (1 << 16)
+                expectedB:       16,
+                expectedGrowing: false,
+            },
+            {
+                rowNum:          393217, // 6 * (1 << 16) + 1
+                expectedB:       17,
+                expectedGrowing: true,
+            },
+        }
+    } else {
+        cases = []testCase{
+            {
+                rowNum:          0,
+                expectedB:       0,
+                expectedGrowing: false,
+            },
+            {
+                rowNum:          100,
+                expectedB:       4,
+                expectedGrowing: false,
+            },
+            {
+                rowNum:          10000,
+                expectedB:       11,
+                expectedGrowing: false,
+            },
+            {
+                rowNum:          1000000,
+                expectedB:       18,
+                expectedGrowing: false,
+            },
+            {
+                rowNum:          851968, // 6.5 * (1 << 17)
+                expectedB:       17,
+                expectedGrowing: false,
+            },
+            {
+                rowNum:          851969, // 6.5 * (1 << 17) + 1
+                expectedB:       18,
+                expectedGrowing: true,
+            },
+            {
+                rowNum:          425984, // 6.5 * (1 << 16)
+                expectedB:       16,
+                expectedGrowing: false,
+            },
+            {
+                rowNum:          425985, // 6.5 * (1 << 16) + 1
+                expectedB:       17,
+                expectedGrowing: true,
+            },
+        }
+    }
+
+    for _, tc := range cases {
+        aggMap := make(aggfuncs.AggPartialResultMapper)
+        tempSlice := make([]aggfuncs.PartialResult, 10)
+        for num := 0; num < tc.rowNum; num++ {
+            aggMap[strconv.Itoa(num)] = tempSlice
+        }
+
+        require.Equal(t, tc.expectedB, getB(aggMap))
+        require.Equal(t, tc.expectedGrowing, getGrowing(aggMap))
+    }
+}
+
+// A header for a Go map.
+// nolint:structcheck
+type hmap struct {
+    // Note: the format of the hmap is also encoded in cmd/compile/internal/gc/reflect.go.
+    // Make sure this stays in sync with the compiler's definition.
+    count     int    // nolint:unused // # live cells == size of map. Must be first (used by len() builtin)
+    flags     uint8  // nolint:unused
+    B         uint8  // nolint:unused // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
+    noverflow uint16 // nolint:unused // approximate number of overflow buckets; see incrnoverflow for details
+    hash0     uint32 // nolint:unused // hash seed
+
+    buckets unsafe.Pointer // nolint:unused // array of 2^B Buckets. may be nil if count==0.
+    oldbuckets unsafe.Pointer // nolint:unused // previous bucket array of half the size, non-nil only when growing
+    nevacuate  uintptr        // nolint:unused // progress counter for evacuation (buckets less than this have been evacuated)
+}
+
+func getB(m aggfuncs.AggPartialResultMapper) int {
+    point := (**hmap)(unsafe.Pointer(&m))
+    value := *point
+    return int(value.B)
+}
+
+func getGrowing(m aggfuncs.AggPartialResultMapper) bool {
+    point := (**hmap)(unsafe.Pointer(&m))
+    value := *point
+    return value.oldbuckets != nil
+}
+
+func TestFilterTemporaryTableKeys(t *testing.T) {
+    vars := variable.NewSessionVars(nil)
+    const tableID int64 = 3
+    vars.TxnCtx = &variable.TransactionContext{
+        TxnCtxNoNeedToRestore: variable.TxnCtxNoNeedToRestore{
+            TemporaryTables: map[int64]tableutil.TempTable{tableID: nil},
+        },
+    }
+
+    res := filterTemporaryTableKeys(vars, []kv.Key{tablecodec.EncodeTablePrefix(tableID), tablecodec.EncodeTablePrefix(42)})
+    require.Len(t, res, 1)
+}
+
+func TestErrLevelsForResetStmtContext(t *testing.T) {
+    ctx := mock.NewContext()
+    domain.BindDomain(ctx, &domain.Domain{})
+
+    cases := []struct {
+        name    string
+        sqlMode mysql.SQLMode
+        stmt    []ast.StmtNode
+        levels  errctx.LevelMap
+    }{
+        {
+            name:    "strict,write",
+            sqlMode: mysql.ModeStrictAllTables | mysql.ModeErrorForDivisionByZero,
+            stmt:    []ast.StmtNode{&ast.InsertStmt{}, &ast.UpdateStmt{}, &ast.DeleteStmt{}},
+            levels: func() (l errctx.LevelMap) {
+                l[errctx.ErrGroupTruncate] = errctx.LevelError
+                l[errctx.ErrGroupDupKey] = errctx.LevelError
+                l[errctx.ErrGroupBadNull] = errctx.LevelError
+                l[errctx.ErrGroupNoDefault] = errctx.LevelError
+                l[errctx.ErrGroupDividedByZero] = errctx.LevelError
+                l[errctx.ErrGroupAutoIncReadFailed] = errctx.LevelError
+                l[errctx.ErrGroupNoMatchedPartition] = errctx.LevelError
+                return
+            }(),
+        },
+        {
+            name:    "non-strict,write",
+            sqlMode: mysql.ModeErrorForDivisionByZero,
+            stmt:    []ast.StmtNode{&ast.InsertStmt{}, &ast.UpdateStmt{}, &ast.DeleteStmt{}},
+            levels: func() (l errctx.LevelMap) {
+                l[errctx.ErrGroupTruncate] = errctx.LevelWarn
+                l[errctx.ErrGroupDupKey] = errctx.LevelError
+                l[errctx.ErrGroupBadNull] = errctx.LevelWarn
+                l[errctx.ErrGroupNoDefault] = errctx.LevelWarn
+                l[errctx.ErrGroupDividedByZero] = errctx.LevelWarn
+                l[errctx.ErrGroupAutoIncReadFailed] = errctx.LevelError
+                l[errctx.ErrGroupNoMatchedPartition] = errctx.LevelError
+                return
+            }(),
+        },
+        {
+            name:    "strict,insert ignore",
+            sqlMode: mysql.ModeStrictAllTables | mysql.ModeErrorForDivisionByZero,
+            stmt:    []ast.StmtNode{&ast.InsertStmt{IgnoreErr: true}},
+            levels: func() (l errctx.LevelMap) {
+                l[errctx.ErrGroupTruncate] = errctx.LevelWarn
+                l[errctx.ErrGroupDupKey] = errctx.LevelWarn
+                l[errctx.ErrGroupBadNull] = errctx.LevelWarn
+                l[errctx.ErrGroupNoDefault] = errctx.LevelWarn
+                l[errctx.ErrGroupDividedByZero] = errctx.LevelWarn
+                l[errctx.ErrGroupAutoIncReadFailed] = errctx.LevelWarn
+                l[errctx.ErrGroupNoMatchedPartition] = errctx.LevelWarn
+                return
+            }(),
+        },
+        {
+            name:    "strict,update ignore",
+            sqlMode: mysql.ModeStrictAllTables | mysql.ModeErrorForDivisionByZero,
+            stmt:    []ast.StmtNode{&ast.UpdateStmt{IgnoreErr: true}},
+            levels: func() (l errctx.LevelMap) {
+                l[errctx.ErrGroupTruncate] = errctx.LevelWarn
+                l[errctx.ErrGroupDupKey] = errctx.LevelWarn
+                l[errctx.ErrGroupBadNull] = errctx.LevelWarn
+                l[errctx.ErrGroupNoDefault] = errctx.LevelWarn
+                l[errctx.ErrGroupDividedByZero] = errctx.LevelWarn
+                l[errctx.ErrGroupAutoIncReadFailed] = errctx.LevelError
+                l[errctx.ErrGroupNoMatchedPartition] = errctx.LevelWarn
+                return
+            }(),
+        },
+        {
+            name:    "strict,delete ignore",
+            sqlMode: mysql.ModeStrictAllTables | mysql.ModeErrorForDivisionByZero,
+            stmt:    []ast.StmtNode{&ast.DeleteStmt{IgnoreErr: true}},
+            levels: func() (l errctx.LevelMap) {
+                l[errctx.ErrGroupTruncate] = errctx.LevelWarn
+                l[errctx.ErrGroupDupKey] = errctx.LevelWarn
+                l[errctx.ErrGroupBadNull] = errctx.LevelWarn
+                l[errctx.ErrGroupNoDefault] = errctx.LevelWarn
+                l[errctx.ErrGroupDividedByZero] = errctx.LevelWarn
+                l[errctx.ErrGroupAutoIncReadFailed] = errctx.LevelError
+                l[errctx.ErrGroupNoMatchedPartition] = errctx.LevelError
+                return
+            }(),
+        },
+        {
+            name:    "strict without error_for_division_by_zero,write",
+            sqlMode: mysql.ModeStrictAllTables,
+            stmt:    []ast.StmtNode{&ast.InsertStmt{}, &ast.UpdateStmt{}, &ast.DeleteStmt{}},
+            levels: func() (l errctx.LevelMap) {
+                l[errctx.ErrGroupTruncate] = errctx.LevelError
+                l[errctx.ErrGroupDupKey] = errctx.LevelError
+                l[errctx.ErrGroupBadNull] = errctx.LevelError
+                l[errctx.ErrGroupNoDefault] = errctx.LevelError
+                l[errctx.ErrGroupDividedByZero] = errctx.LevelIgnore
+                l[errctx.ErrGroupAutoIncReadFailed] = errctx.LevelError
+                l[errctx.ErrGroupNoMatchedPartition] = errctx.LevelError
+                return
+            }(),
+        },
+        {
+            name:    "strict,select/union",
+            sqlMode: mysql.ModeStrictAllTables | mysql.ModeErrorForDivisionByZero,
+            stmt:    []ast.StmtNode{&ast.SelectStmt{}, &ast.SetOprStmt{}},
+            levels: func() (l errctx.LevelMap) {
+                l[errctx.ErrGroupTruncate] = errctx.LevelWarn
+                l[errctx.ErrGroupDupKey] = errctx.LevelError
+                l[errctx.ErrGroupBadNull] = errctx.LevelError
+                l[errctx.ErrGroupNoDefault] = errctx.LevelError
+                l[errctx.ErrGroupDividedByZero] = errctx.LevelWarn
+                l[errctx.ErrGroupAutoIncReadFailed] = errctx.LevelError
+                l[errctx.ErrGroupNoMatchedPartition] = errctx.LevelError
+                return
+            }(),
+        },
+        {
+            name:    "non-strict,select/union",
+            sqlMode: mysql.ModeStrictAllTables | mysql.ModeErrorForDivisionByZero,
+            stmt:    []ast.StmtNode{&ast.SelectStmt{}, &ast.SetOprStmt{}},
+            levels: func() (l errctx.LevelMap) {
+                l[errctx.ErrGroupTruncate] = errctx.LevelWarn
+                l[errctx.ErrGroupDupKey] = errctx.LevelError
+                l[errctx.ErrGroupBadNull] = errctx.LevelError
+                l[errctx.ErrGroupNoDefault] = errctx.LevelError
+                l[errctx.ErrGroupDividedByZero] = errctx.LevelWarn
+                l[errctx.ErrGroupAutoIncReadFailed] = errctx.LevelError
+                l[errctx.ErrGroupNoMatchedPartition] = errctx.LevelError
+                return
+            }(),
+        },
+        {
+            name:    "strict,load_data",
+            sqlMode: mysql.ModeStrictAllTables | mysql.ModeErrorForDivisionByZero,
+            stmt:    []ast.StmtNode{&ast.LoadDataStmt{}},
+            levels: func() (l errctx.LevelMap) {
+                l[errctx.ErrGroupTruncate] = errctx.LevelError
+                l[errctx.ErrGroupDupKey] = errctx.LevelError
+                l[errctx.ErrGroupBadNull] = errctx.LevelError
+                l[errctx.ErrGroupNoDefault] = errctx.LevelError
+                l[errctx.ErrGroupDividedByZero] = errctx.LevelWarn
+                l[errctx.ErrGroupAutoIncReadFailed] = errctx.LevelError
+                l[errctx.ErrGroupNoMatchedPartition] = errctx.LevelWarn
+                return
+            }(),
+        },
+        {
+            name:    "non-strict,load_data",
+            sqlMode: mysql.SQLMode(0),
+            stmt:    []ast.StmtNode{&ast.LoadDataStmt{}},
+            levels: func() (l errctx.LevelMap) {
+                l[errctx.ErrGroupTruncate] = errctx.LevelError
+                l[errctx.ErrGroupDupKey] = errctx.LevelError
+                l[errctx.ErrGroupBadNull] = errctx.LevelError
+                l[errctx.ErrGroupNoDefault] = errctx.LevelError
+                l[errctx.ErrGroupDividedByZero] = errctx.LevelWarn
+                l[errctx.ErrGroupAutoIncReadFailed] = errctx.LevelError
+                l[errctx.ErrGroupNoMatchedPartition] = errctx.LevelWarn
+                return
+            }(),
+        },
+    }
+
+    for i, c := range cases {
+        require.NotEmpty(t, c.stmt, c.name)
+        for _, stmt := range c.stmt {
+            msg := fmt.Sprintf("%d: %s, stmt: %T", i, c.name, stmt)
+            ctx.GetSessionVars().SQLMode = c.sqlMode
+            require.NoError(t, ResetContextOfStmt(ctx, stmt), msg)
+            ec := ctx.GetSessionVars().StmtCtx.ErrCtx()
+            require.Equal(t, c.levels, ec.LevelMap(), msg)
+        }
+    }
+}
diff --git a/pkg/executor/mem_reader.go b/pkg/executor/mem_reader.go
new file mode 100644
index 0000000000000..7fb687672ee57
--- /dev/null
+++ b/pkg/executor/mem_reader.go
@@ -0,0 +1,1190 @@
+// Copyright 2019 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package executor
+
+import (
+    "context"
+    "math"
+    "slices"
+
+    "github.com/pingcap/errors"
+    "github.com/pingcap/tidb/pkg/distsql"
+    "github.com/pingcap/tidb/pkg/executor/internal/exec"
+    "github.com/pingcap/tidb/pkg/expression"
+    "github.com/pingcap/tidb/pkg/kv"
+    "github.com/pingcap/tidb/pkg/meta/model"
+    "github.com/pingcap/tidb/pkg/parser/mysql"
+    "github.com/pingcap/tidb/pkg/sessionctx"
+    transaction "github.com/pingcap/tidb/pkg/store/driver/txn"
+    "github.com/pingcap/tidb/pkg/table"
+    "github.com/pingcap/tidb/pkg/table/tables"
+    "github.com/pingcap/tidb/pkg/tablecodec"
+    "github.com/pingcap/tidb/pkg/types"
+    "github.com/pingcap/tidb/pkg/util/chunk"
+    "github.com/pingcap/tidb/pkg/util/codec"
+    "github.com/pingcap/tidb/pkg/util/rowcodec"
+    "github.com/pingcap/tidb/pkg/util/tracing"
+)
+
+type memReader interface {
+    getMemRowsHandle() ([]kv.Handle, error)
+}
+
+var (
+    _ memReader = &memIndexReader{}
+    _ memReader = &memTableReader{}
+    _ memReader = &memIndexLookUpReader{}
+    _ memReader = &memIndexMergeReader{}
+)
+
+type memIndexReader struct {
+    ctx            sessionctx.Context
+    index          *model.IndexInfo
+    table          *model.TableInfo
+    kvRanges       []kv.KeyRange
+    conditions     []expression.Expression
+    addedRows      [][]types.Datum
+    addedRowsLen   int
+    retFieldTypes  []*types.FieldType
+    outputOffset   []int
+    cacheTable     kv.MemBuffer
+    keepOrder      bool
+    physTblIDIdx   int
+    partitionIDMap map[int64]struct{}
+    compareExec
+
+    buf        [16]byte
+    decodeBuff [][]byte
+    resultRows []types.Datum
+}
+
+func buildMemIndexReader(ctx context.Context, us *UnionScanExec, idxReader *IndexReaderExecutor) *memIndexReader {
+    defer tracing.StartRegion(ctx, "buildMemIndexReader").End()
+    kvRanges := idxReader.kvRanges
+    outputOffset := make([]int, 0, len(us.columns))
+    for _, col := range idxReader.outputColumns {
+        outputOffset = append(outputOffset, col.Index)
+    }
+    if us.desc {
+        slices.Reverse(kvRanges)
+    }
+    return &memIndexReader{
+        ctx:            us.Ctx(),
+        index:          idxReader.index,
+        table:          idxReader.table.Meta(),
+        kvRanges:       kvRanges,
+        conditions:     us.conditions,
+        retFieldTypes:  exec.RetTypes(us),
+        outputOffset:   outputOffset,
+        cacheTable:     us.cacheTable,
+        keepOrder:      us.keepOrder,
+        compareExec:    us.compareExec,
+        physTblIDIdx:   us.physTblIDIdx,
+        partitionIDMap: us.partitionIDMap,
+        resultRows:     make([]types.Datum, 0, len(outputOffset)),
+    }
+}
+
+func (m *memIndexReader) getMemRowsIter(ctx context.Context) (memRowsIter, error) {
+    if m.keepOrder && m.table.GetPartitionInfo() != nil {
+        data, err := m.getMemRows(ctx)
+        if err != nil {
+            return nil, errors.Trace(err)
+        }
+        return &defaultRowsIter{data: data}, nil
+    }
+
+    kvIter, err := newTxnMemBufferIter(m.ctx, m.cacheTable, m.kvRanges, m.desc)
+    if err != nil {
+        return nil, errors.Trace(err)
+    }
+    tps := m.getTypes()
+    colInfos := tables.BuildRowcodecColInfoForIndexColumns(m.index, m.table)
+    colInfos = tables.TryAppendCommonHandleRowcodecColInfos(colInfos, m.table)
+    return &memRowsIterForIndex{
+        kvIter:         kvIter,
+        tps:            tps,
+        mutableRow:     chunk.MutRowFromTypes(m.retFieldTypes),
+        memIndexReader: m,
+        colInfos:       colInfos,
+    }, nil
+}
+
+func (m *memIndexReader) getTypes() []*types.FieldType {
+    tps := make([]*types.FieldType, 0, len(m.index.Columns)+1)
+    cols := m.table.Columns
+    for _, col := range m.index.Columns {
+        tps = append(tps, &cols[col.Offset].FieldType)
+    }
+    switch {
+    case m.table.PKIsHandle:
+        for _, col := range m.table.Columns {
+            if mysql.HasPriKeyFlag(col.GetFlag()) {
+                tps = append(tps, &(col.FieldType))
+                break
+            }
+        }
+    case m.table.IsCommonHandle:
+        pkIdx := tables.FindPrimaryIndex(m.table)
+        for _, pkCol := range pkIdx.Columns {
+            colInfo := m.table.Columns[pkCol.Offset]
+            tps = append(tps, &colInfo.FieldType)
+        }
+    default: // ExtraHandle Column tp.
+        tps = append(tps, types.NewFieldType(mysql.TypeLonglong))
+    }
+    return tps
+}
+
+func (m *memIndexReader) getMemRows(ctx context.Context) ([][]types.Datum, error) {
+    defer tracing.StartRegion(ctx, "memIndexReader.getMemRows").End()
+    tps := m.getTypes()
+    colInfos := tables.BuildRowcodecColInfoForIndexColumns(m.index, m.table)
+    colInfos = tables.TryAppendCommonHandleRowcodecColInfos(colInfos, m.table)
+
+    mutableRow := chunk.MutRowFromTypes(m.retFieldTypes)
+    err := iterTxnMemBuffer(m.ctx, m.cacheTable, m.kvRanges, m.desc, func(key, value []byte) error {
+        data, err := m.decodeIndexKeyValue(key, value, tps, colInfos)
+        if err != nil {
+            return err
+        }
+
+        mutableRow.SetDatums(data...)
+        matched, _, err := expression.EvalBool(m.ctx.GetExprCtx().GetEvalCtx(), m.conditions, mutableRow.ToRow())
+        if err != nil || !matched {
+            return err
+        }
+        m.addedRows = append(m.addedRows, data)
+        m.resultRows = make([]types.Datum, 0, len(data))
+        return nil
+    })
+
+    if err != nil {
+        return nil, err
+    }
+
+    if m.keepOrder && m.table.GetPartitionInfo() != nil {
+        slices.SortFunc(m.addedRows, func(a, b []types.Datum) int {
+            ret, err1 := m.compare(m.ctx.GetSessionVars().StmtCtx, a, b)
+            if err1 != nil {
+                err = err1
+            }
+            return ret
+        })
+        return m.addedRows, err
+    }
+    return m.addedRows, nil
+}
+
+func (m *memIndexReader) decodeIndexKeyValue(key, value []byte, tps []*types.FieldType, colInfos []rowcodec.ColInfo) ([]types.Datum, error) {
+    hdStatus := tablecodec.HandleDefault
+    // `HandleIsUnsigned` only affects IntHandle which always has one column.
+    if mysql.HasUnsignedFlag(tps[len(m.index.Columns)].GetFlag()) {
+        hdStatus = tablecodec.HandleIsUnsigned
+    }
+
+    colsLen := len(m.index.Columns)
+    if m.decodeBuff == nil {
+        m.decodeBuff = make([][]byte, colsLen, colsLen+len(colInfos))
+    } else {
+        m.decodeBuff = m.decodeBuff[: colsLen : colsLen+len(colInfos)]
+    }
+    buf := m.buf[:0]
+    values, err := tablecodec.DecodeIndexKVEx(key, value, colsLen, hdStatus, colInfos, buf, m.decodeBuff)
+    if err != nil {
+        return nil, errors.Trace(err)
+    }
+
+    physTblIDColumnIdx := math.MaxInt64
+    if m.physTblIDIdx >= 0 {
+        physTblIDColumnIdx = m.outputOffset[m.physTblIDIdx]
+    }
+
+    ds := m.resultRows[:0]
+    for i, offset := range m.outputOffset {
+        // The `value` slice doesn't contain the value of `physTblID`, it fills by `tablecodec.DecodeKeyHead` function.
+        // For example, the schema is `[a, b, physTblID, c]`, `value` is `[v_a, v_b, v_c]`, `outputOffset` is `[0, 1, 2, 3]`
+        // when we want the value of `c`, we should recalculate the offset of `c` by `offset - 1`.
+        if m.physTblIDIdx == i {
+            tid, _, _, _ := tablecodec.DecodeKeyHead(key)
+            ds = append(ds, types.NewIntDatum(tid))
+            continue
+        }
+        if offset > physTblIDColumnIdx {
+            offset = offset - 1
+        }
+        d, err := tablecodec.DecodeColumnValue(values[offset], tps[offset], m.ctx.GetSessionVars().Location())
+        if err != nil {
+            return nil, err
+        }
+        ds = append(ds, d)
+    }
+    return ds, nil
+}
+
+type memTableReader struct {
+    ctx           sessionctx.Context
+    table         *model.TableInfo
+    columns       []*model.ColumnInfo
+    kvRanges      []kv.KeyRange
+    conditions    []expression.Expression
+    addedRows     [][]types.Datum
+    retFieldTypes []*types.FieldType
+    colIDs        map[int64]int
+    buffer        allocBuf
+    pkColIDs      []int64
+    cacheTable    kv.MemBuffer
+    offsets       []int
+    keepOrder     bool
+    compareExec
+}
+
+type allocBuf struct {
+    // cache for decode handle.
+    handleBytes []byte
+    rd          *rowcodec.BytesDecoder
+    cd          *rowcodec.ChunkDecoder
+}
+
+func buildMemTableReader(ctx context.Context, us *UnionScanExec, kvRanges []kv.KeyRange) *memTableReader {
+    defer tracing.StartRegion(ctx, "buildMemTableReader").End()
+    colIDs := make(map[int64]int, len(us.columns))
+    for i, col := range us.columns {
+        colIDs[col.ID] = i
+    }
+
+    colInfo := make([]rowcodec.ColInfo, 0, len(us.columns))
+    for i := range us.columns {
+        col := us.columns[i]
+        colInfo = append(colInfo, rowcodec.ColInfo{
+            ID:         col.ID,
+            IsPKHandle: us.table.Meta().PKIsHandle && mysql.HasPriKeyFlag(col.GetFlag()),
+            Ft:         rowcodec.FieldTypeFromModelColumn(col),
+        })
+    }
+
+    pkColIDs := tables.TryGetCommonPkColumnIds(us.table.Meta())
+    if len(pkColIDs) == 0 {
+        pkColIDs = []int64{-1}
+    }
+
+    defVal := func(i int) ([]byte, error) {
+        d, err := table.GetColOriginDefaultValueWithoutStrictSQLMode(us.Ctx().GetExprCtx(), us.columns[i])
+        if err != nil {
+            return nil, err
+        }
+        sctx := us.Ctx().GetSessionVars().StmtCtx
+        buf, err := tablecodec.EncodeValue(sctx.TimeZone(), nil, d)
+        return buf, sctx.HandleError(err)
+    }
+    cd := NewRowDecoder(us.Ctx(), us.Schema(), us.table.Meta())
+    rd := rowcodec.NewByteDecoder(colInfo, pkColIDs, defVal, us.Ctx().GetSessionVars().Location())
+    if us.desc {
+        slices.Reverse(kvRanges)
+    }
+    return &memTableReader{
+        ctx:           us.Ctx(),
+        table:         us.table.Meta(),
+        columns:       us.columns,
+        kvRanges:      kvRanges,
+        conditions:    us.conditions,
+        retFieldTypes: exec.RetTypes(us),
+        colIDs:        colIDs,
+        buffer: allocBuf{
+            handleBytes: make([]byte, 0, 16),
+            rd:          rd,
+            cd:          cd,
+        },
+        pkColIDs:    pkColIDs,
+        cacheTable:  us.cacheTable,
+        keepOrder:   us.keepOrder,
+        compareExec: us.compareExec,
+    }
+}
+
+// txnMemBufferIter implements a kv.Iterator, it is an iterator that combines the membuffer data and snapshot data.
+type txnMemBufferIter struct {
+    sctx       sessionctx.Context
+    kvRanges   []kv.KeyRange
+    cacheTable kv.MemBuffer
+    txn        kv.Transaction
+    idx        int
+    curr       kv.Iterator
+    reverse    bool
+    err        error
+}
+
+func newTxnMemBufferIter(sctx sessionctx.Context, cacheTable kv.MemBuffer, kvRanges []kv.KeyRange, reverse bool) (*txnMemBufferIter, error) {
+    txn, err := sctx.Txn(true)
+    if err != nil {
+        return nil, errors.Trace(err)
+    }
+    return &txnMemBufferIter{
+        sctx:       sctx,
+        txn:        txn,
+        kvRanges:   kvRanges,
+        cacheTable: cacheTable,
+        reverse:    reverse,
+    }, nil
+}
+
+func (iter *txnMemBufferIter) Valid() bool {
+    if iter.curr != nil {
+        if iter.curr.Valid() {
+            return true
+        }
+        iter.idx++
+    }
+    for iter.idx < len(iter.kvRanges) {
+        rg := iter.kvRanges[iter.idx]
+        var tmp kv.Iterator
+        if !iter.reverse {
+            tmp = iter.txn.GetMemBuffer().SnapshotIter(rg.StartKey, rg.EndKey)
+        } else {
+            tmp = iter.txn.GetMemBuffer().SnapshotIterReverse(rg.EndKey, rg.StartKey)
+        }
+        snapCacheIter, err := getSnapIter(iter.sctx, iter.cacheTable, rg, iter.reverse)
+        if err != nil {
+            iter.err = errors.Trace(err)
+            return true
+        }
+        if snapCacheIter != nil {
+            tmp, err = transaction.NewUnionIter(tmp, snapCacheIter, iter.reverse)
+            if err != nil {
+                iter.err = errors.Trace(err)
+                return true
+            }
+        }
+        iter.curr = tmp
+        if iter.curr.Valid() {
+            return true
+        }
+        iter.idx++
+    }
+    return false
+}
+
+func (iter *txnMemBufferIter) Next() error {
+    if iter.err != nil {
+        return errors.Trace(iter.err)
+    }
+    if iter.curr != nil {
+        if iter.curr.Valid() {
+            return iter.curr.Next()
+        }
+    }
+    return nil
+}
+
+func (iter *txnMemBufferIter) Key() kv.Key {
+    return iter.curr.Key()
+}
+
+func (iter *txnMemBufferIter) Value() []byte {
+    return iter.curr.Value()
+}
+
+func (iter *txnMemBufferIter) Close() {
+    if iter.curr != nil {
+        iter.curr.Close()
+    }
+}
+
+func (m *memTableReader) getMemRowsIter(ctx context.Context) (memRowsIter, error) {
+    // txnMemBufferIter not supports keepOrder + partitionTable.
+    if m.keepOrder && m.table.GetPartitionInfo() != nil {
+        data, err := m.getMemRows(ctx)
+        if err != nil {
+            return nil, errors.Trace(err)
+        }
+        return &defaultRowsIter{data: data}, nil
+    }
+
+    m.offsets = make([]int, len(m.columns))
+    for i, col := range m.columns {
+        m.offsets[i] = m.colIDs[col.ID]
+    }
+
+    kvIter, err := newTxnMemBufferIter(m.ctx, m.cacheTable, m.kvRanges, m.desc)
+    if err != nil {
+        return nil, errors.Trace(err)
+    }
+    return &memRowsIterForTable{
+        kvIter:         kvIter,
+        cd:             m.buffer.cd,
+        chk:            chunk.New(m.retFieldTypes, 1, 1),
+        datumRow:       make([]types.Datum, len(m.retFieldTypes)),
+        memTableReader: m,
+    }, nil
+}
+
+func (m *memTableReader) getMemRows(ctx context.Context) ([][]types.Datum, error) {
+    defer tracing.StartRegion(ctx, "memTableReader.getMemRows").End()
+    mutableRow := chunk.MutRowFromTypes(m.retFieldTypes)
+    resultRows := make([]types.Datum, len(m.columns))
+    m.offsets = make([]int, len(m.columns))
+    for i, col := range m.columns {
+        m.offsets[i] = m.colIDs[col.ID]
+    }
+    err := iterTxnMemBuffer(m.ctx, m.cacheTable, m.kvRanges, m.desc, func(key, value []byte) error {
+        var err error
+        resultRows, err = m.decodeRecordKeyValue(key, value, &resultRows)
+        if err != nil {
+            return err
+        }
+
+        mutableRow.SetDatums(resultRows...)
+		matched, _, err := expression.EvalBool(m.ctx.GetExprCtx().GetEvalCtx(), m.conditions, mutableRow.ToRow())
+		if err != nil || !matched {
+			return err
+		}
+		m.addedRows = append(m.addedRows, resultRows)
+		resultRows = make([]types.Datum, len(m.columns))
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	if m.keepOrder && m.table.GetPartitionInfo() != nil {
+		slices.SortFunc(m.addedRows, func(a, b []types.Datum) int {
+			ret, err1 := m.compare(m.ctx.GetSessionVars().StmtCtx, a, b)
+			if err1 != nil {
+				err = err1
+			}
+			return ret
+		})
+		return m.addedRows, err
+	}
+	return m.addedRows, nil
+}
+
+func (m *memTableReader) decodeRecordKeyValue(key, value []byte, resultRows *[]types.Datum) ([]types.Datum, error) {
+	handle, err := tablecodec.DecodeRowKey(key)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	return m.decodeRowData(handle, value, resultRows)
+}
+
+// decodeRowData decodes the row data value.
+func (m *memTableReader) decodeRowData(handle kv.Handle, value []byte, resultRows *[]types.Datum) ([]types.Datum, error) {
+	values, err := m.getRowData(handle, value)
+	if err != nil {
+		return nil, err
+	}
+	for i, col := range m.columns {
+		var datum types.Datum
+		err := tablecodec.DecodeColumnValueWithDatum(values[m.offsets[i]], &col.FieldType, m.ctx.GetSessionVars().Location(), &datum)
+		if err != nil {
+			return nil, err
+		}
+		(*resultRows)[i] = datum
+	}
+	return *resultRows, nil
+}
+
+// getRowData decodes a raw byte slice into row data.
+func (m *memTableReader) getRowData(handle kv.Handle, value []byte) ([][]byte, error) {
+	colIDs := m.colIDs
+	pkIsHandle := m.table.PKIsHandle
+	buffer := &m.buffer
+	ctx := m.ctx.GetSessionVars().StmtCtx
+	if rowcodec.IsNewFormat(value) {
+		return buffer.rd.DecodeToBytes(colIDs, handle, value, buffer.handleBytes)
+	}
+	values, err := tablecodec.CutRowNew(value, colIDs)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	if values == nil {
+		values = make([][]byte, len(colIDs))
+	}
+	// Fill the handle and null columns.
+	for _, col := range m.columns {
+		id := col.ID
+		offset := colIDs[id]
+		if m.table.IsCommonHandle {
+			for i, colID := range m.pkColIDs {
+				if colID == col.ID && !types.NeedRestoredData(&col.FieldType) {
+					// Only try to decode the handle when there is no corresponding column in the value.
+					// This is because the information in the handle may be incomplete in some cases.
+					// For example, a prefixed clustered index like 'primary key(col1(1))' only stores the leftmost char in the handle.
+					if values[offset] == nil {
+						values[offset] = handle.EncodedCol(i)
+						break
+					}
+				}
+			}
+		} else if (pkIsHandle && mysql.HasPriKeyFlag(col.GetFlag())) || id == model.ExtraHandleID {
+			var handleDatum types.Datum
+			if mysql.HasUnsignedFlag(col.GetFlag()) {
+				// The PK column is unsigned.
+				handleDatum = types.NewUintDatum(uint64(handle.IntValue()))
+			} else {
+				handleDatum = types.NewIntDatum(handle.IntValue())
+			}
+			handleData, err1 := codec.EncodeValue(ctx.TimeZone(), buffer.handleBytes, handleDatum)
+			err1 = ctx.HandleError(err1)
+			if err1 != nil {
+				return nil, errors.Trace(err1)
+			}
+			values[offset] = handleData
+			continue
+		}
+		if hasColVal(values, colIDs, id) {
+			continue
+		}
+		// No need to fill the default value.
+		values[offset] = []byte{codec.NilFlag}
+	}
+
+	return values, nil
+}
+
+// getMemRowsHandle is called when memIndexMergeReader.partialPlans[i] is TableScan.
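+// It decodes handles from the record keys only; row values are never decoded.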
+func (m *memTableReader) getMemRowsHandle() ([]kv.Handle, error) {
+	handles := make([]kv.Handle, 0, 16)
+	err := iterTxnMemBuffer(m.ctx, m.cacheTable, m.kvRanges, m.desc, func(key, _ []byte) error {
+		handle, err := tablecodec.DecodeRowKey(key)
+		if err != nil {
+			return err
+		}
+		handles = append(handles, handle)
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+	return handles, nil
+}
+
+func hasColVal(data [][]byte, colIDs map[int64]int, id int64) bool {
+	offset, ok := colIDs[id]
+	if ok && data[offset] != nil {
+		return true
+	}
+	return false
+}
+
+type processKVFunc func(key, value []byte) error
+
+func iterTxnMemBuffer(ctx sessionctx.Context, cacheTable kv.MemBuffer, kvRanges []kv.KeyRange, reverse bool, fn processKVFunc) error {
+	txn, err := ctx.Txn(true)
+	if err != nil {
+		return err
+	}
+
+	for _, rg := range kvRanges {
+		var iter kv.Iterator
+		if !reverse {
+			iter = txn.GetMemBuffer().SnapshotIter(rg.StartKey, rg.EndKey)
+		} else {
+			iter = txn.GetMemBuffer().SnapshotIterReverse(rg.EndKey, rg.StartKey)
+		}
+		snapCacheIter, err := getSnapIter(ctx, cacheTable, rg, reverse)
+		if err != nil {
+			return err
+		}
+		if snapCacheIter != nil {
+			iter, err = transaction.NewUnionIter(iter, snapCacheIter, reverse)
+			if err != nil {
+				return err
+			}
+		}
+		for ; iter.Valid(); err = iter.Next() {
+			if err != nil {
+				return err
+			}
+			// Check whether the key has been deleted.
+			if len(iter.Value()) == 0 {
+				continue
+			}
+			err = fn(iter.Key(), iter.Value())
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func getSnapIter(ctx sessionctx.Context, cacheTable kv.MemBuffer, rg kv.KeyRange, reverse bool) (snapCacheIter kv.Iterator, err error) {
+	var cacheIter, snapIter kv.Iterator
+	tempTableData := ctx.GetSessionVars().TemporaryTableData
+	if tempTableData != nil {
+		if !reverse {
+			snapIter, err = tempTableData.Iter(rg.StartKey, rg.EndKey)
+		} else {
+			snapIter, err = tempTableData.IterReverse(rg.EndKey, rg.StartKey)
+		}
+		if err != nil {
+			return nil, err
+		}
+		snapCacheIter = snapIter
+	} else if cacheTable != nil {
+		if !reverse {
+			cacheIter, err = cacheTable.Iter(rg.StartKey, rg.EndKey)
+		} else {
+			cacheIter, err = cacheTable.IterReverse(rg.EndKey, rg.StartKey)
+		}
+		if err != nil {
+			return nil, errors.Trace(err)
+		}
+		snapCacheIter = cacheIter
+	}
+	return snapCacheIter, nil
+}
+
+func (m *memIndexReader) getMemRowsHandle() ([]kv.Handle, error) {
+	handles := make([]kv.Handle, 0, m.addedRowsLen)
+	err := iterTxnMemBuffer(m.ctx, m.cacheTable, m.kvRanges, m.desc, func(key, value []byte) error {
+		handle, err := tablecodec.DecodeIndexHandle(key, value, len(m.index.Columns))
+		if err != nil {
+			return err
+		}
+		// For https://github.com/pingcap/tidb/issues/41827:
+		// when the handle type is year, tablecodec.DecodeIndexHandle converts it to IntHandle instead of CommonHandle.
+		if m.table.IsCommonHandle && handle.IsInt() {
+			b, err := codec.EncodeKey(m.ctx.GetSessionVars().StmtCtx.TimeZone(), nil, types.NewDatum(handle.IntValue()))
+			err = m.ctx.GetSessionVars().StmtCtx.HandleError(err)
+			if err != nil {
+				return err
+			}
+			handle, err = kv.NewCommonHandle(b)
+			if err != nil {
+				return err
+			}
+		}
+		// Filter the key/value by partition ID.
+		if ph, ok := handle.(kv.PartitionHandle); ok {
+			if _, exist := m.partitionIDMap[ph.PartitionID]; !exist {
+				return nil
+			}
+		}
+		handles = append(handles, handle)
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+	return handles, nil
+}
+
+type memIndexLookUpReader struct {
+	ctx   sessionctx.Context
+	index *model.IndexInfo
+
columns []*model.ColumnInfo + table table.Table + conditions []expression.Expression + retFieldTypes []*types.FieldType + schema *expression.Schema + + idxReader *memIndexReader + + // partition mode + partitionMode bool // if this executor is accessing a local index with partition table + partitionTables []table.PhysicalTable // partition tables to access + partitionKVRanges [][]kv.KeyRange // kv ranges for these partition tables + + cacheTable kv.MemBuffer + + keepOrder bool + compareExec +} + +func buildMemIndexLookUpReader(ctx context.Context, us *UnionScanExec, idxLookUpReader *IndexLookUpExecutor) *memIndexLookUpReader { + defer tracing.StartRegion(ctx, "buildMemIndexLookUpReader").End() + + kvRanges := idxLookUpReader.kvRanges + outputOffset := []int{len(idxLookUpReader.index.Columns)} + memIdxReader := &memIndexReader{ + ctx: us.Ctx(), + index: idxLookUpReader.index, + table: idxLookUpReader.table.Meta(), + kvRanges: kvRanges, + retFieldTypes: exec.RetTypes(us), + outputOffset: outputOffset, + cacheTable: us.cacheTable, + partitionIDMap: us.partitionIDMap, + resultRows: make([]types.Datum, 0, len(outputOffset)), + } + + return &memIndexLookUpReader{ + ctx: us.Ctx(), + index: idxLookUpReader.index, + columns: idxLookUpReader.columns, + table: idxLookUpReader.table, + conditions: us.conditions, + retFieldTypes: exec.RetTypes(us), + schema: us.Schema(), + idxReader: memIdxReader, + + partitionMode: idxLookUpReader.partitionTableMode, + partitionKVRanges: idxLookUpReader.partitionKVRanges, + partitionTables: idxLookUpReader.prunedPartitions, + cacheTable: us.cacheTable, + + keepOrder: idxLookUpReader.keepOrder, + compareExec: us.compareExec, + } +} + +func (m *memIndexLookUpReader) getMemRowsIter(ctx context.Context) (memRowsIter, error) { + kvRanges := [][]kv.KeyRange{m.idxReader.kvRanges} + tbls := []table.Table{m.table} + if m.partitionMode { + kvRanges = m.partitionKVRanges + tbls = tbls[:0] + for _, p := range m.partitionTables { + tbls = append(tbls, p) + } + } + + tblKVRanges := make([]kv.KeyRange, 0, 16) + numHandles := 0 + for i, tbl := range tbls { + m.idxReader.kvRanges = kvRanges[i] + handles, err := m.idxReader.getMemRowsHandle() + if err != nil { + return nil, err + } + if len(handles) == 0 { + continue + } + numHandles += len(handles) + ranges, _ := distsql.TableHandlesToKVRanges(getPhysicalTableID(tbl), handles) + tblKVRanges = append(tblKVRanges, ranges...) 
+ } + if numHandles == 0 { + return &defaultRowsIter{}, nil + } + + if m.desc { + slices.Reverse(tblKVRanges) + } + + cd := NewRowDecoder(m.ctx, m.schema, m.table.Meta()) + colIDs, pkColIDs, rd := getColIDAndPkColIDs(m.ctx, m.table, m.columns) + memTblReader := &memTableReader{ + ctx: m.ctx, + table: m.table.Meta(), + columns: m.columns, + kvRanges: tblKVRanges, + conditions: m.conditions, + addedRows: make([][]types.Datum, 0, numHandles), + retFieldTypes: m.retFieldTypes, + colIDs: colIDs, + pkColIDs: pkColIDs, + buffer: allocBuf{ + handleBytes: make([]byte, 0, 16), + rd: rd, + cd: cd, + }, + cacheTable: m.cacheTable, + keepOrder: m.keepOrder, + compareExec: m.compareExec, + } + + return memTblReader.getMemRowsIter(ctx) +} + +func (*memIndexLookUpReader) getMemRowsHandle() ([]kv.Handle, error) { + return nil, errors.New("getMemRowsHandle has not been implemented for memIndexLookUpReader") +} + +type memIndexMergeReader struct { + ctx sessionctx.Context + columns []*model.ColumnInfo + table table.Table + conditions []expression.Expression + retFieldTypes []*types.FieldType + indexMergeReader *IndexMergeReaderExecutor + memReaders []memReader + isIntersection bool + + // partition mode + partitionMode bool // if it is accessing a partition table + partitionTables []table.PhysicalTable // partition tables to access + partitionKVRanges [][][]kv.KeyRange // kv ranges for these partition tables + + keepOrder bool + compareExec +} + +func buildMemIndexMergeReader(ctx context.Context, us *UnionScanExec, indexMergeReader *IndexMergeReaderExecutor) *memIndexMergeReader { + defer tracing.StartRegion(ctx, "buildMemIndexMergeReader").End() + indexCount := len(indexMergeReader.indexes) + memReaders := make([]memReader, 0, indexCount) + for i := 0; i < indexCount; i++ { + if indexMergeReader.indexes[i] == nil { + colIDs, pkColIDs, rd := getColIDAndPkColIDs(indexMergeReader.Ctx(), indexMergeReader.table, indexMergeReader.columns) + memReaders = append(memReaders, &memTableReader{ + ctx: us.Ctx(), + table: indexMergeReader.table.Meta(), + columns: indexMergeReader.columns, + kvRanges: nil, + conditions: us.conditions, + addedRows: make([][]types.Datum, 0), + retFieldTypes: exec.RetTypes(us), + colIDs: colIDs, + pkColIDs: pkColIDs, + buffer: allocBuf{ + handleBytes: make([]byte, 0, 16), + rd: rd, + }, + }) + } else { + outputOffset := []int{len(indexMergeReader.indexes[i].Columns)} + memReaders = append(memReaders, &memIndexReader{ + ctx: us.Ctx(), + index: indexMergeReader.indexes[i], + table: indexMergeReader.table.Meta(), + kvRanges: nil, + compareExec: compareExec{desc: indexMergeReader.descs[i]}, + retFieldTypes: exec.RetTypes(us), + outputOffset: outputOffset, + partitionIDMap: indexMergeReader.partitionIDMap, + resultRows: make([]types.Datum, 0, len(outputOffset)), + }) + } + } + + return &memIndexMergeReader{ + ctx: us.Ctx(), + table: indexMergeReader.table, + columns: indexMergeReader.columns, + conditions: us.conditions, + retFieldTypes: exec.RetTypes(us), + indexMergeReader: indexMergeReader, + memReaders: memReaders, + isIntersection: indexMergeReader.isIntersection, + + partitionMode: indexMergeReader.partitionTableMode, + partitionTables: indexMergeReader.prunedPartitions, + partitionKVRanges: indexMergeReader.partitionKeyRanges, + + keepOrder: us.keepOrder, + compareExec: us.compareExec, + } +} + +type memRowsIter interface { + Next() ([]types.Datum, error) + // Close will release the snapshot it holds, so be sure to call Close. 
+	Close()
+}
+
+type defaultRowsIter struct {
+	data   [][]types.Datum
+	cursor int
+}
+
+func (iter *defaultRowsIter) Next() ([]types.Datum, error) {
+	if iter.cursor < len(iter.data) {
+		ret := iter.data[iter.cursor]
+		iter.cursor++
+		return ret, nil
+	}
+	return nil, nil
+}
+
+func (*defaultRowsIter) Close() {}
+
+// memRowsIterForTable combines a kv.Iterator and a kv decoder into a memRowsIter.
+type memRowsIterForTable struct {
+	kvIter   *txnMemBufferIter // txnMemBufferIter is the kv.Iterator
+	cd       *rowcodec.ChunkDecoder
+	chk      *chunk.Chunk
+	datumRow []types.Datum
+	*memTableReader
+}
+
+func (iter *memRowsIterForTable) Next() ([]types.Datum, error) {
+	curr := iter.kvIter
+	var ret []types.Datum
+	for curr.Valid() {
+		key := curr.Key()
+		value := curr.Value()
+		if err := curr.Next(); err != nil {
+			return nil, errors.Trace(err)
+		}
+
+		// Check whether the key has been deleted.
+		if len(value) == 0 {
+			continue
+		}
+		handle, err := tablecodec.DecodeRowKey(key)
+		if err != nil {
+			return nil, errors.Trace(err)
+		}
+		iter.chk.Reset()
+
+		if !rowcodec.IsNewFormat(value) {
+			// TODO: remove the legacy code!
+			// Fall back to the old way.
+			iter.datumRow, err = iter.memTableReader.decodeRecordKeyValue(key, value, &iter.datumRow)
+			if err != nil {
+				return nil, errors.Trace(err)
+			}
+
+			mutableRow := chunk.MutRowFromTypes(iter.retFieldTypes)
+			mutableRow.SetDatums(iter.datumRow...)
+			matched, _, err := expression.EvalBool(iter.ctx.GetExprCtx().GetEvalCtx(), iter.conditions, mutableRow.ToRow())
+			if err != nil {
+				return nil, errors.Trace(err)
+			}
+			if !matched {
+				continue
+			}
+			return iter.datumRow, nil
+		}
+
+		err = iter.cd.DecodeToChunk(value, handle, iter.chk)
+		if err != nil {
+			return nil, errors.Trace(err)
+		}
+
+		row := iter.chk.GetRow(0)
+		matched, _, err := expression.EvalBool(iter.ctx.GetExprCtx().GetEvalCtx(), iter.conditions, row)
+		if err != nil {
+			return nil, errors.Trace(err)
+		}
+		if !matched {
+			continue
+		}
+		ret = row.GetDatumRowWithBuffer(iter.retFieldTypes, iter.datumRow)
+		break
+	}
+	return ret, nil
+}
+
+func (iter *memRowsIterForTable) Close() {
+	if iter.kvIter != nil {
+		iter.kvIter.Close()
+	}
+}
+
+type memRowsIterForIndex struct {
+	kvIter     *txnMemBufferIter
+	tps        []*types.FieldType
+	mutableRow chunk.MutRow
+	*memIndexReader
+	colInfos []rowcodec.ColInfo
+}
+
+func (iter *memRowsIterForIndex) Next() ([]types.Datum, error) {
+	var ret []types.Datum
+	curr := iter.kvIter
+	for curr.Valid() {
+		key := curr.Key()
+		value := curr.Value()
+		if err := curr.Next(); err != nil {
+			return nil, errors.Trace(err)
+		}
+		// Check whether the key has been deleted.
+		if len(value) == 0 {
+			continue
+		}
+
+		// Filter the key/value by partition ID.
+		if iter.index.Global {
+			_, pid, err := codec.DecodeInt(tablecodec.SplitIndexValue(value).PartitionID)
+			if err != nil {
+				return nil, err
+			}
+			if _, exists := iter.partitionIDMap[pid]; !exists {
+				continue
+			}
+		}
+
+		data, err := iter.memIndexReader.decodeIndexKeyValue(key, value, iter.tps, iter.colInfos)
+		if err != nil {
+			return nil, err
+		}
+
+		iter.mutableRow.SetDatums(data...)
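+		// Evaluate the pushed-down filter conditions on the decoded index row; entries that do not match are skipped.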
+		matched, _, err := expression.EvalBool(iter.memIndexReader.ctx.GetExprCtx().GetEvalCtx(), iter.memIndexReader.conditions, iter.mutableRow.ToRow())
+		if err != nil {
+			return nil, errors.Trace(err)
+		}
+		if !matched {
+			continue
+		}
+		ret = data
+		break
+	}
+	return ret, nil
+}
+
+func (iter *memRowsIterForIndex) Close() {
+	if iter.kvIter != nil {
+		iter.kvIter.Close()
+	}
+}
+
+func (m *memIndexMergeReader) getMemRowsIter(ctx context.Context) (memRowsIter, error) {
+	data, err := m.getMemRows(ctx)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	return &defaultRowsIter{data: data}, nil
+}
+
+func (m *memIndexMergeReader) getHandles() (handles []kv.Handle, err error) {
+	hMap := kv.NewHandleMap()
+	// Loop over each memReader and fill the handle map.
+	for i, reader := range m.memReaders {
+		// [partitionNum][rangeNum]
+		var readerKvRanges [][]kv.KeyRange
+		if m.partitionMode {
+			readerKvRanges = m.partitionKVRanges[i]
+		} else {
+			readerKvRanges = [][]kv.KeyRange{m.indexMergeReader.keyRanges[i]}
+		}
+		for j, kr := range readerKvRanges {
+			switch r := reader.(type) {
+			case *memTableReader:
+				r.kvRanges = kr
+			case *memIndexReader:
+				r.kvRanges = kr
+			default:
+				return nil, errors.New("memReader must be memTableReader or memIndexReader")
+			}
+			handles, err := reader.getMemRowsHandle()
+			if err != nil {
+				return nil, err
+			}
+			// Filter out duplicate rows.
+			for _, handle := range handles {
+				if _, ok := handle.(kv.PartitionHandle); !ok && m.partitionMode {
+					pid := m.partitionTables[j].GetPhysicalID()
+					handle = kv.NewPartitionHandle(pid, handle)
+				}
+				if v, ok := hMap.Get(handle); !ok {
+					cnt := 1
+					hMap.Set(handle, &cnt)
+				} else {
+					*(v.(*int))++
+				}
+			}
+		}
+	}
+
+	// Process the handle map and return the handles that meet the requirement (union or intersection).
+	hMap.Range(func(h kv.Handle, val any) bool {
+		if m.isIntersection {
+			if *(val.(*int)) == len(m.memReaders) {
+				handles = append(handles, h)
+			}
+		} else {
+			handles = append(handles, h)
+		}
+		return true
+	})
+
+	return handles, nil
+}
+
+func (m *memIndexMergeReader) getMemRows(ctx context.Context) ([][]types.Datum, error) {
+	r, ctx := tracing.StartRegionEx(ctx, "memIndexMergeReader.getMemRows")
+	defer r.End()
+
+	handles, err := m.getHandles()
+	if err != nil || len(handles) == 0 {
+		return nil, err
+	}
+
+	var tblKVRanges []kv.KeyRange
+	if m.partitionMode {
+		// `tid` for a partition handle is useless, so use 0 here.
+		tblKVRanges, _ = distsql.TableHandlesToKVRanges(0, handles)
+	} else {
+		tblKVRanges, _ = distsql.TableHandlesToKVRanges(getPhysicalTableID(m.table), handles)
+	}
+
+	colIDs, pkColIDs, rd := getColIDAndPkColIDs(m.ctx, m.table, m.columns)
+
+	memTblReader := &memTableReader{
+		ctx:           m.ctx,
+		table:         m.table.Meta(),
+		columns:       m.columns,
+		kvRanges:      tblKVRanges,
+		conditions:    m.conditions,
+		addedRows:     make([][]types.Datum, 0, len(handles)),
+		retFieldTypes: m.retFieldTypes,
+		colIDs:        colIDs,
+		pkColIDs:      pkColIDs,
+		buffer: allocBuf{
+			handleBytes: make([]byte, 0, 16),
+			rd:          rd,
+		},
+	}
+
+	rows, err := memTblReader.getMemRows(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	// keepOrder is not set on memTblReader because, in indexMerge, even
+	// non-partitioned tables need to be reordered here.
+ if m.keepOrder { + slices.SortFunc(rows, func(a, b []types.Datum) int { + ret, err1 := m.compare(m.ctx.GetSessionVars().StmtCtx, a, b) + if err1 != nil { + err = err1 + } + return ret + }) + } + + return rows, err +} + +func (*memIndexMergeReader) getMemRowsHandle() ([]kv.Handle, error) { + return nil, errors.New("getMemRowsHandle has not been implemented for memIndexMergeReader") +} + +func getColIDAndPkColIDs(ctx sessionctx.Context, tbl table.Table, columns []*model.ColumnInfo) (map[int64]int, []int64, *rowcodec.BytesDecoder) { + colIDs := make(map[int64]int, len(columns)) + for i, col := range columns { + colIDs[col.ID] = i + } + + tblInfo := tbl.Meta() + colInfos := make([]rowcodec.ColInfo, 0, len(columns)) + for i := range columns { + col := columns[i] + colInfos = append(colInfos, rowcodec.ColInfo{ + ID: col.ID, + IsPKHandle: tblInfo.PKIsHandle && mysql.HasPriKeyFlag(col.GetFlag()), + Ft: rowcodec.FieldTypeFromModelColumn(col), + }) + } + pkColIDs := tables.TryGetCommonPkColumnIds(tblInfo) + if len(pkColIDs) == 0 { + pkColIDs = []int64{-1} + } + defVal := func(i int) ([]byte, error) { + d, err := table.GetColOriginDefaultValueWithoutStrictSQLMode(ctx.GetExprCtx(), columns[i]) + if err != nil { + return nil, err + } + buf, err := tablecodec.EncodeValue(ctx.GetSessionVars().StmtCtx.TimeZone(), nil, d) + return buf, ctx.GetSessionVars().StmtCtx.HandleError(err) + } + rd := rowcodec.NewByteDecoder(colInfos, pkColIDs, defVal, ctx.GetSessionVars().Location()) + return colIDs, pkColIDs, rd +} diff --git a/pkg/executor/union_scan_test.go b/pkg/executor/union_scan_test.go new file mode 100644 index 0000000000000..2a1f982ebf76c --- /dev/null +++ b/pkg/executor/union_scan_test.go @@ -0,0 +1,483 @@ +// Copyright 2016 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package executor_test
+
+import (
+	"encoding/hex"
+	"fmt"
+	"testing"
+	"time"
+
+	"github.com/pingcap/tidb/pkg/executor"
+	"github.com/pingcap/tidb/pkg/store/helper"
+	"github.com/pingcap/tidb/pkg/tablecodec"
+	"github.com/pingcap/tidb/pkg/testkit"
+	"github.com/pingcap/tidb/pkg/util/benchdaily"
+	"github.com/stretchr/testify/require"
+)
+
+func TestUnionScanForMemBufferReader(t *testing.T) {
+	store := testkit.CreateMockStore(t)
+	tk := testkit.NewTestKit(t, store)
+	tk.MustExec("set @@tidb_partition_prune_mode = dynamic")
+
+	for i := 0; i < 2; i++ {
+		suffix := ""
+		if i == 1 {
+			suffix = "PARTITION BY HASH(a) partitions 4"
+		}
+		tk.MustExec("use test")
+		tk.MustExec("drop table if exists t")
+		tk.MustExec(fmt.Sprintf("create table t (a int,b int, index idx(b)) %s", suffix))
+		tk.MustExec("analyze table t")
+		tk.MustExec("insert t values (1,1),(2,2)")
+
+		// Test for delete in union scan.
+		tk.MustExec("begin")
+		tk.MustExec("delete from t")
+		tk.MustQuery("select * from t").Check(testkit.Rows())
+		tk.MustExec("insert t values (1,1)")
+		tk.MustQuery("select a,b from t").Check(testkit.Rows("1 1"))
+		tk.MustQuery("select a,b from t use index(idx)").Check(testkit.Rows("1 1"))
+		tk.MustExec("commit")
+		tk.MustExec("admin check table t")
+
+		// Test update with untouched index columns.
+		tk.MustExec("delete from t")
+		tk.MustExec("insert t values (1,1),(2,2)")
+		tk.MustExec("begin")
+		tk.MustExec("update t set a=a+1")
+		tk.MustQuery("select * from t").Sort().Check(testkit.Rows("2 1", "3 2"))
+		tk.MustQuery("select * from t use index (idx)").Sort().Check(testkit.Rows("2 1", "3 2"))
+		tk.MustQuery("select * from t use index (idx) order by b desc").Check(testkit.Rows("3 2", "2 1"))
+		tk.MustExec("commit")
+		tk.MustExec("admin check table t")
+
+		// Test update with index column.
+		tk.MustQuery("select * from t").Sort().Check(testkit.Rows("2 1", "3 2"))
+		tk.MustExec("begin")
+		tk.MustExec("update t set b=b+1 where a=2")
+		tk.MustQuery("select * from t").Sort().Check(testkit.Rows("2 2", "3 2"))
+		tk.MustQuery("select * from t use index(idx)").Sort().Check(testkit.Rows("2 2", "3 2"))
+		tk.MustExec("commit")
+		tk.MustExec("admin check table t")
+
+		// Test index reader order.
+		tk.MustQuery("select * from t").Sort().Check(testkit.Rows("2 2", "3 2"))
+		tk.MustExec("begin")
+		tk.MustExec("insert t values (3,3),(1,1),(4,4),(-1,-1);")
+		tk.MustQuery("select * from t use index (idx)").Sort().Check(testkit.Rows("-1 -1", "1 1", "2 2", "3 2", "3 3", "4 4"))
+		tk.MustQuery("select b from t use index (idx) order by b desc").Check(testkit.Rows("4", "3", "2", "2", "1", "-1"))
+		tk.MustExec("commit")
+		tk.MustExec("admin check table t")
+
+		// Global index is not supported.
+		if i == 0 {
+			// Test for updating a unique index.
+			tk.MustExec("drop table if exists t")
+			tk.MustExec("create table t (a int,b int, unique index idx(b))")
+			tk.MustExec("insert t values (1,1),(2,2)")
+			tk.MustExec("begin")
+			tk.MustGetErrMsg("update t set b=b+1", "[kv:1062]Duplicate entry '2' for key 't.idx'")
+			// Update with an unchanged index column.
+ tk.MustExec("update t set a=a+1") + tk.MustQuery("select * from t use index (idx)").Check(testkit.Rows("2 1", "3 2")) + tk.MustQuery("select b from t use index (idx)").Check(testkit.Rows("1", "2")) + tk.MustExec("update t set b=b+2 where a=2") + tk.MustQuery("select * from t").Check(testkit.Rows("2 3", "3 2")) + tk.MustQuery("select * from t use index (idx) order by b desc").Check(testkit.Rows("2 3", "3 2")) + tk.MustQuery("select * from t use index (idx)").Check(testkit.Rows("3 2", "2 3")) + tk.MustExec("commit") + tk.MustExec("admin check table t") + } + + // Test for getMissIndexRowsByHandle return nil. + tk.MustExec("drop table if exists t") + tk.MustExec(fmt.Sprintf("create table t (a int,b int, index idx(a)) %s", suffix)) + tk.MustExec("analyze table t") + tk.MustExec("insert into t values (1,1),(2,2),(3,3)") + tk.MustExec("begin") + tk.MustExec("update t set b=0 where a=2") + tk.MustQuery("select * from t ignore index (idx) where a>0 and b>0;").Sort().Check(testkit.Rows("1 1", "3 3")) + tk.MustQuery("select * from t use index (idx) where a>0 and b>0;").Sort().Check(testkit.Rows("1 1", "3 3")) + tk.MustExec("commit") + tk.MustExec("admin check table t") + + // Test index lookup reader corner case. + tk.MustExec("drop table if exists tt") + tk.MustExec(fmt.Sprintf("create table tt (a bigint, b int,c int,primary key (a,b)) %s;", suffix)) + tk.MustExec("analyze table tt") + tk.MustExec("insert into tt set a=1,b=1;") + tk.MustExec("begin;") + tk.MustExec("update tt set c=1;") + tk.MustQuery("select * from tt use index (PRIMARY) where c is not null;").Check(testkit.Rows("1 1 1")) + tk.MustExec("commit") + tk.MustExec("admin check table tt") + + // Test index reader corner case. + tk.MustExec("drop table if exists t1") + tk.MustExec(fmt.Sprintf("create table t1 (a int,b int,primary key(a,b)) %s;", suffix)) + tk.MustExec("analyze table t1") + tk.MustExec("begin;") + tk.MustExec("insert into t1 values(1, 1);") + tk.MustQuery("select * from t1 use index(primary) where a=1;").Check(testkit.Rows("1 1")) + tk.MustExec("commit") + tk.MustExec("admin check table t1;") + + // Test index reader with pk handle. + tk.MustExec("drop table if exists t1") + tk.MustExec(fmt.Sprintf("create table t1 (a int unsigned key,b int,c varchar(10), index idx(b,a,c)) %s;", suffix)) + tk.MustExec("analyze table t1") + tk.MustExec("begin;") + tk.MustExec("insert into t1 (a,b) values (0, 0), (1, 1);") + tk.MustQuery("select a,b from t1 use index(idx) where b>0;").Check(testkit.Rows("1 1")) + tk.MustQuery("select a,b,c from t1 ignore index(idx) where a>=1 order by a desc").Check(testkit.Rows("1 1 ")) + tk.MustExec("insert into t1 values (2, 2, null), (3, 3, 'a');") + tk.MustQuery("select a,b from t1 use index(idx) where b>1 and c is not null;").Check(testkit.Rows("3 3")) + tk.MustExec("commit") + tk.MustExec("admin check table t1;") + + // Test insert and update with untouched index. + tk.MustExec("drop table if exists t1") + tk.MustExec(fmt.Sprintf("create table t1 (a int,b int,c int,index idx(b)) %s;", suffix)) + tk.MustExec("analyze table t1") + tk.MustExec("begin;") + tk.MustExec("insert into t1 values (1, 1, 1), (2, 2, 2);") + tk.MustExec("update t1 set c=c+1 where a=1;") + tk.MustQuery("select * from t1 use index(idx);").Sort().Check(testkit.Rows("1 1 2", "2 2 2")) + tk.MustExec("commit") + tk.MustExec("admin check table t1;") + + if i == 0 { + // Test insert and update with untouched unique index. 
+ tk.MustExec("drop table if exists t1") + tk.MustExec(fmt.Sprintf("create table t1 (a int,b int,c int,unique index idx(b)) %s;", suffix)) + tk.MustExec("begin;") + tk.MustExec("insert into t1 values (1, 1, 1), (2, 2, 2);") + tk.MustExec("update t1 set c=c+1 where a=1;") + tk.MustQuery("select * from t1 use index(idx);").Check(testkit.Rows("1 1 2", "2 2 2")) + tk.MustExec("commit") + tk.MustExec("admin check table t1;") + } + + // Test update with 2 index, one untouched, the other index is touched. + tk.MustExec("drop table if exists t1") + tk.MustExec(fmt.Sprintf("create table t1 (a int,b int,c int,unique index idx1(a), index idx2(b)) %s;", suffix)) + tk.MustExec("analyze table t1") + tk.MustExec("insert into t1 values (1, 1, 1);") + tk.MustExec("update t1 set b=b+1 where a=1;") + tk.MustQuery("select * from t1 use index(idx2);").Check(testkit.Rows("1 2 1")) + tk.MustExec("admin check table t1;") + } +} + +func TestIssue53951(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec(`CREATE TABLE gholla_dummy1 ( + id varchar(10) NOT NULL, + mark int, + deleted_at datetime(3) NOT NULL DEFAULT '1970-01-01 01:00:01.000', + account_id varchar(10) NOT NULL, + metastore_id varchar(10) NOT NULL, + is_deleted tinyint(1) GENERATED ALWAYS AS ((deleted_at > _utf8mb4'1970-01-01 01:00:01.000')) VIRTUAL NOT NULL, + PRIMARY KEY (account_id,metastore_id,id), + KEY isDeleted_accountId_metastoreId (is_deleted,account_id,metastore_id) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;`) + tk.MustExec(`CREATE TABLE gholla_dummy2 ( + id varchar(10) NOT NULL, + mark int, + deleted_at datetime(3) NOT NULL DEFAULT '1970-01-01 01:00:01.000', + account_id varchar(10) NOT NULL, + metastore_id varchar(10) NOT NULL, + is_deleted tinyint(1) GENERATED ALWAYS AS ((deleted_at > _utf8mb4'1970-01-01 01:00:01.000')) VIRTUAL NOT NULL, + PRIMARY KEY (account_id,metastore_id,id), + KEY isDeleted_accountId_metastoreId (is_deleted,account_id,metastore_id) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; `) + tk.MustExec(`INSERT INTO gholla_dummy1 (id,mark,deleted_at,account_id,metastore_id) VALUES ('ABC', 1, '1970-01-01 01:00:01.000', 'ABC', 'ABC');`) + tk.MustExec(`INSERT INTO gholla_dummy2 (id,mark,deleted_at,account_id,metastore_id) VALUES ('ABC', 1, '1970-01-01 01:00:01.000', 'ABC', 'ABC');`) + tk.MustExec(`start transaction;`) + tk.MustExec(`update gholla_dummy2 set deleted_at = NOW(), mark=2 where account_id = 'ABC' and metastore_id = 'ABC' and id = 'ABC';`) + tk.MustQuery(`select + /*+ INL_JOIN(g1) */ + g1.account_id, + g2.mark +from + gholla_dummy1 g1 FORCE INDEX(isDeleted_accountId_metastoreId) +STRAIGHT_JOIN + gholla_dummy2 g2 FORCE INDEX (PRIMARY) +ON + g1.account_id = g2.account_id AND + g1.metastore_id = g2.metastore_id AND + g1.id = g2.id +WHERE + g1.account_id = 'ABC' AND + g1.metastore_id = 'ABC' AND + g1.is_deleted = FALSE AND + g2.is_deleted = FALSE;`).Check(testkit.Rows()) // empty result, no error + tk.MustQuery(`select + /*+ INL_JOIN(g2) */ + g1.account_id, + g2.mark +from + gholla_dummy1 g1 FORCE INDEX(isDeleted_accountId_metastoreId) +STRAIGHT_JOIN + gholla_dummy2 g2 FORCE INDEX (PRIMARY) +ON + g1.account_id = g2.account_id AND + g1.metastore_id = g2.metastore_id AND + g1.id = g2.id +WHERE + g1.account_id = 'ABC' AND + g1.metastore_id = 'ABC' AND + g1.is_deleted = FALSE AND + g2.is_deleted = FALSE;`).Check(testkit.Rows()) // empty result, no error + tk.MustExec(`rollback`) +} + +func TestIssue28073(t 
*testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t1, t2") + tk.MustExec("create table t1 (c_int int, c_str varchar(40), primary key (c_int, c_str) , key(c_int)) partition by hash (c_int) partitions 4") + tk.MustExec("create table t2 like t1") + tk.MustExec("insert into t1 values (1, 'flamboyant mcclintock')") + tk.MustExec("insert into t2 select * from t1") + + tk.MustExec("begin") + tk.MustExec("insert into t2 (c_int, c_str) values (2, 'romantic grothendieck')") + tk.MustQuery("select * from t2 use index(primary) left join t1 use index(primary) on t1.c_int = t2.c_int for update").Sort().Check( + testkit.Rows( + "1 flamboyant mcclintock 1 flamboyant mcclintock", + "2 romantic grothendieck ", + )) + tk.MustExec("commit") + + // Check no key is written to table ID 0 + txn, err := store.Begin() + require.NoError(t, err) + start := tablecodec.EncodeTablePrefix(0) + end := tablecodec.EncodeTablePrefix(1) + iter, err := txn.Iter(start, end) + require.NoError(t, err) + + exist := false + for iter.Valid() { + require.Nil(t, iter.Next()) + exist = true + break + } + require.False(t, exist) + + // Another case, left join on partition table should not generate locks on physical ID = 0 + tk.MustExec("drop table if exists t1, t2;") + tk.MustExec("create table t1 (c_int int, c_str varchar(40), primary key (c_int, c_str));") + tk.MustExec("create table t2 (c_int int, c_str varchar(40), primary key (c_int)) partition by hash (c_int) partitions 4;") + tk.MustExec("insert into t1 (`c_int`, `c_str`) values (1, 'upbeat solomon'), (5, 'sharp rubin');") + tk.MustExec("insert into t2 (`c_int`, `c_str`) values (1, 'clever haibt'), (4, 'kind margulis');") + tk.MustExec("begin pessimistic;") + tk.MustQuery("select * from t1 left join t2 on t1.c_int = t2.c_int for update;").Check(testkit.Rows( + "1 upbeat solomon 1 clever haibt", + "5 sharp rubin ", + )) + key, err := hex.DecodeString("7480000000000000005F728000000000000000") + require.NoError(t, err) + h := helper.NewHelper(store.(helper.Storage)) + resp, err := h.GetMvccByEncodedKey(key) + require.NoError(t, err) + require.Nil(t, resp.Info.Lock) + require.Len(t, resp.Info.Writes, 0) + require.Len(t, resp.Info.Values, 0) + + tk.MustExec("rollback;") +} + +func TestIssue32422(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + + tk.MustExec("create table t (id int, c int, index(id));") + tk.MustExec("insert into t values (3,3), (4,4), (5,5);") + tk.MustExec("alter table t cache;") + + var cacheUsed bool + for i := 0; i < 20; i++ { + tk.MustQuery("select id+1, c from t where c = 4;").Check(testkit.Rows("5 4")) + if tk.Session().GetSessionVars().StmtCtx.ReadFromTableCache { + cacheUsed = true + break + } + time.Sleep(50 * time.Millisecond) + } + require.True(t, cacheUsed) + + tk.MustQuery("select id+1, c from t where c = 4;").Check(testkit.Rows("5 4")) + + // Some extra tests. + // Since cached table use UnionScanExec utilities, check what happens when they work together. + // In these cases, the cache data serve as the snapshot, tikv is skipped, and txn membuffer works the same way. + tk.MustExec("begin") + tk.MustQuery("select id+1, c from t where c = 4;").Check(testkit.Rows("5 4")) + tk.MustExec("insert into t values (6, 6)") + // Check for the new added data. 
+ tk.MustHavePlan("select id+1, c from t where c = 6;", "UnionScan") + tk.MustQuery("select id+1, c from t where c = 6;").Check(testkit.Rows("7 6")) + require.True(t, tk.Session().GetSessionVars().StmtCtx.ReadFromTableCache) + // Check for the old data. + tk.MustQuery("select id+1, c from t where c = 4;").Check(testkit.Rows("5 4")) + require.True(t, tk.Session().GetSessionVars().StmtCtx.ReadFromTableCache) + + // Index Lookup + tk.MustHavePlan("select id+1, c from t where id = 6", "IndexLookUp") + tk.MustQuery("select id+1, c from t use index(id) where id = 6").Check(testkit.Rows("7 6")) + require.True(t, tk.Session().GetSessionVars().StmtCtx.ReadFromTableCache) + tk.MustQuery("select id+1, c from t use index(id) where id = 4").Check(testkit.Rows("5 4")) + require.True(t, tk.Session().GetSessionVars().StmtCtx.ReadFromTableCache) + + // Index Reader + tk.MustHavePlan("select id from t where id = 6", "IndexReader") + tk.MustQuery("select id from t use index(id) where id = 6").Check(testkit.Rows("6")) + require.True(t, tk.Session().GetSessionVars().StmtCtx.ReadFromTableCache) + tk.MustQuery("select id from t use index(id) where id = 4").Check(testkit.Rows("4")) + require.True(t, tk.Session().GetSessionVars().StmtCtx.ReadFromTableCache) + + tk.MustExec("rollback") +} + +func TestSnapshotWithConcurrentWrite(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("create table t1 (id int auto_increment key, b int, index(b));") + + tk.MustExec("begin") + tk.MustExec("insert into t1 (b) values (1),(2),(3),(4),(5),(6),(7),(8);") + for j := 0; j < 16; j++ { + tk.MustExec("insert into t1 (b) select /*+ use_index(t1, b) */ id from t1;") + } + tk.MustQuery("select count(1) from t1").Check(testkit.Rows("524288")) // 8 * 2^16 rows + tk.MustExec("rollback") +} + +func BenchmarkUnionScanRead(b *testing.B) { + store := testkit.CreateMockStore(b) + + tk := testkit.NewTestKit(b, store) + tk.MustExec("use test") + tk.MustExec(`create table t_us ( +c1 varchar(10), +c2 varchar(30), +c3 varchar(1), +c4 varchar(12), +c5 varchar(10), +c6 datetime);`) + tk.MustExec(`begin;`) + for i := 0; i < 8000; i++ { + tk.MustExec("insert into t_us values ('54321', '1234', '1', '000000', '7518', '2014-05-08')") + } + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + tk.MustQuery("select * from t_us where c1 = '12345'").Check(testkit.Rows()) + } + b.StopTimer() + tk.MustExec("rollback") +} + +func BenchmarkUnionScanIndexReadDescRead(b *testing.B) { + store := testkit.CreateMockStore(b) + + tk := testkit.NewTestKit(b, store) + tk.MustExec("use test") + tk.MustExec(`create table t(a int, b int, c int, primary key(a), index k(b))`) + tk.MustExec(`begin;`) + for i := 0; i < 100; i++ { + tk.MustExec(fmt.Sprintf("insert into t values (%d, %d, %d)", i, i, i)) + } + + tk.MustHavePlan("select b from t use index(k) where b > 50 order by b desc", "IndexReader") + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + // indexReader + tk.MustQuery("select b from t use index(k) where b > 50 order by b desc") + } + b.StopTimer() + tk.MustExec("rollback") +} + +func BenchmarkUnionScanTableReadDescRead(b *testing.B) { + store := testkit.CreateMockStore(b) + + tk := testkit.NewTestKit(b, store) + tk.MustExec("use test") + tk.MustExec(`create table t(a int, b int, c int, primary key(a), index k(b))`) + tk.MustExec(`begin;`) + for i := 0; i < 100; i++ { + tk.MustExec(fmt.Sprintf("insert into t values (%d, %d, %d)", i, i, i)) + } + + 
tk.MustHavePlan("select * from t where a > 50 order by a desc", "TableReader") + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + // tableReader + tk.MustQuery("select * from t where a > 50 order by a desc") + } + b.StopTimer() + tk.MustExec("rollback") +} + +func BenchmarkUnionScanIndexLookUpDescRead(b *testing.B) { + store := testkit.CreateMockStore(b) + + tk := testkit.NewTestKit(b, store) + tk.MustExec("use test") + tk.MustExec(`create table t(a int, b int, c int, primary key(a), index k(b))`) + tk.MustExec(`begin;`) + for i := 0; i < 100; i++ { + tk.MustExec(fmt.Sprintf("insert into t values (%d, %d, %d)", i, i, i)) + } + + tk.MustHavePlan("select * from t use index(k) where b > 50 order by b desc", "IndexLookUp") + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + // indexLookUp + tk.MustQuery("select * from t use index(k) where b > 50 order by b desc") + } + b.StopTimer() + tk.MustExec("rollback") +} + +func TestBenchDaily(t *testing.T) { + benchdaily.Run( + executor.BenchmarkReadLastLinesOfHugeLine, + executor.BenchmarkCompleteInsertErr, + executor.BenchmarkCompleteLoadErr, + BenchmarkUnionScanRead, + BenchmarkUnionScanIndexReadDescRead, + BenchmarkUnionScanTableReadDescRead, + BenchmarkUnionScanIndexLookUpDescRead, + ) +} diff --git a/pkg/session/test/txn/BUILD.bazel b/pkg/session/test/txn/BUILD.bazel new file mode 100644 index 0000000000000..456c700f3123a --- /dev/null +++ b/pkg/session/test/txn/BUILD.bazel @@ -0,0 +1,28 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_test") + +go_test( + name = "txn_test", + timeout = "short", + srcs = [ + "main_test.go", + "txn_test.go", + ], + flaky = True, + race = "on", + shard_count = 10, + deps = [ + "//pkg/config", + "//pkg/kv", + "//pkg/parser/auth", + "//pkg/parser/mysql", + "//pkg/parser/terror", + "//pkg/testkit", + "//pkg/testkit/testmain", + "//pkg/testkit/testsetup", + "//pkg/util/dbterror/plannererrors", + "@com_github_pingcap_failpoint//:failpoint", + "@com_github_stretchr_testify//require", + "@com_github_tikv_client_go_v2//tikv", + "@org_uber_go_goleak//:goleak", + ], +) diff --git a/pkg/session/test/txn/txn_test.go b/pkg/session/test/txn/txn_test.go new file mode 100644 index 0000000000000..9a40530c76427 --- /dev/null +++ b/pkg/session/test/txn/txn_test.go @@ -0,0 +1,581 @@ +// Copyright 2023 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package txn + +import ( + "context" + "fmt" + "strings" + "sync" + "testing" + "time" + + "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/pkg/config" + "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/parser/auth" + "github.com/pingcap/tidb/pkg/parser/mysql" + "github.com/pingcap/tidb/pkg/parser/terror" + "github.com/pingcap/tidb/pkg/testkit" + "github.com/pingcap/tidb/pkg/util/dbterror/plannererrors" + "github.com/stretchr/testify/require" +) + +// TestAutocommit . 
See https://dev.mysql.com/doc/internals/en/status-flags.html +func TestAutocommit(t *testing.T) { + store := testkit.CreateMockStore(t) + + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + + tk.MustExec("drop table if exists t;") + require.Greater(t, int(tk.Session().Status()&mysql.ServerStatusAutocommit), 0) + tk.MustExec("create table t (id BIGINT PRIMARY KEY AUTO_INCREMENT NOT NULL)") + require.Greater(t, int(tk.Session().Status()&mysql.ServerStatusAutocommit), 0) + tk.MustExec("insert t values ()") + require.Greater(t, int(tk.Session().Status()&mysql.ServerStatusAutocommit), 0) + tk.MustExec("begin") + require.Greater(t, int(tk.Session().Status()&mysql.ServerStatusAutocommit), 0) + tk.MustExec("insert t values ()") + require.Greater(t, int(tk.Session().Status()&mysql.ServerStatusAutocommit), 0) + tk.MustExec("drop table if exists t") + require.Greater(t, int(tk.Session().Status()&mysql.ServerStatusAutocommit), 0) + + tk.MustExec("create table t (id BIGINT PRIMARY KEY AUTO_INCREMENT NOT NULL)") + require.Greater(t, int(tk.Session().Status()&mysql.ServerStatusAutocommit), 0) + tk.MustExec("set autocommit=0") + require.Equal(t, 0, int(tk.Session().Status()&mysql.ServerStatusAutocommit)) + tk.MustExec("insert t values ()") + require.Equal(t, 0, int(tk.Session().Status()&mysql.ServerStatusAutocommit)) + tk.MustExec("commit") + require.Equal(t, 0, int(tk.Session().Status()&mysql.ServerStatusAutocommit)) + tk.MustExec("drop table if exists t") + require.Equal(t, 0, int(tk.Session().Status()&mysql.ServerStatusAutocommit)) + tk.MustExec("set autocommit='On'") + require.Greater(t, int(tk.Session().Status()&mysql.ServerStatusAutocommit), 0) + + // When autocommit is 0, transaction start ts should be the first *valid* + // statement, rather than *any* statement. + tk.MustExec("create table t (id int key)") + tk.MustExec("set @@autocommit = 0") + tk.MustExec("rollback") + tk.MustExec("set @@autocommit = 0") + + tk1 := testkit.NewTestKit(t, store) + tk1.MustExec("use test") + tk1.MustExec("insert into t select 1") + //nolint:all_revive,revive + tk.MustQuery("select * from t").Check(testkit.Rows("1")) + tk.MustExec("delete from t") + + // When the transaction is rolled back, the global set statement would succeed. + tk.MustExec("set @@global.autocommit = 0") + tk.MustExec("begin") + tk.MustExec("insert into t values (1)") + tk.MustExec("set @@global.autocommit = 1") + tk.MustExec("rollback") + tk.MustQuery("select count(*) from t where id = 1").Check(testkit.Rows("0")) + tk.MustQuery("select @@global.autocommit").Check(testkit.Rows("1")) + + // When the transaction is committed because of switching mode, the session set statement should succeed. + tk.MustExec("set autocommit = 0") + tk.MustExec("begin") + tk.MustExec("insert into t values (1)") + tk.MustExec("set autocommit = 1") + tk.MustExec("rollback") + tk.MustQuery("select count(*) from t where id = 1").Check(testkit.Rows("1")) + tk.MustQuery("select @@autocommit").Check(testkit.Rows("1")) + + tk.MustExec("set autocommit = 0") + tk.MustExec("insert into t values (2)") + tk.MustExec("set autocommit = 1") + tk.MustExec("rollback") + tk.MustQuery("select count(*) from t where id = 2").Check(testkit.Rows("1")) + tk.MustQuery("select @@autocommit").Check(testkit.Rows("1")) + + // Set should not take effect if the mode is not changed. 
+	tk.MustExec("set autocommit = 0")
+	tk.MustExec("begin")
+	tk.MustExec("insert into t values (3)")
+	tk.MustExec("set autocommit = 0")
+	tk.MustExec("rollback")
+	tk.MustQuery("select count(*) from t where id = 3").Check(testkit.Rows("0"))
+	tk.MustQuery("select @@autocommit").Check(testkit.Rows("0"))
+
+	tk.MustExec("set autocommit = 1")
+	tk.MustExec("begin")
+	tk.MustExec("insert into t values (4)")
+	tk.MustExec("set autocommit = 1")
+	tk.MustExec("rollback")
+	tk.MustQuery("select count(*) from t where id = 4").Check(testkit.Rows("0"))
+	tk.MustQuery("select @@autocommit").Check(testkit.Rows("1"))
+}
+
+// TestTxnLazyInitialize tests that when autocommit = 0, not every statement starts
+// a new transaction.
+func TestTxnLazyInitialize(t *testing.T) {
+	testTxnLazyInitialize(t, false)
+	testTxnLazyInitialize(t, true)
+}
+
+func testTxnLazyInitialize(t *testing.T, isPessimistic bool) {
+	store := testkit.CreateMockStore(t)
+
+	tk := testkit.NewTestKit(t, store)
+	tk.MustExec("use test")
+	tk.MustExec("drop table if exists t")
+	tk.MustExec("create table t (id int)")
+	if isPessimistic {
+		tk.MustExec("set tidb_txn_mode = 'pessimistic'")
+	}
+
+	tk.MustExec("set @@autocommit = 0")
+	_, err := tk.Session().Txn(true)
+	require.True(t, kv.ErrInvalidTxn.Equal(err))
+	txn, err := tk.Session().Txn(false)
+	require.NoError(t, err)
+	require.False(t, txn.Valid())
+	tk.MustQuery("select @@tidb_current_ts").Check(testkit.Rows("0"))
+	tk.MustQuery("select @@tidb_current_ts").Check(testkit.Rows("0"))
+
+	// Those statements should not start a new transaction automatically.
+	tk.MustQuery("select 1")
+	tk.MustQuery("select @@tidb_current_ts").Check(testkit.Rows("0"))
+
+	tk.MustExec("set @@tidb_general_log = 0")
+	tk.MustQuery("select @@tidb_current_ts").Check(testkit.Rows("0"))
+
+	// Explain now also builds the query and starts a transaction.
+	tk.MustQuery("explain select * from t")
+	res := tk.MustQuery("select @@tidb_current_ts")
+	require.NotEqual(t, "0", res.Rows()[0][0])
+
+	// Begin statement should start a new transaction.
+	tk.MustExec("begin")
+	txn, err = tk.Session().Txn(false)
+	require.NoError(t, err)
+	require.True(t, txn.Valid())
+	tk.MustExec("rollback")
+
+	tk.MustExec("select * from t")
+	txn, err = tk.Session().Txn(false)
+	require.NoError(t, err)
+	require.True(t, txn.Valid())
+	tk.MustExec("rollback")
+
+	tk.MustExec("insert into t values (1)")
+	txn, err = tk.Session().Txn(false)
+	require.NoError(t, err)
+	require.True(t, txn.Valid())
+	tk.MustExec("rollback")
+}
+
+func TestDisableTxnAutoRetry(t *testing.T) {
+	store := testkit.CreateMockStoreWithSchemaLease(t, 1*time.Second)
+
+	setTxnTk := testkit.NewTestKit(t, store)
+	setTxnTk.MustExec("set global tidb_txn_mode=''")
+	tk1 := testkit.NewTestKit(t, store)
+	tk2 := testkit.NewTestKit(t, store)
+
+	tk1.MustExec("use test")
+	tk2.MustExec("use test")
+
+	tk1.MustExec("create table no_retry (id int)")
+	tk1.MustExec("insert into no_retry values (1)")
+	tk1.MustExec("set @@tidb_disable_txn_auto_retry = 1")
+
+	tk1.MustExec("begin")
+	tk1.MustExec("update no_retry set id = 2")
+
+	tk2.MustExec("begin")
+	tk2.MustExec("update no_retry set id = 3")
+	tk2.MustExec("commit")
+
+	// No auto retry because tidb_disable_txn_auto_retry is set to 1.
+	_, err := tk1.Session().Execute(context.Background(), "commit")
+	require.Error(t, err)
+
+	// Session 1 starts a transaction early.
+	// Execute a select statement to clear the retry history.
+ tk1.MustExec("select 1") + err = tk1.Session().PrepareTxnCtx(context.Background()) + require.NoError(t, err) + // session 2 update the value. + tk2.MustExec("update no_retry set id = 4") + // AutoCommit update will retry, so it would not fail. + tk1.MustExec("update no_retry set id = 5") + + // RestrictedSQL should retry. + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnOthers) + tk1.Session().ExecuteInternal(ctx, "begin") + + tk2.MustExec("update no_retry set id = 6") + + tk1.Session().ExecuteInternal(ctx, "update no_retry set id = 7") + tk1.Session().ExecuteInternal(ctx, "commit") + + // test for disable transaction local latch + defer config.RestoreFunc()() + config.UpdateGlobal(func(conf *config.Config) { + conf.TxnLocalLatches.Enabled = false + }) + tk1.MustExec("begin") + tk1.MustExec("update no_retry set id = 9") + + tk2.MustExec("update no_retry set id = 8") + + _, err = tk1.Session().Execute(context.Background(), "commit") + require.Error(t, err) + require.True(t, kv.ErrWriteConflict.Equal(err), fmt.Sprintf("err %v", err)) + require.Contains(t, err.Error(), kv.TxnRetryableMark) + tk1.MustExec("rollback") + + config.UpdateGlobal(func(conf *config.Config) { + conf.TxnLocalLatches.Enabled = true + }) + tk1.MustExec("begin") + tk2.MustExec("alter table no_retry add index idx(id)") + tk2.MustQuery("select * from no_retry").Check(testkit.Rows("8")) + tk1.MustExec("update no_retry set id = 10") + _, err = tk1.Session().Execute(context.Background(), "commit") + require.Error(t, err) + + // set autocommit to begin and commit + tk1.MustExec("set autocommit = 0") + tk1.MustQuery("select * from no_retry").Check(testkit.Rows("8")) + tk2.MustExec("update no_retry set id = 11") + tk1.MustExec("update no_retry set id = 12") + _, err = tk1.Session().Execute(context.Background(), "set autocommit = 1") + require.Error(t, err) + require.True(t, kv.ErrWriteConflict.Equal(err), fmt.Sprintf("err %v", err)) + require.Contains(t, err.Error(), kv.TxnRetryableMark) + tk1.MustExec("rollback") + tk2.MustQuery("select * from no_retry").Check(testkit.Rows("11")) + + tk1.MustExec("set autocommit = 0") + tk1.MustQuery("select * from no_retry").Check(testkit.Rows("11")) + tk2.MustExec("update no_retry set id = 13") + tk1.MustExec("update no_retry set id = 14") + _, err = tk1.Session().Execute(context.Background(), "commit") + require.Error(t, err) + require.True(t, kv.ErrWriteConflict.Equal(err), fmt.Sprintf("err %v", err)) + require.Contains(t, err.Error(), kv.TxnRetryableMark) + tk1.MustExec("rollback") + tk2.MustQuery("select * from no_retry").Check(testkit.Rows("13")) +} + +// The Read-only flags are checked in the planning stage of queries, +// but this test checks we check them again at commit time. +// The main use case for this is a long-running auto-commit statement. 
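+// The INSERT below sleeps via SLEEP(1) so that the read-only switch can flip while the
+// statement is still executing.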
+func TestAutoCommitRespectsReadOnly(t *testing.T) { + store := testkit.CreateMockStore(t) + var wg sync.WaitGroup + tk1 := testkit.NewTestKit(t, store) + tk2 := testkit.NewTestKit(t, store) + require.NoError(t, tk1.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil, nil)) + require.NoError(t, tk2.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil, nil)) + + tk1.MustExec("create table test.auto_commit_test (a int)") + wg.Add(1) + go func() { + err := tk1.ExecToErr("INSERT INTO test.auto_commit_test VALUES (SLEEP(1))") + require.True(t, terror.ErrorEqual(err, plannererrors.ErrSQLInReadOnlyMode), fmt.Sprintf("err %v", err)) + wg.Done() + }() + tk2.MustExec("SET GLOBAL tidb_restricted_read_only = 1") + err := tk2.ExecToErr("INSERT INTO test.auto_commit_test VALUES (0)") // should also be an error + require.True(t, terror.ErrorEqual(err, plannererrors.ErrSQLInReadOnlyMode), fmt.Sprintf("err %v", err)) + // Reset and check with the privilege to ignore the readonly flag and continue to insert. + wg.Wait() + tk1.MustExec("SET GLOBAL tidb_restricted_read_only = 0") + tk1.MustExec("SET GLOBAL tidb_super_read_only = 0") + tk1.MustExec("GRANT RESTRICTED_REPLICA_WRITER_ADMIN on *.* to 'root'") + + wg.Add(1) + go func() { + tk1.MustExec("INSERT INTO test.auto_commit_test VALUES (SLEEP(1))") + wg.Done() + }() + tk2.MustExec("SET GLOBAL tidb_restricted_read_only = 1") + tk2.MustExec("INSERT INTO test.auto_commit_test VALUES (0)") + + // wait for go routines + wg.Wait() + tk1.MustExec("SET GLOBAL tidb_restricted_read_only = 0") + tk1.MustExec("SET GLOBAL tidb_super_read_only = 0") +} + +func TestTxnRetryErrMsg(t *testing.T) { + store := testkit.CreateMockStore(t) + setTxnTk := testkit.NewTestKit(t, store) + setTxnTk.MustExec("set global tidb_txn_mode=''") + tk1 := testkit.NewTestKit(t, store) + tk2 := testkit.NewTestKit(t, store) + tk1.MustExec("use test") + tk1.MustExec("create table no_retry (id int)") + tk1.MustExec("insert into no_retry values (1)") + tk1.MustExec("begin") + tk2.MustExec("use test") + tk2.MustExec("update no_retry set id = id + 1") + tk1.MustExec("update no_retry set id = id + 1") + require.NoError(t, failpoint.Enable("tikvclient/mockRetryableErrorResp", `return(true)`)) + _, err := tk1.Session().Execute(context.Background(), "commit") + require.NoError(t, failpoint.Disable("tikvclient/mockRetryableErrorResp")) + require.Error(t, err) + require.True(t, kv.ErrTxnRetryable.Equal(err), "error: %s", err) + require.True(t, strings.Contains(err.Error(), "mock retryable error"), "error: %s", err) + require.True(t, strings.Contains(err.Error(), kv.TxnRetryableMark), "error: %s", err) +} + +func TestSetTxnScope(t *testing.T) { + // Check the default value of @@tidb_enable_local_txn and @@txn_scope without configuring the zone label. + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustQuery("select @@global.tidb_enable_local_txn;").Check(testkit.Rows("0")) + tk.MustQuery("select @@txn_scope;").Check(testkit.Rows(kv.GlobalTxnScope)) + require.Equal(t, kv.GlobalTxnScope, tk.Session().GetSessionVars().CheckAndGetTxnScope()) + // Check the default value of @@tidb_enable_local_txn and @@txn_scope with configuring the zone label. 
+ require.NoError(t, failpoint.Enable("tikvclient/injectTxnScope", `return("bj")`)) + tk = testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustQuery("select @@global.tidb_enable_local_txn;").Check(testkit.Rows("0")) + tk.MustQuery("select @@txn_scope;").Check(testkit.Rows(kv.GlobalTxnScope)) + require.Equal(t, kv.GlobalTxnScope, tk.Session().GetSessionVars().CheckAndGetTxnScope()) + require.NoError(t, failpoint.Disable("tikvclient/injectTxnScope")) + + // @@tidb_enable_local_txn is off without configuring the zone label. + tk = testkit.NewTestKit(t, store) + tk.MustQuery("select @@global.tidb_enable_local_txn;").Check(testkit.Rows("0")) + tk.MustQuery("select @@txn_scope;").Check(testkit.Rows(kv.GlobalTxnScope)) + require.Equal(t, kv.GlobalTxnScope, tk.Session().GetSessionVars().CheckAndGetTxnScope()) + // Set @@txn_scope to local. + err := tk.ExecToErr("set @@txn_scope = 'local';") + require.Error(t, err) + require.Regexp(t, `.*txn_scope can not be set to local when tidb_enable_local_txn is off.*`, err) + tk.MustQuery("select @@txn_scope;").Check(testkit.Rows(kv.GlobalTxnScope)) + require.Equal(t, kv.GlobalTxnScope, tk.Session().GetSessionVars().CheckAndGetTxnScope()) + // Set @@txn_scope to global. + tk.MustExec("set @@txn_scope = 'global';") + tk.MustQuery("select @@txn_scope;").Check(testkit.Rows(kv.GlobalTxnScope)) + require.Equal(t, kv.GlobalTxnScope, tk.Session().GetSessionVars().CheckAndGetTxnScope()) + + // @@tidb_enable_local_txn is off with configuring the zone label. + require.NoError(t, failpoint.Enable("tikvclient/injectTxnScope", `return("bj")`)) + tk = testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustQuery("select @@global.tidb_enable_local_txn;").Check(testkit.Rows("0")) + tk.MustQuery("select @@txn_scope;").Check(testkit.Rows(kv.GlobalTxnScope)) + require.Equal(t, kv.GlobalTxnScope, tk.Session().GetSessionVars().CheckAndGetTxnScope()) + // Set @@txn_scope to local. + err = tk.ExecToErr("set @@txn_scope = 'local';") + require.Error(t, err) + require.Regexp(t, `.*txn_scope can not be set to local when tidb_enable_local_txn is off.*`, err) + tk.MustQuery("select @@txn_scope;").Check(testkit.Rows(kv.GlobalTxnScope)) + require.Equal(t, kv.GlobalTxnScope, tk.Session().GetSessionVars().CheckAndGetTxnScope()) + // Set @@txn_scope to global. + tk.MustExec("set @@txn_scope = 'global';") + tk.MustQuery("select @@txn_scope;").Check(testkit.Rows(kv.GlobalTxnScope)) + require.Equal(t, kv.GlobalTxnScope, tk.Session().GetSessionVars().CheckAndGetTxnScope()) + require.NoError(t, failpoint.Disable("tikvclient/injectTxnScope")) + + // @@tidb_enable_local_txn is on without configuring the zone label. + tk = testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("set global tidb_enable_local_txn = on;") + tk.MustQuery("select @@txn_scope;").Check(testkit.Rows(kv.GlobalTxnScope)) + require.Equal(t, kv.GlobalTxnScope, tk.Session().GetSessionVars().CheckAndGetTxnScope()) + // Set @@txn_scope to local. + err = tk.ExecToErr("set @@txn_scope = 'local';") + require.Error(t, err) + require.Regexp(t, `.*txn_scope can not be set to local when zone label is empty or "global".*`, err) + tk.MustQuery("select @@txn_scope;").Check(testkit.Rows(kv.GlobalTxnScope)) + require.Equal(t, kv.GlobalTxnScope, tk.Session().GetSessionVars().CheckAndGetTxnScope()) + // Set @@txn_scope to global. 
+ tk.MustExec("set @@txn_scope = 'global';") + tk.MustQuery("select @@txn_scope;").Check(testkit.Rows(kv.GlobalTxnScope)) + require.Equal(t, kv.GlobalTxnScope, tk.Session().GetSessionVars().CheckAndGetTxnScope()) + + // @@tidb_enable_local_txn is on with configuring the zone label. + require.NoError(t, failpoint.Enable("tikvclient/injectTxnScope", `return("bj")`)) + tk = testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("set global tidb_enable_local_txn = on;") + tk.MustQuery("select @@txn_scope;").Check(testkit.Rows(kv.LocalTxnScope)) + require.Equal(t, "bj", tk.Session().GetSessionVars().CheckAndGetTxnScope()) + // Set @@txn_scope to global. + tk.MustExec("set @@txn_scope = 'global';") + tk.MustQuery("select @@txn_scope;").Check(testkit.Rows(kv.GlobalTxnScope)) + require.Equal(t, kv.GlobalTxnScope, tk.Session().GetSessionVars().CheckAndGetTxnScope()) + // Set @@txn_scope to local. + tk.MustExec("set @@txn_scope = 'local';") + tk.MustQuery("select @@txn_scope;").Check(testkit.Rows(kv.LocalTxnScope)) + require.Equal(t, "bj", tk.Session().GetSessionVars().CheckAndGetTxnScope()) + // Try to set @@txn_scope to an invalid value. + err = tk.ExecToErr("set @@txn_scope='foo'") + require.Error(t, err) + require.Regexp(t, `.*txn_scope value should be global or local.*`, err) + require.NoError(t, failpoint.Disable("tikvclient/injectTxnScope")) +} + +func TestErrorRollback(t *testing.T) { + store := testkit.CreateMockStore(t) + + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t_rollback") + tk.MustExec("create table t_rollback (c1 int, c2 int, primary key(c1))") + tk.MustExec("insert into t_rollback values (0, 0)") + + var wg sync.WaitGroup + cnt := 4 + wg.Add(cnt) + num := 20 + + for i := 0; i < cnt; i++ { + go func() { + defer wg.Done() + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("set @@session.tidb_retry_limit = 100") + for j := 0; j < num; j++ { + _, _ = tk.Exec("insert into t_rollback values (1, 1)") + tk.MustExec("update t_rollback set c2 = c2 + 1 where c1 = 0") + } + }() + } + + wg.Wait() + tk.MustQuery("select c2 from t_rollback where c1 = 0").Check(testkit.Rows(fmt.Sprint(cnt * num))) +} + +// TestInTrans . 
See https://dev.mysql.com/doc/internals/en/status-flags.html +func TestInTrans(t *testing.T) { + store := testkit.CreateMockStore(t) + + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t (id BIGINT PRIMARY KEY AUTO_INCREMENT NOT NULL)") + tk.MustExec("insert t values ()") + tk.MustExec("begin") + txn, err := tk.Session().Txn(true) + require.NoError(t, err) + require.True(t, txn.Valid()) + tk.MustExec("insert t values ()") + require.True(t, txn.Valid()) + tk.MustExec("drop table if exists t;") + require.False(t, txn.Valid()) + tk.MustExec("create table t (id BIGINT PRIMARY KEY AUTO_INCREMENT NOT NULL)") + require.False(t, txn.Valid()) + tk.MustExec("insert t values ()") + require.False(t, txn.Valid()) + tk.MustExec("commit") + tk.MustExec("insert t values ()") + + tk.MustExec("set autocommit=0") + tk.MustExec("begin") + require.True(t, txn.Valid()) + tk.MustExec("insert t values ()") + require.True(t, txn.Valid()) + tk.MustExec("commit") + require.False(t, txn.Valid()) + tk.MustExec("insert t values ()") + require.True(t, txn.Valid()) + tk.MustExec("commit") + require.False(t, txn.Valid()) + + tk.MustExec("set autocommit=1") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t (id BIGINT PRIMARY KEY AUTO_INCREMENT NOT NULL)") + tk.MustExec("begin") + require.True(t, txn.Valid()) + tk.MustExec("insert t values ()") + require.True(t, txn.Valid()) + tk.MustExec("rollback") + require.False(t, txn.Valid()) +} + +func TestMemBufferSnapshotRead(t *testing.T) { + store := testkit.CreateMockStore(t) + + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t(a int primary key, b int, index i(b));") + + tk.MustExec("set session tidb_distsql_scan_concurrency = 1;") + tk.MustExec("set session tidb_index_lookup_join_concurrency = 1;") + tk.MustExec("set session tidb_projection_concurrency=1;") + tk.MustExec("set session tidb_init_chunk_size=1;") + tk.MustExec("set session tidb_max_chunk_size=40;") + tk.MustExec("set session tidb_index_join_batch_size = 10") + + tk.MustExec("begin;") + // write (0, 0), (1, 1), ... ,(100, 100) into membuffer + var sb strings.Builder + sb.WriteString("insert into t values ") + for i := 0; i <= 100; i++ { + if i > 0 { + sb.WriteString(", ") + } + sb.WriteString(fmt.Sprintf("(%d, %d)", i, i)) + } + tk.MustExec(sb.String()) + + // insert on duplicate key statement should update the table to (0, 100), (1, 99), ... (100, 0) + // This statement will create UnionScan dynamically during execution, and some UnionScan will see staging data(should be bypassed), + // so it relies on correct snapshot read to get the expected result. 
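+ // (The INL_JOIN hint matters here: an index-lookup join builds its inner
+ // readers, and hence fresh UnionScans, batch by batch while the statement
+ // runs, so a UnionScan built mid-statement must read the membuffer snapshot
+ // taken at statement start rather than the statement's own staged writes.)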
+ tk.MustExec("insert into t (select /*+ INL_JOIN(t1) */ 100 - t1.a as a, t1.b from t t1, (select a, b from t) t2 where t1.b = t2.b) on duplicate key update b = values(b)") + + require.Empty(t, tk.MustQuery("select a, b from t where a + b != 100;").Rows()) + tk.MustExec("commit;") + require.Empty(t, tk.MustQuery("select a, b from t where a + b != 100;").Rows()) + + tk.MustExec("set session tidb_distsql_scan_concurrency = default;") + tk.MustExec("set session tidb_index_lookup_join_concurrency = default;") + tk.MustExec("set session tidb_projection_concurrency=default;") + tk.MustExec("set session tidb_init_chunk_size=default;") + tk.MustExec("set session tidb_max_chunk_size=default;") + tk.MustExec("set session tidb_index_join_batch_size = default") +} + +func TestMemBufferCleanupMemoryLeak(t *testing.T) { + // Test if cleanup memory will cause a memory leak. + // When an in-txn statement fails, TiDB cleans up the mutations from this statement. + // If there's a memory leak, the memory usage could increase uncontrollably with retries. + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("create table t(a varchar(255) primary key)") + key1 := strings.Repeat("a", 255) + key2 := strings.Repeat("b", 255) + tk.MustExec(`set global tidb_mem_oom_action='cancel'`) + tk.MustExec("set session tidb_mem_quota_query=10240") + tk.MustExec("begin") + tk.MustExec("insert into t values(?)", key2) + for i := 0; i < 100; i++ { + // The insert statement will fail because of the duplicate key error. + err := tk.ExecToErr("insert into t values(?), (?)", key1, key2) + require.Error(t, err) + if strings.Contains(err.Error(), "Duplicate") { + continue + } + require.NoError(t, err) + } + tk.MustExec("commit") +} diff --git a/store/driver/error/error.go b/store/driver/error/error.go index 1d9543cc1437d..9dc51a00b7094 100644 --- a/store/driver/error/error.go +++ b/store/driver/error/error.go @@ -92,6 +92,11 @@ func ToTiDBErr(err error) error { return kv.ErrEntryTooLarge.GenWithStackByArgs(entryTooLarge.Limit, entryTooLarge.Size) } + var keyTooLarge *tikverr.ErrKeyTooLarge + if stderrs.As(err, &keyTooLarge) { + return kv.ErrKeyTooLarge.GenWithStackByArgs(keyTooLarge.KeySize) + } + if stderrs.Is(err, tikverr.ErrInvalidTxn) { return kv.ErrInvalidTxn } diff --git a/store/driver/error/error_test.go b/store/driver/error/error_test.go index f1f2878fb7743..45e864d180a37 100644 --- a/store/driver/error/error_test.go +++ b/store/driver/error/error_test.go @@ -15,6 +15,7 @@ package error //nolint: predeclared import ( + "math" "testing" "github.com/pingcap/errors" @@ -51,3 +52,16 @@ func TestConvertError(t *testing.T) { assert.True(t, errors.ErrorEqual(tidbErr, terror.ErrResultUndetermined)) } } + +func TestMemBufferOversizeError(t *testing.T) { + err2str := map[error]string{ + &tikverr.ErrTxnTooLarge{Size: 100}: "Transaction is too large, size: 100", + &tikverr.ErrEntryTooLarge{Limit: 10, Size: 20}: "entry too large, the max entry size is 10, the size of data is 20", + &tikverr.ErrKeyTooLarge{KeySize: math.MaxUint16 + 1}: "key is too large, the size of given key is 65536", + } + for err, errString := range err2str { + tidbErr := ToTiDBErr(err) + assert.NotNil(t, tidbErr) + assert.Contains(t, tidbErr.Error(), errString) + } +} diff --git a/tests/integrationtest/r/executor/ddl.result b/tests/integrationtest/r/executor/ddl.result new file mode 100644 index 0000000000000..37f01e8b845b9 --- /dev/null +++ b/tests/integrationtest/r/executor/ddl.result @@ -0,0 +1,590 @@ 
+drop table if exists truncate_test; +create table truncate_test (a int); +insert truncate_test values (1),(2),(3); +select * from truncate_test; +a +1 +2 +3 +truncate table truncate_test; +select * from truncate_test; +a +drop table if exists t; +drop view if exists recursive_view1, recursive_view2; +create table if not exists t(a int); +create definer='root'@'localhost' view recursive_view1 as select * from t; +create definer='root'@'localhost' view recursive_view2 as select * from recursive_view1; +drop table t; +rename table recursive_view2 to t; +select * from recursive_view1; +Error 1462 (HY000): `executor__ddl`.`recursive_view1` contains view recursion +drop view recursive_view1, t; +drop table if exists t; +drop view if exists recursive_view1, recursive_view2; +create table if not exists t(a int); +create view view_issue16250 as select * from t; +truncate table view_issue16250; +Error 1146 (42S02): Table 'executor__ddl.view_issue16250' doesn't exist +drop table if exists t; +drop view if exists view_issue16250; +drop table if exists zy_tab; +create table if not exists zy_tab ( +zy_code int, +zy_name varchar(100) +); +drop table if exists bj_tab; +create table if not exists bj_tab ( +bj_code int, +bj_name varchar(100), +bj_addr varchar(100), +bj_person_count int, +zy_code int +); +drop table if exists st_tab; +create table if not exists st_tab ( +st_code int, +st_name varchar(100), +bj_code int +); +drop view if exists v_st_2; +create definer='root'@'localhost' view v_st_2 as +select st.st_name,bj.bj_name,zy.zy_name +from ( +select bj_code, +bj_name, +zy_code +from bj_tab as b +where b.bj_code = 1 +) as bj +left join zy_tab as zy on zy.zy_code = bj.zy_code +left join st_tab as st on bj.bj_code = st.bj_code; +show create view v_st_2; +View Create View character_set_client collation_connection +v_st_2 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v_st_2` (`st_name`, `bj_name`, `zy_name`) AS SELECT `st`.`st_name` AS `st_name`,`bj`.`bj_name` AS `bj_name`,`zy`.`zy_name` AS `zy_name` FROM ((SELECT `bj_code` AS `bj_code`,`bj_name` AS `bj_name`,`zy_code` AS `zy_code` FROM `executor__ddl`.`bj_tab` AS `b` WHERE `b`.`bj_code`=1) AS `bj` LEFT JOIN `executor__ddl`.`zy_tab` AS `zy` ON `zy`.`zy_code`=`bj`.`zy_code`) LEFT JOIN `executor__ddl`.`st_tab` AS `st` ON `bj`.`bj_code`=`st`.`bj_code` utf8mb4 utf8mb4_general_ci +select * from v_st_2; +st_name bj_name zy_name +drop view if exists v_st_2; +drop table if exists zy_tab; +drop table if exists bj_tab; +drop table if exists st_tab; +drop sequence if exists seq; +drop sequence if exists seq1; +create sequence if not exists seq; +truncate table seq; +Error 1146 (42S02): Table 'executor__ddl.seq' doesn't exist +create sequence if not exists seq1 start 10 increment 2 maxvalue 10000 cycle; +truncate table seq1; +Error 1146 (42S02): Table 'executor__ddl.seq1' doesn't exist +drop sequence if exists seq; +drop sequence if exists seq1; +drop table if exists drop_test; +create table if not exists drop_test (a int); +create index idx_a on drop_test (a); +drop index idx_a on drop_test; +drop table drop_test; +drop table if exists t; +create table t (a bigint auto_random(5), b int, primary key (a, b) clustered); +insert into t (b) values (1); +set @@allow_auto_random_explicit_insert = 0; +insert into t values (100, 2); +Error 8216 (HY000): Invalid auto random: Explicit insertion on auto_random column is disabled. Try to set @@allow_auto_random_explicit_insert = true. 
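The listing flips the gate on right below; as a compact illustration of the same round-trip in Go (a sketch only — the test name is hypothetical, the testkit helpers are the ones this patch already uses):

func TestAutoRandomExplicitInsertGate(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("create table ar (a bigint auto_random(5), b int, primary key (a, b) clustered)")
	// With the gate off, an explicit value for the auto_random column is rejected.
	tk.MustExec("set @@allow_auto_random_explicit_insert = 0")
	tk.MustGetErrCode("insert into ar values (100, 2)", 8216)
	// With the gate on, the same insert succeeds.
	tk.MustExec("set @@allow_auto_random_explicit_insert = 1")
	tk.MustExec("insert into ar values (100, 2)")
}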
+set @@allow_auto_random_explicit_insert = 1; +insert into t values (100, 2); +select b from t order by b; +b +1 +2 +alter table t modify column a bigint auto_random(6); +drop table t; +create table t (a bigint, b bigint auto_random(4, 32), primary key (b, a) clustered); +insert into t (a) values (1); +select a from t; +a +1 +drop table if exists t; +set @@allow_auto_random_explicit_insert = default; +drop table if exists t; +create table t(a bigint PRIMARY KEY, b int); +insert into t values(9223372036854775807, 1); +insert into t values(-9223372036854775808, 1); +alter table t add index idx_b(b); +admin check table t; +create table t1(a bigint UNSIGNED PRIMARY KEY, b int); +insert into t1 values(18446744073709551615, 1); +insert into t1 values(0, 1); +alter table t1 add index idx_b(b); +admin check table t1; +drop table if exists t; +drop table if exists t; +create table t(c time DEFAULT '12:12:12.8'); +show create table `t`; +Table Create Table +t CREATE TABLE `t` ( + `c` time DEFAULT '12:12:13' +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin +alter table t add column c1 time default '12:12:12.000000'; +show create table `t`; +Table Create Table +t CREATE TABLE `t` ( + `c` time DEFAULT '12:12:13', + `c1` time DEFAULT '12:12:12' +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin +alter table t alter column c1 set default '2019-02-01 12:12:10.4'; +show create table `t`; +Table Create Table +t CREATE TABLE `t` ( + `c` time DEFAULT '12:12:13', + `c1` time DEFAULT '12:12:10' +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin +alter table t modify c1 time DEFAULT '770:12:12.000000'; +show create table `t`; +Table Create Table +t CREATE TABLE `t` ( + `c` time DEFAULT '12:12:13', + `c1` time DEFAULT '770:12:12' +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin +drop table if exists t; +drop table if exists t, t2, t3; +create table t ( tt timestamp default now(1)); +Error 1067 (42000): Invalid default value for 'tt' +create table t ( tt timestamp(1) default current_timestamp); +Error 1067 (42000): Invalid default value for 'tt' +create table t ( tt timestamp(1) default now(2)); +Error 1067 (42000): Invalid default value for 'tt' +create table t ( tt timestamp(1) default now(1)); +create table t2 ( tt timestamp default current_timestamp()); +create table t3 ( tt timestamp default current_timestamp(0)); +alter table t add column ttt timestamp default now(2); +Error 1067 (42000): Invalid default value for 'ttt' +alter table t add column ttt timestamp(5) default current_timestamp; +Error 1067 (42000): Invalid default value for 'ttt' +alter table t add column ttt timestamp(5) default now(2); +Error 1067 (42000): Invalid default value for 'ttt' +alter table t modify column tt timestamp(1) default now(); +Error 1067 (42000): Invalid default value for 'tt' +alter table t modify column tt timestamp(4) default now(5); +Error 1067 (42000): Invalid default value for 'tt' +alter table t change column tt tttt timestamp(4) default now(5); +Error 1067 (42000): Invalid default value for 'tttt' +alter table t change column tt tttt timestamp(1) default now(); +Error 1067 (42000): Invalid default value for 'tttt' +drop table if exists t, t2, t3; +drop table if exists tdv; +create table tdv(a int); +ALTER TABLE tdv ADD COLUMN ts timestamp DEFAULT '1970-01-01 08:00:01'; +drop table if exists tdv; +drop table if exists t; +CREATE TABLE t (created_at datetime) TTL = `created_at` + INTERVAL 5 DAY; +SHOW CREATE TABLE t; +Table Create Table +t CREATE TABLE `t` ( + `created_at` 
datetime DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin /*T![ttl] TTL=`created_at` + INTERVAL 5 DAY */ /*T![ttl] TTL_ENABLE='ON' */ /*T![ttl] TTL_JOB_INTERVAL='1h' */ +DROP TABLE t; +CREATE TABLE t (id int) TTL = `id` + INTERVAL 5 DAY; +Error 8148 (HY000): Field 'id' is of a not supported type for TTL config, expect DATETIME, DATE or TIMESTAMP +CREATE TABLE t (id int) TTL_ENABLE = 'ON'; +Error 8150 (HY000): Cannot set TTL_ENABLE on a table without TTL config +CREATE TABLE t (id int) TTL_JOB_INTERVAL = '1h'; +Error 8150 (HY000): Cannot set TTL_JOB_INTERVAL on a table without TTL config +CREATE TABLE t (created_at datetime) TTL_ENABLE = 'ON' TTL = `created_at` + INTERVAL 1 DAY TTL_ENABLE = 'OFF' TTL_JOB_INTERVAL = '1d'; +SHOW CREATE TABLE t; +Table Create Table +t CREATE TABLE `t` ( + `created_at` datetime DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin /*T![ttl] TTL=`created_at` + INTERVAL 1 DAY */ /*T![ttl] TTL_ENABLE='OFF' */ /*T![ttl] TTL_JOB_INTERVAL='1d' */ +DROP TABLE t; +CREATE TABLE t (created_at datetime) TTL_ENABLE = 'ON' TTL = `created_at` + INTERVAL 1 DAY TTL = `created_at` + INTERVAL 2 DAY TTL = `created_at` + INTERVAL 3 DAY TTL_ENABLE = 'OFF'; +SHOW CREATE TABLE t; +Table Create Table +t CREATE TABLE `t` ( + `created_at` datetime DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin /*T![ttl] TTL=`created_at` + INTERVAL 3 DAY */ /*T![ttl] TTL_ENABLE='OFF' */ /*T![ttl] TTL_JOB_INTERVAL='1h' */ +DROP TABLE t; +drop table if exists t; +CREATE TABLE t (created_at datetime, updated_at datetime, wrong_type int) TTL = `created_at` + INTERVAL 5 DAY; +ALTER TABLE t TTL = `updated_at` + INTERVAL 2 YEAR; +SHOW CREATE TABLE t; +Table Create Table +t CREATE TABLE `t` ( + `created_at` datetime DEFAULT NULL, + `updated_at` datetime DEFAULT NULL, + `wrong_type` int DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin /*T![ttl] TTL=`updated_at` + INTERVAL 2 YEAR */ /*T![ttl] TTL_ENABLE='ON' */ /*T![ttl] TTL_JOB_INTERVAL='1h' */ +ALTER TABLE t TTL_ENABLE = 'OFF'; +SHOW CREATE TABLE t; +Table Create Table +t CREATE TABLE `t` ( + `created_at` datetime DEFAULT NULL, + `updated_at` datetime DEFAULT NULL, + `wrong_type` int DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin /*T![ttl] TTL=`updated_at` + INTERVAL 2 YEAR */ /*T![ttl] TTL_ENABLE='OFF' */ /*T![ttl] TTL_JOB_INTERVAL='1h' */ +ALTER TABLE t TTL_JOB_INTERVAL = '1d'; +SHOW CREATE TABLE t; +Table Create Table +t CREATE TABLE `t` ( + `created_at` datetime DEFAULT NULL, + `updated_at` datetime DEFAULT NULL, + `wrong_type` int DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin /*T![ttl] TTL=`updated_at` + INTERVAL 2 YEAR */ /*T![ttl] TTL_ENABLE='OFF' */ /*T![ttl] TTL_JOB_INTERVAL='1d' */ +ALTER TABLE t TTL = `not_exist` + INTERVAL 2 YEAR; +Error 1054 (42S22): Unknown column 'not_exist' in 'TTL config' +ALTER TABLE t TTL = `wrong_type` + INTERVAL 2 YEAR; +Error 8148 (HY000): Field 'wrong_type' is of a not supported type for TTL config, expect DATETIME, DATE or TIMESTAMP +ALTER TABLE t DROP COLUMN updated_at; +Error 8149 (HY000): Cannot drop column 'updated_at': needed in TTL config +ALTER TABLE t CHANGE updated_at updated_at_new INT; +Error 8148 (HY000): Field 'updated_at_new' is of a not supported type for TTL config, expect DATETIME, DATE or TIMESTAMP +ALTER TABLE t RENAME COLUMN `updated_at` TO `updated_at_2`; +SHOW CREATE TABLE t; +Table Create Table +t CREATE TABLE `t` ( + `created_at` datetime DEFAULT 
NULL, + `updated_at_2` datetime DEFAULT NULL, + `wrong_type` int DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin /*T![ttl] TTL=`updated_at_2` + INTERVAL 2 YEAR */ /*T![ttl] TTL_ENABLE='OFF' */ /*T![ttl] TTL_JOB_INTERVAL='1d' */ +ALTER TABLE t CHANGE `updated_at_2` `updated_at_3` date; +SHOW CREATE TABLE t; +Table Create Table +t CREATE TABLE `t` ( + `created_at` datetime DEFAULT NULL, + `updated_at_3` date DEFAULT NULL, + `wrong_type` int DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin /*T![ttl] TTL=`updated_at_3` + INTERVAL 2 YEAR */ /*T![ttl] TTL_ENABLE='OFF' */ /*T![ttl] TTL_JOB_INTERVAL='1d' */ +ALTER TABLE t TTL = `updated_at_3` + INTERVAL 3 YEAR; +SHOW CREATE TABLE t; +Table Create Table +t CREATE TABLE `t` ( + `created_at` datetime DEFAULT NULL, + `updated_at_3` date DEFAULT NULL, + `wrong_type` int DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin /*T![ttl] TTL=`updated_at_3` + INTERVAL 3 YEAR */ /*T![ttl] TTL_ENABLE='OFF' */ /*T![ttl] TTL_JOB_INTERVAL='1d' */ +ALTER TABLE t TTL_ENABLE = 'OFF' REMOVE TTL; +Error 8200 (HY000): Unsupported multi schema change for alter table ttl +ALTER TABLE t REMOVE TTL; +SHOW CREATE TABLE t; +Table Create Table +t CREATE TABLE `t` ( + `created_at` datetime DEFAULT NULL, + `updated_at_3` date DEFAULT NULL, + `wrong_type` int DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin +ALTER TABLE t TTL_ENABLE = 'OFF'; +Error 8150 (HY000): Cannot set TTL_ENABLE on a table without TTL config +ALTER TABLE t TTL_JOB_INTERVAL = '1h'; +Error 8150 (HY000): Cannot set TTL_JOB_INTERVAL on a table without TTL config +drop table if exists t; +drop table if exists t; +CREATE TEMPORARY TABLE t (created_at datetime) TTL = `created_at` + INTERVAL 5 DAY; +Error 8151 (HY000): Set TTL for temporary table is not allowed +set global tidb_enable_foreign_key='ON'; +drop table if exists t, t_1; +CREATE TABLE t (id int primary key, created_at datetime); +CREATE TABLE t_1 (t_id int, foreign key fk_t_id(t_id) references t(id)); +ALTER TABLE t TTL = created_at + INTERVAL 5 YEAR; +Error 8152 (HY000): Set TTL for a table referenced by foreign key is not allowed +drop table t,t_1; +CREATE TABLE t (id int primary key, created_at datetime) TTL = created_at + INTERVAL 5 YEAR; +CREATE TABLE t_1 (t_id int, foreign key fk_t_id(t_id) references t(id)); +Error 8152 (HY000): Set TTL for a table referenced by foreign key is not allowed +drop table t; +CREATE TABLE t (id int primary key, created_at datetime) TTL = created_at + INTERVAL 5 YEAR; +CREATE TABLE t_1 (t_id int); +ALTER TABLE t_1 ADD FOREIGN KEY fk_t_id(t_id) references t(id); +Error 8152 (HY000): Set TTL for a table referenced by foreign key is not allowed +drop table t,t_1; +set global tidb_enable_foreign_key=default; +drop table if exists source_table, t1, t2, test_v_nested; +drop view if exists view_t, v, v1, v2, v3, v4, v5, v6, v7, v_nested, v_nested2; +CREATE TABLE source_table (id INT NOT NULL DEFAULT 1, name varchar(255), PRIMARY KEY(id)); +CREATE VIEW view_t AS select id , name from source_table; +CREATE VIEW view_t AS select id , name from source_table; +Error 1050 (42S01): Table 'executor__ddl.view_t' already exists +create view v1 (c,d) as select a,b from t1; +Error 1146 (42S02): Table 'executor__ddl.t1' doesn't exist +create table t1 (a int ,b int); +insert into t1 values (1,2), (1,3), (2,4), (2,5), (3,10); +create view v1 (c) as select b+1 from t1; +create view v2 as select b+1 from t1; +create view v3 as select b+1 as c from 
t1; +create view v4 (c) as select b+1 as d from t1; +create view v5 as select * from t1; +create view v6 (c,d) as select * from t1; +create view v7 (c,d,e) as select * from t1; +Error 1353 (HY000): In definition of view, derived table or common table expression, SELECT list and column names list have different column counts +drop view v1,v2,v3,v4,v5,v6; +create view v1 (c,d) as select a,b+@@global.max_user_connections from t1; +Error 1351 (HY000): View's SELECT contains a variable or parameter +create view v1 (c,d) as select a,b from t1 where a = @@global.max_user_connections; +Error 1351 (HY000): View's SELECT contains a variable or parameter +create view v1 (c,d,e) as select a,b from t1 ; +Error 1353 (HY000): In definition of view, derived table or common table expression, SELECT list and column names list have different column counts +create view v1 (c) as select a,b from t1 ; +Error 1353 (HY000): In definition of view, derived table or common table expression, SELECT list and column names list have different column counts +drop view if exists v1; +create view v1 (c,d) as select a,b from t1; +create or replace view v1 (c,d) as select a,b from t1 ; +create table if not exists t1 (a int ,b int); +create or replace view t1 as select * from t1; +Error 1347 (HY000): 'executor__ddl.t1' is not VIEW +prepare stmt from "create view v10 (x) as select 1"; +execute stmt; +drop table if exists t1, t2; +drop view if exists v; +create view v as select * from t1 union select * from t2; +Error 1146 (42S02): Table 'executor__ddl.t1' doesn't exist +create table t1(a int, b int); +create table t2(a int, b int); +insert into t1 values(1,2), (1,1), (1,2); +insert into t2 values(1,1),(1,3); +create definer='root'@'localhost' view v as select * from t1 union select * from t2; +select * from v; +a b +1 1 +1 2 +1 3 +alter table t1 drop column a; +select * from v; +Error 1356 (HY000): View 'executor__ddl.v' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them +alter table t1 add column a int; +select * from v; +a b +NULL 1 +NULL 2 +1 1 +1 3 +alter table t1 drop column a; +alter table t2 drop column b; +select * from v; +Error 1356 (HY000): View 'executor__ddl.v' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them +drop view v; +create view v as (select * from t1); +drop view v; +create view v as (select * from t1 union select * from t2); +drop view v; +drop view if exists v_if_exists; +show warnings; +Level Code Message +Note 1051 Unknown table 'executor__ddl.v_if_exists' +create view v1_if_exists as (select * from t1); +drop view if exists v1_if_exists,v2_if_exists,v3_if_exists; +show warnings; +Level Code Message +Note 1051 Unknown table 'executor__ddl.v2_if_exists' +Note 1051 Unknown table 'executor__ddl.v3_if_exists' +create table test_v_nested(a int); +create definer='root'@'localhost' view v_nested as select * from test_v_nested; +create definer='root'@'localhost' view v_nested2 as select * from v_nested; +create or replace definer='root'@'localhost' view v_nested as select * from v_nested2; +Error 1146 (42S02): Table 'executor__ddl.v_nested' doesn't exist +drop table test_v_nested; +drop view v_nested, v_nested2; +select sleep(1); +sleep(1) +0 +create view v_stale as select * from source_table as of timestamp date_sub(current_timestamp(3), interval 1 second); +Error 1356 (HY000): View 'executor__ddl.v_stale' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack 
rights to use them +drop view if exists v1,v2; +drop table if exists t1; +CREATE TABLE t1(a INT, b INT); +CREATE DEFINER=1234567890abcdefGHIKL1234567890abcdefGHIKL@localhost VIEW v1 AS SELECT a FROM t1; +Error 1470 (HY000): String '1234567890abcdefGHIKL1234567890abcdefGHIKL' is too long for user name (should be no longer than 32) +CREATE DEFINER=some_user_name@host_1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij1234567890X VIEW v2 AS SELECT b FROM t1; +Error 1470 (HY000): String 'host_1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij12345' is too long for host name (should be no longer than 255) +DROP VIEW IF EXISTS view_t; +drop table if exists t; +drop view if exists v; +create table t(a int); +create view v as select distinct'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', max('bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'), 'cccccccccc', 'ddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd'; +select * from v; +name_exp_1 name_exp_2 cccccccccc name_exp_4 +aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb cccccccccc ddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd +select name_exp_1, name_exp_2, cccccccccc, name_exp_4 from v; +name_exp_1 name_exp_2 cccccccccc name_exp_4 +aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb cccccccccc ddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd +show create view v; +View Create View character_set_client collation_connection +v CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`%` SQL SECURITY DEFINER VIEW `v` (`name_exp_1`, `name_exp_2`, `cccccccccc`, `name_exp_4`) AS SELECT DISTINCT _UTF8MB4'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' AS `aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa`,MAX(_UTF8MB4'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb') AS `max('bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb')`,_UTF8MB4'cccccccccc' AS `cccccccccc`,_UTF8MB4'ddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd' AS `ddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd` utf8mb4 utf8mb4_general_ci +drop view v; +CREATE ALGORITHM=UNDEFINED DEFINER=``@`` SQL SECURITY DEFINER VIEW `v` (`name_exp_1`, `name_exp_2`, `cccccccccc`, `name_exp_4`) AS SELECT DISTINCT _UTF8MB4'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' AS `aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa`,MAX(_UTF8MB4'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb') AS `max('bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb')`,_UTF8MB4'cccccccccc' AS `cccccccccc`,_UTF8MB4'ddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd' AS `ddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd`; +drop view v ; +create definer='root'@'localhost' view v as select 'a', 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' from t union select 'ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc', count(distinct 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb', 'c'); +select * from v; +a name_exp_2 
+ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc 1 +select a, name_exp_2 from v; +a name_exp_2 +ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc 1 +show create view v; +View Create View character_set_client collation_connection +v CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v` (`a`, `name_exp_2`) AS SELECT _UTF8MB4'a' AS `a`,_UTF8MB4'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' AS `bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb` FROM `executor__ddl`.`t` UNION SELECT _UTF8MB4'ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc' AS `ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc`,COUNT(DISTINCT _UTF8MB4'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb', _UTF8MB4'c') AS `count(distinct 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb', 'c')` utf8mb4 utf8mb4_general_ci +drop view v; +CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v` (`a`, `name_exp_2`) AS SELECT _UTF8MB4'a' AS `a`,_UTF8MB4'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' AS `bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb` FROM `executor__ddl`.`t` UNION SELECT _UTF8MB4'ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc' AS `ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc`,COUNT(DISTINCT _UTF8MB4'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb', _UTF8MB4'c') AS `count(distinct 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb', 'c')`; +drop view v ; +create definer='root'@'localhost' view v as select 'a' as 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' from t; +select * from v; +name_exp_1 +select name_exp_1 from v; +name_exp_1 +show create view v; +View Create View character_set_client collation_connection +v CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v` (`name_exp_1`) AS SELECT _UTF8MB4'a' AS `bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb` FROM `executor__ddl`.`t` utf8mb4 utf8mb4_general_ci +drop view v; +CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v` (`name_exp_1`) AS SELECT _UTF8MB4'a' AS `bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb` FROM `executor__ddl`.`t`; +drop view v ; +create view v(`bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb`) as select a from t; +Error 1059 (42000): Identifier name 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' is too long +drop table t; +drop table if exists drop_test; +create table if not exists drop_test (a int); +drop table if exists drop_test; +create table drop_test (a int); +drop table drop_test; +drop table mysql.gc_delete_range; +Error 1105 (HY000): Drop tidb system table 'mysql.gc_delete_range' is forbidden +drop table if exists t_v, t_v1, t_v2; +drop view if exists v; +create or replace view drop_test as select 1,2; +drop table drop_test; +Error 1051 (42S02): Unknown table 'executor__ddl.drop_test' +drop view if exists drop_test; +drop view mysql.gc_delete_range; +Error 1105 (HY000): Drop tidb system table 'mysql.gc_delete_range' is forbidden +drop view drop_test; +Error 1051 (42S02): Unknown table 'executor__ddl.drop_test' +create table t_v(a int); +drop view t_v; +Error 1347 (HY000): 'executor__ddl.t_v' is not VIEW +create table t_v1(a int, b int); +create table t_v2(a int, b int); +create view 
v as select * from t_v1; +create or replace view v as select * from t_v2; +select * from information_schema.views where table_name ='v' and table_schema='executor__ddl'; +TABLE_CATALOG TABLE_SCHEMA TABLE_NAME VIEW_DEFINITION CHECK_OPTION IS_UPDATABLE DEFINER SECURITY_TYPE CHARACTER_SET_CLIENT COLLATION_CONNECTION +def executor__ddl v SELECT `executor__ddl`.`t_v2`.`a` AS `a`,`executor__ddl`.`t_v2`.`b` AS `b` FROM `executor__ddl`.`t_v2` CASCADED NO root@% DEFINER utf8mb4 utf8mb4_general_ci +drop database if exists aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa; +create database aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa; +drop database aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa; +create database aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa; +Error 1059 (42000): Identifier name 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' is too long +drop table if exists bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb; +create table bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb(c int); +drop table bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb; +create table bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb(c int); +Error 1059 (42000): Identifier name 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' is too long +drop table if exists t; +create table t(cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc int); +drop table t; +create table t(ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc int); +Error 1059 (42000): Identifier name 'ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc' is too long +create table t(c int); +create index dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd on t(c); +drop index dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd on t; +create index ddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd on t(c); +Error 1059 (42000): Identifier name 'ddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd' is too long +drop table t; +create table t(c int, index ddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd(c)); +Error 1059 (42000): Identifier name 'ddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd' is too long +drop table if exists t1; +CREATE database test; +Error 1007 (HY000): Can't create database 'test'; database exists +create table t1 (b double generated always as (rand()) virtual); +Error 3102 (HY000): Expression of generated column 'b' contains a disallowed function. +create table t1 (a varchar(64), b varchar(1024) generated always as (load_file(a)) virtual); +Error 3102 (HY000): Expression of generated column 'b' contains a disallowed function. +create table t1 (a datetime generated always as (curdate()) virtual); +Error 3102 (HY000): Expression of generated column 'a' contains a disallowed function. +create table t1 (a datetime generated always as (current_time()) virtual); +Error 3102 (HY000): Expression of generated column 'a' contains a disallowed function. +create table t1 (a datetime generated always as (current_timestamp()) virtual); +Error 3102 (HY000): Expression of generated column 'a' contains a disallowed function. +create table t1 (a datetime, b varchar(10) generated always as (localtime()) virtual); +Error 3102 (HY000): Expression of generated column 'b' contains a disallowed function. 
+create table t1 (a varchar(1024) generated always as (uuid()) virtual); +Error 3102 (HY000): Expression of generated column 'a' contains a disallowed function. +create table t1 (a varchar(1024), b varchar(1024) generated always as (is_free_lock(a)) virtual); +Error 3102 (HY000): Expression of generated column 'b' contains a disallowed function. +create table t1 (a bigint not null primary key auto_increment, b bigint, c bigint as (b + 1)); +alter table t1 add column d varchar(1024) generated always as (database()); +Error 3102 (HY000): Expression of generated column 'd' contains a disallowed function. +alter table t1 add column d bigint generated always as (b + 1); +alter table t1 modify column d bigint generated always as (connection_id()); +Error 3102 (HY000): Expression of generated column 'd' contains a disallowed function. +alter table t1 change column c cc bigint generated always as (connection_id()); +Error 3102 (HY000): Expression of generated column 'cc' contains a disallowed function. +drop table if exists t1; +create table t1 (a bigint not null primary key auto_increment, b bigint as (a + 1)); +Error 3109 (HY000): Generated column 'b' cannot refer to auto-increment column. +create table t1 (a bigint not null primary key auto_increment, b bigint, c bigint as (b + 1)); +alter table t1 add column d bigint generated always as (a + 1); +Error 3109 (HY000): Generated column 'd' cannot refer to auto-increment column. +alter table t1 add column d bigint generated always as (b + 1); +alter table t1 modify column d bigint generated always as (a + 1); +Error 3109 (HY000): Generated column 'd' cannot refer to auto-increment column. +set session tidb_enable_auto_increment_in_generated = 1; +alter table t1 modify column d bigint generated always as (a + 1); +alter table t1 add column e bigint as (z + 1); +Error 1054 (42S22): Unknown column 'z' in 'generated column function' +drop table t1; +create table t1(a int, b int as (a+1), c int as (b+1)); +insert into t1 (a) values (1); +alter table t1 modify column c int as (b+1) first; +Error 3107 (HY000): Generated column can refer only to generated columns defined prior to it. +alter table t1 modify column b int as (a+1) after c; +Error 3107 (HY000): Generated column can refer only to generated columns defined prior to it. 
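Before the verifying select that follows, a quick Go rendering of the two 3107 checks just shown (hypothetical sketch, leaning on the same testkit helpers as the rest of this patch):

func TestGeneratedColumnPriorOrderSketch(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("create table t1(a int, b int as (a+1), c int as (b+1))")
	tk.MustExec("insert into t1 (a) values (1)")
	// A generated column may only reference generated columns defined before
	// it, so neither reordering is allowed (error 3107).
	tk.MustGetErrCode("alter table t1 modify column c int as (b+1) first", 3107)
	tk.MustGetErrCode("alter table t1 modify column b int as (a+1) after c", 3107)
	// The failed DDL leaves the data untouched.
	tk.MustQuery("select * from t1").Check(testkit.Rows("1 2 3"))
}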
+select * from t1; +a b c +1 2 3 +set session tidb_enable_auto_increment_in_generated = default; +drop table if exists t1; +CREATE TABLE t1 (t1_id INT NOT NULL AUTO_INCREMENT PRIMARY KEY); +CREATE INDEX idx1 ON t1 ((t1_id + t1_id)); +Error 3754 (HY000): Expression index 'idx1' cannot refer to an auto-increment column +SET SESSION tidb_enable_auto_increment_in_generated = 1; +CREATE INDEX idx1 ON t1 ((t1_id + t1_id)); +SET SESSION tidb_enable_auto_increment_in_generated = default; +set tidb_enable_clustered_index=on; +drop table if exists t1, t2, t3, t4, t11, t12, t13, t21, t22, t23; +create table t1(id float primary key, t timestamp) TTL=`t`+INTERVAL 1 DAY; +Error 8153 (HY000): Unsupported clustered primary key type FLOAT/DOUBLE for TTL +create table t1(id float(10,2) primary key, t timestamp) TTL=`t`+INTERVAL 1 DAY; +Error 8153 (HY000): Unsupported clustered primary key type FLOAT/DOUBLE for TTL +create table t1(id double primary key, t timestamp) TTL=`t`+INTERVAL 1 DAY; +Error 8153 (HY000): Unsupported clustered primary key type FLOAT/DOUBLE for TTL +create table t1(id float(10,2) primary key, t timestamp) TTL=`t`+INTERVAL 1 DAY; +Error 8153 (HY000): Unsupported clustered primary key type FLOAT/DOUBLE for TTL +create table t1(id1 int, id2 float, t timestamp, primary key(id1, id2)) TTL=`t`+INTERVAL 1 DAY; +Error 8153 (HY000): Unsupported clustered primary key type FLOAT/DOUBLE for TTL +create table t1(id1 int, id2 double, t timestamp, primary key(id1, id2)) TTL=`t`+INTERVAL 1 DAY; +Error 8153 (HY000): Unsupported clustered primary key type FLOAT/DOUBLE for TTL +create table t1(id float primary key, t timestamp); +create table t2(id double primary key, t timestamp); +create table t3(id1 int, id2 float, primary key(id1, id2), t timestamp); +create table t4(id1 int, id2 double, primary key(id1, id2), t timestamp); +alter table t1 TTL=`t`+INTERVAL 1 DAY; +Error 8153 (HY000): Unsupported clustered primary key type FLOAT/DOUBLE for TTL +alter table t2 TTL=`t`+INTERVAL 1 DAY; +Error 8153 (HY000): Unsupported clustered primary key type FLOAT/DOUBLE for TTL +alter table t3 TTL=`t`+INTERVAL 1 DAY; +Error 8153 (HY000): Unsupported clustered primary key type FLOAT/DOUBLE for TTL +alter table t4 TTL=`t`+INTERVAL 1 DAY; +Error 8153 (HY000): Unsupported clustered primary key type FLOAT/DOUBLE for TTL +create table t11(id float primary key nonclustered, t timestamp) TTL=`t`+INTERVAL 1 DAY; +create table t12(id double primary key nonclustered, t timestamp) TTL=`t`+INTERVAL 1 DAY; +create table t13(id1 int, id2 float, t timestamp, primary key(id1, id2) nonclustered) TTL=`t`+INTERVAL 1 DAY; +create table t21(id float primary key nonclustered, t timestamp); +create table t22(id double primary key nonclustered, t timestamp); +create table t23(id1 int, id2 float, t timestamp, primary key(id1, id2) nonclustered); +alter table t21 TTL=`t`+INTERVAL 1 DAY; +alter table t22 TTL=`t`+INTERVAL 1 DAY; +alter table t23 TTL=`t`+INTERVAL 1 DAY; +set tidb_enable_clustered_index=default; +drop table if exists t; +create table t (c_int int, c_str varchar(40)); +insert into t values (1, 'quizzical hofstadter'); +begin; +select c_int from t where c_str is not null for update; +c_int +1 +alter table t add index idx_4 (c_str); +rollback; diff --git a/tests/integrationtest/r/globalindex/insert.result b/tests/integrationtest/r/globalindex/insert.result new file mode 100644 index 0000000000000..e873815ea20ff --- /dev/null +++ b/tests/integrationtest/r/globalindex/insert.result @@ -0,0 +1,12 @@ +drop table if exists t; +create table 
t(a int, b int, unique index idx(a) global) partition by hash(b) partitions 5; +insert into t values (1, 1), (1, 2) on duplicate key update a=1, b=3; +select * from t use index (idx); +a b +1 3 +alter table t add unique index idx1(b) global; +insert into t values (2, 4), (3, 4) on duplicate key update a=2, b=5; +select * from t use index (idx1) order by a desc; +a b +2 5 +1 3 diff --git a/tests/integrationtest/t/executor/ddl.test b/tests/integrationtest/t/executor/ddl.test new file mode 100644 index 0000000000000..97df2bd3e865e --- /dev/null +++ b/tests/integrationtest/t/executor/ddl.test @@ -0,0 +1,536 @@ +# TestTruncateTable +drop table if exists truncate_test; +create table truncate_test (a int); +insert truncate_test values (1),(2),(3); +select * from truncate_test; +truncate table truncate_test; +select * from truncate_test; + +# TestViewRecursion +drop table if exists t; +drop view if exists recursive_view1, recursive_view2; +create table if not exists t(a int); +create definer='root'@'localhost' view recursive_view1 as select * from t; +create definer='root'@'localhost' view recursive_view2 as select * from recursive_view1; +drop table t; +rename table recursive_view2 to t; +--error 1462 +select * from recursive_view1; +drop view recursive_view1, t; +drop table if exists t; +drop view if exists recursive_view1, recursive_view2; + +# TestIssue16250 +create table if not exists t(a int); +create view view_issue16250 as select * from t; +-- error 1146 +truncate table view_issue16250; +drop table if exists t; +drop view if exists view_issue16250; + +# TestIssue24771 +drop table if exists zy_tab; +create table if not exists zy_tab ( + zy_code int, + zy_name varchar(100) + ); +drop table if exists bj_tab; +create table if not exists bj_tab ( + bj_code int, + bj_name varchar(100), + bj_addr varchar(100), + bj_person_count int, + zy_code int + ); +drop table if exists st_tab; +create table if not exists st_tab ( + st_code int, + st_name varchar(100), + bj_code int + ); +drop view if exists v_st_2; +create definer='root'@'localhost' view v_st_2 as + select st.st_name,bj.bj_name,zy.zy_name + from ( + select bj_code, + bj_name, + zy_code + from bj_tab as b + where b.bj_code = 1 + ) as bj + left join zy_tab as zy on zy.zy_code = bj.zy_code + left join st_tab as st on bj.bj_code = st.bj_code; +show create view v_st_2; +select * from v_st_2; +drop view if exists v_st_2; +drop table if exists zy_tab; +drop table if exists bj_tab; +drop table if exists st_tab; + +# TestTruncateSequence +drop sequence if exists seq; +drop sequence if exists seq1; +create sequence if not exists seq; +-- error 1146 +truncate table seq; +create sequence if not exists seq1 start 10 increment 2 maxvalue 10000 cycle; +-- error 1146 +truncate table seq1; +drop sequence if exists seq; +drop sequence if exists seq1; + +# TestCreateDropIndex +drop table if exists drop_test; +create table if not exists drop_test (a int); +create index idx_a on drop_test (a); +drop index idx_a on drop_test; +drop table drop_test; + +# TestAutoRandomClusteredPrimaryKey +drop table if exists t; +create table t (a bigint auto_random(5), b int, primary key (a, b) clustered); +insert into t (b) values (1); +set @@allow_auto_random_explicit_insert = 0; +-- error 8216 +insert into t values (100, 2); +set @@allow_auto_random_explicit_insert = 1; +insert into t values (100, 2); +select b from t order by b; +alter table t modify column a bigint auto_random(6); +drop table t; +create table t (a bigint, b bigint auto_random(4, 32), primary key (b, a) 
clustered); +insert into t (a) values (1); +select a from t; +drop table if exists t; +set @@allow_auto_random_explicit_insert = default; + +# TestMaxHandleAddIndex +drop table if exists t; +create table t(a bigint PRIMARY KEY, b int); +insert into t values(9223372036854775807, 1); +insert into t values(-9223372036854775808, 1); +alter table t add index idx_b(b); +admin check table t; +create table t1(a bigint UNSIGNED PRIMARY KEY, b int); +insert into t1 values(18446744073709551615, 1); +insert into t1 values(0, 1); +alter table t1 add index idx_b(b); +admin check table t1; +drop table if exists t; + +# TestIssue9205 +drop table if exists t; +create table t(c time DEFAULT '12:12:12.8'); +show create table `t`; +alter table t add column c1 time default '12:12:12.000000'; +show create table `t`; +alter table t alter column c1 set default '2019-02-01 12:12:10.4'; +show create table `t`; +alter table t modify c1 time DEFAULT '770:12:12.000000'; +show create table `t`; +drop table if exists t; + +# TestCheckDefaultFsp +drop table if exists t, t2, t3; +-- error 1067 +create table t ( tt timestamp default now(1)); +-- error 1067 +create table t ( tt timestamp(1) default current_timestamp); +-- error 1067 +create table t ( tt timestamp(1) default now(2)); +create table t ( tt timestamp(1) default now(1)); +create table t2 ( tt timestamp default current_timestamp()); +create table t3 ( tt timestamp default current_timestamp(0)); +-- error 1067 +alter table t add column ttt timestamp default now(2); +-- error 1067 +alter table t add column ttt timestamp(5) default current_timestamp; +-- error 1067 +alter table t add column ttt timestamp(5) default now(2); +-- error 1067 +alter table t modify column tt timestamp(1) default now(); +-- error 1067 +alter table t modify column tt timestamp(4) default now(5); +-- error 1067 +alter table t change column tt tttt timestamp(4) default now(5); +-- error 1067 +alter table t change column tt tttt timestamp(1) default now(); +drop table if exists t, t2, t3; + +# TestTimestampMinDefaultValue +drop table if exists tdv; +create table tdv(a int); +ALTER TABLE tdv ADD COLUMN ts timestamp DEFAULT '1970-01-01 08:00:01'; +drop table if exists tdv; + +# TestCreateTableWithTTL +drop table if exists t; +CREATE TABLE t (created_at datetime) TTL = `created_at` + INTERVAL 5 DAY; +SHOW CREATE TABLE t; +DROP TABLE t; +-- error 8148 +CREATE TABLE t (id int) TTL = `id` + INTERVAL 5 DAY; +-- error 8150 +CREATE TABLE t (id int) TTL_ENABLE = 'ON'; +-- error 8150 +CREATE TABLE t (id int) TTL_JOB_INTERVAL = '1h'; +CREATE TABLE t (created_at datetime) TTL_ENABLE = 'ON' TTL = `created_at` + INTERVAL 1 DAY TTL_ENABLE = 'OFF' TTL_JOB_INTERVAL = '1d'; +SHOW CREATE TABLE t; +DROP TABLE t; +CREATE TABLE t (created_at datetime) TTL_ENABLE = 'ON' TTL = `created_at` + INTERVAL 1 DAY TTL = `created_at` + INTERVAL 2 DAY TTL = `created_at` + INTERVAL 3 DAY TTL_ENABLE = 'OFF'; +SHOW CREATE TABLE t; +DROP TABLE t; + +# TestAlterTTLInfo +drop table if exists t; +CREATE TABLE t (created_at datetime, updated_at datetime, wrong_type int) TTL = `created_at` + INTERVAL 5 DAY; +ALTER TABLE t TTL = `updated_at` + INTERVAL 2 YEAR; +SHOW CREATE TABLE t; +ALTER TABLE t TTL_ENABLE = 'OFF'; +SHOW CREATE TABLE t; +ALTER TABLE t TTL_JOB_INTERVAL = '1d'; +SHOW CREATE TABLE t; +-- error 1054 +ALTER TABLE t TTL = `not_exist` + INTERVAL 2 YEAR; +-- error 8148 +ALTER TABLE t TTL = `wrong_type` + INTERVAL 2 YEAR; +-- error 8149 +ALTER TABLE t DROP COLUMN updated_at; +-- error 8148 +ALTER TABLE t CHANGE updated_at 
updated_at_new INT; +ALTER TABLE t RENAME COLUMN `updated_at` TO `updated_at_2`; +SHOW CREATE TABLE t; +ALTER TABLE t CHANGE `updated_at_2` `updated_at_3` date; +SHOW CREATE TABLE t; +ALTER TABLE t TTL = `updated_at_3` + INTERVAL 3 YEAR; +SHOW CREATE TABLE t; +-- error 8200 +ALTER TABLE t TTL_ENABLE = 'OFF' REMOVE TTL; +ALTER TABLE t REMOVE TTL; +SHOW CREATE TABLE t; +-- error 8150 +ALTER TABLE t TTL_ENABLE = 'OFF'; +-- error 8150 +ALTER TABLE t TTL_JOB_INTERVAL = '1h'; +drop table if exists t; + +# TestDisableTTLForTempTable +drop table if exists t; +--error 8151 +CREATE TEMPORARY TABLE t (created_at datetime) TTL = `created_at` + INTERVAL 5 DAY; + +# TestDisableTTLForFKParentTable +set global tidb_enable_foreign_key='ON'; +drop table if exists t, t_1; +CREATE TABLE t (id int primary key, created_at datetime); +CREATE TABLE t_1 (t_id int, foreign key fk_t_id(t_id) references t(id)); +--error 8152 +ALTER TABLE t TTL = created_at + INTERVAL 5 YEAR; +drop table t,t_1; +CREATE TABLE t (id int primary key, created_at datetime) TTL = created_at + INTERVAL 5 YEAR; +--error 8152 +CREATE TABLE t_1 (t_id int, foreign key fk_t_id(t_id) references t(id)); +drop table t; +CREATE TABLE t (id int primary key, created_at datetime) TTL = created_at + INTERVAL 5 YEAR; +CREATE TABLE t_1 (t_id int); +--error 8152 +ALTER TABLE t_1 ADD FOREIGN KEY fk_t_id(t_id) references t(id); +drop table t,t_1; +set global tidb_enable_foreign_key=default; + +# TestCreateView +drop table if exists source_table, t1, t2, test_v_nested; +drop view if exists view_t, v, v1, v2, v3, v4, v5, v6, v7, v_nested, v_nested2; +CREATE TABLE source_table (id INT NOT NULL DEFAULT 1, name varchar(255), PRIMARY KEY(id)); +CREATE VIEW view_t AS select id , name from source_table; +-- error 1050 +CREATE VIEW view_t AS select id , name from source_table; +-- error 1146 +create view v1 (c,d) as select a,b from t1; +create table t1 (a int ,b int); +insert into t1 values (1,2), (1,3), (2,4), (2,5), (3,10); +create view v1 (c) as select b+1 from t1; +create view v2 as select b+1 from t1; +create view v3 as select b+1 as c from t1; +create view v4 (c) as select b+1 as d from t1; +create view v5 as select * from t1; +create view v6 (c,d) as select * from t1; +-- error 1353 +create view v7 (c,d,e) as select * from t1; +drop view v1,v2,v3,v4,v5,v6; +-- error 1351 +create view v1 (c,d) as select a,b+@@global.max_user_connections from t1; +-- error 1351 +create view v1 (c,d) as select a,b from t1 where a = @@global.max_user_connections; +-- error 1353 +create view v1 (c,d,e) as select a,b from t1 ; +-- error 1353 +create view v1 (c) as select a,b from t1 ; +drop view if exists v1; +create view v1 (c,d) as select a,b from t1; +create or replace view v1 (c,d) as select a,b from t1 ; +create table if not exists t1 (a int ,b int); +-- error 1347 +create or replace view t1 as select * from t1; +prepare stmt from "create view v10 (x) as select 1"; +execute stmt; +drop table if exists t1, t2; +drop view if exists v; +-- error 1146 +create view v as select * from t1 union select * from t2; +create table t1(a int, b int); +create table t2(a int, b int); +insert into t1 values(1,2), (1,1), (1,2); +insert into t2 values(1,1),(1,3); +create definer='root'@'localhost' view v as select * from t1 union select * from t2; +--sorted_result +select * from v; +alter table t1 drop column a; +-- error 1356 +select * from v; +alter table t1 add column a int; +--sorted_result +select * from v; +alter table t1 drop column a; +alter table t2 drop column b; +-- error 1356 +select * 
from v;
+drop view v;
+create view v as (select * from t1);
+drop view v;
+create view v as (select * from t1 union select * from t2);
+drop view v;
+drop view if exists v_if_exists;
+show warnings;
+create view v1_if_exists as (select * from t1);
+drop view if exists v1_if_exists,v2_if_exists,v3_if_exists;
+show warnings;
+create table test_v_nested(a int);
+create definer='root'@'localhost' view v_nested as select * from test_v_nested;
+create definer='root'@'localhost' view v_nested2 as select * from v_nested;
+-- error 1146
+create or replace definer='root'@'localhost' view v_nested as select * from v_nested2;
+drop table test_v_nested;
+drop view v_nested, v_nested2;
+## Refer https://github.com/pingcap/tidb/issues/25876
+select sleep(1);
+-- error 1356
+create view v_stale as select * from source_table as of timestamp date_sub(current_timestamp(3), interval 1 second);
+## Refer https://github.com/pingcap/tidb/issues/32682
+drop view if exists v1,v2;
+drop table if exists t1;
+CREATE TABLE t1(a INT, b INT);
+-- error 1470
+CREATE DEFINER=1234567890abcdefGHIKL1234567890abcdefGHIKL@localhost VIEW v1 AS SELECT a FROM t1;
+-- error 1470
+CREATE DEFINER=some_user_name@host_1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij1234567890X VIEW v2 AS SELECT b FROM t1;
+DROP VIEW IF EXISTS view_t;
+
+# TestCreateViewWithOverlongColName
+drop table if exists t;
+drop view if exists v;
+create table t(a int);
+create view v as select distinct'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', max('bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'), 'cccccccccc', 'ddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd';
+select * from v;
+select name_exp_1, name_exp_2, cccccccccc, name_exp_4 from v;
+show create view v;
+drop view v;
+CREATE ALGORITHM=UNDEFINED DEFINER=``@`` SQL SECURITY DEFINER VIEW `v` (`name_exp_1`, `name_exp_2`, `cccccccccc`, `name_exp_4`) AS SELECT DISTINCT _UTF8MB4'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' AS `aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa`,MAX(_UTF8MB4'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb') AS `max('bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb')`,_UTF8MB4'cccccccccc' AS `cccccccccc`,_UTF8MB4'ddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd' AS `ddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd`;
+drop view v ;
+create definer='root'@'localhost' view v as select 'a', 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' from t union select 'ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc', count(distinct 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb', 'c');
+select * from v;
+select a, name_exp_2 from v;
+show create view v;
+drop view v;
+CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v` (`a`, `name_exp_2`) AS SELECT _UTF8MB4'a' AS `a`,_UTF8MB4'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' AS `bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb` FROM `executor__ddl`.`t` UNION SELECT _UTF8MB4'ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc' AS `ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc`,COUNT(DISTINCT _UTF8MB4'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb', _UTF8MB4'c') AS `count(distinct 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb', 'c')`;
+drop view v ;
+create definer='root'@'localhost' view v as select 'a' as 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' from t;
+select * from v;
+select name_exp_1 from v;
+show create view v;
+drop view v;
+CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v` (`name_exp_1`) AS SELECT _UTF8MB4'a' AS `bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb` FROM `executor__ddl`.`t`;
+drop view v ;
+-- error 1059
+create view v(`bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb`) as select a from t;
+drop table t;
+
+# TestCreateDropTable
+drop table if exists drop_test;
+create table if not exists drop_test (a int);
+drop table if exists drop_test;
+create table drop_test (a int);
+drop table drop_test;
+-- error 1105
+drop table mysql.gc_delete_range;
+
+# TestCreateDropView
+drop table if exists t_v, t_v1, t_v2;
+drop view if exists v;
+create or replace view drop_test as select 1,2;
+-- error 1051
+drop table drop_test;
+drop view if exists drop_test;
+-- error 1105
+drop view mysql.gc_delete_range;
+-- error 1051
+drop view drop_test;
+create table t_v(a int);
+-- error 1347
+drop view t_v;
+create table t_v1(a int, b int);
+create table t_v2(a int, b int);
+create view v as select * from t_v1;
+create or replace view v as select * from t_v2;
+select * from information_schema.views where table_name ='v' and table_schema='executor__ddl';
+
+# TestTooLargeIdentifierLength
+drop database if exists aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa;
+create database aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa;
+drop database aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa;
+-- error 1059
+create database aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa;
+
+drop table if exists bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb;
+create table bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb(c int);
+drop table bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb;
+-- error 1059
+create table bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb(c int);
+
+drop table if exists t;
+create table t(cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc int);
+drop table t;
+-- error 1059
+create table t(ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc int);
+
+create table t(c int);
+create index dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd on t(c);
+drop index dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd on t;
+-- error 1059
+create index ddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd on t(c);
+drop table t;
+-- error 1059
+create table t(c int, index ddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd(c));
+
+# TestIllegalFunctionCall4GeneratedColumns
+drop table if exists t1;
+-- error 1007
+CREATE database test;
+-- error 3102
+create table t1 (b double generated always as (rand()) virtual);
+-- error 3102
+create table t1 (a varchar(64), b varchar(1024) generated always as (load_file(a)) virtual);
+-- error 3102
+create table t1 (a datetime generated always as (curdate()) virtual);
+-- error 3102
+create table t1 (a datetime generated always as (current_time()) virtual);
+-- error 3102
+create table t1 (a datetime generated always as (current_timestamp()) virtual);
+-- error 3102
+create table t1 (a datetime, b varchar(10) generated always as (localtime()) virtual);
+-- error 3102
+create table t1 (a varchar(1024) generated always as (uuid()) virtual);
+-- error 3102
+create table t1 (a varchar(1024), b varchar(1024) generated always as (is_free_lock(a)) virtual);
+create table t1 (a bigint not null primary key auto_increment, b bigint, c bigint as (b + 1));
+-- error 3102
+alter table t1 add column d varchar(1024) generated always as (database());
+alter table t1 add column d bigint generated always as (b + 1);
+-- error 3102
+alter table t1 modify column d bigint generated always as (connection_id());
+-- error 3102
+alter table t1 change column c cc bigint generated always as (connection_id());
+
+# TestGeneratedColumnRelatedDDL
+drop table if exists t1;
+-- error 3109
+create table t1 (a bigint not null primary key auto_increment, b bigint as (a + 1));
+create table t1 (a bigint not null primary key auto_increment, b bigint, c bigint as (b + 1));
+-- error 3109
+alter table t1 add column d bigint generated always as (a + 1);
+alter table t1 add column d bigint generated always as (b + 1);
+-- error 3109
+alter table t1 modify column d bigint generated always as (a + 1);
+
+## This mysql compatibility check can be disabled using tidb_enable_auto_increment_in_generated
+set session tidb_enable_auto_increment_in_generated = 1;
+alter table t1 modify column d bigint generated always as (a + 1);
+-- error 1054
+alter table t1 add column e bigint as (z + 1);
+drop table t1;
+create table t1(a int, b int as (a+1), c int as (b+1));
+insert into t1 (a) values (1);
+-- error 3107
+alter table t1 modify column c int as (b+1) first;
+-- error 3107
+alter table t1 modify column b int as (a+1) after c;
+select * from t1;
+set session tidb_enable_auto_increment_in_generated = default;
+
+# TestAutoIncrementColumnErrorMessage
+drop table if exists t1;
+CREATE TABLE t1 (t1_id INT NOT NULL AUTO_INCREMENT PRIMARY KEY);
+-- error 3754
+CREATE INDEX idx1 ON t1 ((t1_id + t1_id));
+
+## This mysql compatibility check can be disabled using tidb_enable_auto_increment_in_generated
+SET SESSION tidb_enable_auto_increment_in_generated = 1;
+CREATE INDEX idx1 ON t1 ((t1_id + t1_id));
+SET SESSION tidb_enable_auto_increment_in_generated = default;
+
+# TestCheckPrimaryKeyForTTLTable
+set tidb_enable_clustered_index=on;
+drop table if exists t1, t2, t3, t4, t11, t12, t13, t21, t22, t23;
+
+## create table should fail when pk contains double/float
+-- error 8153
+create table t1(id float primary key, t timestamp) TTL=`t`+INTERVAL 1 DAY;
+-- error 8153
+create table t1(id float(10,2) primary key, t timestamp) TTL=`t`+INTERVAL 1 DAY;
+-- error 8153
+create table t1(id double primary key, t timestamp) TTL=`t`+INTERVAL 1 DAY;
+-- error 8153
+create table t1(id float(10,2) primary key, t timestamp) TTL=`t`+INTERVAL 1 DAY;
+-- error 8153
+create table t1(id1 int, id2 float, t timestamp, primary key(id1, id2)) TTL=`t`+INTERVAL 1 DAY;
+-- error 8153
+create table t1(id1 int, id2 double, t timestamp, primary key(id1, id2)) TTL=`t`+INTERVAL 1 DAY;
+
+## alter table should fail when pk contains double/float
+create table t1(id float primary key, t timestamp);
+create table t2(id double primary key, t timestamp);
+create table t3(id1 int, id2 float, primary key(id1, id2), t timestamp);
+create table t4(id1 int, id2 double, primary key(id1, id2), t timestamp);
+-- error 8153
+alter table t1 TTL=`t`+INTERVAL 1 DAY;
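+## the alters on t2-t4 below must fail the same way: error 8153
+## (ErrUnsupportedPrimaryKeyTypeWithTTL) is raised whenever a TTL table's
+## clustered primary key contains a float/double column, presumably because
+## inexact key types cannot delimit TTL scan ranges reliably; the
+## nonclustered variants further down are therefore accepted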
+-- error 8153
+alter table t2 TTL=`t`+INTERVAL 1 DAY;
+-- error 8153
+alter table t3 TTL=`t`+INTERVAL 1 DAY;
+-- error 8153
+alter table t4 TTL=`t`+INTERVAL 1 DAY;
+
+## create table should not fail when the pk is not clustered
+create table t11(id float primary key nonclustered, t timestamp) TTL=`t`+INTERVAL 1 DAY;
+create table t12(id double primary key nonclustered, t timestamp) TTL=`t`+INTERVAL 1 DAY;
+create table t13(id1 int, id2 float, t timestamp, primary key(id1, id2) nonclustered) TTL=`t`+INTERVAL 1 DAY;
+
+## alter table should not fail when the pk is not clustered
+create table t21(id float primary key nonclustered, t timestamp);
+create table t22(id double primary key nonclustered, t timestamp);
+create table t23(id1 int, id2 float, t timestamp, primary key(id1, id2) nonclustered);
+alter table t21 TTL=`t`+INTERVAL 1 DAY;
+alter table t22 TTL=`t`+INTERVAL 1 DAY;
+alter table t23 TTL=`t`+INTERVAL 1 DAY;
+
+set tidb_enable_clustered_index=default;
+
+# TestInTxnExecDDLInvalid
+drop table if exists t;
+create table t (c_int int, c_str varchar(40));
+insert into t values (1, 'quizzical hofstadter');
+begin;
+select c_int from t where c_str is not null for update;
+alter table t add index idx_4 (c_str);
+rollback;
+
diff --git a/tests/integrationtest/t/globalindex/insert.test b/tests/integrationtest/t/globalindex/insert.test
new file mode 100644
index 0000000000000..9e641354a341c
--- /dev/null
+++ b/tests/integrationtest/t/globalindex/insert.test
@@ -0,0 +1,10 @@
+drop table if exists t;
+create table t(a int, b int, unique index idx(a) global) partition by hash(b) partitions 5;
+insert into t values (1, 1), (1, 2) on duplicate key update a=1, b=3;
+select * from t use index (idx);
+
+alter table t add unique index idx1(b) global;
+insert into t values (2, 4), (3, 4) on duplicate key update a=2, b=5;
+select * from t use index (idx1) order by a desc;
+
+
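+## note: a `global` unique index is a single index spanning all 5 hash
+## partitions, so the second row of each insert above conflicts with a row
+## stored in a different partition and takes the on duplicate key update
+## path; the update may then move the updated row to yet another partition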