diff --git a/br/pkg/glue/glue.go b/br/pkg/glue/glue.go
index 4c203179146f5..24b0852d31290 100644
--- a/br/pkg/glue/glue.go
+++ b/br/pkg/glue/glue.go
@@ -38,6 +38,7 @@ type Session interface {
 	CreateTable(ctx context.Context, dbName model.CIStr, table *model.TableInfo) error
 	CreatePlacementPolicy(ctx context.Context, policy *model.PolicyInfo) error
 	Close()
+	GetGlobalVariable(name string) (string, error)
 }
 
 // BatchCreateTableSession is an interface to batch create table parallelly
diff --git a/br/pkg/gluetidb/glue.go b/br/pkg/gluetidb/glue.go
index 1b514596b2ff0..d17a098613648 100644
--- a/br/pkg/gluetidb/glue.go
+++ b/br/pkg/gluetidb/glue.go
@@ -203,6 +203,11 @@ func (gs *tidbSession) Close() {
 	gs.se.Close()
 }
 
+// GetGlobalVariable implements glue.Session.
+func (gs *tidbSession) GetGlobalVariable(name string) (string, error) {
+	return gs.se.GetSessionVars().GlobalVarsAccessor.GetTiDBTableValue(name)
+}
+
 // showCreateTable shows the result of SHOW CREATE TABLE from a TableInfo.
 func (gs *tidbSession) showCreateTable(tbl *model.TableInfo) (string, error) {
 	table := tbl.Clone()
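Note: a minimal sketch of how a caller can read the collation switch through the new `glue.Session` method. The helper name and error wrapping below are illustrative only; the method signature and the `new_collation_enabled` key come from this patch.

```go
package main

import (
	"github.com/pingcap/errors"
	"github.com/pingcap/tidb/br/pkg/glue"
)

const tidbNewCollationEnabled = "new_collation_enabled"

// readNewCollationEnable is a hypothetical helper: it asks the glue session for
// the mysql.tidb "new_collation_enabled" value that backup later stores in backupmeta.
func readNewCollationEnable(se glue.Session) (string, error) {
	v, err := se.GetGlobalVariable(tidbNewCollationEnabled)
	if err != nil {
		return "", errors.Trace(err)
	}
	return v, nil
}
```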
diff --git a/br/pkg/restore/split.go b/br/pkg/restore/split.go
index 91af891f13fb0..9c28d0f3f9a9c 100644
--- a/br/pkg/restore/split.go
+++ b/br/pkg/restore/split.go
@@ -438,11 +438,18 @@ func PaginateScanRegion(
 	}
 
 	var regions []*RegionInfo
-	err := utils.WithRetry(ctx, func() error {
+	var err error
+	// We don't need to return a multierr here: there are only 3 retries, and in
+	// most cases they all fail with the same error, so we just return the last one.
+	// Ideally we should remove multierr from br/lightning entirely, because it is
+	// hard to compare a multierr against a normal error.
+	// See https://github.com/pingcap/tidb/issues/33419.
+	_ = utils.WithRetry(ctx, func() error {
 		regions = []*RegionInfo{}
 		scanStartKey := startKey
 		for {
-			batch, err := client.ScanRegions(ctx, scanStartKey, endKey, limit)
+			var batch []*RegionInfo
+			batch, err = client.ScanRegions(ctx, scanStartKey, endKey, limit)
 			if err != nil {
 				return errors.Trace(err)
 			}
@@ -458,7 +465,7 @@ func PaginateScanRegion(
 			break
 		}
 	}
-	if err := CheckRegionConsistency(startKey, endKey, regions); err != nil {
+	if err = CheckRegionConsistency(startKey, endKey, regions); err != nil {
 		log.Warn("failed to scan region, retrying", logutil.ShortError(err))
 		return err
 	}
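Note: a rough illustration of the problem the comment above describes (assumed behavior, not code from this patch). Once several retry errors are combined with `multierr`, a normalized BR error can no longer be recognized with `Equal`, whereas returning only the last error keeps code-based checks working.

```go
package main

import (
	"fmt"

	"go.uber.org/multierr"

	berrors "github.com/pingcap/tidb/br/pkg/errors"
)

func main() {
	single := berrors.ErrPDBatchScanRegion.GenWithStack("scan region return empty result")
	combined := multierr.Combine(single, single, single)

	fmt.Println(berrors.ErrPDBatchScanRegion.Equal(single))   // true
	fmt.Println(berrors.ErrPDBatchScanRegion.Equal(combined)) // false: the wrapper hides the BR error code
}
```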
diff --git a/br/pkg/restore/split_test.go b/br/pkg/restore/split_test.go
index 89fc769062874..e07946a5b25af 100644
--- a/br/pkg/restore/split_test.go
+++ b/br/pkg/restore/split_test.go
@@ -39,6 +39,7 @@ type TestClient struct {
 	supportBatchScatter bool
 
 	scattered map[uint64]bool
+	InjectErr bool
 }
 
 func NewTestClient(
@@ -215,6 +216,10 @@ func (c *TestClient) GetOperator(ctx context.Context, regionID uint64) (*pdpb.Ge
 }
 
 func (c *TestClient) ScanRegions(ctx context.Context, key, endKey []byte, limit int) ([]*restore.RegionInfo, error) {
+	if c.InjectErr {
+		return nil, errors.New("mock scan error")
+	}
+
 	infos := c.regionsInfo.ScanRange(key, endKey, limit)
 	regions := make([]*restore.RegionInfo, 0, len(infos))
 	for _, info := range infos {
@@ -567,12 +572,17 @@ func TestRegionConsistency(t *testing.T) {
 }
 
 type fakeRestorer struct {
+	mu           sync.Mutex
+	errorInSplit bool
 	splitRanges   []rtree.Range
 	restoredFiles []*backuppb.File
 }
 
 func (f *fakeRestorer) SplitRanges(ctx context.Context, ranges []rtree.Range, rewriteRules *restore.RewriteRules, updateCh glue.Progress, isRawKv bool) error {
+	f.mu.Lock()
+	defer f.mu.Unlock()
+
 	if ctx.Err() != nil {
 		return ctx.Err()
 	}
@@ -587,6 +597,9 @@ func (f *fakeRestorer) SplitRanges(ctx context.Context, ranges []rtree.Range, re
 }
 
 func (f *fakeRestorer) RestoreFiles(ctx context.Context, files []*backuppb.File, rewriteRules *restore.RewriteRules, updateCh glue.Progress) error {
+	f.mu.Lock()
+	defer f.mu.Unlock()
+
 	if ctx.Err() != nil {
 		return ctx.Err()
 	}
diff --git a/br/pkg/restore/util_test.go b/br/pkg/restore/util_test.go
index 16348d5212f9a..cde804ba89b08 100644
--- a/br/pkg/restore/util_test.go
+++ b/br/pkg/restore/util_test.go
@@ -10,6 +10,7 @@ import (
 	backuppb "github.com/pingcap/kvproto/pkg/brpb"
 	"github.com/pingcap/kvproto/pkg/import_sstpb"
 	"github.com/pingcap/kvproto/pkg/metapb"
+	berrors "github.com/pingcap/tidb/br/pkg/errors"
 	"github.com/pingcap/tidb/br/pkg/restore"
 	"github.com/pingcap/tidb/tablecodec"
 	"github.com/pingcap/tidb/util/codec"
@@ -228,6 +229,7 @@ func TestPaginateScanRegion(t *testing.T) {
 	var batch []*restore.RegionInfo
 	_, err := restore.PaginateScanRegion(ctx, NewTestClient(stores, regionMap, 0), []byte{}, []byte{}, 3)
 	require.Error(t, err)
+	require.True(t, berrors.ErrPDBatchScanRegion.Equal(err))
 	require.Regexp(t, ".*scan region return empty result.*", err.Error())
 
 	regionMap, regions = makeRegions(1)
@@ -268,12 +270,20 @@ func TestPaginateScanRegion(t *testing.T) {
 	_, err = restore.PaginateScanRegion(ctx, NewTestClient(stores, regionMap, 0), []byte{2}, []byte{1}, 3)
 	require.Error(t, err)
+	require.True(t, berrors.ErrRestoreInvalidRange.Equal(err))
 	require.Regexp(t, ".*startKey >= endKey.*", err.Error())
 
+	tc := NewTestClient(stores, regionMap, 0)
+	tc.InjectErr = true
+	_, err = restore.PaginateScanRegion(ctx, tc, regions[1].Region.EndKey, regions[5].Region.EndKey, 3)
+	require.Error(t, err)
+	require.Regexp(t, ".*mock scan error.*", err.Error())
+
 	// make the regionMap losing some region, this will cause scan region check fails
 	delete(regionMap, uint64(3))
 	_, err = restore.PaginateScanRegion(ctx, NewTestClient(stores, regionMap, 0), regions[1].Region.EndKey, regions[5].Region.EndKey, 3)
 	require.Error(t, err)
+	require.True(t, berrors.ErrPDBatchScanRegion.Equal(err))
 	require.Regexp(t, ".*region endKey not equal to next region startKey.*", err.Error())
 }
+lease = "360s" + +new_collations_enabled_on_first_bootstrap = false + +[security] +ssl-ca = "/tmp/backup_restore_test/certs/ca.pem" +ssl-cert = "/tmp/backup_restore_test/certs/tidb.pem" +ssl-key = "/tmp/backup_restore_test/certs/tidb.key" +cluster-ssl-ca = "/tmp/backup_restore_test/certs/ca.pem" +cluster-ssl-cert = "/tmp/backup_restore_test/certs/tidb.pem" +cluster-ssl-key = "/tmp/backup_restore_test/certs/tidb.key" diff --git a/br/tests/br_check_new_collocation_enable/config/new_collation_enable_true.toml b/br/tests/br_check_new_collocation_enable/config/new_collation_enable_true.toml new file mode 100644 index 0000000000000..d9cb1df6178f0 --- /dev/null +++ b/br/tests/br_check_new_collocation_enable/config/new_collation_enable_true.toml @@ -0,0 +1,16 @@ +# config of tidb + +# Schema lease duration +# There are lot of ddl in the tests, setting this +# to 360s to test whether BR is gracefully shutdown. +lease = "360s" + +new_collations_enabled_on_first_bootstrap = true + +[security] +ssl-ca = "/tmp/backup_restore_test/certs/ca.pem" +ssl-cert = "/tmp/backup_restore_test/certs/tidb.pem" +ssl-key = "/tmp/backup_restore_test/certs/tidb.key" +cluster-ssl-ca = "/tmp/backup_restore_test/certs/ca.pem" +cluster-ssl-cert = "/tmp/backup_restore_test/certs/tidb.pem" +cluster-ssl-key = "/tmp/backup_restore_test/certs/tidb.key" diff --git a/br/tests/br_check_new_collocation_enable/run.sh b/br/tests/br_check_new_collocation_enable/run.sh new file mode 100755 index 0000000000000..d74262148fb5c --- /dev/null +++ b/br/tests/br_check_new_collocation_enable/run.sh @@ -0,0 +1,102 @@ +#!/bin/sh +# +# Copyright 2019 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eu +DB="$TEST_NAME" + +cur=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) +source $cur/../_utils/run_services + +PROGRESS_FILE="$TEST_DIR/progress_unit_file" +rm -rf $PROGRESS_FILE + +run_sql "CREATE DATABASE $DB;" + +run_sql "CREATE TABLE $DB.usertable1 ( \ + YCSB_KEY varchar(64) NOT NULL, \ + FIELD0 varchar(1) DEFAULT NULL, \ + PRIMARY KEY (YCSB_KEY) \ +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;" + +run_sql "INSERT INTO $DB.usertable1 VALUES (\"a\", \"b\");" +run_sql "INSERT INTO $DB.usertable1 VALUES (\"aa\", \"b\");" + +run_sql "CREATE TABLE $DB.usertable2 ( \ + YCSB_KEY varchar(64) NOT NULL, \ + FIELD0 varchar(1) DEFAULT NULL, \ + PRIMARY KEY (YCSB_KEY) \ +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;" + +run_sql "INSERT INTO $DB.usertable2 VALUES (\"c\", \"d\");" + +# backup db +echo "backup start ... with brv4.0.8 without NewCollactionEnable" +bin/brv4.0.8 backup db --db "$DB" -s "local://$TEST_DIR/$DB" \ + --ca "$TEST_DIR/certs/ca.pem" \ + --cert "$TEST_DIR/certs/br.pem" \ + --key "$TEST_DIR/certs/br.key" \ + --pd $PD_ADDR \ + --check-requirements=false + +# restore db from v4.0.8 version without `newCollationEnable` +echo "restore start ... 
without NewCollactionEnable in backupmeta" +restore_fail=0 +error_str="NewCollactionEnable not found in backupmeta" +test_log="new_collotion_enable_test.log" +unset BR_LOG_TO_TERM +run_br restore db --db $DB -s "local://$TEST_DIR/$DB" --pd $PD_ADDR --log-file $test_log || restore_fail=1 +if [ $restore_fail -ne 1 ]; then + echo "TEST: [$TEST_NAME] test restore failed!" + exit 1 +fi + +if ! grep -i "$error_str" $test_log; then + echo "${error_str} not found in log" + echo "TEST: [$TEST_NAME] test restore failed!" + exit 1 +fi + +rm -rf "$test_log" + +# backup with NewCollationEable = false +echo "Restart cluster with new_collation_enable=false" +start_services --tidb-cfg $cur/config/new_collation_enable_false.toml + +echo "backup start ... witch NewCollactionEnable=false in TiDB" +run_br --pd $PD_ADDR backup db --db "$DB" -s "local://$cur/${DB}_2" + +echo "Restart cluster with new_collation_enable=true" +start_services --tidb-cfg $cur/config/new_collation_enable_true.toml + +echo "restore start ... with NewCollactionEnable=True in TiDB" +restore_fail=0 +test_log2="new_collotion_enable_test2.log" +error_str="newCollationEnable not match" +unset BR_LOG_TO_TERM +run_br restore db --db $DB -s "local://$cur/${DB}_2" --pd $PD_ADDR --log-file $test_log2 || restore_fail=1 +if [ $restore_fail -ne 1 ]; then + echo "TEST: [$TEST_NAME] test restore failed!" + exit 1 +fi + +if ! grep -i "$error_str" $test_log2; then + echo "${error_str} not found in log" + echo "TEST: [$TEST_NAME] test restore failed!" + exit 1 +fi + +rm -rf "$test_log2" +rm -rf "$cur/${DB}_2" diff --git a/br/tests/br_s3/run.sh b/br/tests/br_s3/run.sh index d0c20996db7ef..9cd383de4f026 100755 --- a/br/tests/br_s3/run.sh +++ b/br/tests/br_s3/run.sh @@ -101,6 +101,12 @@ for p in $(seq 2); do exit 1 fi + target_log="get newCollationEnable for check during restore" + if ! 
grep -i "$target_log" $BACKUP_LOG; then + echo "${target_log} not found in log" + exit 1 + fi + for i in $(seq $DB_COUNT); do run_sql "DROP DATABASE $DB${i};" done diff --git a/config/config.go b/config/config.go index 2c62f8a4b80b2..a52def3169317 100644 --- a/config/config.go +++ b/config/config.go @@ -782,21 +782,28 @@ func StoreGlobalConfig(config *Config) { } var deprecatedConfig = map[string]struct{}{ - "pessimistic-txn.ttl": {}, - "pessimistic-txn.enable": {}, - "log.file.log-rotate": {}, - "log.log-slow-query": {}, - "txn-local-latches": {}, - "txn-local-latches.enabled": {}, - "txn-local-latches.capacity": {}, - "performance.max-memory": {}, - "max-txn-time-use": {}, - "experimental.allow-auto-random": {}, - "enable-redact-log": {}, // use variable tidb_redact_log instead - "tikv-client.copr-cache.enable": {}, - "alter-primary-key": {}, // use NONCLUSTERED keyword instead - "enable-streaming": {}, - "performance.mem-profile-interval": {}, + "pessimistic-txn.ttl": {}, + "pessimistic-txn.enable": {}, + "log.file.log-rotate": {}, + "log.log-slow-query": {}, + "txn-local-latches": {}, + "txn-local-latches.enabled": {}, + "txn-local-latches.capacity": {}, + "performance.max-memory": {}, + "max-txn-time-use": {}, + "experimental.allow-auto-random": {}, + "enable-redact-log": {}, // use variable tidb_redact_log instead + "tikv-client.copr-cache.enable": {}, + "alter-primary-key": {}, // use NONCLUSTERED keyword instead + "enable-streaming": {}, + "performance.mem-profile-interval": {}, + "stmt-summary": {}, + "stmt-summary.enable": {}, + "stmt-summary.enable-internal-query": {}, + "stmt-summary.max-stmt-count": {}, + "stmt-summary.max-sql-length": {}, + "stmt-summary.refresh-interval": {}, + "stmt-summary.history-size": {}, } func isAllDeprecatedConfigItems(items []string) bool { diff --git a/config/config_test.go b/config/config_test.go index 913dbe088f0be..bc90a2763bfad 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -327,7 +327,14 @@ grpc-max-send-msg-size = 40960 [log.file] log-rotate = true [performance] -mem-profile-interval="1m"`) +mem-profile-interval="1m" +[stmt-summary] +enable=false +enable-internal-query=true +max-stmt-count=1000 +max-sql-length=1024 +refresh-interval=100 +history-size=100`) require.NoError(t, err) err = conf.Load(configFile) tmp := err.(*ErrConfigValidationFailed) diff --git a/executor/brie.go b/executor/brie.go index 0e2cc6ff95f68..fcbfbd234ac8a 100644 --- a/executor/brie.go +++ b/executor/brie.go @@ -523,6 +523,11 @@ func (gs *tidbGlueSession) CreatePlacementPolicy(ctx context.Context, policy *mo func (gs *tidbGlueSession) Close() { } +// GetGlobalVariables implements glue.Session. 
diff --git a/br/tests/br_s3/run.sh b/br/tests/br_s3/run.sh
index d0c20996db7ef..9cd383de4f026 100755
--- a/br/tests/br_s3/run.sh
+++ b/br/tests/br_s3/run.sh
@@ -101,6 +101,12 @@ for p in $(seq 2); do
         exit 1
     fi
 
+    target_log="get newCollationEnable for check during restore"
+    if ! grep -i "$target_log" $BACKUP_LOG; then
+        echo "${target_log} not found in log"
+        exit 1
+    fi
+
     for i in $(seq $DB_COUNT); do
         run_sql "DROP DATABASE $DB${i};"
     done
diff --git a/config/config.go b/config/config.go
index 2c62f8a4b80b2..a52def3169317 100644
--- a/config/config.go
+++ b/config/config.go
@@ -782,21 +782,28 @@ func StoreGlobalConfig(config *Config) {
 }
 
 var deprecatedConfig = map[string]struct{}{
-	"pessimistic-txn.ttl":              {},
-	"pessimistic-txn.enable":           {},
-	"log.file.log-rotate":              {},
-	"log.log-slow-query":               {},
-	"txn-local-latches":                {},
-	"txn-local-latches.enabled":        {},
-	"txn-local-latches.capacity":       {},
-	"performance.max-memory":           {},
-	"max-txn-time-use":                 {},
-	"experimental.allow-auto-random":   {},
-	"enable-redact-log":                {}, // use variable tidb_redact_log instead
-	"tikv-client.copr-cache.enable":    {},
-	"alter-primary-key":                {}, // use NONCLUSTERED keyword instead
-	"enable-streaming":                 {},
-	"performance.mem-profile-interval": {},
+	"pessimistic-txn.ttl":                {},
+	"pessimistic-txn.enable":             {},
+	"log.file.log-rotate":                {},
+	"log.log-slow-query":                 {},
+	"txn-local-latches":                  {},
+	"txn-local-latches.enabled":          {},
+	"txn-local-latches.capacity":         {},
+	"performance.max-memory":             {},
+	"max-txn-time-use":                   {},
+	"experimental.allow-auto-random":     {},
+	"enable-redact-log":                  {}, // use variable tidb_redact_log instead
+	"tikv-client.copr-cache.enable":      {},
+	"alter-primary-key":                  {}, // use NONCLUSTERED keyword instead
+	"enable-streaming":                   {},
+	"performance.mem-profile-interval":   {},
+	"stmt-summary":                       {},
+	"stmt-summary.enable":                {},
+	"stmt-summary.enable-internal-query": {},
+	"stmt-summary.max-stmt-count":        {},
+	"stmt-summary.max-sql-length":        {},
+	"stmt-summary.refresh-interval":      {},
+	"stmt-summary.history-size":          {},
 }
 
 func isAllDeprecatedConfigItems(items []string) bool {
diff --git a/config/config_test.go b/config/config_test.go
index 913dbe088f0be..bc90a2763bfad 100644
--- a/config/config_test.go
+++ b/config/config_test.go
@@ -327,7 +327,14 @@ grpc-max-send-msg-size = 40960
 [log.file]
 log-rotate = true
 [performance]
-mem-profile-interval="1m"`)
+mem-profile-interval="1m"
+[stmt-summary]
+enable=false
+enable-internal-query=true
+max-stmt-count=1000
+max-sql-length=1024
+refresh-interval=100
+history-size=100`)
 	require.NoError(t, err)
 	err = conf.Load(configFile)
 	tmp := err.(*ErrConfigValidationFailed)
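Note: the `stmt-summary` section has moved to system variables, so its keys are added to the deprecated list to keep old config files loadable. The sketch below shows what the deprecated-item check is assumed to do with these entries; it is an illustration, not the real `isAllDeprecatedConfigItems` implementation next to the map above.

```go
// allDeprecated reports whether every undecoded config key is a known
// deprecated item; if so, loading only warns instead of failing startup,
// so a config file that still carries a [stmt-summary] section keeps working.
func allDeprecated(undecoded []string) bool {
	for _, item := range undecoded {
		if _, ok := deprecatedConfig[item]; !ok {
			return false
		}
	}
	return len(undecoded) > 0
}
```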
diff --git a/executor/brie.go b/executor/brie.go
index 0e2cc6ff95f68..fcbfbd234ac8a 100644
--- a/executor/brie.go
+++ b/executor/brie.go
@@ -523,6 +523,11 @@ func (gs *tidbGlueSession) CreatePlacementPolicy(ctx context.Context, policy *mo
 func (gs *tidbGlueSession) Close() {
 }
 
+// GetGlobalVariable implements glue.Session.
+func (gs *tidbGlueSession) GetGlobalVariable(name string) (string, error) {
+	return gs.se.GetSessionVars().GlobalVarsAccessor.GetTiDBTableValue(name)
+}
+
 // Open implements glue.Glue
 func (gs *tidbGlueSession) Open(string, pd.SecurityOption) (kv.Storage, error) {
 	return gs.se.GetStore(), nil
diff --git a/executor/stale_txn_test.go b/executor/stale_txn_test.go
index 8ce913abdee1b..dbd692685e901 100644
--- a/executor/stale_txn_test.go
+++ b/executor/stale_txn_test.go
@@ -15,6 +15,7 @@
 package executor_test
 
 import (
+	"context"
 	"fmt"
 	"testing"
 	"time"
@@ -1051,6 +1052,16 @@ func TestStaleReadPrepare(t *testing.T) {
 	tk.MustExec(fmt.Sprintf(`set transaction read only as of timestamp '%s'`, time1.Format("2006-1-2 15:04:05.000")))
 	_, err = tk.Exec("execute p1")
 	require.Error(t, err)
+	tk.MustExec("execute p2")
+
+	tk.MustExec("create table t1 (id int, v int)")
+	tk.MustExec("insert into t1 values (1,10)")
+	tk.MustExec("set @a=now(6)")
+	time.Sleep(5 * time.Millisecond)
+	tk.MustExec("update t1 set v=100 where id=1")
+	tk.MustQuery("select * from t1").Check(testkit.Rows("1 100"))
+	tk.MustExec("prepare s1 from 'select * from t1 as of timestamp @a where id=1'")
+	tk.MustQuery("execute s1").Check(testkit.Rows("1 10"))
 }
 
 func TestStmtCtxStaleFlag(t *testing.T) {
@@ -1282,3 +1293,32 @@ func TestStaleReadNoExtraTSORequest(t *testing.T) {
 	tk.MustQuery("select * from t")
 	require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/session/assertTSONotRequest"))
 }
+
+func TestPlanCacheWithStaleReadByBinaryProto(t *testing.T) {
+	store, _, clean := testkit.CreateMockStoreAndDomain(t)
+	defer clean()
+
+	tk := testkit.NewTestKit(t, store)
+	tk.MustExec("use test")
+	tk.MustExec("create table t1 (id int primary key, v int)")
+	tk.MustExec("insert into t1 values(1, 10)")
+	se := tk.Session()
+	tk.MustExec("set @a=now(6)")
+	time.Sleep(time.Millisecond * 5)
+	tk.MustExec("update t1 set v=100 where id=1")
+
+	stmtID1, _, _, err := se.PrepareStmt("select * from t1 as of timestamp @a where id=1")
+	require.NoError(t, err)
+
+	rs, err := se.ExecutePreparedStmt(context.TODO(), stmtID1, nil)
+	require.NoError(t, err)
+	tk.ResultSetToResult(rs, fmt.Sprintf("%v", rs)).Check(testkit.Rows("1 10"))
+
+	rs, err = se.ExecutePreparedStmt(context.TODO(), stmtID1, nil)
+	require.NoError(t, err)
+	tk.ResultSetToResult(rs, fmt.Sprintf("%v", rs)).Check(testkit.Rows("1 10"))
+
+	rs, err = se.ExecutePreparedStmt(context.TODO(), stmtID1, nil)
+	require.NoError(t, err)
+	tk.ResultSetToResult(rs, fmt.Sprintf("%v", rs)).Check(testkit.Rows("1 10"))
+}
diff --git a/go.mod b/go.mod
index 8de1512d205f7..3f6a992d31c47 100644
--- a/go.mod
+++ b/go.mod
@@ -46,7 +46,7 @@ require (
 	github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c
 	github.com/pingcap/failpoint v0.0.0-20220303073211-00fea37feb66
 	github.com/pingcap/fn v0.0.0-20200306044125-d5540d389059
-	github.com/pingcap/kvproto v0.0.0-20220314103629-10e688307221
+	github.com/pingcap/kvproto v0.0.0-20220328072018-6e75c12dbd73
 	github.com/pingcap/log v0.0.0-20211215031037-e024ba4eb0ee
 	github.com/pingcap/sysutil v0.0.0-20220114020952-ea68d2dbf5b4
 	github.com/pingcap/tidb-tools v6.0.0-alpha.0.20220309081549-563c2a342f9c+incompatible
diff --git a/go.sum b/go.sum
index ddc99c25aa9cf..d4a9b2d17d438 100644
--- a/go.sum
+++ b/go.sum
@@ -598,8 +598,8 @@
 github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989/go.mod h1:O17XtbryoCJhkKGbT62+L2OlrniwqiGLSqrmdHCMzZw=
 github.com/pingcap/kvproto v0.0.0-20191211054548-3c6b38ea5107/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w=
 github.com/pingcap/kvproto v0.0.0-20220302110454-c696585a961b/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI=
 github.com/pingcap/kvproto v0.0.0-20220304032058-ccd676426a27/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI=
-github.com/pingcap/kvproto v0.0.0-20220314103629-10e688307221 h1:QiHOVihPED67vDEZE6kP3cGrS55U1+QXbSTahGaEyOI=
-github.com/pingcap/kvproto v0.0.0-20220314103629-10e688307221/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI=
+github.com/pingcap/kvproto v0.0.0-20220328072018-6e75c12dbd73 h1:jKixsi6Iw00hL0+o23hmr8BNzlsQP9pShHTOwyuf/Os=
+github.com/pingcap/kvproto v0.0.0-20220328072018-6e75c12dbd73/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI=
 github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8=
 github.com/pingcap/log v0.0.0-20200511115504-543df19646ad/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8=
 github.com/pingcap/log v0.0.0-20210625125904-98ed8e2eb1c7/go.mod h1:8AanEdAHATuRurdGxZXBz0At+9avep+ub7U1AGYLIMM=
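Note: both glue implementations (br/pkg/gluetidb for the CLI and executor/brie.go for the BACKUP/RESTORE SQL statements) must pick up the new `glue.Session` method. A compile-time guard inside each implementing package, as sketched below, would catch a missed implementation early; this assertion is hypothetical and not part of the patch.

```go
package executor

import "github.com/pingcap/tidb/br/pkg/glue"

// Fails to compile if tidbGlueSession stops satisfying glue.Session,
// e.g. when a new method such as GetGlobalVariable is added to the interface.
var _ glue.Session = (*tidbGlueSession)(nil)
```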
diff --git a/planner/core/exhaust_physical_plans.go b/planner/core/exhaust_physical_plans.go
index 15e45c2b5dbb2..4ce4a590e08bd 100644
--- a/planner/core/exhaust_physical_plans.go
+++ b/planner/core/exhaust_physical_plans.go
@@ -287,14 +287,16 @@ func (p *LogicalJoin) getEnforcedMergeJoin(prop *property.PhysicalProperty, sche
 		return nil
 	}
 	for _, item := range prop.SortItems {
-		isExist := false
+		isExist, hasLeftColInProp, hasRightColInProp := false, false, false
 		for joinKeyPos := 0; joinKeyPos < len(leftJoinKeys); joinKeyPos++ {
 			var key *expression.Column
 			if item.Col.Equal(p.ctx, leftJoinKeys[joinKeyPos]) {
 				key = leftJoinKeys[joinKeyPos]
+				hasLeftColInProp = true
 			}
 			if item.Col.Equal(p.ctx, rightJoinKeys[joinKeyPos]) {
 				key = rightJoinKeys[joinKeyPos]
+				hasRightColInProp = true
 			}
 			if key == nil {
 				continue
@@ -314,6 +316,13 @@ func (p *LogicalJoin) getEnforcedMergeJoin(prop *property.PhysicalProperty, sche
 		if !isExist {
 			return nil
 		}
+		// If the output requires the order of the inner side, reject it: null-extended rows added for that side could break the order.
+		if p.JoinType == LeftOuterJoin && hasRightColInProp {
+			return nil
+		}
+		if p.JoinType == RightOuterJoin && hasLeftColInProp {
+			return nil
+		}
 	}
 	// Generate the enforced sort merge join
 	leftKeys := getNewJoinKeysByOffsets(leftJoinKeys, offsets)
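Note: for an outer join, rows on the inner side may be null-extended, so a sort requirement on inner-side columns cannot be satisfied by the merge join's own key order; the planner must fall back and add an explicit Sort (as the updated plan_suite_out.json case shows). The predicate below restates the guard added above as a standalone sketch, for illustration only.

```go
// innerSideOrderRequested reports whether the required sort property references
// columns of the null-extended (inner) side of an outer join, in which case the
// enforced-merge-join shortcut must be abandoned.
func innerSideOrderRequested(joinType JoinType, hasLeftColInProp, hasRightColInProp bool) bool {
	return (joinType == LeftOuterJoin && hasRightColInProp) ||
		(joinType == RightOuterJoin && hasLeftColInProp)
}
```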
+ "Warning 1105 IndexMerge is inapplicable" ] } ] diff --git a/planner/core/testdata/plan_suite_out.json b/planner/core/testdata/plan_suite_out.json index 2cfdc44a79666..f5f0d241063ee 100644 --- a/planner/core/testdata/plan_suite_out.json +++ b/planner/core/testdata/plan_suite_out.json @@ -566,7 +566,7 @@ }, { "SQL": "select /*+ TIDB_SMJ(t1,t2,t3)*/ * from t t1 left outer join t t2 on t1.a = t2.a left outer join t t3 on t2.a = t3.a", - "Best": "MergeLeftOuterJoin{MergeLeftOuterJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)->TableReader(Table(t))}(test.t.a,test.t.a)" + "Best": "MergeLeftOuterJoin{MergeLeftOuterJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t.a,test.t.a)->Sort->TableReader(Table(t))}(test.t.a,test.t.a)" }, { "SQL": "select /*+ TIDB_SMJ(t1,t2,t3)*/ * from t t1 left outer join t t2 on t1.a = t2.a left outer join t t3 on t1.a = t3.a", diff --git a/session/bootstrap.go b/session/bootstrap.go index 50fae971872d3..6d1ad56bc1d9b 100644 --- a/session/bootstrap.go +++ b/session/bootstrap.go @@ -583,13 +583,13 @@ const ( version84 = 84 // version85 updates bindings with status 'using' in mysql.bind_info table to 'enabled' status version85 = 85 - // version86 changes global variable `tidb_enable_top_sql` value from false to true. + // version86 update mysql.tables_priv from SET('Select','Insert','Update') to SET('Select','Insert','Update','References'). version86 = 86 ) // currentBootstrapVersion is defined as a variable, so we can modify its value for testing. // please make sure this is the largest version -var currentBootstrapVersion int64 = version86 +var currentBootstrapVersion int64 = version85 var ( bootstrapVersion = []func(Session, int64){ @@ -1759,8 +1759,7 @@ func upgradeToVer86(s Session, ver int64) { if ver >= version86 { return } - // Enable Top SQL by default after upgrade - mustExecute(s, "set @@global.tidb_enable_top_sql = 1") + doReentrantDDL(s, "ALTER TABLE mysql.tables_priv MODIFY COLUMN Column_priv SET('Select','Insert','Update','References')") } func writeOOMAction(s Session) { @@ -1915,10 +1914,6 @@ func doDMLWorks(s Session) { } value := fmt.Sprintf(`("%s", "%s")`, strings.ToLower(k), vVal) values = append(values, value) - - if v.GlobalConfigName != "" { - domain.GetDomain(s).NotifyGlobalConfigChange(v.GlobalConfigName, variable.OnOffToTrueFalse(vVal)) - } } } sql := fmt.Sprintf("INSERT HIGH_PRIORITY INTO %s.%s VALUES %s;", mysql.SystemDB, mysql.GlobalVariablesTable, diff --git a/session/bootstrap_test.go b/session/bootstrap_test.go index 659d237a63b71..581068a9f56bd 100644 --- a/session/bootstrap_test.go +++ b/session/bootstrap_test.go @@ -24,7 +24,6 @@ import ( "github.com/pingcap/tidb/bindinfo" "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/domain" - "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/meta" "github.com/pingcap/tidb/parser/auth" "github.com/pingcap/tidb/sessionctx" @@ -1095,55 +1094,3 @@ func TestUpgradeToVer85(t *testing.T) { require.NoError(t, r.Close()) mustExec(t, se, "delete from mysql.bind_info where default_db = 'test'") } - -func TestUpgradeVersion86(t *testing.T) { - ctx := context.Background() - store, dom := createStoreAndBootstrap(t) - defer func() { require.NoError(t, store.Close()) }() - - seV85 := createSessionAndSetID(t, store) - txn, err := store.Begin() - require.NoError(t, err) - m := meta.NewMeta(txn) - err = m.FinishBootstrap(int64(85)) - require.NoError(t, err) - err = txn.Commit(context.Background()) - require.NoError(t, err) - mustExec(t, seV85, "update mysql.tidb set 
diff --git a/session/bootstrap.go b/session/bootstrap.go
index 50fae971872d3..6d1ad56bc1d9b 100644
--- a/session/bootstrap.go
+++ b/session/bootstrap.go
@@ -583,13 +583,13 @@ const (
 	version84 = 84
 	// version85 updates bindings with status 'using' in mysql.bind_info table to 'enabled' status
 	version85 = 85
-	// version86 changes global variable `tidb_enable_top_sql` value from false to true.
+	// version86 updates mysql.tables_priv from SET('Select','Insert','Update') to SET('Select','Insert','Update','References').
 	version86 = 86
 )
 
 // currentBootstrapVersion is defined as a variable, so we can modify its value for testing.
 // please make sure this is the largest version
-var currentBootstrapVersion int64 = version86
+var currentBootstrapVersion int64 = version85
 
 var (
 	bootstrapVersion = []func(Session, int64){
@@ -1759,8 +1759,7 @@ func upgradeToVer86(s Session, ver int64) {
 	if ver >= version86 {
 		return
 	}
-	// Enable Top SQL by default after upgrade
-	mustExecute(s, "set @@global.tidb_enable_top_sql = 1")
+	doReentrantDDL(s, "ALTER TABLE mysql.tables_priv MODIFY COLUMN Column_priv SET('Select','Insert','Update','References')")
 }
 
 func writeOOMAction(s Session) {
@@ -1915,10 +1914,6 @@ func doDMLWorks(s Session) {
 		}
 		value := fmt.Sprintf(`("%s", "%s")`, strings.ToLower(k), vVal)
 		values = append(values, value)
-
-		if v.GlobalConfigName != "" {
-			domain.GetDomain(s).NotifyGlobalConfigChange(v.GlobalConfigName, variable.OnOffToTrueFalse(vVal))
-		}
 	}
 	}
 	sql := fmt.Sprintf("INSERT HIGH_PRIORITY INTO %s.%s VALUES %s;", mysql.SystemDB, mysql.GlobalVariablesTable,
diff --git a/session/bootstrap_test.go b/session/bootstrap_test.go
index 659d237a63b71..581068a9f56bd 100644
--- a/session/bootstrap_test.go
+++ b/session/bootstrap_test.go
@@ -24,7 +24,6 @@ import (
 	"github.com/pingcap/tidb/bindinfo"
 	"github.com/pingcap/tidb/config"
 	"github.com/pingcap/tidb/domain"
-	"github.com/pingcap/tidb/kv"
 	"github.com/pingcap/tidb/meta"
 	"github.com/pingcap/tidb/parser/auth"
 	"github.com/pingcap/tidb/sessionctx"
@@ -1095,55 +1094,3 @@ func TestUpgradeToVer85(t *testing.T) {
 	require.NoError(t, r.Close())
 	mustExec(t, se, "delete from mysql.bind_info where default_db = 'test'")
 }
-
-func TestUpgradeVersion86(t *testing.T) {
-	ctx := context.Background()
-	store, dom := createStoreAndBootstrap(t)
-	defer func() { require.NoError(t, store.Close()) }()
-
-	seV85 := createSessionAndSetID(t, store)
-	txn, err := store.Begin()
-	require.NoError(t, err)
-	m := meta.NewMeta(txn)
-	err = m.FinishBootstrap(int64(85))
-	require.NoError(t, err)
-	err = txn.Commit(context.Background())
-	require.NoError(t, err)
-	mustExec(t, seV85, "update mysql.tidb set variable_value='85' where variable_name='tidb_server_version'")
-	mustExec(t, seV85, "set @@global.tidb_enable_top_sql = 0")
-	mustExec(t, seV85, "commit")
-	unsetStoreBootstrapped(store.UUID())
-	ver, err := getBootstrapVersion(seV85)
-	require.NoError(t, err)
-	require.Equal(t, int64(85), ver)
-	// Top SQL is disabled in old version TiDB by default.
-	r := mustExec(t, seV85, `SELECT @@global.tidb_enable_top_sql`)
-	req := r.NewChunk(nil)
-	require.NoError(t, r.Next(ctx, req))
-	require.Equal(t, 1, req.NumRows())
-	row := req.GetRow(0)
-	require.Equal(t, int64(0), row.GetInt64(0))
-	dom.Close()
-
-	// after upgrade.
-	domV86, err := BootstrapSession(store)
-	require.NoError(t, err)
-	defer domV86.Close()
-	seV86 := createSessionAndSetID(t, store)
-	ver, err = getBootstrapVersion(seV86)
-	require.NoError(t, err)
-	require.Equal(t, currentBootstrapVersion, ver)
-	r = mustExec(t, seV86, `SELECT @@global.tidb_enable_top_sql`)
-	req = r.NewChunk(nil)
-	require.NoError(t, r.Next(ctx, req))
-	require.Equal(t, 1, req.NumRows())
-	row = req.GetRow(0)
-	require.Equal(t, int64(1), row.GetInt64(0))
-
-	storeWithePD, ok := store.(kv.StorageWithPD)
-	require.True(t, ok)
-	items, err := storeWithePD.GetPDClient().LoadGlobalConfig(ctx, []string{"enable_resource_metering"})
-	require.NoError(t, err)
-	require.Equal(t, 1, len(items))
-	require.Equal(t, "true", items[0].Value)
-}
diff --git a/session/session.go b/session/session.go
index 2e8449182ca93..51efe43531ee4 100644
--- a/session/session.go
+++ b/session/session.go
@@ -2246,23 +2246,17 @@ func (s *session) cachedPlanExec(ctx context.Context,
 // IsPointGetWithPKOrUniqueKeyByAutoCommit
 func (s *session) IsCachedExecOk(ctx context.Context, preparedStmt *plannercore.CachedPrepareStmt) (bool, error) {
 	prepared := preparedStmt.PreparedAst
-	if prepared.CachedPlan == nil {
+	if prepared.CachedPlan == nil || preparedStmt.SnapshotTSEvaluator != nil {
 		return false, nil
 	}
 	// check auto commit
 	if !plannercore.IsAutoCommitTxn(s) {
 		return false, nil
 	}
-	// SnapshotTSEvaluator != nil, it is stale read
-	// stale read expect a stale infoschema
-	// so skip infoschema check
-	if preparedStmt.SnapshotTSEvaluator == nil {
-		// check schema version
-		is := s.GetInfoSchema().(infoschema.InfoSchema)
-		if prepared.SchemaVersion != is.SchemaMetaVersion() {
-			prepared.CachedPlan = nil
-			return false, nil
-		}
+	is := s.GetInfoSchema().(infoschema.InfoSchema)
+	if prepared.SchemaVersion != is.SchemaMetaVersion() {
+		prepared.CachedPlan = nil
+		return false, nil
 	}
 	// maybe we'd better check cached plan type here, current
 	// only point select/update will be cached, see "getPhysicalPlan" func
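Note: the session.go change makes stale-read prepared statements skip the cached-plan fast path entirely, since the short-circuit path cannot re-evaluate the `AS OF TIMESTAMP` snapshot correctly over the binary protocol (the scenario TestPlanCacheWithStaleReadByBinaryProto covers). The sketch below paraphrases the new early-exit condition; it is not the full `IsCachedExecOk` method.

```go
// cachedExecAllowed mirrors the updated gate: a prepared statement carrying a
// stale-read timestamp evaluator never takes the cached-plan fast path, and for
// ordinary statements the cached plan is still invalidated on schema change.
func cachedExecAllowed(p *plannercore.CachedPrepareStmt, schemaChanged bool) bool {
	if p.PreparedAst.CachedPlan == nil || p.SnapshotTSEvaluator != nil {
		return false
	}
	return !schemaChanged
}
```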
diff --git a/sessiontxn/txn_context_test.go b/sessiontxn/txn_context_test.go
index 290569a8b8537..a0d7816e6b595 100644
--- a/sessiontxn/txn_context_test.go
+++ b/sessiontxn/txn_context_test.go
@@ -639,13 +639,6 @@ func TestTxnContextForStaleReadInPrepare(t *testing.T) {
 		tk.MustExec("execute s2")
 	})
 
-	// plan cache for stmtID2
-	doWithCheckPath(t, se, []string{"assertTxnManagerInCachedPlanExec", "assertTxnManagerInShortPointGetPlan"}, func() {
-		rs, err := se.ExecutePreparedStmt(context.TODO(), stmtID2, nil)
-		require.NoError(t, err)
-		tk.ResultSetToResult(rs, fmt.Sprintf("%v", rs)).Check(testkit.Rows("1 10"))
-	})
-
 	// tx_read_ts in prepare
 	se.SetValue(sessiontxn.AssertTxnInfoSchemaKey, is1)
 	doWithCheckPath(t, se, path, func() {
@@ -656,13 +649,6 @@ func TestTxnContextForStaleReadInPrepare(t *testing.T) {
 	doWithCheckPath(t, se, normalPathRecords, func() {
 		tk.MustExec("execute s3")
 	})
-
-	// plan cache for stmtID3
-	doWithCheckPath(t, se, []string{"assertTxnManagerInCachedPlanExec", "assertTxnManagerInShortPointGetPlan"}, func() {
-		rs, err := se.ExecutePreparedStmt(context.TODO(), stmtID3, nil)
-		require.NoError(t, err)
-		tk.ResultSetToResult(rs, fmt.Sprintf("%v", rs)).Check(testkit.Rows("1 10"))
-	})
 }
 
 func TestTxnContextPreparedStmtWithForUpdate(t *testing.T) {
diff --git a/util/topsql/state/state.go b/util/topsql/state/state.go
index 8c76f68059cbf..d86823e303c0f 100644
--- a/util/topsql/state/state.go
+++ b/util/topsql/state/state.go
@@ -18,7 +18,7 @@ import "go.uber.org/atomic"
 
 // Default Top-SQL state values.
 const (
-	DefTiDBTopSQLEnable             = true
+	DefTiDBTopSQLEnable             = false
 	DefTiDBTopSQLPrecisionSeconds   = 1
 	DefTiDBTopSQLMaxTimeSeriesCount = 100
 	DefTiDBTopSQLMaxMetaCount       = 5000