From f30108d88c458f3546bf2ffdb3f33b991437ff4e Mon Sep 17 00:00:00 2001 From: Zijie Lu Date: Wed, 17 Mar 2021 13:42:55 +0800 Subject: [PATCH 01/44] executor, expression: fix the incorrect result of AVG function (#23285) --- executor/aggregate_test.go | 31 +++++++++++++++++++ expression/aggregation/base_func.go | 7 ++++- .../transformation_rules_suite_out.json | 6 ++-- .../testdata/plan_suite_unexported_out.json | 24 +++++++------- 4 files changed, 52 insertions(+), 16 deletions(-) diff --git a/executor/aggregate_test.go b/executor/aggregate_test.go index b9c4bf49bca91..c7c55c2669e28 100644 --- a/executor/aggregate_test.go +++ b/executor/aggregate_test.go @@ -1373,3 +1373,34 @@ func (s *testSerialSuite) TestRandomPanicAggConsume(c *C) { c.Assert(err.Error(), Equals, "failpoint panic: ERROR 1105 (HY000): Out Of Memory Quota![conn_id=1]") } } + +func (s *testSuiteAgg) TestIssue23277(c *C) { + tk := testkit.NewTestKitWithInit(c, s.store) + tk.MustExec("use test;") + tk.MustExec("drop table if exists t;") + + tk.MustExec("create table t(a tinyint(1));") + tk.MustExec("insert into t values (-120), (127);") + tk.MustQuery("select avg(a) from t group by a").Sort().Check(testkit.Rows("-120.0000", "127.0000")) + tk.MustExec("drop table t;") + + tk.MustExec("create table t(a smallint(1));") + tk.MustExec("insert into t values (-120), (127);") + tk.MustQuery("select avg(a) from t group by a").Sort().Check(testkit.Rows("-120.0000", "127.0000")) + tk.MustExec("drop table t;") + + tk.MustExec("create table t(a mediumint(1));") + tk.MustExec("insert into t values (-120), (127);") + tk.MustQuery("select avg(a) from t group by a").Sort().Check(testkit.Rows("-120.0000", "127.0000")) + tk.MustExec("drop table t;") + + tk.MustExec("create table t(a int(1));") + tk.MustExec("insert into t values (-120), (127);") + tk.MustQuery("select avg(a) from t group by a").Sort().Check(testkit.Rows("-120.0000", "127.0000")) + tk.MustExec("drop table t;") + + tk.MustExec("create table t(a bigint(1));") + tk.MustExec("insert into t values (-120), (127);") + tk.MustQuery("select avg(a) from t group by a").Sort().Check(testkit.Rows("-120.0000", "127.0000")) + tk.MustExec("drop table t;") +} diff --git a/expression/aggregation/base_func.go b/expression/aggregation/base_func.go index a0c609b374dac..6abd5e8cfc0aa 100644 --- a/expression/aggregation/base_func.go +++ b/expression/aggregation/base_func.go @@ -208,7 +208,12 @@ func (a *baseFuncDesc) typeInfer4Sum(ctx sessionctx.Context) { // Because child returns integer or decimal type. 
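// Illustrative example (assuming types.DivFracIncr == 4): with the change
// below, AVG over an integer argument takes its precision from the argument's
// default display width instead of the blanket decimal(65,30). For an int(11)
// column, mysql.GetDefaultFieldLengthAndDecimal yields flen 11, so the result
// type becomes decimal(11+4, 4) = decimal(15,4), which matches the updated
// plan testdata later in this patch.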
func (a *baseFuncDesc) typeInfer4Avg(ctx sessionctx.Context) { switch a.Args[0].GetType().Tp { - case mysql.TypeTiny, mysql.TypeShort, mysql.TypeInt24, mysql.TypeLong, mysql.TypeLonglong, mysql.TypeYear, mysql.TypeNewDecimal: + case mysql.TypeTiny, mysql.TypeShort, mysql.TypeInt24, mysql.TypeLong, mysql.TypeLonglong: + a.RetTp = types.NewFieldType(mysql.TypeNewDecimal) + a.RetTp.Decimal = types.DivFracIncr + flen, _ := mysql.GetDefaultFieldLengthAndDecimal(a.Args[0].GetType().Tp) + a.RetTp.Flen = flen + types.DivFracIncr + case mysql.TypeYear, mysql.TypeNewDecimal: a.RetTp = types.NewFieldType(mysql.TypeNewDecimal) if a.Args[0].GetType().Decimal < 0 { a.RetTp.Decimal = mysql.MaxDecimalScale diff --git a/planner/cascades/testdata/transformation_rules_suite_out.json b/planner/cascades/testdata/transformation_rules_suite_out.json index 6dc0ca30024ac..38c92ac9869f4 100644 --- a/planner/cascades/testdata/transformation_rules_suite_out.json +++ b/planner/cascades/testdata/transformation_rules_suite_out.json @@ -2339,7 +2339,7 @@ "Group#2 Schema:[Column#13,Column#14,Column#15,test.t.b,Column#16,Column#17,Column#18,Column#19,Column#20,Column#14,Column#13]", " Selection_4 input:[Group#3], ge(Column#13, 0), ge(Column#14, 0)", "Group#3 Schema:[Column#13,Column#14,Column#15,test.t.b,Column#16,Column#17,Column#18,Column#19,Column#20,Column#14,Column#13]", - " Projection_8 input:[Group#4], 1->Column#13, cast(test.t.b, decimal(65,0) BINARY)->Column#14, cast(test.t.b, decimal(65,30) BINARY)->Column#15, test.t.b, cast(test.t.b, int(11))->Column#16, cast(test.t.b, int(11))->Column#17, ifnull(cast(test.t.b, bigint(21) UNSIGNED BINARY), 18446744073709551615)->Column#18, ifnull(cast(test.t.b, bigint(21) UNSIGNED BINARY), 0)->Column#19, ifnull(cast(test.t.b, bigint(21) UNSIGNED BINARY), 0)->Column#20, cast(test.t.b, decimal(65,0) BINARY)->Column#14, 1->Column#13", + " Projection_8 input:[Group#4], 1->Column#13, cast(test.t.b, decimal(65,0) BINARY)->Column#14, cast(test.t.b, decimal(15,4) BINARY)->Column#15, test.t.b, cast(test.t.b, int(11))->Column#16, cast(test.t.b, int(11))->Column#17, ifnull(cast(test.t.b, bigint(21) UNSIGNED BINARY), 18446744073709551615)->Column#18, ifnull(cast(test.t.b, bigint(21) UNSIGNED BINARY), 0)->Column#19, ifnull(cast(test.t.b, bigint(21) UNSIGNED BINARY), 0)->Column#20, cast(test.t.b, decimal(65,0) BINARY)->Column#14, 1->Column#13", "Group#4 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", " DataSource_1 table:t" ] @@ -2348,7 +2348,7 @@ "SQL": "select count(b), sum(b), avg(b), f, max(c), min(c), bit_and(c), bit_or(d), bit_xor(g) from t group by a", "Result": [ "Group#0 Schema:[Column#13,Column#14,Column#15,test.t.f,Column#16,Column#17,Column#18,Column#19,Column#20]", - " Projection_5 input:[Group#1], 1->Column#13, cast(test.t.b, decimal(65,0) BINARY)->Column#14, cast(test.t.b, decimal(65,30) BINARY)->Column#15, test.t.f, cast(test.t.c, int(11))->Column#16, cast(test.t.c, int(11))->Column#17, ifnull(cast(test.t.c, bigint(21) UNSIGNED BINARY), 18446744073709551615)->Column#18, ifnull(cast(test.t.d, bigint(21) UNSIGNED BINARY), 0)->Column#19, ifnull(cast(test.t.g, bigint(21) UNSIGNED BINARY), 0)->Column#20", + " Projection_5 input:[Group#1], 1->Column#13, cast(test.t.b, decimal(65,0) BINARY)->Column#14, cast(test.t.b, decimal(15,4) BINARY)->Column#15, test.t.f, cast(test.t.c, int(11))->Column#16, cast(test.t.c, int(11))->Column#17, ifnull(cast(test.t.c, bigint(21) UNSIGNED BINARY), 18446744073709551615)->Column#18, ifnull(cast(test.t.d, bigint(21) UNSIGNED BINARY), 0)->Column#19, 
ifnull(cast(test.t.g, bigint(21) UNSIGNED BINARY), 0)->Column#20", "Group#1 Schema:[test.t.a,test.t.b,test.t.c,test.t.d,test.t.f,test.t.g], UniqueKey:[test.t.f,test.t.f,test.t.g,test.t.a]", " DataSource_1 table:t" ] @@ -2424,7 +2424,7 @@ "Group#1 Schema:[Column#13,Column#14,Column#15]", " Aggregation_5 input:[Group#2], group by:Column#17, funcs:max(test.t.a), min(test.t.b), avg(Column#16)", "Group#2 Schema:[test.t.a,test.t.b,Column#16,Column#17]", - " Projection_4 input:[Group#3], test.t.a, test.t.b, cast(test.t.c, decimal(65,30) BINARY)->Column#16, plus(test.t.a, test.t.b)->Column#17", + " Projection_4 input:[Group#3], test.t.a, test.t.b, cast(test.t.c, decimal(15,4) BINARY)->Column#16, plus(test.t.a, test.t.b)->Column#17", "Group#3 Schema:[test.t.a,test.t.b,test.t.c]", " DataSource_1 table:t" ] diff --git a/planner/core/testdata/plan_suite_unexported_out.json b/planner/core/testdata/plan_suite_unexported_out.json index eab64f63a7ba8..a14246ddc1982 100644 --- a/planner/core/testdata/plan_suite_unexported_out.json +++ b/planner/core/testdata/plan_suite_unexported_out.json @@ -183,11 +183,11 @@ { "Name": "TestWindowFunction", "Cases": [ - "TableReader(Table(t))->Window(avg(cast(test.t.a, decimal(65,30) BINARY))->Column#14 over(partition by test.t.a))->Projection", - "TableReader(Table(t))->Sort->Window(avg(cast(test.t.a, decimal(65,30) BINARY))->Column#14 over(partition by test.t.b))->Projection", + "TableReader(Table(t))->Window(avg(cast(test.t.a, decimal(15,4) BINARY))->Column#14 over(partition by test.t.a))->Projection", + "TableReader(Table(t))->Sort->Window(avg(cast(test.t.a, decimal(15,4) BINARY))->Column#14 over(partition by test.t.b))->Projection", "IndexReader(Index(t.f)[[NULL,+inf]])->Projection->Sort->Window(avg(cast(Column#16, decimal(24,4) BINARY))->Column#17 over(partition by Column#15))->Projection", - "TableReader(Table(t))->Sort->Window(avg(cast(test.t.a, decimal(65,30) BINARY))->Column#14 over(order by test.t.a, test.t.b desc range between unbounded preceding and current row))->Projection", - "TableReader(Table(t))->Window(avg(cast(test.t.a, decimal(65,30) BINARY))->Column#14 over(partition by test.t.a))->Projection", + "TableReader(Table(t))->Sort->Window(avg(cast(test.t.a, decimal(15,4) BINARY))->Column#14 over(order by test.t.a, test.t.b desc range between unbounded preceding and current row))->Projection", + "TableReader(Table(t))->Window(avg(cast(test.t.a, decimal(15,4) BINARY))->Column#14 over(partition by test.t.a))->Projection", "[planner:1054]Unknown column 'z' in 'field list'", "TableReader(Table(t))->Window(sum(cast(test.t.b, decimal(65,0) BINARY))->Column#14 over())->Sort->Projection", "IndexReader(Index(t.f)[[NULL,+inf]]->StreamAgg)->StreamAgg->Window(sum(Column#13)->Column#15 over())->Projection", @@ -206,7 +206,7 @@ "IndexReader(Index(t.f)[[NULL,+inf]])->Window(sum(cast(test.t.a, decimal(65,0) BINARY))->Column#14 over(rows between 1 preceding and 1 following))->Projection", "[planner:3583]Window '' cannot inherit 'w' since both contain an ORDER BY clause.", "[planner:3591]Window 'w1' is defined twice.", - "TableReader(Table(t))->Window(avg(cast(test.t.a, decimal(65,30) BINARY))->Column#14 over(partition by test.t.a))->Projection", + "TableReader(Table(t))->Window(avg(cast(test.t.a, decimal(15,4) BINARY))->Column#14 over(partition by test.t.a))->Projection", "TableReader(Table(t))->Window(sum(cast(test.t.a, decimal(65,0) BINARY))->Column#14 over(partition by test.t.a))->Sort->Projection", "[planner:1235]This version of TiDB doesn't yet support 'GROUPS'", 
"[planner:3584]Window '': frame start cannot be UNBOUNDED FOLLOWING.", @@ -227,7 +227,7 @@ "[planner:1210]Incorrect arguments to nth_value", "[planner:1210]Incorrect arguments to ntile", "IndexReader(Index(t.f)[[NULL,+inf]])->Window(ntile()->Column#14 over())->Projection", - "TableReader(Table(t))->Sort->Window(avg(cast(test.t.a, decimal(65,30) BINARY))->Column#14 over(partition by test.t.b))->Projection", + "TableReader(Table(t))->Sort->Window(avg(cast(test.t.a, decimal(15,4) BINARY))->Column#14 over(partition by test.t.b))->Projection", "TableReader(Table(t))->Window(nth_value(test.t.i_date, 1)->Column#14 over())->Projection", "TableReader(Table(t))->Window(sum(cast(test.t.b, decimal(65,0) BINARY))->Column#15, sum(cast(test.t.c, decimal(65,0) BINARY))->Column#16 over(order by test.t.a range between unbounded preceding and current row))->Projection", "[planner:3593]You cannot use the window function 'sum' in this context.'", @@ -256,11 +256,11 @@ { "Name": "TestWindowParallelFunction", "Cases": [ - "TableReader(Table(t))->Window(avg(cast(test.t.a, decimal(65,30) BINARY))->Column#14 over(partition by test.t.a))->Projection", - "TableReader(Table(t))->Sort->Window(avg(cast(test.t.a, decimal(65,30) BINARY))->Column#14 over(partition by test.t.b))->Partition(execution info: concurrency:4, data sources:[TableReader_10])->Projection", + "TableReader(Table(t))->Window(avg(cast(test.t.a, decimal(15,4) BINARY))->Column#14 over(partition by test.t.a))->Projection", + "TableReader(Table(t))->Sort->Window(avg(cast(test.t.a, decimal(15,4) BINARY))->Column#14 over(partition by test.t.b))->Partition(execution info: concurrency:4, data sources:[TableReader_10])->Projection", "IndexReader(Index(t.f)[[NULL,+inf]])->Projection->Sort->Window(avg(cast(Column#16, decimal(24,4) BINARY))->Column#17 over(partition by Column#15))->Partition(execution info: concurrency:4, data sources:[Projection_8])->Projection", - "TableReader(Table(t))->Sort->Window(avg(cast(test.t.a, decimal(65,30) BINARY))->Column#14 over(order by test.t.a, test.t.b desc range between unbounded preceding and current row))->Projection", - "TableReader(Table(t))->Window(avg(cast(test.t.a, decimal(65,30) BINARY))->Column#14 over(partition by test.t.a))->Projection", + "TableReader(Table(t))->Sort->Window(avg(cast(test.t.a, decimal(15,4) BINARY))->Column#14 over(order by test.t.a, test.t.b desc range between unbounded preceding and current row))->Projection", + "TableReader(Table(t))->Window(avg(cast(test.t.a, decimal(15,4) BINARY))->Column#14 over(partition by test.t.a))->Projection", "[planner:1054]Unknown column 'z' in 'field list'", "TableReader(Table(t))->Window(sum(cast(test.t.b, decimal(65,0) BINARY))->Column#14 over())->Sort->Projection", "IndexReader(Index(t.f)[[NULL,+inf]]->StreamAgg)->StreamAgg->Window(sum(Column#13)->Column#15 over())->Projection", @@ -279,7 +279,7 @@ "IndexReader(Index(t.f)[[NULL,+inf]])->Window(sum(cast(test.t.a, decimal(65,0) BINARY))->Column#14 over(rows between 1 preceding and 1 following))->Projection", "[planner:3583]Window '' cannot inherit 'w' since both contain an ORDER BY clause.", "[planner:3591]Window 'w1' is defined twice.", - "TableReader(Table(t))->Window(avg(cast(test.t.a, decimal(65,30) BINARY))->Column#14 over(partition by test.t.a))->Projection", + "TableReader(Table(t))->Window(avg(cast(test.t.a, decimal(15,4) BINARY))->Column#14 over(partition by test.t.a))->Projection", "TableReader(Table(t))->Window(sum(cast(test.t.a, decimal(65,0) BINARY))->Column#14 over(partition by 
test.t.a))->Sort->Projection", "[planner:1235]This version of TiDB doesn't yet support 'GROUPS'", "[planner:3584]Window '': frame start cannot be UNBOUNDED FOLLOWING.", @@ -300,7 +300,7 @@ "[planner:1210]Incorrect arguments to nth_value", "[planner:1210]Incorrect arguments to ntile", "IndexReader(Index(t.f)[[NULL,+inf]])->Window(ntile()->Column#14 over())->Projection", - "TableReader(Table(t))->Sort->Window(avg(cast(test.t.a, decimal(65,30) BINARY))->Column#14 over(partition by test.t.b))->Partition(execution info: concurrency:4, data sources:[TableReader_10])->Projection", + "TableReader(Table(t))->Sort->Window(avg(cast(test.t.a, decimal(15,4) BINARY))->Column#14 over(partition by test.t.b))->Partition(execution info: concurrency:4, data sources:[TableReader_10])->Projection", "TableReader(Table(t))->Window(nth_value(test.t.i_date, 1)->Column#14 over())->Projection", "TableReader(Table(t))->Window(sum(cast(test.t.b, decimal(65,0) BINARY))->Column#15, sum(cast(test.t.c, decimal(65,0) BINARY))->Column#16 over(order by test.t.a range between unbounded preceding and current row))->Projection", "[planner:3593]You cannot use the window function 'sum' in this context.'", From 6219f55752c67c8bdce8d4a35de85b44f68a59d1 Mon Sep 17 00:00:00 2001 From: disksing Date: Wed, 17 Mar 2021 14:00:56 +0800 Subject: [PATCH 02/44] store/tikv: resolve util dependency (#23082) --- store/tikv/config/config.go | 37 +++++++++++++++++++++++++++++++++++++ store/tikv/region_cache.go | 5 ++--- 2 files changed, 39 insertions(+), 3 deletions(-) diff --git a/store/tikv/config/config.go b/store/tikv/config/config.go index 18359145db11e..e5fdd8f4c662b 100644 --- a/store/tikv/config/config.go +++ b/store/tikv/config/config.go @@ -15,8 +15,10 @@ package config import ( "fmt" + "net/http" "net/url" "strings" + "sync" "sync/atomic" "github.com/pingcap/errors" @@ -144,3 +146,38 @@ func ParsePath(path string) (etcdAddrs []string, disableGC bool, err error) { etcdAddrs = strings.Split(u.Host, ",") return } + +var ( + internalClientInit sync.Once + internalHTTPClient *http.Client + internalHTTPSchema string +) + +// InternalHTTPClient is used by TiDB-Server to request other components. +func InternalHTTPClient() *http.Client { + internalClientInit.Do(initInternalClient) + return internalHTTPClient +} + +// InternalHTTPSchema specifies use http or https to request other components. 
+func InternalHTTPSchema() string { + internalClientInit.Do(initInternalClient) + return internalHTTPSchema +} + +func initInternalClient() { + clusterSecurity := GetGlobalConfig().Security + tlsCfg, err := clusterSecurity.ToTLSConfig() + if err != nil { + logutil.BgLogger().Fatal("could not load cluster ssl", zap.Error(err)) + } + if tlsCfg == nil { + internalHTTPSchema = "http" + internalHTTPClient = http.DefaultClient + return + } + internalHTTPSchema = "https" + internalHTTPClient = &http.Client{ + Transport: &http.Transport{TLSClientConfig: tlsCfg}, + } +} diff --git a/store/tikv/region_cache.go b/store/tikv/region_cache.go index ba99e18d3d882..2b7f2ba041420 100644 --- a/store/tikv/region_cache.go +++ b/store/tikv/region_cache.go @@ -34,7 +34,6 @@ import ( "github.com/pingcap/tidb/store/tikv/config" "github.com/pingcap/tidb/store/tikv/logutil" "github.com/pingcap/tidb/store/tikv/metrics" - "github.com/pingcap/tidb/util" pd "github.com/tikv/pd/client" atomic2 "go.uber.org/atomic" "go.uber.org/zap" @@ -1684,14 +1683,14 @@ func invokeKVStatusAPI(saddr string, timeout time.Duration) (l livenessState) { }() ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() - url := fmt.Sprintf("%s://%s/status", util.InternalHTTPSchema(), saddr) + url := fmt.Sprintf("%s://%s/status", config.InternalHTTPSchema(), saddr) req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) if err != nil { logutil.BgLogger().Info("[liveness] build kv status request fail", zap.String("store", saddr), zap.Error(err)) l = unreachable return } - resp, err := util.InternalHTTPClient().Do(req) + resp, err := config.InternalHTTPClient().Do(req) if err != nil { logutil.BgLogger().Info("[liveness] request kv status fail", zap.String("store", saddr), zap.Error(err)) l = unreachable From 4e95e7612e2ce2afefcaf6ce0f9aea046534b2c3 Mon Sep 17 00:00:00 2001 From: xhe Date: Wed, 17 Mar 2021 14:46:55 +0800 Subject: [PATCH 03/44] *: do not report error if objectID is invalid (#22951) --- executor/infoschema_reader.go | 25 ++++++++++++++++++------- infoschema/tables_test.go | 13 +++++++++++++ 2 files changed, 31 insertions(+), 7 deletions(-) diff --git a/executor/infoschema_reader.go b/executor/infoschema_reader.go index c09d440b7ba33..8d5f44c785ba2 100644 --- a/executor/infoschema_reader.go +++ b/executor/infoschema_reader.go @@ -28,6 +28,7 @@ import ( "github.com/cznic/mathutil" "github.com/pingcap/errors" + "github.com/pingcap/failpoint" "github.com/pingcap/parser/charset" "github.com/pingcap/parser/model" "github.com/pingcap/parser/mysql" @@ -1859,11 +1860,21 @@ func (e *memtableRetriever) setDataForPlacementPolicy(ctx sessionctx.Context) er continue } // Currently, only partitions have placement rules. 
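+		// If the partition cannot be resolved from the current schema (for
+		// example, the bundle carries a stale or already-dropped ObjectID),
+		// skip the row instead of failing the whole query with an error.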
+ var tbName, dbName, ptName string + skip := true tb, db, part := is.FindTableByPartitionID(id) - if tb == nil { - return errors.Errorf("Can't find partition by id %d", id) - } - if checker != nil && !checker.RequestVerification(ctx.GetSessionVars().ActiveRoles, db.Name.L, tb.Meta().Name.L, "", mysql.SelectPriv) { + if tb != nil && (checker == nil || checker.RequestVerification(ctx.GetSessionVars().ActiveRoles, db.Name.L, tb.Meta().Name.L, "", mysql.SelectPriv)) { + dbName = db.Name.L + tbName = tb.Meta().Name.L + ptName = part.Name.L + skip = false + } + failpoint.Inject("outputInvalidPlacementRules", func(val failpoint.Value) { + if val.(bool) { + skip = false + } + }) + if skip { continue } for _, rule := range bundle.Rules { @@ -1875,9 +1886,9 @@ func (e *memtableRetriever) setDataForPlacementPolicy(ctx sessionctx.Context) er bundle.ID, bundle.Index, rule.ID, - db.Name.L, - tb.Meta().Name.L, - part.Name.L, + dbName, + tbName, + ptName, nil, string(rule.Role), rule.Count, diff --git a/infoschema/tables_test.go b/infoschema/tables_test.go index 699a2caa2caed..a952eff93db66 100644 --- a/infoschema/tables_test.go +++ b/infoschema/tables_test.go @@ -1474,6 +1474,19 @@ func (s *testTableSuite) TestPlacementPolicy(c *C) { is.SetBundle(bundle1) tk.MustQuery("select rule_id, schema_name, table_name, partition_name from information_schema.placement_policy order by partition_name, rule_id").Check(testkit.Rows( "0 test test_placement p0", "1 test test_placement p0", "0 test test_placement p1", "1 test test_placement p1")) + + // do not report error for invalid ObjectID + // check pingcap/tidb/issues/22950 + bundle1.ID = placement.GroupID(1) + tk.MustQuery("select rule_id from information_schema.placement_policy order by rule_id").Check(testkit.Rows( + "0", "1")) + + // test the failpoint for testing + fpName := "github.com/pingcap/tidb/executor/outputInvalidPlacementRules" + c.Assert(failpoint.Enable(fpName, "return(true)"), IsNil) + defer func() { c.Assert(failpoint.Disable(fpName), IsNil) }() + tk.MustQuery("select rule_id from information_schema.placement_policy order by rule_id").Check(testkit.Rows( + "0", "0", "1", "1")) } func (s *testTableSuite) TestInfoschemaClientErrors(c *C) { From 2890a8f62c32f35da89c9e2f4a6b3737c057bfdd Mon Sep 17 00:00:00 2001 From: guo-shaoge Date: Wed, 17 Mar 2021 15:02:55 +0800 Subject: [PATCH 04/44] executor: fix get var expr when session var is hex literal (#23241) --- expression/builtin_control_test.go | 2 +- expression/builtin_string_test.go | 2 +- expression/constant_test.go | 4 +- planner/core/common_plans.go | 22 +++++++++ planner/core/expression_rewriter.go | 3 +- planner/core/integration_test.go | 70 +++++++++++++++++++++++++++++ planner/core/prepare_test.go | 12 +++++ types/field_type.go | 4 +- 8 files changed, 112 insertions(+), 7 deletions(-) diff --git a/expression/builtin_control_test.go b/expression/builtin_control_test.go index 6ea1655e1c874..7f6e35aaa8626 100644 --- a/expression/builtin_control_test.go +++ b/expression/builtin_control_test.go @@ -116,7 +116,7 @@ func (s *testEvaluatorSuite) TestIfNull(c *C) { {tm, nil, tm, false, false}, {nil, duration, duration, false, false}, {nil, types.NewDecFromFloatForTest(123.123), types.NewDecFromFloatForTest(123.123), false, false}, - {nil, types.NewBinaryLiteralFromUint(0x01, -1), uint64(1), false, false}, + {nil, types.NewBinaryLiteralFromUint(0x01, -1), "\x01", false, false}, {nil, types.Set{Value: 1, Name: "abc"}, "abc", false, false}, {nil, jsonInt.GetMysqlJSON(), jsonInt.GetMysqlJSON(), false, 
false},
 		{"abc", nil, "abc", false, false},
diff --git a/expression/builtin_string_test.go b/expression/builtin_string_test.go
index 57eb867ab6498..026f3c5ffd647 100644
--- a/expression/builtin_string_test.go
+++ b/expression/builtin_string_test.go
@@ -1149,7 +1149,7 @@ func (s *testEvaluatorSuite) TestHexFunc(c *C) {
 		{-1, false, false, "FFFFFFFFFFFFFFFF"},
 		{-12.3, false, false, "FFFFFFFFFFFFFFF4"},
 		{-12.8, false, false, "FFFFFFFFFFFFFFF3"},
-		{types.NewBinaryLiteralFromUint(0xC, -1), false, false, "C"},
+		{types.NewBinaryLiteralFromUint(0xC, -1), false, false, "0C"},
 		{0x12, false, false, "12"},
 		{nil, true, false, ""},
 		{errors.New("must err"), false, true, ""},
diff --git a/expression/constant_test.go b/expression/constant_test.go
index b0d6abbb5f3c6..5bf9ced9fa40f 100644
--- a/expression/constant_test.go
+++ b/expression/constant_test.go
@@ -254,8 +254,8 @@ func (*testExpressionSuite) TestDeferredParamNotNull(c *C) {
 	c.Assert(mysql.TypeTimestamp, Equals, cstTime.GetType().Tp)
 	c.Assert(mysql.TypeDuration, Equals, cstDuration.GetType().Tp)
 	c.Assert(mysql.TypeBlob, Equals, cstBytes.GetType().Tp)
-	c.Assert(mysql.TypeBit, Equals, cstBinary.GetType().Tp)
-	c.Assert(mysql.TypeBit, Equals, cstBit.GetType().Tp)
+	c.Assert(mysql.TypeVarString, Equals, cstBinary.GetType().Tp)
+	c.Assert(mysql.TypeVarString, Equals, cstBit.GetType().Tp)
 	c.Assert(mysql.TypeFloat, Equals, cstFloat32.GetType().Tp)
 	c.Assert(mysql.TypeDouble, Equals, cstFloat64.GetType().Tp)
 	c.Assert(mysql.TypeEnum, Equals, cstEnum.GetType().Tp)
diff --git a/planner/core/common_plans.go b/planner/core/common_plans.go
index 8b856d10dffa6..0fa0840a44f95 100644
--- a/planner/core/common_plans.go
+++ b/planner/core/common_plans.go
@@ -187,6 +187,21 @@ type Execute struct {
 	Plan Plan
 }
 
+// isGetVarBinaryLiteral checks whether the result of a GetVar expression is a BinaryLiteral.
+// Because GetVar uses a String to represent a BinaryLiteral, we need to convert the string back to a BinaryLiteral here.
+func isGetVarBinaryLiteral(sctx sessionctx.Context, expr expression.Expression) (res bool) {
+	scalarFunc, ok := expr.(*expression.ScalarFunction)
+	if ok && scalarFunc.FuncName.L == ast.GetVar {
+		name, isNull, err := scalarFunc.GetArgs()[0].EvalString(sctx, chunk.Row{})
+		if err != nil || isNull {
+			res = false
+		} else if dt, ok2 := sctx.GetSessionVars().Users[name]; ok2 {
+			res = (dt.Kind() == types.KindBinaryLiteral)
+		}
+	}
+	return res
+}
+
 // OptimizePreparedPlan optimizes the prepared statement.
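// When a parameter comes from a user variable that holds a binary literal
// (e.g. set @a = 0x3135, whose bytes spell "15"), GetVar surfaces it as a
// string, so the isGetVarBinaryLiteral check above is used here to restore
// the value to a BinaryLiteral before the parameter is bound.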
func (e *Execute) OptimizePreparedPlan(ctx context.Context, sctx sessionctx.Context, is infoschema.InfoSchema) error { vars := sctx.GetSessionVars() @@ -228,6 +243,13 @@ func (e *Execute) OptimizePreparedPlan(ctx context.Context, sctx sessionctx.Cont return err } param := prepared.Params[i].(*driver.ParamMarkerExpr) + if isGetVarBinaryLiteral(sctx, usingVar) { + binVal, convErr := val.ToBytes() + if convErr != nil { + return convErr + } + val.SetBinaryLiteral(types.BinaryLiteral(binVal)) + } param.Datum = val param.InExecute = true vars.PreparedParams = append(vars.PreparedParams, val) diff --git a/planner/core/expression_rewriter.go b/planner/core/expression_rewriter.go index f10134bb849c2..5d6a11ad982a9 100644 --- a/planner/core/expression_rewriter.go +++ b/planner/core/expression_rewriter.go @@ -1382,7 +1382,8 @@ func (er *expressionRewriter) inToExpression(lLen int, not bool, tp *types.Field er.ctxStackAppend(expression.NewNull(), types.EmptyName) return } - if leftEt == types.ETInt { + containMut := expression.ContainMutableConst(er.sctx, args) + if !containMut && leftEt == types.ETInt { for i := 1; i < len(args); i++ { if c, ok := args[i].(*expression.Constant); ok { var isExceptional bool diff --git a/planner/core/integration_test.go b/planner/core/integration_test.go index 1ce68cc2aa797..99dd359fdbb8b 100644 --- a/planner/core/integration_test.go +++ b/planner/core/integration_test.go @@ -2881,3 +2881,73 @@ func (s *testIntegrationSuite) TestIndexMergeTableFilter(c *C) { "10 1 1 10", )) } + +// #22949: test HexLiteral Used in GetVar expr +func (s *testIntegrationSuite) TestGetVarExprWithHexLiteral(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test;") + tk.MustExec("drop table if exists t1_no_idx;") + tk.MustExec("create table t1_no_idx(id int, col_bit bit(16));") + tk.MustExec("insert into t1_no_idx values(1, 0x3135);") + tk.MustExec("insert into t1_no_idx values(2, 0x0f);") + + tk.MustExec("prepare stmt from 'select id from t1_no_idx where col_bit = ?';") + tk.MustExec("set @a = 0x3135;") + tk.MustQuery("execute stmt using @a;").Check(testkit.Rows("1")) + tk.MustExec("set @a = 0x0F;") + tk.MustQuery("execute stmt using @a;").Check(testkit.Rows("2")) + + // same test, but use IN expr + tk.MustExec("prepare stmt from 'select id from t1_no_idx where col_bit in (?)';") + tk.MustExec("set @a = 0x3135;") + tk.MustQuery("execute stmt using @a;").Check(testkit.Rows("1")) + tk.MustExec("set @a = 0x0F;") + tk.MustQuery("execute stmt using @a;").Check(testkit.Rows("2")) + + // same test, but use table with index on col_bit + tk.MustExec("drop table if exists t2_idx;") + tk.MustExec("create table t2_idx(id int, col_bit bit(16), key(col_bit));") + tk.MustExec("insert into t2_idx values(1, 0x3135);") + tk.MustExec("insert into t2_idx values(2, 0x0f);") + + tk.MustExec("prepare stmt from 'select id from t2_idx where col_bit = ?';") + tk.MustExec("set @a = 0x3135;") + tk.MustQuery("execute stmt using @a;").Check(testkit.Rows("1")) + tk.MustExec("set @a = 0x0F;") + tk.MustQuery("execute stmt using @a;").Check(testkit.Rows("2")) + + // same test, but use IN expr + tk.MustExec("prepare stmt from 'select id from t2_idx where col_bit in (?)';") + tk.MustExec("set @a = 0x3135;") + tk.MustQuery("execute stmt using @a;").Check(testkit.Rows("1")) + tk.MustExec("set @a = 0x0F;") + tk.MustQuery("execute stmt using @a;").Check(testkit.Rows("2")) + + // test col varchar with GetVar + tk.MustExec("drop table if exists t_varchar;") + tk.MustExec("create table t_varchar(id int, col_varchar 
varchar(100), key(col_varchar));") + tk.MustExec("insert into t_varchar values(1, '15');") + tk.MustExec("prepare stmt from 'select id from t_varchar where col_varchar = ?';") + tk.MustExec("set @a = 0x3135;") + tk.MustQuery("execute stmt using @a;").Check(testkit.Rows("1")) +} + +// test BitLiteral used with GetVar +func (s *testIntegrationSuite) TestGetVarExprWithBitLiteral(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test;") + tk.MustExec("drop table if exists t1_no_idx;") + tk.MustExec("create table t1_no_idx(id int, col_bit bit(16));") + tk.MustExec("insert into t1_no_idx values(1, 0x3135);") + tk.MustExec("insert into t1_no_idx values(2, 0x0f);") + + tk.MustExec("prepare stmt from 'select id from t1_no_idx where col_bit = ?';") + // 0b11000100110101 is 0x3135 + tk.MustExec("set @a = 0b11000100110101;") + tk.MustQuery("execute stmt using @a;").Check(testkit.Rows("1")) + + // same test, but use IN expr + tk.MustExec("prepare stmt from 'select id from t1_no_idx where col_bit in (?)';") + tk.MustExec("set @a = 0b11000100110101;") + tk.MustQuery("execute stmt using @a;").Check(testkit.Rows("1")) +} diff --git a/planner/core/prepare_test.go b/planner/core/prepare_test.go index 6cdc6af508c56..db2d4d619396c 100644 --- a/planner/core/prepare_test.go +++ b/planner/core/prepare_test.go @@ -674,6 +674,18 @@ func (s *testPrepareSerialSuite) TestConstPropAndPPDWithCache(c *C) { tk.MustQuery("execute stmt using @p0").Check(testkit.Rows( "0", )) + + // Need to check if contain mutable before RefineCompareConstant() in inToExpression(). + // Otherwise may hit wrong plan. + tk.MustExec("use test;") + tk.MustExec("drop table if exists t1;") + tk.MustExec("create table t1(c1 tinyint unsigned);") + tk.MustExec("insert into t1 values(111);") + tk.MustExec("prepare stmt from 'select 1 from t1 where c1 in (?)';") + tk.MustExec("set @a = '1.1';") + tk.MustQuery("execute stmt using @a;").Check(testkit.Rows()) + tk.MustExec("set @a = '111';") + tk.MustQuery("execute stmt using @a;").Check(testkit.Rows("1")) } func (s *testPlanSerialSuite) TestPlanCacheUnionScan(c *C) { diff --git a/types/field_type.go b/types/field_type.go index a98c7634f5156..d5272fa699002 100644 --- a/types/field_type.go +++ b/types/field_type.go @@ -261,8 +261,8 @@ func DefaultTypeForValue(value interface{}, tp *FieldType, char string, collate tp.Flag |= mysql.UnsignedFlag SetBinChsClnFlag(tp) case BinaryLiteral: - tp.Tp = mysql.TypeBit - tp.Flen = len(x) * 8 + tp.Tp = mysql.TypeVarString + tp.Flen = len(x) tp.Decimal = 0 SetBinChsClnFlag(tp) tp.Flag &= ^mysql.BinaryFlag From 5b892a86b835e22611f2d2d2c8012ffcad95e76c Mon Sep 17 00:00:00 2001 From: Shirly Date: Wed, 17 Mar 2021 16:04:55 +0800 Subject: [PATCH 05/44] store/tikv:remove the usage of kv.TransactionOption (#23352) --- store/driver/tikv_driver.go | 16 +++++++++++++++- store/mockstore/unistore.go | 13 +++++++++++-- store/tikv/2pc_test.go | 4 ++-- store/tikv/kv.go | 28 +++++++--------------------- 4 files changed, 35 insertions(+), 26 deletions(-) diff --git a/store/driver/tikv_driver.go b/store/driver/tikv_driver.go index ad0ce7078ee50..af533a19c8f6f 100644 --- a/store/driver/tikv_driver.go +++ b/store/driver/tikv_driver.go @@ -30,6 +30,7 @@ import ( "github.com/pingcap/tidb/store/gcworker" "github.com/pingcap/tidb/store/tikv" "github.com/pingcap/tidb/store/tikv/config" + "github.com/pingcap/tidb/store/tikv/oracle" "github.com/pingcap/tidb/util/execdetails" "github.com/pingcap/tidb/util/logutil" pd "github.com/tikv/pd/client" @@ -304,10 +305,23 @@ func (s 
*tikvStore) Begin() (kv.Transaction, error) {
 
 // BeginWithOption begins a transaction with given option
 func (s *tikvStore) BeginWithOption(option kv.TransactionOption) (kv.Transaction, error) {
-	txn, err := s.KVStore.BeginWithOption(option)
+	txnScope := option.TxnScope
+	if txnScope == "" {
+		txnScope = oracle.GlobalTxnScope
+	}
+	var txn *tikv.KVTxn
+	var err error
+	if option.StartTS != nil {
+		txn, err = s.BeginWithStartTS(txnScope, *option.StartTS)
+	} else if option.PrevSec != nil {
+		txn, err = s.BeginWithExactStaleness(txnScope, *option.PrevSec)
+	} else {
+		txn, err = s.BeginWithTxnScope(txnScope)
+	}
 	if err != nil {
 		return nil, errors.Trace(err)
 	}
+
 	return txn_driver.NewTiKVTxn(txn), err
 }
diff --git a/store/mockstore/unistore.go b/store/mockstore/unistore.go
index 6842fb59f8d9f..f1b68d6600256 100644
--- a/store/mockstore/unistore.go
+++ b/store/mockstore/unistore.go
@@ -23,6 +23,7 @@ import (
 	"github.com/pingcap/tidb/store/mockstore/unistore"
 	"github.com/pingcap/tidb/store/tikv"
 	"github.com/pingcap/tidb/store/tikv/config"
+	"github.com/pingcap/tidb/store/tikv/oracle"
 	"github.com/pingcap/tidb/util/execdetails"
 )
 
@@ -97,8 +98,16 @@ func (s *mockStorage) Begin() (kv.Transaction, error) {
 
 // BeginWithOption begins a transaction with given option
 func (s *mockStorage) BeginWithOption(option kv.TransactionOption) (kv.Transaction, error) {
-	txn, err := s.KVStore.BeginWithOption(option)
-	return newTiKVTxn(txn, err)
+	txnScope := option.TxnScope
+	if txnScope == "" {
+		txnScope = oracle.GlobalTxnScope
+	}
+	if option.StartTS != nil {
+		return newTiKVTxn(s.BeginWithStartTS(txnScope, *option.StartTS))
+	} else if option.PrevSec != nil {
+		return newTiKVTxn(s.BeginWithExactStaleness(txnScope, *option.PrevSec))
+	}
+	return newTiKVTxn(s.BeginWithTxnScope(txnScope))
 }
 
 // GetSnapshot gets a snapshot that is able to read any data whose version is <= ver.
diff --git a/store/tikv/2pc_test.go b/store/tikv/2pc_test.go
index ad89dbc48e0a7..3541c7bb3cd00 100644
--- a/store/tikv/2pc_test.go
+++ b/store/tikv/2pc_test.go
@@ -599,12 +599,12 @@ func (s *testCommitterSuite) TestRejectCommitTS(c *C) {
 	// Use math.MaxUint64 to read the data, and the read succeeds.
 	// That means the final commitTS > startTS+2, so it's not the one we provided.
 	// So we cover the retry commitTS logic.
-	txn1, err := s.store.BeginWithOption(kv.TransactionOption{}.SetTxnScope(oracle.GlobalTxnScope).SetStartTs(committer.startTS + 2))
+	txn1, err := s.store.BeginWithStartTS(oracle.GlobalTxnScope, committer.startTS+2)
 	c.Assert(err, IsNil)
 	_, err = txn1.Get(bo.ctx, []byte("x"))
 	c.Assert(kv.IsErrNotFound(err), IsTrue)
-	txn2, err := s.store.BeginWithOption(kv.TransactionOption{}.SetTxnScope(oracle.GlobalTxnScope).SetStartTs(math.MaxUint64))
+	txn2, err := s.store.BeginWithStartTS(oracle.GlobalTxnScope, math.MaxUint64)
 	c.Assert(err, IsNil)
 	val, err := txn2.Get(bo.ctx, []byte("x"))
 	c.Assert(err, IsNil)
diff --git a/store/tikv/kv.go b/store/tikv/kv.go
index 549b3211d4c53..6930f59d6a1ef 100644
--- a/store/tikv/kv.go
+++ b/store/tikv/kv.go
@@ -167,25 +167,11 @@ func (s *KVStore) runSafePointChecker() {
 
 // Begin a global transaction.
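// It is equivalent to BeginWithTxnScope(oracle.GlobalTxnScope). Callers that
// previously went through kv.TransactionOption can now use the scope-specific
// constructors directly, e.g. (startTS being a placeholder timestamp):
//
//	txn, err := store.BeginWithStartTS(oracle.GlobalTxnScope, startTS)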
func (s *KVStore) Begin() (*KVTxn, error) { - return s.beginWithTxnScope(oracle.GlobalTxnScope) + return s.BeginWithTxnScope(oracle.GlobalTxnScope) } -// BeginWithOption begins a transaction with given option -func (s *KVStore) BeginWithOption(option kv.TransactionOption) (*KVTxn, error) { - txnScope := option.TxnScope - if txnScope == "" { - txnScope = oracle.GlobalTxnScope - } - if option.StartTS != nil { - return s.beginWithStartTS(txnScope, *option.StartTS) - } else if option.PrevSec != nil { - return s.beginWithExactStaleness(txnScope, *option.PrevSec) - } - return s.beginWithTxnScope(txnScope) -} - -// beginWithTxnScope begins a transaction with the given txnScope (local or global) -func (s *KVStore) beginWithTxnScope(txnScope string) (*KVTxn, error) { +// BeginWithTxnScope begins a transaction with the given txnScope (local or global) +func (s *KVStore) BeginWithTxnScope(txnScope string) (*KVTxn, error) { txn, err := newTiKVTxn(s, txnScope) if err != nil { return nil, errors.Trace(err) @@ -193,8 +179,8 @@ func (s *KVStore) beginWithTxnScope(txnScope string) (*KVTxn, error) { return txn, nil } -// beginWithStartTS begins a transaction with startTS. -func (s *KVStore) beginWithStartTS(txnScope string, startTS uint64) (*KVTxn, error) { +// BeginWithStartTS begins a transaction with startTS. +func (s *KVStore) BeginWithStartTS(txnScope string, startTS uint64) (*KVTxn, error) { txn, err := newTiKVTxnWithStartTS(s, txnScope, startTS, s.nextReplicaReadSeed()) if err != nil { return nil, errors.Trace(err) @@ -202,8 +188,8 @@ func (s *KVStore) beginWithStartTS(txnScope string, startTS uint64) (*KVTxn, err return txn, nil } -// beginWithExactStaleness begins transaction with given staleness -func (s *KVStore) beginWithExactStaleness(txnScope string, prevSec uint64) (*KVTxn, error) { +// BeginWithExactStaleness begins transaction with given staleness +func (s *KVStore) BeginWithExactStaleness(txnScope string, prevSec uint64) (*KVTxn, error) { txn, err := newTiKVTxnWithExactStaleness(s, txnScope, prevSec) if err != nil { return nil, errors.Trace(err) From 9f7ed0f44ad8827effa36cf4aca5b3445fe39123 Mon Sep 17 00:00:00 2001 From: Zijie Lu Date: Wed, 17 Mar 2021 16:20:56 +0800 Subject: [PATCH 06/44] planner: push aggregation operators down to projection by default (#22090) --- cmd/explaintest/r/tpch.result | 98 +++++++++---------- planner/core/rule_aggregation_push_down.go | 2 +- .../integration_serial_suite_out.json | 68 ++++++------- planner/core/testdata/plan_suite_out.json | 17 ++-- .../testdata/plan_suite_unexported_out.json | 2 +- planner/core/testdata/stats_suite_out.json | 8 +- 6 files changed, 94 insertions(+), 101 deletions(-) diff --git a/cmd/explaintest/r/tpch.result b/cmd/explaintest/r/tpch.result index 11a84495b2b37..addf03e16b5b8 100644 --- a/cmd/explaintest/r/tpch.result +++ b/cmd/explaintest/r/tpch.result @@ -447,8 +447,8 @@ l_year; id estRows task access object operator info Sort 769.96 root tpch.nation.n_name, tpch.nation.n_name, Column#50 └─Projection 769.96 root tpch.nation.n_name, tpch.nation.n_name, Column#50, Column#52 - └─HashAgg 769.96 root group by:Column#50, tpch.nation.n_name, tpch.nation.n_name, funcs:sum(Column#51)->Column#52, funcs:firstrow(tpch.nation.n_name)->tpch.nation.n_name, funcs:firstrow(tpch.nation.n_name)->tpch.nation.n_name, funcs:firstrow(Column#50)->Column#50 - └─Projection 1957240.42 root tpch.nation.n_name, tpch.nation.n_name, extract(YEAR, tpch.lineitem.l_shipdate)->Column#50, mul(tpch.lineitem.l_extendedprice, minus(1, 
tpch.lineitem.l_discount))->Column#51 + └─HashAgg 769.96 root group by:Column#59, Column#60, Column#61, funcs:sum(Column#55)->Column#52, funcs:firstrow(Column#56)->tpch.nation.n_name, funcs:firstrow(Column#57)->tpch.nation.n_name, funcs:firstrow(Column#58)->Column#50 + └─Projection 1957240.42 root mul(tpch.lineitem.l_extendedprice, minus(1, tpch.lineitem.l_discount))->Column#55, tpch.nation.n_name, tpch.nation.n_name, extract(YEAR, tpch.lineitem.l_shipdate)->Column#58, tpch.nation.n_name, tpch.nation.n_name, extract(YEAR, tpch.lineitem.l_shipdate)->Column#61 └─Projection 1957240.42 root tpch.lineitem.l_extendedprice, tpch.lineitem.l_discount, tpch.lineitem.l_shipdate, tpch.nation.n_name, tpch.nation.n_name └─HashJoin 1957240.42 root inner join, equal:[eq(tpch.customer.c_nationkey, tpch.nation.n_nationkey)], other cond:or(and(eq(tpch.nation.n_name, "JAPAN"), eq(tpch.nation.n_name, "INDIA")), and(eq(tpch.nation.n_name, "INDIA"), eq(tpch.nation.n_name, "JAPAN"))) ├─TableReader(Build) 2.00 root data:Selection @@ -521,36 +521,35 @@ id estRows task access object operator info Sort 719.02 root Column#62 └─Projection 719.02 root Column#62, div(Column#64, Column#65)->Column#66 └─HashAgg 719.02 root group by:Column#78, funcs:sum(Column#75)->Column#64, funcs:sum(Column#76)->Column#65, funcs:firstrow(Column#77)->Column#62 - └─Projection 563136.02 root case(eq(tpch.nation.n_name, INDIA), Column#63, 0)->Column#75, Column#63, Column#62, Column#62 - └─Projection 563136.02 root extract(YEAR, tpch.orders.o_orderdate)->Column#62, mul(tpch.lineitem.l_extendedprice, minus(1, tpch.lineitem.l_discount))->Column#63, tpch.nation.n_name - └─Projection 563136.02 root tpch.lineitem.l_extendedprice, tpch.lineitem.l_discount, tpch.orders.o_orderdate, tpch.nation.n_name - └─HashJoin 563136.02 root inner join, equal:[eq(tpch.supplier.s_nationkey, tpch.nation.n_nationkey)] - ├─TableReader(Build) 25.00 root data:TableFullScan - │ └─TableFullScan 25.00 cop[tikv] table:n2 keep order:false - └─HashJoin(Probe) 563136.02 root inner join, equal:[eq(tpch.lineitem.l_suppkey, tpch.supplier.s_suppkey)] - ├─TableReader(Build) 500000.00 root data:TableFullScan - │ └─TableFullScan 500000.00 cop[tikv] table:supplier keep order:false - └─HashJoin(Probe) 563136.02 root inner join, equal:[eq(tpch.lineitem.l_partkey, tpch.part.p_partkey)] - ├─TableReader(Build) 61674.00 root data:Selection - │ └─Selection 61674.00 cop[tikv] eq(tpch.part.p_type, "SMALL PLATED COPPER") - │ └─TableFullScan 10000000.00 cop[tikv] table:part keep order:false - └─IndexHashJoin(Probe) 90788402.51 root inner join, inner:IndexLookUp, outer key:tpch.orders.o_orderkey, inner key:tpch.lineitem.l_orderkey, equal cond:eq(tpch.orders.o_orderkey, tpch.lineitem.l_orderkey) - ├─HashJoin(Build) 22413367.93 root inner join, equal:[eq(tpch.customer.c_custkey, tpch.orders.o_custkey)] - │ ├─HashJoin(Build) 1500000.00 root inner join, equal:[eq(tpch.nation.n_nationkey, tpch.customer.c_nationkey)] - │ │ ├─HashJoin(Build) 5.00 root inner join, equal:[eq(tpch.region.r_regionkey, tpch.nation.n_regionkey)] - │ │ │ ├─TableReader(Build) 1.00 root data:Selection - │ │ │ │ └─Selection 1.00 cop[tikv] eq(tpch.region.r_name, "ASIA") - │ │ │ │ └─TableFullScan 5.00 cop[tikv] table:region keep order:false - │ │ │ └─TableReader(Probe) 25.00 root data:TableFullScan - │ │ │ └─TableFullScan 25.00 cop[tikv] table:n1 keep order:false - │ │ └─TableReader(Probe) 7500000.00 root data:TableFullScan - │ │ └─TableFullScan 7500000.00 cop[tikv] table:customer keep order:false - │ └─TableReader(Probe) 
22413367.93 root data:Selection - │ └─Selection 22413367.93 cop[tikv] ge(tpch.orders.o_orderdate, 1995-01-01 00:00:00.000000), le(tpch.orders.o_orderdate, 1996-12-31 00:00:00.000000) - │ └─TableFullScan 75000000.00 cop[tikv] table:orders keep order:false - └─IndexLookUp(Probe) 4.05 root - ├─IndexRangeScan(Build) 4.05 cop[tikv] table:lineitem, index:PRIMARY(L_ORDERKEY, L_LINENUMBER) range: decided by [eq(tpch.lineitem.l_orderkey, tpch.orders.o_orderkey)], keep order:false - └─TableRowIDScan(Probe) 4.05 cop[tikv] table:lineitem keep order:false + └─Projection 563136.02 root case(eq(tpch.nation.n_name, INDIA), mul(tpch.lineitem.l_extendedprice, minus(1, tpch.lineitem.l_discount)), 0)->Column#75, mul(tpch.lineitem.l_extendedprice, minus(1, tpch.lineitem.l_discount))->Column#76, extract(YEAR, tpch.orders.o_orderdate)->Column#77, extract(YEAR, tpch.orders.o_orderdate)->Column#78 + └─Projection 563136.02 root tpch.lineitem.l_extendedprice, tpch.lineitem.l_discount, tpch.orders.o_orderdate, tpch.nation.n_name + └─HashJoin 563136.02 root inner join, equal:[eq(tpch.supplier.s_nationkey, tpch.nation.n_nationkey)] + ├─TableReader(Build) 25.00 root data:TableFullScan + │ └─TableFullScan 25.00 cop[tikv] table:n2 keep order:false + └─HashJoin(Probe) 563136.02 root inner join, equal:[eq(tpch.lineitem.l_suppkey, tpch.supplier.s_suppkey)] + ├─TableReader(Build) 500000.00 root data:TableFullScan + │ └─TableFullScan 500000.00 cop[tikv] table:supplier keep order:false + └─HashJoin(Probe) 563136.02 root inner join, equal:[eq(tpch.lineitem.l_partkey, tpch.part.p_partkey)] + ├─TableReader(Build) 61674.00 root data:Selection + │ └─Selection 61674.00 cop[tikv] eq(tpch.part.p_type, "SMALL PLATED COPPER") + │ └─TableFullScan 10000000.00 cop[tikv] table:part keep order:false + └─IndexHashJoin(Probe) 90788402.51 root inner join, inner:IndexLookUp, outer key:tpch.orders.o_orderkey, inner key:tpch.lineitem.l_orderkey, equal cond:eq(tpch.orders.o_orderkey, tpch.lineitem.l_orderkey) + ├─HashJoin(Build) 22413367.93 root inner join, equal:[eq(tpch.customer.c_custkey, tpch.orders.o_custkey)] + │ ├─HashJoin(Build) 1500000.00 root inner join, equal:[eq(tpch.nation.n_nationkey, tpch.customer.c_nationkey)] + │ │ ├─HashJoin(Build) 5.00 root inner join, equal:[eq(tpch.region.r_regionkey, tpch.nation.n_regionkey)] + │ │ │ ├─TableReader(Build) 1.00 root data:Selection + │ │ │ │ └─Selection 1.00 cop[tikv] eq(tpch.region.r_name, "ASIA") + │ │ │ │ └─TableFullScan 5.00 cop[tikv] table:region keep order:false + │ │ │ └─TableReader(Probe) 25.00 root data:TableFullScan + │ │ │ └─TableFullScan 25.00 cop[tikv] table:n1 keep order:false + │ │ └─TableReader(Probe) 7500000.00 root data:TableFullScan + │ │ └─TableFullScan 7500000.00 cop[tikv] table:customer keep order:false + │ └─TableReader(Probe) 22413367.93 root data:Selection + │ └─Selection 22413367.93 cop[tikv] ge(tpch.orders.o_orderdate, 1995-01-01 00:00:00.000000), le(tpch.orders.o_orderdate, 1996-12-31 00:00:00.000000) + │ └─TableFullScan 75000000.00 cop[tikv] table:orders keep order:false + └─IndexLookUp(Probe) 4.05 root + ├─IndexRangeScan(Build) 4.05 cop[tikv] table:lineitem, index:PRIMARY(L_ORDERKEY, L_LINENUMBER) range: decided by [eq(tpch.lineitem.l_orderkey, tpch.orders.o_orderkey)], keep order:false + └─TableRowIDScan(Probe) 4.05 cop[tikv] table:lineitem keep order:false /* Q9 Product Type Profit Measure Query This query determines how much profit is made on a given line of parts, broken out by supplier nation and year. 
@@ -597,8 +596,8 @@ o_year desc; id estRows task access object operator info Sort 2406.00 root tpch.nation.n_name, Column#53:desc └─Projection 2406.00 root tpch.nation.n_name, Column#53, Column#55 - └─HashAgg 2406.00 root group by:Column#53, tpch.nation.n_name, funcs:sum(Column#54)->Column#55, funcs:firstrow(tpch.nation.n_name)->tpch.nation.n_name, funcs:firstrow(Column#53)->Column#53 - └─Projection 241379546.70 root tpch.nation.n_name, extract(YEAR, tpch.orders.o_orderdate)->Column#53, minus(mul(tpch.lineitem.l_extendedprice, minus(1, tpch.lineitem.l_discount)), mul(tpch.partsupp.ps_supplycost, tpch.lineitem.l_quantity))->Column#54 + └─HashAgg 2406.00 root group by:Column#67, Column#68, funcs:sum(Column#64)->Column#55, funcs:firstrow(Column#65)->tpch.nation.n_name, funcs:firstrow(Column#66)->Column#53 + └─Projection 241379546.70 root minus(mul(tpch.lineitem.l_extendedprice, minus(1, tpch.lineitem.l_discount)), mul(tpch.partsupp.ps_supplycost, tpch.lineitem.l_quantity))->Column#64, tpch.nation.n_name, extract(YEAR, tpch.orders.o_orderdate)->Column#66, tpch.nation.n_name, extract(YEAR, tpch.orders.o_orderdate)->Column#68 └─Projection 241379546.70 root tpch.lineitem.l_quantity, tpch.lineitem.l_extendedprice, tpch.lineitem.l_discount, tpch.partsupp.ps_supplycost, tpch.orders.o_orderdate, tpch.nation.n_name └─HashJoin 241379546.70 root inner join, equal:[eq(tpch.lineitem.l_orderkey, tpch.orders.o_orderkey)] ├─TableReader(Build) 75000000.00 root data:TableFullScan @@ -1160,20 +1159,19 @@ Sort 20000.00 root tpch.supplier.s_name │ └─TableReader(Probe) 500000.00 root data:TableFullScan │ └─TableFullScan 500000.00 cop[tikv] table:supplier keep order:false └─HashAgg(Probe) 257492.04 root group by:tpch.partsupp.ps_suppkey, funcs:firstrow(tpch.partsupp.ps_suppkey)->tpch.partsupp.ps_suppkey - └─Projection 257492.04 root tpch.partsupp.ps_suppkey - └─Selection 257492.04 root gt(cast(tpch.partsupp.ps_availqty, decimal(20,0) BINARY), mul(0.5, Column#44)) - └─HashAgg 321865.05 root group by:tpch.partsupp.ps_partkey, tpch.partsupp.ps_suppkey, funcs:firstrow(tpch.partsupp.ps_suppkey)->tpch.partsupp.ps_suppkey, funcs:firstrow(tpch.partsupp.ps_availqty)->tpch.partsupp.ps_availqty, funcs:sum(tpch.lineitem.l_quantity)->Column#44 - └─HashJoin 9711455.06 root left outer join, equal:[eq(tpch.partsupp.ps_partkey, tpch.lineitem.l_partkey) eq(tpch.partsupp.ps_suppkey, tpch.lineitem.l_suppkey)] - ├─IndexHashJoin(Build) 321865.05 root inner join, inner:IndexLookUp, outer key:tpch.part.p_partkey, inner key:tpch.partsupp.ps_partkey, equal cond:eq(tpch.part.p_partkey, tpch.partsupp.ps_partkey) - │ ├─TableReader(Build) 80007.93 root data:Selection - │ │ └─Selection 80007.93 cop[tikv] like(tpch.part.p_name, "green%", 92) - │ │ └─TableFullScan 10000000.00 cop[tikv] table:part keep order:false - │ └─IndexLookUp(Probe) 4.02 root - │ ├─IndexRangeScan(Build) 4.02 cop[tikv] table:partsupp, index:PRIMARY(PS_PARTKEY, PS_SUPPKEY) range: decided by [eq(tpch.partsupp.ps_partkey, tpch.part.p_partkey)], keep order:false - │ └─TableRowIDScan(Probe) 4.02 cop[tikv] table:partsupp keep order:false - └─TableReader(Probe) 44189356.65 root data:Selection - └─Selection 44189356.65 cop[tikv] ge(tpch.lineitem.l_shipdate, 1993-01-01 00:00:00.000000), lt(tpch.lineitem.l_shipdate, 1994-01-01) - └─TableFullScan 300005811.00 cop[tikv] table:lineitem keep order:false + └─Selection 257492.04 root gt(cast(tpch.partsupp.ps_availqty, decimal(20,0) BINARY), mul(0.5, Column#44)) + └─HashAgg 321865.05 root group by:tpch.partsupp.ps_partkey, 
tpch.partsupp.ps_suppkey, funcs:firstrow(tpch.partsupp.ps_suppkey)->tpch.partsupp.ps_suppkey, funcs:firstrow(tpch.partsupp.ps_availqty)->tpch.partsupp.ps_availqty, funcs:sum(tpch.lineitem.l_quantity)->Column#44 + └─HashJoin 9711455.06 root left outer join, equal:[eq(tpch.partsupp.ps_partkey, tpch.lineitem.l_partkey) eq(tpch.partsupp.ps_suppkey, tpch.lineitem.l_suppkey)] + ├─IndexHashJoin(Build) 321865.05 root inner join, inner:IndexLookUp, outer key:tpch.part.p_partkey, inner key:tpch.partsupp.ps_partkey, equal cond:eq(tpch.part.p_partkey, tpch.partsupp.ps_partkey) + │ ├─TableReader(Build) 80007.93 root data:Selection + │ │ └─Selection 80007.93 cop[tikv] like(tpch.part.p_name, "green%", 92) + │ │ └─TableFullScan 10000000.00 cop[tikv] table:part keep order:false + │ └─IndexLookUp(Probe) 4.02 root + │ ├─IndexRangeScan(Build) 4.02 cop[tikv] table:partsupp, index:PRIMARY(PS_PARTKEY, PS_SUPPKEY) range: decided by [eq(tpch.partsupp.ps_partkey, tpch.part.p_partkey)], keep order:false + │ └─TableRowIDScan(Probe) 4.02 cop[tikv] table:partsupp keep order:false + └─TableReader(Probe) 44189356.65 root data:Selection + └─Selection 44189356.65 cop[tikv] ge(tpch.lineitem.l_shipdate, 1993-01-01 00:00:00.000000), lt(tpch.lineitem.l_shipdate, 1994-01-01) + └─TableFullScan 300005811.00 cop[tikv] table:lineitem keep order:false /* Q21 Suppliers Who Kept Orders Waiting Query This query identifies certain suppliers who were not able to ship required parts in a timely manner. @@ -1298,8 +1296,8 @@ cntrycode; id estRows task access object operator info Sort 1.00 root Column#27 └─Projection 1.00 root Column#27, Column#28, Column#29 - └─HashAgg 1.00 root group by:Column#27, funcs:count(1)->Column#28, funcs:sum(tpch.customer.c_acctbal)->Column#29, funcs:firstrow(Column#27)->Column#27 - └─Projection 0.00 root substring(tpch.customer.c_phone, 1, 2)->Column#27, tpch.customer.c_acctbal + └─HashAgg 1.00 root group by:Column#33, funcs:count(1)->Column#28, funcs:sum(Column#31)->Column#29, funcs:firstrow(Column#32)->Column#27 + └─Projection 0.00 root tpch.customer.c_acctbal, substring(tpch.customer.c_phone, 1, 2)->Column#32, substring(tpch.customer.c_phone, 1, 2)->Column#33 └─HashJoin 0.00 root anti semi join, equal:[eq(tpch.customer.c_custkey, tpch.orders.o_custkey)] ├─TableReader(Build) 75000000.00 root data:TableFullScan │ └─TableFullScan 75000000.00 cop[tikv] table:orders keep order:false diff --git a/planner/core/rule_aggregation_push_down.go b/planner/core/rule_aggregation_push_down.go index 800915e345448..b3dd0e4b95295 100644 --- a/planner/core/rule_aggregation_push_down.go +++ b/planner/core/rule_aggregation_push_down.go @@ -424,7 +424,7 @@ func (a *aggregationPushDownSolver) aggPushDown(p LogicalPlan) (_ LogicalPlan, e p = proj } } - } else if proj, ok1 := child.(*LogicalProjection); ok1 && p.SCtx().GetSessionVars().AllowAggPushDown { + } else if proj, ok1 := child.(*LogicalProjection); ok1 { // TODO: This optimization is not always reasonable. We have not supported pushing projection to kv layer yet, // so we must do this optimization. 
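			// e.g. for "select count(b) from (select id + 1 as b from t) a",
			// count(id+1) is now rewritten below the Projection unconditionally,
			// no longer gated on the AllowAggPushDown session setting.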
for i, gbyItem := range agg.GroupByItems { diff --git a/planner/core/testdata/integration_serial_suite_out.json b/planner/core/testdata/integration_serial_suite_out.json index 09df62e2b7d27..736bcf4da5dfd 100644 --- a/planner/core/testdata/integration_serial_suite_out.json +++ b/planner/core/testdata/integration_serial_suite_out.json @@ -1204,9 +1204,8 @@ "Plan": [ "HashAgg 1.00 root funcs:count(Column#7)->Column#5", "└─TableReader 1.00 root data:HashAgg", - " └─HashAgg 1.00 batchCop[tiflash] funcs:count(Column#4)->Column#7", - " └─Projection 10000.00 batchCop[tiflash] plus(test.t.id, 1)->Column#4", - " └─TableFullScan 10000.00 batchCop[tiflash] table:t keep order:false, stats:pseudo" + " └─HashAgg 1.00 batchCop[tiflash] funcs:count(plus(test.t.id, 1))->Column#7", + " └─TableFullScan 10000.00 batchCop[tiflash] table:t keep order:false, stats:pseudo" ] }, { @@ -1223,18 +1222,17 @@ "Plan": [ "HashAgg 1.00 root funcs:sum(Column#7)->Column#5", "└─TableReader 1.00 root data:HashAgg", - " └─HashAgg 1.00 batchCop[tiflash] funcs:sum(Column#4)->Column#7", - " └─Projection 10000.00 batchCop[tiflash] plus(test.t.id, 1)->Column#4", - " └─TableFullScan 10000.00 batchCop[tiflash] table:t keep order:false, stats:pseudo" + " └─HashAgg 1.00 batchCop[tiflash] funcs:sum(plus(test.t.id, 1))->Column#7", + " └─TableFullScan 10000.00 batchCop[tiflash] table:t keep order:false, stats:pseudo" ] }, { "SQL": "desc format = 'brief' select /*+ stream_agg()*/ count(b) from (select id + 1 as b from t)A", "Plan": [ - "StreamAgg 1.00 root funcs:count(Column#4)->Column#5", - "└─Projection 10000.00 root plus(test.t.id, 1)->Column#4", - " └─TableReader 10000.00 root data:TableFullScan", - " └─TableFullScan 10000.00 cop[tiflash] table:t keep order:false, stats:pseudo" + "StreamAgg 1.00 root funcs:count(Column#7)->Column#5", + "└─TableReader 1.00 root data:StreamAgg", + " └─StreamAgg 1.00 batchCop[tiflash] funcs:count(plus(test.t.id, 1))->Column#7", + " └─TableFullScan 10000.00 batchCop[tiflash] table:t keep order:false, stats:pseudo" ] }, { @@ -1250,10 +1248,9 @@ "SQL": "desc format = 'brief' select /*+ stream_agg()*/ sum(b) from (select id + 1 as b from t)A", "Plan": [ "StreamAgg 1.00 root funcs:sum(Column#7)->Column#5", - "└─Projection 10000.00 root cast(Column#4, decimal(41,0) BINARY)->Column#7", - " └─Projection 10000.00 root plus(test.t.id, 1)->Column#4", - " └─TableReader 10000.00 root data:TableFullScan", - " └─TableFullScan 10000.00 cop[tiflash] table:t keep order:false, stats:pseudo" + "└─TableReader 1.00 root data:StreamAgg", + " └─StreamAgg 1.00 batchCop[tiflash] funcs:sum(plus(test.t.id, 1))->Column#7", + " └─TableFullScan 10000.00 batchCop[tiflash] table:t keep order:false, stats:pseudo" ] }, { @@ -1345,12 +1342,11 @@ { "SQL": "desc format = 'brief' select /*+ hash_agg()*/ count(b) from (select id + 1 as b from t)A", "Plan": [ - "HashAgg 1.00 root funcs:count(Column#7)->Column#5", + "HashAgg 1.00 root funcs:count(Column#8)->Column#5", "└─TableReader 1.00 root data:ExchangeSender", " └─ExchangeSender 1.00 batchCop[tiflash] ExchangeType: PassThrough", - " └─HashAgg 1.00 batchCop[tiflash] funcs:count(Column#4)->Column#7", - " └─Projection 10000.00 batchCop[tiflash] plus(test.t.id, 1)->Column#4", - " └─TableFullScan 10000.00 batchCop[tiflash] table:t keep order:false, stats:pseudo" + " └─HashAgg 1.00 batchCop[tiflash] funcs:count(plus(test.t.id, 1))->Column#8", + " └─TableFullScan 10000.00 batchCop[tiflash] table:t keep order:false, stats:pseudo" ] }, { @@ -1366,21 +1362,20 @@ { "SQL": "desc format = 'brief' select 
/*+ hash_agg()*/ sum(b) from (select id + 1 as b from t)A", "Plan": [ - "HashAgg 1.00 root funcs:sum(Column#7)->Column#5", + "HashAgg 1.00 root funcs:sum(Column#8)->Column#5", "└─TableReader 1.00 root data:ExchangeSender", " └─ExchangeSender 1.00 batchCop[tiflash] ExchangeType: PassThrough", - " └─HashAgg 1.00 batchCop[tiflash] funcs:sum(Column#4)->Column#7", - " └─Projection 10000.00 batchCop[tiflash] plus(test.t.id, 1)->Column#4", - " └─TableFullScan 10000.00 batchCop[tiflash] table:t keep order:false, stats:pseudo" + " └─HashAgg 1.00 batchCop[tiflash] funcs:sum(plus(test.t.id, 1))->Column#8", + " └─TableFullScan 10000.00 batchCop[tiflash] table:t keep order:false, stats:pseudo" ] }, { "SQL": "desc format = 'brief' select /*+ stream_agg()*/ count(b) from (select id + 1 as b from t)A", "Plan": [ - "StreamAgg 1.00 root funcs:count(Column#4)->Column#5", - "└─Projection 10000.00 root plus(test.t.id, 1)->Column#4", - " └─TableReader 10000.00 root data:TableFullScan", - " └─TableFullScan 10000.00 cop[tiflash] table:t keep order:false, stats:pseudo" + "StreamAgg 1.00 root funcs:count(Column#7)->Column#5", + "└─TableReader 1.00 root data:StreamAgg", + " └─StreamAgg 1.00 batchCop[tiflash] funcs:count(plus(test.t.id, 1))->Column#7", + " └─TableFullScan 10000.00 batchCop[tiflash] table:t keep order:false, stats:pseudo" ] }, { @@ -1396,10 +1391,9 @@ "SQL": "desc format = 'brief' select /*+ stream_agg()*/ sum(b) from (select id + 1 as b from t)A", "Plan": [ "StreamAgg 1.00 root funcs:sum(Column#7)->Column#5", - "└─Projection 10000.00 root cast(Column#4, decimal(41,0) BINARY)->Column#7", - " └─Projection 10000.00 root plus(test.t.id, 1)->Column#4", - " └─TableReader 10000.00 root data:TableFullScan", - " └─TableFullScan 10000.00 cop[tiflash] table:t keep order:false, stats:pseudo" + "└─TableReader 1.00 root data:StreamAgg", + " └─StreamAgg 1.00 batchCop[tiflash] funcs:sum(plus(test.t.id, 1))->Column#7", + " └─TableFullScan 10000.00 batchCop[tiflash] table:t keep order:false, stats:pseudo" ] }, { @@ -1512,12 +1506,11 @@ { "SQL": "desc format = 'brief' select /*+ hash_agg()*/ count(b) from (select id + 1 as b from t)A", "Plan": [ - "HashAgg 1.00 root funcs:count(Column#7)->Column#5", + "HashAgg 1.00 root funcs:count(Column#8)->Column#5", "└─TableReader 1.00 root data:ExchangeSender", " └─ExchangeSender 1.00 batchCop[tiflash] ExchangeType: PassThrough", - " └─HashAgg 1.00 batchCop[tiflash] funcs:count(Column#4)->Column#7", - " └─Projection 10000.00 batchCop[tiflash] plus(test.t.id, 1)->Column#4", - " └─TableFullScan 10000.00 batchCop[tiflash] table:t keep order:false, stats:pseudo" + " └─HashAgg 1.00 batchCop[tiflash] funcs:count(plus(test.t.id, 1))->Column#8", + " └─TableFullScan 10000.00 batchCop[tiflash] table:t keep order:false, stats:pseudo" ] }, { @@ -1533,12 +1526,11 @@ { "SQL": "desc format = 'brief' select /*+ hash_agg()*/ sum(b) from (select id + 1 as b from t)A", "Plan": [ - "HashAgg 1.00 root funcs:sum(Column#7)->Column#5", + "HashAgg 1.00 root funcs:sum(Column#8)->Column#5", "└─TableReader 1.00 root data:ExchangeSender", " └─ExchangeSender 1.00 batchCop[tiflash] ExchangeType: PassThrough", - " └─HashAgg 1.00 batchCop[tiflash] funcs:sum(Column#4)->Column#7", - " └─Projection 10000.00 batchCop[tiflash] plus(test.t.id, 1)->Column#4", - " └─TableFullScan 10000.00 batchCop[tiflash] table:t keep order:false, stats:pseudo" + " └─HashAgg 1.00 batchCop[tiflash] funcs:sum(plus(test.t.id, 1))->Column#8", + " └─TableFullScan 10000.00 batchCop[tiflash] table:t keep order:false, stats:pseudo" ] }, { diff 
--git a/planner/core/testdata/plan_suite_out.json b/planner/core/testdata/plan_suite_out.json index 161ed9bb69001..2677b074a7ee7 100644 --- a/planner/core/testdata/plan_suite_out.json +++ b/planner/core/testdata/plan_suite_out.json @@ -1854,13 +1854,16 @@ { "SQL": "select distinct DATE_FORMAT(timestamp, '%Y-%m-%d %H') as tt from tc ;", "Plan": [ - "HashAgg 16000.00 root group by:Column#3, funcs:firstrow(Column#3)->Column#3", - "└─Projection 20000.00 root date_format(test.tc.timestamp, %Y-%m-%d %H)->Column#3", - " └─PartitionUnion 20000.00 root ", - " ├─TableReader 10000.00 root data:TableFullScan", - " │ └─TableFullScan 10000.00 cop[tikv] table:tc, partition:p2020072312 keep order:false, stats:pseudo", - " └─TableReader 10000.00 root data:TableFullScan", - " └─TableFullScan 10000.00 cop[tikv] table:tc, partition:p2020072313 keep order:false, stats:pseudo" + "HashAgg 16000.00 root group by:Column#5, funcs:firstrow(Column#6)->Column#3", + "└─PartitionUnion 16000.00 root ", + " ├─HashAgg 8000.00 root group by:Column#15, funcs:firstrow(Column#13)->Column#6, funcs:firstrow(Column#14)->Column#5", + " │ └─Projection 10000.00 root date_format(test.tc.timestamp, %Y-%m-%d %H)->Column#13, date_format(test.tc.timestamp, %Y-%m-%d %H)->Column#14, date_format(test.tc.timestamp, %Y-%m-%d %H)->Column#15", + " │ └─TableReader 10000.00 root data:TableFullScan", + " │ └─TableFullScan 10000.00 cop[tikv] table:tc, partition:p2020072312 keep order:false, stats:pseudo", + " └─HashAgg 8000.00 root group by:Column#18, funcs:firstrow(Column#16)->Column#6, funcs:firstrow(Column#17)->Column#5", + " └─Projection 10000.00 root date_format(test.tc.timestamp, %Y-%m-%d %H)->Column#16, date_format(test.tc.timestamp, %Y-%m-%d %H)->Column#17, date_format(test.tc.timestamp, %Y-%m-%d %H)->Column#18", + " └─TableReader 10000.00 root data:TableFullScan", + " └─TableFullScan 10000.00 cop[tikv] table:tc, partition:p2020072313 keep order:false, stats:pseudo" ], "Result": null } diff --git a/planner/core/testdata/plan_suite_unexported_out.json b/planner/core/testdata/plan_suite_unexported_out.json index a14246ddc1982..fb45c07f644e6 100644 --- a/planner/core/testdata/plan_suite_unexported_out.json +++ b/planner/core/testdata/plan_suite_unexported_out.json @@ -51,7 +51,7 @@ "Join{DataScan(t1)->DataScan(t2)}->Projection", "Join{DataScan(t1)->DataScan(t2)}->Projection", "LeftHashJoin{LeftHashJoin{TableReader(Table(t))->IndexLookUp(Index(t.c_d_e)[[666,666]], Table(t))}(test.t.a,test.t.b)->IndexReader(Index(t.c_d_e)[[42,42]])}(test.t.b,test.t.a)->Sel([or(Column#25, Column#38)])->Projection->Delete", - "LeftHashJoin{TableReader(Table(t))->IndexReader(Index(t.c_d_e)[[NULL,+inf]])->HashAgg}(test.t.b,test.t.c)->Update" + "LeftHashJoin{TableReader(Table(t))->IndexReader(Index(t.c_d_e)[[NULL,+inf]]->HashAgg)->HashAgg}(test.t.b,test.t.c)->Update" ] }, { diff --git a/planner/core/testdata/stats_suite_out.json b/planner/core/testdata/stats_suite_out.json index e284758839d89..ff1abdaac8e95 100644 --- a/planner/core/testdata/stats_suite_out.json +++ b/planner/core/testdata/stats_suite_out.json @@ -170,8 +170,8 @@ { "SQL": "select count(c3) from (select a as c1, b as c2, a+1 as c3 from t1) as tmp group by c2, c1", "Plan": [ - "StreamAgg 4.00 root group by:test.t1.a, test.t1.b, funcs:count(Column#4)->Column#5", - "└─Projection 4.00 root test.t1.a, test.t1.b, plus(test.t1.a, 1)->Column#4", + "StreamAgg 4.00 root group by:Column#10, Column#11, funcs:count(Column#9)->Column#5", + "└─Projection 4.00 root plus(test.t1.a, 1)->Column#9, test.t1.b, test.t1.a", 
" └─IndexReader 4.00 root index:IndexFullScan", " └─IndexFullScan 4.00 cop[tikv] table:t1, index:a(a, b) keep order:true" ] @@ -179,8 +179,8 @@ { "SQL": "select count(tmp.cmp) from (select t1.a as a, t1.b as b, (t1.b > (select t2.b from t2 where t2.a = t1.a)) as cmp from t1) tmp group by tmp.a, tmp.b", "Plan": [ - "StreamAgg 4.00 root group by:test.t1.a, test.t1.b, funcs:count(Column#7)->Column#8", - "└─Projection 4.00 root test.t1.a, test.t1.b, gt(test.t1.b, test.t2.b)->Column#7", + "StreamAgg 4.00 root group by:Column#11, Column#12, funcs:count(Column#10)->Column#8", + "└─Projection 4.00 root gt(test.t1.b, test.t2.b)->Column#10, test.t1.a, test.t1.b", " └─Apply 4.00 root CARTESIAN left outer join", " ├─IndexReader(Build) 4.00 root index:IndexFullScan", " │ └─IndexFullScan 4.00 cop[tikv] table:t1, index:a(a, b) keep order:true", From bb128642216b3bfc2d2f34cd750ef0d69bc961aa Mon Sep 17 00:00:00 2001 From: Tjianke <34013484+Tjianke@users.noreply.github.com> Date: Wed, 17 Mar 2021 16:36:55 +0800 Subject: [PATCH 07/44] sessionctx: fix err check (#23000) --- sessionctx/binloginfo/binloginfo_test.go | 34 +++++++++++++----- sessionctx/variable/varsutil_test.go | 45 ++++++++++++++++-------- 2 files changed, 55 insertions(+), 24 deletions(-) diff --git a/sessionctx/binloginfo/binloginfo_test.go b/sessionctx/binloginfo/binloginfo_test.go index a64d2684aec26..a6bb7a758a5e7 100644 --- a/sessionctx/binloginfo/binloginfo_test.go +++ b/sessionctx/binloginfo/binloginfo_test.go @@ -15,6 +15,7 @@ package binloginfo_test import ( "context" + "fmt" "net" "os" "strconv" @@ -50,7 +51,10 @@ import ( func TestT(t *testing.T) { CustomVerboseFlag = true logLevel := os.Getenv("log_level") - logutil.InitLogger(logutil.NewLogConfig(logLevel, logutil.DefaultLogFormat, "", logutil.EmptyFileLogConfig, false)) + err := logutil.InitLogger(logutil.NewLogConfig(logLevel, logutil.DefaultLogFormat, "", logutil.EmptyFileLogConfig, false)) + if err != nil { + t.Fatal(err) + } TestingT(t) } @@ -103,7 +107,10 @@ func (s *testBinlogSuite) SetUpSuite(c *C) { s.serv = grpc.NewServer(grpc.MaxRecvMsgSize(maxRecvMsgSize)) s.pump = new(mockBinlogPump) binlog.RegisterPumpServer(s.serv, s.pump) - go s.serv.Serve(l) + go func() { + err := s.serv.Serve(l) + c.Assert(err, IsNil) + }() opt := grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { return net.DialTimeout("unix", addr, timeout) }) @@ -122,11 +129,16 @@ func (s *testBinlogSuite) SetUpSuite(c *C) { } func (s *testBinlogSuite) TearDownSuite(c *C) { - s.ddl.Stop() + err := s.ddl.Stop() + c.Assert(err, IsNil) s.serv.Stop() - os.Remove(s.unixFile) + err = os.Remove(s.unixFile) + if err != nil { + c.Assert(err, ErrorMatches, fmt.Sprintf("remove %v: no such file or directory", s.unixFile)) + } s.domain.Close() - s.store.Close() + err = s.store.Close() + c.Assert(err, IsNil) } func (s *testBinlogSuite) TestBinlog(c *C) { @@ -307,7 +319,8 @@ func getLatestBinlogPrewriteValue(c *C, pump *mockBinlogPump) *binlog.PrewriteVa for i := len(pump.mu.payloads) - 1; i >= 0; i-- { payload := pump.mu.payloads[i] bin = new(binlog.Binlog) - bin.Unmarshal(payload) + err := bin.Unmarshal(payload) + c.Assert(err, IsNil) if bin.Tp == binlog.BinlogType_Prewrite { break } @@ -315,7 +328,8 @@ func getLatestBinlogPrewriteValue(c *C, pump *mockBinlogPump) *binlog.PrewriteVa pump.mu.Unlock() c.Assert(bin, NotNil) preVal := new(binlog.PrewriteValue) - preVal.Unmarshal(bin.PrewriteValue) + err := preVal.Unmarshal(bin.PrewriteValue) + c.Assert(err, IsNil) return preVal } @@ -324,7 +338,8 @@ 
func getLatestDDLBinlog(c *C, pump *mockBinlogPump, ddlQuery string) (preDDL, co for i := len(pump.mu.payloads) - 1; i >= 0; i-- { payload := pump.mu.payloads[i] bin := new(binlog.Binlog) - bin.Unmarshal(payload) + err := bin.Unmarshal(payload) + c.Assert(err, IsNil) if bin.Tp == binlog.BinlogType_Commit && bin.DdlJobId > 0 { commitDDL = bin } @@ -353,7 +368,8 @@ func checkBinlogCount(c *C, pump *mockBinlogPump) { for i := length - 1; i >= 0; i-- { payload := pump.mu.payloads[i] bin = new(binlog.Binlog) - bin.Unmarshal(payload) + err := bin.Unmarshal(payload) + c.Assert(err, IsNil) if bin.Tp == binlog.BinlogType_Prewrite { if bin.DdlJobId != 0 { ddlCount++ diff --git a/sessionctx/variable/varsutil_test.go b/sessionctx/variable/varsutil_test.go index b5ad1c5b20486..3668e16db1c72 100644 --- a/sessionctx/variable/varsutil_test.go +++ b/sessionctx/variable/varsutil_test.go @@ -149,7 +149,8 @@ func (s *testVarsutilSuite) TestVarsutil(c *C) { c.Assert(err, IsNil) c.Assert(val, Equals, "STRICT_TRANS_TABLES") c.Assert(v.StrictSQLMode, IsTrue) - SetSessionSystemVar(v, "sql_mode", types.NewStringDatum("")) + err = SetSessionSystemVar(v, "sql_mode", types.NewStringDatum("")) + c.Assert(err, IsNil) c.Assert(v.StrictSQLMode, IsFalse) err = SetSessionSystemVar(v, "character_set_connection", types.NewStringDatum("utf8")) @@ -191,7 +192,8 @@ func (s *testVarsutilSuite) TestVarsutil(c *C) { c.Assert(err, IsNil) c.Assert(v.TimeZone.String(), Equals, tt.expect) if tt.compareValue { - SetSessionSystemVar(v, TimeZone, types.NewStringDatum(tt.input)) + err = SetSessionSystemVar(v, TimeZone, types.NewStringDatum(tt.input)) + c.Assert(err, IsNil) t1 := time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC) t2 := time.Date(2000, 1, 1, 0, 0, 0, 0, v.TimeZone) c.Assert(t2.Sub(t1), Equals, tt.diff) @@ -203,7 +205,8 @@ func (s *testVarsutilSuite) TestVarsutil(c *C) { // Test case for sql mode. for str, mode := range mysql.Str2SQLMode { - SetSessionSystemVar(v, "sql_mode", types.NewStringDatum(str)) + err = SetSessionSystemVar(v, "sql_mode", types.NewStringDatum(str)) + c.Assert(err, IsNil) if modeParts, exists := mysql.CombinationSQLMode[str]; exists { for _, part := range modeParts { mode |= mysql.Str2SQLMode[part] @@ -224,17 +227,20 @@ func (s *testVarsutilSuite) TestVarsutil(c *C) { c.Assert(terror.ErrorEqual(err, ErrWrongValueForVar), IsTrue) // Combined sql_mode - SetSessionSystemVar(v, "sql_mode", types.NewStringDatum("REAL_AS_FLOAT,ANSI_QUOTES")) + err = SetSessionSystemVar(v, "sql_mode", types.NewStringDatum("REAL_AS_FLOAT,ANSI_QUOTES")) + c.Assert(err, IsNil) c.Assert(v.SQLMode, Equals, mysql.ModeRealAsFloat|mysql.ModeANSIQuotes) // Test case for tidb_index_serial_scan_concurrency. c.Assert(v.IndexSerialScanConcurrency(), Equals, DefIndexSerialScanConcurrency) - SetSessionSystemVar(v, TiDBIndexSerialScanConcurrency, types.NewStringDatum("4")) + err = SetSessionSystemVar(v, TiDBIndexSerialScanConcurrency, types.NewStringDatum("4")) + c.Assert(err, IsNil) c.Assert(v.IndexSerialScanConcurrency(), Equals, 4) // Test case for tidb_batch_insert. 
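// [Editor's note] The convention this commit applies throughout these tests:
// assign the error returned by each call and assert on it immediately, with
// c.Assert(err, IsNil) where the call must succeed, or ErrorMatches where a
// specific failure is tolerated (as in TearDownSuite above). A minimal
// illustration of the pattern, not part of the diff itself:
//
//	err = SetSessionSystemVar(v, TiDBBatchInsert, types.NewStringDatum("1"))
//	c.Assert(err, IsNil) // fail at the offending call, not at a later assertion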
c.Assert(v.BatchInsert, IsFalse) - SetSessionSystemVar(v, TiDBBatchInsert, types.NewStringDatum("1")) + err = SetSessionSystemVar(v, TiDBBatchInsert, types.NewStringDatum("1")) + c.Assert(err, IsNil) c.Assert(v.BatchInsert, IsTrue) c.Assert(v.InitChunkSize, Equals, 32) @@ -253,25 +259,29 @@ func (s *testVarsutilSuite) TestVarsutil(c *C) { c.Assert(err, IsNil) c.Assert(val, Equals, string(bVal)) - SetSessionSystemVar(v, TiDBEnableStreaming, types.NewStringDatum("1")) + err = SetSessionSystemVar(v, TiDBEnableStreaming, types.NewStringDatum("1")) + c.Assert(err, IsNil) val, err = GetSessionSystemVar(v, TiDBEnableStreaming) c.Assert(err, IsNil) c.Assert(val, Equals, "ON") c.Assert(v.EnableStreaming, Equals, true) - SetSessionSystemVar(v, TiDBEnableStreaming, types.NewStringDatum("0")) + err = SetSessionSystemVar(v, TiDBEnableStreaming, types.NewStringDatum("0")) + c.Assert(err, IsNil) val, err = GetSessionSystemVar(v, TiDBEnableStreaming) c.Assert(err, IsNil) c.Assert(val, Equals, "OFF") c.Assert(v.EnableStreaming, Equals, false) c.Assert(v.OptimizerSelectivityLevel, Equals, DefTiDBOptimizerSelectivityLevel) - SetSessionSystemVar(v, TiDBOptimizerSelectivityLevel, types.NewIntDatum(1)) + err = SetSessionSystemVar(v, TiDBOptimizerSelectivityLevel, types.NewIntDatum(1)) + c.Assert(err, IsNil) c.Assert(v.OptimizerSelectivityLevel, Equals, 1) err = SetSessionSystemVar(v, TiDBDDLReorgWorkerCount, types.NewIntDatum(-1)) c.Assert(terror.ErrorEqual(err, ErrWrongValueForVar), IsTrue) - SetSessionSystemVar(v, TiDBDDLReorgWorkerCount, types.NewIntDatum(int64(maxDDLReorgWorkerCount)+1)) + err = SetSessionSystemVar(v, TiDBDDLReorgWorkerCount, types.NewIntDatum(int64(maxDDLReorgWorkerCount)+1)) + c.Assert(err, NotNil) c.Assert(terror.ErrorEqual(err, ErrWrongValueForVar), IsTrue) err = SetSessionSystemVar(v, TiDBRetryLimit, types.NewStringDatum("3")) @@ -318,12 +328,14 @@ func (s *testVarsutilSuite) TestVarsutil(c *C) { c.Assert(val, Equals, "OFF") c.Assert(config.GetGlobalConfig().CheckMb4ValueInUTF8, Equals, false) - SetSessionSystemVar(v, TiDBLowResolutionTSO, types.NewStringDatum("1")) + err = SetSessionSystemVar(v, TiDBLowResolutionTSO, types.NewStringDatum("1")) + c.Assert(err, IsNil) val, err = GetSessionSystemVar(v, TiDBLowResolutionTSO) c.Assert(err, IsNil) c.Assert(val, Equals, "ON") c.Assert(v.LowResolutionTSO, Equals, true) - SetSessionSystemVar(v, TiDBLowResolutionTSO, types.NewStringDatum("0")) + err = SetSessionSystemVar(v, TiDBLowResolutionTSO, types.NewStringDatum("0")) + c.Assert(err, IsNil) val, err = GetSessionSystemVar(v, TiDBLowResolutionTSO) c.Assert(err, IsNil) c.Assert(val, Equals, "OFF") @@ -417,17 +429,20 @@ func (s *testVarsutilSuite) TestVarsutil(c *C) { c.Assert(val, Equals, "5.0") c.Assert(v.ConcurrencyFactor, Equals, 5.0) - SetSessionSystemVar(v, TiDBReplicaRead, types.NewStringDatum("follower")) + err = SetSessionSystemVar(v, TiDBReplicaRead, types.NewStringDatum("follower")) + c.Assert(err, IsNil) val, err = GetSessionSystemVar(v, TiDBReplicaRead) c.Assert(err, IsNil) c.Assert(val, Equals, "follower") c.Assert(v.GetReplicaRead(), Equals, kv.ReplicaReadFollower) - SetSessionSystemVar(v, TiDBReplicaRead, types.NewStringDatum("leader")) + err = SetSessionSystemVar(v, TiDBReplicaRead, types.NewStringDatum("leader")) + c.Assert(err, IsNil) val, err = GetSessionSystemVar(v, TiDBReplicaRead) c.Assert(err, IsNil) c.Assert(val, Equals, "leader") c.Assert(v.GetReplicaRead(), Equals, kv.ReplicaReadLeader) - SetSessionSystemVar(v, TiDBReplicaRead, 
types.NewStringDatum("leader-and-follower")) + err = SetSessionSystemVar(v, TiDBReplicaRead, types.NewStringDatum("leader-and-follower")) + c.Assert(err, IsNil) val, err = GetSessionSystemVar(v, TiDBReplicaRead) c.Assert(err, IsNil) c.Assert(val, Equals, "leader-and-follower") From 1ac53c546d9e242b0986695df9d082485cfc5c48 Mon Sep 17 00:00:00 2001 From: Kenan Yao Date: Wed, 17 Mar 2021 16:52:55 +0800 Subject: [PATCH 08/44] planner: fix wrong PointGet / TableDual plan reused in plan cache (#23238) --- executor/explainfor_test.go | 3 - executor/prepared_test.go | 2 - executor/seqtest/prepared_test.go | 4 +- planner/core/common_plans.go | 3 + planner/core/find_best_task.go | 2 +- planner/core/prepare_test.go | 93 +++++++++++++++++++++++++++++++ planner/core/stats.go | 16 +++--- 7 files changed, 107 insertions(+), 16 deletions(-) diff --git a/executor/explainfor_test.go b/executor/explainfor_test.go index 6589144a60669..955b1605cf727 100644 --- a/executor/explainfor_test.go +++ b/executor/explainfor_test.go @@ -479,7 +479,4 @@ func (s *testPrepareSerialSuite) TestPointGetUserVarPlanCache(c *C) { tk.MustQuery("execute stmt using @a").Check(testkit.Rows( "2 4 2 2", )) - tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows( - "1", - )) } diff --git a/executor/prepared_test.go b/executor/prepared_test.go index 52e6157835424..4900a1c77df3a 100644 --- a/executor/prepared_test.go +++ b/executor/prepared_test.go @@ -254,7 +254,6 @@ func (s *testSerialSuite) TestPlanCacheClusterIndex(c *C) { tk.MustExec(`set @v1 = 'a', @v2 = 'b'`) tk.MustQuery(`execute stmt1 using @v1`).Check(testkit.Rows("a 1 a 1")) tk.MustQuery(`execute stmt1 using @v2`).Check(testkit.Rows("b 2 b 2")) - tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) // case 2: tk.MustExec(`drop table if exists ta, tb`) @@ -266,7 +265,6 @@ func (s *testSerialSuite) TestPlanCacheClusterIndex(c *C) { tk.MustExec(`set @v1 = 'a', @v2 = 'b'`) tk.MustQuery(`execute stmt1 using @v1`).Check(testkit.Rows("a 1 1 1")) tk.MustQuery(`execute stmt1 using @v2`).Check(testkit.Rows("b 2 2 2")) - tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) tk.MustQuery(`execute stmt1 using @v2`).Check(testkit.Rows("b 2 2 2")) tkProcess = tk.Se.ShowProcess() ps = []*util.ProcessInfo{tkProcess} diff --git a/executor/seqtest/prepared_test.go b/executor/seqtest/prepared_test.go index 32bf549751d1f..5583586dc8385 100644 --- a/executor/seqtest/prepared_test.go +++ b/executor/seqtest/prepared_test.go @@ -490,14 +490,14 @@ func (s *seqTestSuite) TestPreparedInsert(c *C) { err = counter.Write(pb) c.Assert(err, IsNil) hit := pb.GetCounter().GetValue() - c.Check(hit, Equals, float64(3)) + c.Check(hit, Equals, float64(2)) } tk.MustExec(`set @a=3; execute stmt_insert_select using @a;`) if flag { err = counter.Write(pb) c.Assert(err, IsNil) hit := pb.GetCounter().GetValue() - c.Check(hit, Equals, float64(4)) + c.Check(hit, Equals, float64(2)) } result = tk.MustQuery("select id, c1 from prepare_test where id = ?", 101) diff --git a/planner/core/common_plans.go b/planner/core/common_plans.go index 0fa0840a44f95..ffb42db8d87b2 100644 --- a/planner/core/common_plans.go +++ b/planner/core/common_plans.go @@ -425,6 +425,9 @@ REBUILD: // short paths for these executions, currently "point select" and "point update" func (e *Execute) tryCachePointPlan(ctx context.Context, sctx sessionctx.Context, preparedStmt *CachedPrepareStmt, is infoschema.InfoSchema, p Plan) error { + if sctx.GetSessionVars().StmtCtx.OptimDependOnMutableConst { + return nil + 
} var ( prepared = preparedStmt.PreparedAst ok bool diff --git a/planner/core/find_best_task.go b/planner/core/find_best_task.go index 4c4d18a15ea2b..f6c07ce9da0b6 100644 --- a/planner/core/find_best_task.go +++ b/planner/core/find_best_task.go @@ -528,7 +528,7 @@ func (ds *DataSource) skylinePruning(prop *property.PhysicalProperty) []*candida continue } // if we already know the range of the scan is empty, just return a TableDual - if len(path.Ranges) == 0 && !ds.ctx.GetSessionVars().StmtCtx.UseCache { + if len(path.Ranges) == 0 { return []*candidatePath{{path: path}} } if path.StoreType != kv.TiFlash && (prop.TaskTp == property.CopTiFlashLocalReadTaskType || prop.TaskTp == property.CopTiFlashGlobalReadTaskType) { diff --git a/planner/core/prepare_test.go b/planner/core/prepare_test.go index db2d4d619396c..446f4913686f4 100644 --- a/planner/core/prepare_test.go +++ b/planner/core/prepare_test.go @@ -1036,3 +1036,96 @@ func (s *testPlanSerialSuite) TestPlanCacheSnapshot(c *C) { tk.MustQuery("execute stmt using @p").Check(testkit.Rows("1")) tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) } + +func (s *testPlanSerialSuite) TestPlanCachePointGetAndTableDual(c *C) { + store, _, err := newStoreWithBootstrap() + c.Assert(err, IsNil) + tk := testkit.NewTestKit(c, store) + orgEnable := core.PreparedPlanCacheEnabled() + defer func() { + store.Close() + core.SetPreparedPlanCache(orgEnable) + }() + core.SetPreparedPlanCache(true) + + tk.Se, err = session.CreateSession4TestWithOpt(store, &session.Opt{ + PreparedPlanCache: kvcache.NewSimpleLRUCache(100, 0.1, math.MaxUint64), + }) + c.Assert(err, IsNil) + + tk.MustExec("use test") + tk.MustExec("drop table if exists t0, t1, t2, t3, t4") + + tk.MustExec("create table t0(c1 varchar(20), c2 varchar(20), c3 bigint(20), primary key(c1, c2))") + tk.MustExec("insert into t0 values('0000','7777',1)") + tk.MustExec("prepare s0 from 'select * from t0 where c1=? and c2>=? and c2<=?'") + tk.MustExec("set @a0='0000', @b0='9999'") + // TableDual plan would be built, we should not cache it. + tk.MustQuery("execute s0 using @a0, @b0, @a0").Check(testkit.Rows()) + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0")) + // Must not reuse the previous TableDual plan. + tk.MustQuery("execute s0 using @a0, @a0, @b0").Check(testkit.Rows("0000 7777 1")) + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0")) + + tk.MustExec("create table t1(c1 varchar(20), c2 varchar(20), c3 bigint(20), primary key(c1, c2))") + tk.MustExec("insert into t1 values('0000','7777',1)") + tk.MustExec("prepare s1 from 'select * from t1 where c1=? and c2>=? and c2<=?'") + tk.MustExec("set @a1='0000', @b1='9999'") + // PointGet plan would be built, we should not cache it. + tk.MustQuery("execute s1 using @a1, @b1, @b1").Check(testkit.Rows()) + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0")) + // Must not reuse the previous PointGet plan. + tk.MustQuery("execute s1 using @a1, @a1, @b1").Check(testkit.Rows("0000 7777 1")) + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0")) + + tk.MustExec("create table t2(c1 bigint(20) primary key, c2 varchar(20))") + tk.MustExec("insert into t2 values(1,'7777')") + tk.MustExec("prepare s2 from 'select * from t2 where c1>=? and c1<=?'") + tk.MustExec("set @a2=0, @b2=9") + // PointGet plan would be built, we should not cache it. 
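(Editor's note: with both parameters bound to @a2, the predicate collapses to the point range [0,0], so the optimizer builds a PointGet that hard-codes key 0. If that plan were cached, the later execution over @a2..@b2 would probe key 0 only and silently miss the row (1,'7777'); that is why last_plan_from_cache is asserted to stay 0 on both executions below.)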
+ tk.MustQuery("execute s2 using @a2, @a2").Check(testkit.Rows()) + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0")) + // Must not reuse the previous PointGet plan. + tk.MustQuery("execute s2 using @a2, @b2").Check(testkit.Rows("1 7777")) + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0")) + + tk.MustExec("create table t3(c1 int, c2 int, c3 int, unique key(c1), key(c2))") + tk.MustExec("insert into t3 values(2,1,1)") + tk.MustExec("prepare s3 from 'select /*+ use_index_merge(t3) */ * from t3 where (c1 >= ? and c1 <= ?) or c2 > 1'") + tk.MustExec("set @a3=1,@b3=3") + // PointGet partial plan would be built, we should not cache it. + tk.MustQuery("execute s3 using @a3,@a3").Check(testkit.Rows()) + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0")) + // Must not reuse the previous IndexMerge with partial PointGet plan. + tk.MustQuery("execute s3 using @a3,@b3").Check(testkit.Rows("2 1 1")) + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0")) + + tk.MustExec("prepare s3 from 'select /*+ use_index_merge(t3) */ * from t3 where (c1 >= ? and c1 <= ?) or c2 > 1'") + tk.MustExec("set @a3=1,@b3=3") + // TableDual partial plan would be built, we should not cache it. + tk.MustQuery("execute s3 using @b3,@a3").Check(testkit.Rows()) + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0")) + // Must not reuse the previous IndexMerge with partial TableDual plan. + tk.MustQuery("execute s3 using @a3,@b3").Check(testkit.Rows("2 1 1")) + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0")) + + tk.MustExec("create table t4(c1 int primary key, c2 int, c3 int, key(c2))") + tk.MustExec("insert into t4 values(2,1,1)") + tk.MustExec("prepare s4 from 'select /*+ use_index_merge(t4) */ * from t4 where (c1 >= ? and c1 <= ?) or c2 > 1'") + tk.MustExec("set @a4=1,@b4=3") + // PointGet partial plan would be built, we should not cache it. + tk.MustQuery("execute s4 using @a4,@a4").Check(testkit.Rows()) + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0")) + // Must not reuse the previous IndexMerge with partial PointGet plan. + tk.MustQuery("execute s4 using @a4,@b4").Check(testkit.Rows("2 1 1")) + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0")) + + tk.MustExec("prepare s4 from 'select /*+ use_index_merge(t4) */ * from t4 where (c1 >= ? and c1 <= ?) or c2 > 1'") + tk.MustExec("set @a4=1,@b4=3") + // TableDual partial plan would be built, we should not cache it. + tk.MustQuery("execute s4 using @b4,@a4").Check(testkit.Rows()) + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0")) + // Must not reuse the previous IndexMerge with partial TableDual plan. 
+ tk.MustQuery("execute s4 using @a4,@b4").Check(testkit.Rows("2 1 1")) + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0")) +} diff --git a/planner/core/stats.go b/planner/core/stats.go index b9c3f7563a47a..2e2d62fe9b714 100644 --- a/planner/core/stats.go +++ b/planner/core/stats.go @@ -285,6 +285,7 @@ func (ds *DataSource) DeriveStats(childStats []*property.StatsInfo, selfSchema * if noIntervalRanges || len(path.Ranges) == 0 { ds.possibleAccessPaths[0] = path ds.possibleAccessPaths = ds.possibleAccessPaths[:1] + ds.ctx.GetSessionVars().StmtCtx.OptimDependOnMutableConst = true break } continue @@ -294,6 +295,7 @@ func (ds *DataSource) DeriveStats(childStats []*property.StatsInfo, selfSchema * if (noIntervalRanges && path.Index.Unique) || len(path.Ranges) == 0 { ds.possibleAccessPaths[0] = path ds.possibleAccessPaths = ds.possibleAccessPaths[:1] + ds.ctx.GetSessionVars().StmtCtx.OptimDependOnMutableConst = true break } } @@ -506,10 +508,8 @@ func (ds *DataSource) accessPathsForConds(conditions []expression.Expression, us logutil.BgLogger().Debug("can not derive statistics of a path", zap.Error(err)) continue } - // If `AccessConds` is empty, we ignore the access path. - // If the path contains a full range, ignore it also. This can happen when `AccessConds` is constant true, and - // it comes from the result of a subquery, so it is not folded. - if len(path.AccessConds) == 0 || ranger.HasFullRange(path.Ranges) { + // If the path contains a full range, ignore it. + if ranger.HasFullRange(path.Ranges) { continue } // If we have point or empty range, just remove other possible paths. @@ -520,6 +520,7 @@ func (ds *DataSource) accessPathsForConds(conditions []expression.Expression, us results[0] = path results = results[:1] } + ds.ctx.GetSessionVars().StmtCtx.OptimDependOnMutableConst = true break } } else { @@ -533,10 +534,8 @@ func (ds *DataSource) accessPathsForConds(conditions []expression.Expression, us continue } noIntervalRanges := ds.deriveIndexPathStats(path, conditions, true) - // If `AccessConds` is empty, we ignore the access path. - // If the path contains a full range, ignore it also. This can happen when `AccessConds` is constant true, and - // it comes from the result of a subquery, so it is not folded. - if len(path.AccessConds) == 0 || ranger.HasFullRange(path.Ranges) { + // If the path contains a full range, ignore it. + if ranger.HasFullRange(path.Ranges) { continue } // If we have empty range, or point range on unique index, just remove other possible paths. 
@@ -547,6 +546,7 @@ func (ds *DataSource) accessPathsForConds(conditions []expression.Expression, us results[0] = path results = results[:1] } + ds.ctx.GetSessionVars().StmtCtx.OptimDependOnMutableConst = true break } } From 080ed4ee7b5b492fcc17bdc881100f3478aa7118 Mon Sep 17 00:00:00 2001 From: Shirly Date: Wed, 17 Mar 2021 17:13:30 +0800 Subject: [PATCH 09/44] store/tikv:move mockCommitErrorEnable from kv to tikv since it is only used in tikv (#23355) Co-authored-by: Ti Chi Robot <71242396+ti-chi-bot@users.noreply.github.com> Co-authored-by: disksing --- ddl/table.go | 3 ++- kv/txn.go | 19 ------------------- kv/txn_test.go | 13 ------------- session/session.go | 4 ++-- store/tikv/kv.go | 2 +- store/tikv/test_util.go | 20 ++++++++++++++++++++ store/tikv/tikv_test.go | 13 +++++++++++++ store/tikv/txn.go | 4 ++-- 8 files changed, 40 insertions(+), 38 deletions(-) diff --git a/ddl/table.go b/ddl/table.go index 263ad12fe460a..668de3ac41c05 100644 --- a/ddl/table.go +++ b/ddl/table.go @@ -33,6 +33,7 @@ import ( "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/meta" "github.com/pingcap/tidb/meta/autoid" + "github.com/pingcap/tidb/store/tikv" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/table/tables" "github.com/pingcap/tidb/tablecodec" @@ -328,7 +329,7 @@ func (w *worker) onRecoverTable(d *ddlCtx, t *meta.Meta, job *model.Job) (ver in failpoint.Inject("mockRecoverTableCommitErr", func(val failpoint.Value) { if val.(bool) && atomic.CompareAndSwapUint32(&mockRecoverTableCommitErrOnce, 0, 1) { - kv.MockCommitErrorEnable() + tikv.MockCommitErrorEnable() } }) diff --git a/kv/txn.go b/kv/txn.go index 00dad8a7d3b19..c5c21b8348263 100644 --- a/kv/txn.go +++ b/kv/txn.go @@ -17,7 +17,6 @@ import ( "context" "math" "math/rand" - "sync/atomic" "time" "github.com/pingcap/parser/terror" @@ -94,24 +93,6 @@ func BackOff(attempts uint) int { return int(sleep) } -// mockCommitErrorEnable uses to enable `mockCommitError` and only mock error once. -var mockCommitErrorEnable = int64(0) - -// MockCommitErrorEnable exports for gofail testing. -func MockCommitErrorEnable() { - atomic.StoreInt64(&mockCommitErrorEnable, 1) -} - -// MockCommitErrorDisable exports for gofail testing. -func MockCommitErrorDisable() { - atomic.StoreInt64(&mockCommitErrorEnable, 0) -} - -// IsMockCommitErrorEnable exports for gofail testing. -func IsMockCommitErrorEnable() bool { - return atomic.LoadInt64(&mockCommitErrorEnable) == 1 -} - // TxnInfo is used to keep track the info of a committed transaction (mainly for diagnosis and testing) type TxnInfo struct { TxnScope string `json:"txn_scope"` diff --git a/kv/txn_test.go b/kv/txn_test.go index 38e85ad354bfd..475a956ffdb36 100644 --- a/kv/txn_test.go +++ b/kv/txn_test.go @@ -77,16 +77,3 @@ func (s *testTxnSuite) TestRetryExceedCountError(c *C) { }) c.Assert(err, NotNil) } - -func (s *testTxnSuite) TestBasicFunc(c *C) { - if IsMockCommitErrorEnable() { - defer MockCommitErrorEnable() - } else { - defer MockCommitErrorDisable() - } - - MockCommitErrorEnable() - c.Assert(IsMockCommitErrorEnable(), IsTrue) - MockCommitErrorDisable() - c.Assert(IsMockCommitErrorEnable(), IsFalse) -} diff --git a/session/session.go b/session/session.go index 4763407ca5886..e37d3bcbac962 100644 --- a/session/session.go +++ b/session/session.go @@ -453,8 +453,8 @@ func (s *session) doCommit(ctx context.Context) error { // mockCommitError and mockGetTSErrorInRetry use to test PR #8743. 
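// [Editor's sketch] How a test drives this after the move: the failpoint gate
// and the one-shot tikv-side flag are armed together, and the Inject body
// below clears the flag before returning, so exactly one commit attempt
// fails. The failpoint path follows the usual package-path convention and is
// shown here for illustration only:
//
//	c.Assert(failpoint.Enable("github.com/pingcap/tidb/session/mockCommitError", `return(true)`), IsNil)
//	defer func() { c.Assert(failpoint.Disable("github.com/pingcap/tidb/session/mockCommitError"), IsNil) }()
//	tikv.MockCommitErrorEnable() // consumed by the next commit, see below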
failpoint.Inject("mockCommitError", func(val failpoint.Value) { - if val.(bool) && kv.IsMockCommitErrorEnable() { - kv.MockCommitErrorDisable() + if val.(bool) && tikv.IsMockCommitErrorEnable() { + tikv.MockCommitErrorDisable() failpoint.Return(kv.ErrTxnRetryable) } }) diff --git a/store/tikv/kv.go b/store/tikv/kv.go index 6930f59d6a1ef..d59081bb36b9d 100644 --- a/store/tikv/kv.go +++ b/store/tikv/kv.go @@ -254,7 +254,7 @@ func (s *KVStore) getTimestampWithRetry(bo *Backoffer, txnScope string) (uint64, // Before PR #8743, we don't cleanup txn after meet error such as error like: PD server timeout // This may cause duplicate data to be written. failpoint.Inject("mockGetTSErrorInRetry", func(val failpoint.Value) { - if val.(bool) && !kv.IsMockCommitErrorEnable() { + if val.(bool) && !IsMockCommitErrorEnable() { err = ErrPDServerTimeout.GenWithStackByArgs("mock PD timeout") } }) diff --git a/store/tikv/test_util.go b/store/tikv/test_util.go index ea309f33fd789..7f4623ac20b00 100644 --- a/store/tikv/test_util.go +++ b/store/tikv/test_util.go @@ -14,6 +14,8 @@ package tikv import ( + "sync/atomic" + "github.com/google/uuid" "github.com/pingcap/errors" pd "github.com/tikv/pd/client" @@ -42,3 +44,21 @@ func NewTestTiKVStore(client Client, pdClient pd.Client, clientHijack func(Clien tikvStore.mock = true return tikvStore, errors.Trace(err) } + +// mockCommitErrorEnable uses to enable `mockCommitError` and only mock error once. +var mockCommitErrorEnable = int64(0) + +// MockCommitErrorEnable exports for gofail testing. +func MockCommitErrorEnable() { + atomic.StoreInt64(&mockCommitErrorEnable, 1) +} + +// MockCommitErrorDisable exports for gofail testing. +func MockCommitErrorDisable() { + atomic.StoreInt64(&mockCommitErrorEnable, 0) +} + +// IsMockCommitErrorEnable exports for gofail testing. 
+func IsMockCommitErrorEnable() bool { + return atomic.LoadInt64(&mockCommitErrorEnable) == 1 +} diff --git a/store/tikv/tikv_test.go b/store/tikv/tikv_test.go index a97e0bcdae8c2..81af86f998ed3 100644 --- a/store/tikv/tikv_test.go +++ b/store/tikv/tikv_test.go @@ -41,3 +41,16 @@ type testTiKVSuite struct { } var _ = Suite(&testTiKVSuite{}) + +func (s *testTiKVSuite) TestBasicFunc(c *C) { + if IsMockCommitErrorEnable() { + defer MockCommitErrorEnable() + } else { + defer MockCommitErrorDisable() + } + + MockCommitErrorEnable() + c.Assert(IsMockCommitErrorEnable(), IsTrue) + MockCommitErrorDisable() + c.Assert(IsMockCommitErrorEnable(), IsFalse) +} diff --git a/store/tikv/txn.go b/store/tikv/txn.go index 382586cd746f9..d0d4b0703880b 100644 --- a/store/tikv/txn.go +++ b/store/tikv/txn.go @@ -222,8 +222,8 @@ func (txn *KVTxn) Commit(ctx context.Context) error { defer txn.close() failpoint.Inject("mockCommitError", func(val failpoint.Value) { - if val.(bool) && kv.IsMockCommitErrorEnable() { - kv.MockCommitErrorDisable() + if val.(bool) && IsMockCommitErrorEnable() { + MockCommitErrorDisable() failpoint.Return(errors.New("mock commit error")) } }) From 77713d228d32aba0c5e0deae04dc0f2f4f9704ab Mon Sep 17 00:00:00 2001 From: Lei Zhao Date: Wed, 17 Mar 2021 18:32:55 +0800 Subject: [PATCH 10/44] store/tikv: forward requests by unary call (#23362) --- store/tikv/client.go | 15 ++++- store/tikv/client_test.go | 85 +++++++++++++++++++++++++--- store/tikv/config/config.go | 12 ++++ store/tikv/mock_tikv_service_test.go | 56 +++++++++++++++++- store/tikv/region_cache.go | 4 +- store/tikv/tikvrpc/tikvrpc.go | 37 ++++++------ 6 files changed, 178 insertions(+), 31 deletions(-) diff --git a/store/tikv/client.go b/store/tikv/client.go index 910ee19bad5f3..f79a8fb25c3a8 100644 --- a/store/tikv/client.go +++ b/store/tikv/client.go @@ -47,6 +47,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/encoding/gzip" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" ) // MaxRecvMsgSize set max gRPC receive message size received from server. If any message size is larger than @@ -70,6 +71,9 @@ const ( grpcInitialConnWindowSize = 1 << 30 ) +// forwardMetadataKey is the key of gRPC metadata which represents a forwarded request. +const forwardMetadataKey = "tikv-forwarded-host" + // Client is a client that sends RPC. // It should not be used after calling Close(). type Client interface { @@ -354,6 +358,7 @@ func (c *RPCClient) SendRequest(ctx context.Context, addr string, req *tikvrpc.R c.recycleMu.Unlock() } + // enableBatch means TiDB can send BatchCommands to the connection. It doesn't mean TiDB must do it. // TiDB will not send batch commands to TiFlash, to resolve the conflict with Batch Cop Request. enableBatch := req.StoreTp != kv.TiDB && req.StoreTp != kv.TiFlash c.recycleMu.RLock() @@ -363,9 +368,13 @@ func (c *RPCClient) SendRequest(ctx context.Context, addr string, req *tikvrpc.R return nil, errors.Trace(err) } + // TiDB uses [gRPC-metadata](https://github.com/grpc/grpc-go/blob/master/Documentation/grpc-metadata.md) to + // indicate a request needs forwarding. gRPC doesn't support setting a metadata for each request in a stream, + // so we don't use BatchCommands for forwarding for now. + canBatch := enableBatch && req.ForwardedHost == "" // TiDB RPC server supports batch RPC, but batch connection will send heart beat, It's not necessary since // request to TiDB is not high frequency. 
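// [Editor's sketch] The forwarding contract is carried entirely by gRPC
// metadata via the grpc-go metadata package imported above: the sender tags
// the outgoing context (as done a few lines below for non-batch requests) and
// a forwarding-aware receiver reads the tag back. Addresses and the relay
// helper are made up for illustration:
//
//	// sender
//	ctx = metadata.AppendToOutgoingContext(ctx, forwardMetadataKey, "10.0.1.5:20160")
//
//	// receiving store
//	if md, ok := metadata.FromIncomingContext(ctx); ok {
//		if hosts := md.Get(forwardMetadataKey); len(hosts) > 0 {
//			relayTo(hosts[0]) // hypothetical relay step
//		}
//	}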
- if config.GetGlobalConfig().TiKVClient.MaxBatchSize > 0 && enableBatch { + if config.GetGlobalConfig().TiKVClient.MaxBatchSize > 0 && canBatch { if batchReq := req.ToBatchCommandsRequest(); batchReq != nil { defer trace.StartRegion(ctx, req.Type.String()).End() return sendBatchRequest(ctx, addr, connArray.batchConn, batchReq, timeout) @@ -387,6 +396,10 @@ func (c *RPCClient) SendRequest(ctx context.Context, addr string, req *tikvrpc.R client := tikvpb.NewTikvClient(clientConn) + // Set metadata for request forwarding. Needn't forward DebugReq. + if req.ForwardedHost != "" { + ctx = metadata.AppendToOutgoingContext(ctx, forwardMetadataKey, req.ForwardedHost) + } switch req.Type { case tikvrpc.CmdBatchCop: return c.getBatchCopStreamResponse(ctx, client, req, timeout, connArray) diff --git a/store/tikv/client_test.go b/store/tikv/client_test.go index 1ce1326731fd4..fed26779f5196 100644 --- a/store/tikv/client_test.go +++ b/store/tikv/client_test.go @@ -17,16 +17,19 @@ import ( "context" "fmt" "sync" + "sync/atomic" "testing" "time" . "github.com/pingcap/check" "github.com/pingcap/errors" + "github.com/pingcap/kvproto/pkg/coprocessor" "github.com/pingcap/kvproto/pkg/kvrpcpb" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/tikvpb" "github.com/pingcap/tidb/store/tikv/config" "github.com/pingcap/tidb/store/tikv/tikvrpc" + "google.golang.org/grpc/metadata" ) func TestT(t *testing.T) { @@ -46,15 +49,10 @@ var _ = Suite(&testClientSuite{}) var _ = SerialSuites(&testClientFailSuite{}) var _ = SerialSuites(&testClientSerialSuite{}) -func setMaxBatchSize(size uint) { - newConf := config.DefaultConfig() - newConf.TiKVClient.MaxBatchSize = size - config.StoreGlobalConfig(&newConf) -} - func (s *testClientSerialSuite) TestConn(c *C) { - maxBatchSize := config.GetGlobalConfig().TiKVClient.MaxBatchSize - setMaxBatchSize(0) + defer config.UpdateGlobal(func(conf *config.Config) { + conf.TiKVClient.MaxBatchSize = 0 + })() client := NewRPCClient(config.Security{}) @@ -70,7 +68,6 @@ func (s *testClientSerialSuite) TestConn(c *C) { conn3, err := client.getConnArray(addr, true) c.Assert(err, NotNil) c.Assert(conn3, IsNil) - setMaxBatchSize(maxBatchSize) } func (s *testClientSuite) TestRemoveCanceledRequests(c *C) { @@ -229,3 +226,73 @@ func (s *testClientSuite) TestCollapseResolveLock(c *C) { default: } } + +func (s *testClientSuite) TestForwardMetadata(c *C) { + server, port := startMockTikvService() + c.Assert(port > 0, IsTrue) + defer server.Stop() + addr := fmt.Sprintf("%s:%d", "127.0.0.1", port) + + // Enable batch and limit the connection count to 1 so that + // there is only one BatchCommands stream. + defer config.UpdateGlobal(func(conf *config.Config) { + conf.TiKVClient.MaxBatchSize = 128 + conf.TiKVClient.GrpcConnectionCount = 1 + })() + rpcClient := NewRPCClient(config.Security{}) + defer rpcClient.closeConns() + + var checkCnt uint64 + // Check no corresponding metadata if ForwardedHost is empty. + server.setMetaChecker(func(ctx context.Context) error { + atomic.AddUint64(&checkCnt, 1) + // gRPC may set some metadata by default, e.g. "content-type". + md, ok := metadata.FromIncomingContext(ctx) + if ok { + vals := md.Get(forwardMetadataKey) + c.Assert(len(vals), Equals, 0) + } + return nil + }) + + // Prewrite represents unary-unary call.
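	// (Editor's note: the three request shapes exercised here differ in how
	// often the server-side checker fires. Un-forwarded KvPrewrite rides the
	// single long-lived BatchCommands stream, so three executions yield one
	// metadata check; each CopStream call opens its own stream and is checked
	// once per call; and once ForwardedHost is set, every request is sent as
	// its own RPC and is therefore checked individually.)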
+ prewriteReq := tikvrpc.NewRequest(tikvrpc.CmdPrewrite, &kvrpcpb.PrewriteRequest{}) + for i := 0; i < 3; i++ { + _, err := rpcClient.SendRequest(context.Background(), addr, prewriteReq, 10*time.Second) + c.Assert(err, IsNil) + } + // checkCnt should be 1 because BatchCommands is a stream-stream call. + c.Assert(atomic.LoadUint64(&checkCnt), Equals, uint64(1)) + + // CopStream represents unary-stream call. + copStreamReq := tikvrpc.NewRequest(tikvrpc.CmdCopStream, &coprocessor.Request{}) + _, err := rpcClient.SendRequest(context.Background(), addr, copStreamReq, 10*time.Second) + c.Assert(err, IsNil) + c.Assert(atomic.LoadUint64(&checkCnt), Equals, uint64(2)) + + checkCnt = 0 + forwardedHost := "127.0.0.1:6666" + // Check the metadata exists. + server.setMetaChecker(func(ctx context.Context) error { + atomic.AddUint64(&checkCnt, 1) + // gRPC may set some metadata by default, e.g. "content-type". + md, ok := metadata.FromIncomingContext(ctx) + c.Assert(ok, IsTrue) + vals := md.Get(forwardMetadataKey) + c.Assert(vals, DeepEquals, []string{forwardedHost}) + return nil + }) + + prewriteReq.ForwardedHost = forwardedHost + for i := 0; i < 3; i++ { + _, err = rpcClient.SendRequest(context.Background(), addr, prewriteReq, 10*time.Second) + c.Assert(err, IsNil) + } + // checkCnt should be 3 because we don't use BatchCommands for redirection for now. + c.Assert(atomic.LoadUint64(&checkCnt), Equals, uint64(3)) + + copStreamReq.ForwardedHost = forwardedHost + _, err = rpcClient.SendRequest(context.Background(), addr, copStreamReq, 10*time.Second) + c.Assert(err, IsNil) + c.Assert(atomic.LoadUint64(&checkCnt), Equals, uint64(4)) +} diff --git a/store/tikv/config/config.go b/store/tikv/config/config.go index e5fdd8f4c662b..6719c1bca1262 100644 --- a/store/tikv/config/config.go +++ b/store/tikv/config/config.go @@ -121,6 +121,18 @@ func StoreGlobalConfig(config *Config) { globalConf.Store(config) } +// UpdateGlobal updates the global config, and provides a restore function that can be used to restore to the original. +func UpdateGlobal(f func(conf *Config)) func() { + g := GetGlobalConfig() + restore := func() { + StoreGlobalConfig(g) + } + newConf := *g + f(&newConf) + StoreGlobalConfig(&newConf) + return restore +} + // ParsePath parses this path. // Path example: tikv://etcd-node1:port,etcd-node2:port?cluster=1&disableGC=false func ParsePath(path string) (etcdAddrs []string, disableGC bool, err error) { diff --git a/store/tikv/mock_tikv_service_test.go b/store/tikv/mock_tikv_service_test.go index d86c90bb10d41..5ad7b023b1e9d 100644 --- a/store/tikv/mock_tikv_service_test.go +++ b/store/tikv/mock_tikv_service_test.go @@ -1,10 +1,14 @@ package tikv import ( + "context" "fmt" "net" + "sync" "time" + "github.com/pingcap/kvproto/pkg/coprocessor" + "github.com/pingcap/kvproto/pkg/kvrpcpb" "github.com/pingcap/kvproto/pkg/tikvpb" "github.com/pingcap/tidb/store/tikv/logutil" "go.uber.org/zap" @@ -13,9 +17,33 @@ import ( type server struct { tikvpb.TikvServer + grpcServer *grpc.Server + // metaChecker checks the metadata of each request. Now only requests + // which need redirection set it.
+ metaChecker struct { + sync.Mutex + check func(context.Context) error + } +} + +func (s *server) KvPrewrite(ctx context.Context, req *kvrpcpb.PrewriteRequest) (*kvrpcpb.PrewriteResponse, error) { + if err := s.checkMetadata(ctx); err != nil { + return nil, err + } + return &kvrpcpb.PrewriteResponse{}, nil +} + +func (s *server) CoprocessorStream(req *coprocessor.Request, ss tikvpb.Tikv_CoprocessorStreamServer) error { + if err := s.checkMetadata(ss.Context()); err != nil { + return err + } + return ss.Send(&coprocessor.Response{}) } func (s *server) BatchCommands(ss tikvpb.Tikv_BatchCommandsServer) error { + if err := s.checkMetadata(ss.Context()); err != nil { + return err + } for { req, err := ss.Recv() if err != nil { @@ -43,8 +71,27 @@ func (s *server) BatchCommands(ss tikvpb.Tikv_BatchCommandsServer) error { } } +func (s *server) setMetaChecker(check func(context.Context) error) { + s.metaChecker.Lock() + s.metaChecker.check = check + s.metaChecker.Unlock() +} + +func (s *server) checkMetadata(ctx context.Context) error { + s.metaChecker.Lock() + defer s.metaChecker.Unlock() + if s.metaChecker.check != nil { + return s.metaChecker.check(ctx) + } + return nil +} + +func (s *server) Stop() { + s.grpcServer.Stop() +} + // Try to start a gRPC server and return the server instance and bound port. -func startMockTikvService() (*grpc.Server, int) { +func startMockTikvService() (*server, int) { port := -1 lis, err := net.Listen("tcp", fmt.Sprintf("%s:%d", "127.0.0.1", 0)) if err != nil { @@ -53,8 +100,11 @@ func startMockTikvService() (*grpc.Server, int) { return nil, port } port = lis.Addr().(*net.TCPAddr).Port + + server := &server{} s := grpc.NewServer(grpc.ConnectionTimeout(time.Minute)) - tikvpb.RegisterTikvServer(s, &server{}) + tikvpb.RegisterTikvServer(s, server) + server.grpcServer = s go func() { if err = s.Serve(lis); err != nil { logutil.BgLogger().Error( @@ -63,5 +113,5 @@ func startMockTikvService() (*grpc.Server, int) { ) } }() - return s, port + return server, port } diff --git a/store/tikv/region_cache.go b/store/tikv/region_cache.go index 2b7f2ba041420..ae6832c18625e 100644 --- a/store/tikv/region_cache.go +++ b/store/tikv/region_cache.go @@ -236,8 +236,8 @@ type RegionCache struct { mu struct { sync.RWMutex // mutex protect cached region - regions map[RegionVerID]*Region // cached regions be organized as regionVerID to region ref mapping - sorted *btree.BTree // cache regions be organized as sorted key to region ref mapping + regions map[RegionVerID]*Region // cached regions are organized as regionVerID to region ref mapping + sorted *btree.BTree // cache regions are organized as sorted key to region ref mapping } storeMu struct { sync.RWMutex diff --git a/store/tikv/tikvrpc/tikvrpc.go b/store/tikv/tikvrpc/tikvrpc.go index ed4da00e146a9..bba680274f2c0 100644 --- a/store/tikv/tikvrpc/tikvrpc.go +++ b/store/tikv/tikvrpc/tikvrpc.go @@ -176,6 +176,11 @@ type Request struct { ReplicaReadType kv.ReplicaReadType // different from `kvrpcpb.Context.ReplicaRead` ReplicaReadSeed *uint32 // pointer to follower read seed in snapshot/coprocessor StoreTp kv.StoreType + // ForwardedHost is the address of a store which will handle the request. It's different from + // the address the request is sent to. + // If it's not empty, the store that receives the request will forward it to + // the forwarded host. It's useful when a network partition occurs. + ForwardedHost string } // NewRequest returns new kv rpc request.
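To make the new field concrete: a caller opts into relaying by filling in ForwardedHost before handing the request to the client, while the dial address stays the reachable peer, exactly as TestForwardMetadata above exercises. A minimal usage sketch (both store addresses are made up):

	req := tikvrpc.NewRequest(tikvrpc.CmdPrewrite, &kvrpcpb.PrewriteRequest{})
	// Dial a reachable store and ask it to relay to the leader named here.
	req.ForwardedHost = "10.0.1.5:20160"
	resp, err := rpcClient.SendRequest(context.Background(), "10.0.1.7:20160", req, 10*time.Second)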
@@ -202,6 +207,22 @@ func NewReplicaReadRequest(typ CmdType, pointer interface{}, replicaReadType kv. return req } +// EnableStaleRead enables stale read +func (req *Request) EnableStaleRead() { + req.StaleRead = true + req.ReplicaReadType = kv.ReplicaReadMixed + req.ReplicaRead = false +} + +// IsDebugReq check whether the req is debug req. +func (req *Request) IsDebugReq() bool { + switch req.Type { + case CmdDebugGetRegionProperties: + return true + } + return false +} + // Get returns GetRequest in request. func (req *Request) Get() *kvrpcpb.GetRequest { return req.Req.(*kvrpcpb.GetRequest) @@ -397,13 +418,6 @@ func (req *Request) TxnHeartBeat() *kvrpcpb.TxnHeartBeatRequest { return req.Req.(*kvrpcpb.TxnHeartBeatRequest) } -// EnableStaleRead enables stale read -func (req *Request) EnableStaleRead() { - req.StaleRead = true - req.ReplicaReadType = kv.ReplicaReadMixed - req.ReplicaRead = false -} - // ToBatchCommandsRequest converts the request to an entry in BatchCommands request. func (req *Request) ToBatchCommandsRequest() *tikvpb.BatchCommandsRequest_Request { switch req.Type { @@ -463,15 +477,6 @@ func (req *Request) ToBatchCommandsRequest() *tikvpb.BatchCommandsRequest_Reques return nil } -// IsDebugReq check whether the req is debug req. -func (req *Request) IsDebugReq() bool { - switch req.Type { - case CmdDebugGetRegionProperties: - return true - } - return false -} - // Response wraps all kv/coprocessor responses. type Response struct { Resp interface{} From 30be22a57a13eb89c9a4fb10dc57b0ae93471932 Mon Sep 17 00:00:00 2001 From: iamhlbx Date: Wed, 17 Mar 2021 18:48:55 +0800 Subject: [PATCH 11/44] expression: fix unused code in util.go (#23301) --- expression/util.go | 38 -------------------------------------- 1 file changed, 38 deletions(-) diff --git a/expression/util.go b/expression/util.go index d13f7f1e446e3..9819dbb447ac1 100644 --- a/expression/util.go +++ b/expression/util.go @@ -654,44 +654,6 @@ func PopRowFirstArg(ctx sessionctx.Context, e Expression) (ret Expression, err e return } -// exprStack is a stack of expressions. -type exprStack struct { - stack []Expression -} - -// pop pops an expression from the stack. -func (s *exprStack) pop() Expression { - if s.len() == 0 { - return nil - } - lastIdx := s.len() - 1 - expr := s.stack[lastIdx] - s.stack = s.stack[:lastIdx] - return expr -} - -// popN pops n expressions from the stack. -// If n greater than stack length or n is negative, it pops all the expressions. -func (s *exprStack) popN(n int) []Expression { - if n > s.len() || n < 0 { - n = s.len() - } - idx := s.len() - n - exprs := s.stack[idx:] - s.stack = s.stack[:idx] - return exprs -} - -// push pushes one expression to the stack. -func (s *exprStack) push(expr Expression) { - s.stack = append(s.stack, expr) -} - -// len returns the length of th stack. -func (s *exprStack) len() int { - return len(s.stack) -} - // DatumToConstant generates a Constant expression from a Datum. 
func DatumToConstant(d types.Datum, tp byte, flag uint) *Constant { t := types.NewFieldType(tp) From fd706ab76bd09ac859aa0a4de7fe9e07da3c5508 Mon Sep 17 00:00:00 2001 From: Shirly Date: Wed, 17 Mar 2021 19:32:55 +0800 Subject: [PATCH 12/44] go.mod:update br to the latest version (#23379) --- go.mod | 9 ++++- go.sum | 120 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 125 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 0af92b79c3392..96a7691ba76d8 100644 --- a/go.mod +++ b/go.mod @@ -2,9 +2,11 @@ module github.com/pingcap/tidb require ( github.com/BurntSushi/toml v0.3.1 + github.com/DATA-DOG/go-sqlmock v1.5.0 // indirect github.com/HdrHistogram/hdrhistogram-go v0.9.0 // indirect github.com/Jeffail/gabs/v2 v2.5.1 github.com/blacktear23/go-proxyprotocol v0.0.0-20180807104634-af7a81e8dd0d + github.com/carlmjohnson/flagext v0.21.0 // indirect github.com/cheggaaa/pb/v3 v3.0.4 // indirect github.com/codahale/hdrhistogram v0.9.0 // indirect github.com/coocood/freecache v1.1.1 @@ -27,7 +29,7 @@ require ( github.com/gorilla/mux v1.7.4 github.com/grpc-ecosystem/go-grpc-middleware v1.1.0 github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334 - github.com/klauspost/compress v1.10.5 // indirect + github.com/joho/sqltocsv v0.0.0-20210208114054-cb2c3a95fb99 // indirect github.com/klauspost/cpuid v1.2.1 github.com/kr/text v0.2.0 // indirect github.com/mattn/go-runewidth v0.0.10 // indirect @@ -40,7 +42,7 @@ require ( github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2 github.com/pierrec/lz4 v2.5.2+incompatible // indirect github.com/pingcap/badger v1.5.1-0.20200908111422-2e78ee155d19 - github.com/pingcap/br v4.0.0-beta.2.0.20210302095941-59e4efeaeb47+incompatible + github.com/pingcap/br v5.0.0-nightly.0.20210317100924-d95f9fdfcd29+incompatible github.com/pingcap/check v0.0.0-20200212061837-5e12011dc712 github.com/pingcap/errors v0.11.5-0.20201126102027-b0a155152ca3 github.com/pingcap/failpoint v0.0.0-20200702092429-9f69995143ce @@ -65,6 +67,8 @@ require ( github.com/uber-go/atomic v1.4.0 github.com/uber/jaeger-client-go v2.22.1+incompatible github.com/uber/jaeger-lib v2.4.0+incompatible // indirect + github.com/xitongsys/parquet-go v1.6.0 // indirect + github.com/xitongsys/parquet-go-source v0.0.0-20201108113611-f372b7d813be // indirect github.com/zhangjinpeng1987/raft v0.0.0-20200819064223-df31bb68a018 // indirect go.etcd.io/etcd v0.5.0-alpha.5.0.20200824191128-ae9734ed278b go.uber.org/atomic v1.7.0 @@ -84,6 +88,7 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 // indirect honnef.co/go/tools v0.1.3 // indirect + modernc.org/mathutil v1.2.2 // indirect sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0 sourcegraph.com/sourcegraph/appdash-data v0.0.0-20151005221446-73f23eafcf67 ) diff --git a/go.sum b/go.sum index 42fffae3abdc0..ed705264b9304 100644 --- a/go.sum +++ b/go.sum @@ -7,21 +7,47 @@ cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTj cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0 h1:0E3eE8MX426vUOs7aHfI7aN1BrIzzzf4ccKCSfSjGmc= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0 h1:MZQCQQaRwOrAcuKjiHWHrgKykt4fZyuwF2dtiG3fGW8= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= 
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0 h1:sAbMqjY1PEQKZBWfbu6Y6bsupJ9c4QdHnzg/VvYTLcE= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0 h1:xE3CPsOgttP4ACBePh79zTKALtXwn/Edhcr16R5hMWU= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/datastore v1.0.0 h1:Kt+gOPPp2LEPWp8CSfxhsM8ik9CcyE/gYu+0r+RnZvM= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0 h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0 h1:9/vpR43S4aJaROxqQHQ3nH9lfyKKV0dC3vOmnw8ebQQ= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0 h1:Lpy6hKgdcl7a3WGSfJIFmxmcdjSpP6OmBEfcOv1Y680= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0 h1:RPUcBvDeYgQFMfQu1eBMq6piD1SXmLH+vK3qjewZPus= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0 h1:UDpwYIwla4jHGzZJaEJYx1tOejbgSoNqsAfHAUYe2r8= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc= +github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= +github.com/Azure/azure-storage-blob-go v0.10.0/go.mod h1:ep1edmW+kNQx4UfWM9heESNmQdijykocJ0YOxmMX8SE= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= +github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod 
h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/HdrHistogram/hdrhistogram-go v0.9.0 h1:dpujRju0R4M/QZzcnR1LH1qm+TVG3UzkWdp5tH1WMcg= github.com/HdrHistogram/hdrhistogram-go v0.9.0/go.mod h1:nxrse8/Tzg2tg3DZcZjm6qEclQKK70g0KxO61gFFZD4= github.com/Jeffail/gabs/v2 v2.5.1 h1:ANfZYjpMlfTTKebycu4X1AgkVWumFVDYQl7JwOr4mDk= @@ -46,9 +72,13 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= +github.com/apache/thrift v0.0.0-20181112125854-24918abba929/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.13.1-0.20201008052519-daf620915714 h1:Jz3KVLYY5+JO7rDiX0sAuRGtuv2vG01r17Y9nLMWNUw= +github.com/apache/thrift v0.13.1-0.20201008052519-daf620915714/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/appleboy/gin-jwt/v2 v2.6.3/go.mod h1:MfPYA4ogzvOcVkRwAxT7quHOtQmVKDpTwxyUrC2DNw0= github.com/appleboy/gofight/v2 v2.1.2/go.mod h1:frW+U1QZEdDgixycTj4CygQ48yLTUhplt43+Wczp3rw= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/aws/aws-sdk-go v1.30.19/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aws/aws-sdk-go v1.35.3 h1:r0puXncSaAfRt7Btml2swUo74Kao+vKhO3VLjwDjK54= github.com/aws/aws-sdk-go v1.35.3/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -60,6 +90,8 @@ github.com/blacktear23/go-proxyprotocol v0.0.0-20180807104634-af7a81e8dd0d h1:rQ github.com/blacktear23/go-proxyprotocol v0.0.0-20180807104634-af7a81e8dd0d/go.mod h1:VKt7CNAQxpFpSDz3sXyj9hY/GbVsQCr0sB3w59nE7lU= github.com/cakturk/go-netstat v0.0.0-20200220111822-e5b49efee7a5 h1:BjkPE3785EwPhhyuFkbINB+2a1xATwk8SNDWnJiD41g= github.com/cakturk/go-netstat v0.0.0-20200220111822-e5b49efee7a5/go.mod h1:jtAfVaU/2cu1+wdSRPWE2c1N2qeAA3K4RH9pYgqwets= +github.com/carlmjohnson/flagext v0.21.0 h1:/c4uK3ie786Z7caXLcIMvePNSSiH3bQVGDvmGLMme60= +github.com/carlmjohnson/flagext v0.21.0/go.mod h1:Eenv0epIUAr4NuedNmkzI8WmBmjIxZC239XcKxYS2ac= github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= @@ -77,6 +109,7 @@ github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa h1:OaNxuTZr github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.9.0 h1:9GjrtRI+mLEFPtTfR/AZhcxp+Ii8NZYWq5104FbZQY0= github.com/codahale/hdrhistogram v0.9.0/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/colinmarc/hdfs/v2 v2.1.1/go.mod h1:M3x+k8UKKmxtFu++uAZ0OtDU8jR3jnaZIAc6yK4Ue0c= github.com/coocood/bbloom v0.0.0-20190830030839-58deb6228d64 h1:W1SHiII3e0jVwvaQFglwu3kS9NLxOeTpvik7MbKCyuQ= github.com/coocood/bbloom v0.0.0-20190830030839-58deb6228d64/go.mod h1:F86k/6c7aDUdwSUevnLpHS/3Q9hzYCE99jGk2xsHnt0= github.com/coocood/freecache v1.1.1 h1:uukNF7QKCZEdZ9gAV7WQzvh0SbjwdMF6m3x3rxEkaPc= @@ 
-162,6 +195,7 @@ github.com/go-chi/chi v4.0.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxm github.com/go-echarts/go-echarts v1.0.0/go.mod h1:qbmyAb/Rl1f2w7wKba1D4LoNq4U164yO4/wedFbcWyo= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= @@ -211,16 +245,24 @@ github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4er github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3 h1:GV+pQPG/EUUbkh47niozDcADz6go/dUwhVzdUQHIVRw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/protobuf v0.0.0-20180814211427-aa810b61a9c7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4 h1:87PNWwrRvUSnqS4dlcBU/ftvOIBep4sYuBLlh6rX2wk= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.2-0.20190904063534-ff6b7dc882cf h1:gFVkHXmVAhEbxZVDln5V9GKrLaluNoFHDbrZwAWZgws= github.com/golang/snappy v0.0.2-0.20190904063534-ff6b7dc882cf/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -238,6 +280,8 @@ github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPg github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod 
h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200407044318-7d83b28da2e9 h1:K+lX49/3eURCE1IjlaZN//u6c+9nfDAMnyQ9E2dsJbY= github.com/google/pprof v0.0.0-20200407044318-7d83b28da2e9/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= @@ -267,6 +311,7 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t github.com/grpc-ecosystem/grpc-gateway v1.12.1 h1:zCy2xE9ablevUOrUZc3Dl72Dt+ya2FNAvC2yLYMHzi4= github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= github.com/gtank/cryptopasta v0.0.0-20170601214702-1f550f6f2f69/go.mod h1:YLEMZOtU+AZ7dhN9T/IpGhXVGly2bvkJQ+zxj3WeVQo= +github.com/hashicorp/go-uuid v0.0.0-20180228145832-27454136f036/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= @@ -278,15 +323,19 @@ github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334/go.mod h1:SK73t github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jcmturner/gofork v0.0.0-20180107083740-2aebee971930/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jinzhu/gorm v1.9.12/go.mod h1:vhTjlKSJUTWNtcbQtrMBFCxy7eXTzeCAzfL5fBZT/Qs= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jinzhu/now v1.0.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= +github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/joho/sqltocsv v0.0.0-20210208114054-cb2c3a95fb99 h1:yYV8KMzsc4ius4P8A9BK13cY6nFDL3jlVX25HvEXOkw= +github.com/joho/sqltocsv v0.0.0-20210208114054-cb2c3a95fb99/go.mod h1:mAVCUAYtW9NG31eB30umMSLKcDt6mCUWSjoSn5qBh0k= github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/joomcode/errorx v1.0.1/go.mod h1:kgco15ekB6cs+4Xjzo7SPeXzx38PbJzBwbnu9qfVNHQ= @@ -306,6 +355,7 @@ github.com/kisielk/errcheck v1.2.0/go.mod 
h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQL github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.10.5 h1:7q6vHIqubShURwQz8cQK6yIe/xC3IF0Vm7TGfqjewrc= github.com/klauspost/compress v1.10.5/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w= @@ -335,6 +385,8 @@ github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaa github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= +github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= @@ -371,6 +423,7 @@ github.com/montanaflynn/stats v0.5.0/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFW github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/ncw/directio v1.0.4 h1:CojwI07mCEmRkajgx42Pf8jyCwTs1ji9/Ij9/PJG12k= github.com/ncw/directio v1.0.4/go.mod h1:CKGdcN7StAaqjT7Qack3lAXeX4pjnyc46YeqZH1yWVY= +github.com/ncw/swift v1.0.52/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= github.com/nfnt/resize v0.0.0-20160724205520-891127d8d1b5/go.mod h1:jpp1/29i3P1S/RLdc7JQKbRpFeM1dOBd8T9ki5s+AY8= github.com/ngaut/pools v0.0.0-20180318154953-b7bc8c42aac7 h1:7KAv7KMGTTqSmYZtNdcNTgsos+vFzULLwyElndwn+5c= github.com/ngaut/pools v0.0.0-20180318154953-b7bc8c42aac7/go.mod h1:iWMfgwqYW+e8n5lC/jjNEhwcjbRDpl5NT7n2h+4UNcI= @@ -395,6 +448,7 @@ github.com/opentracing/basictracer-go v1.0.0 h1:YyUAhaEfjoWXclZVJ9sGoNct7j4TVk7l github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pborman/getopt v0.0.0-20180729010549-6fdd0a2c7117/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.3.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ= @@ -407,8 +461,8 @@ github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUM github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pingcap/badger v1.5.1-0.20200908111422-2e78ee155d19 h1:IXpGy7y9HyoShAFmzW2OPF0xCA5EOoSTyZHwsgYk9Ro= github.com/pingcap/badger v1.5.1-0.20200908111422-2e78ee155d19/go.mod h1:LyrqUOHZrUDf9oGi1yoz1+qw9ckSIhQb5eMa1acOLNQ= 
-github.com/pingcap/br v4.0.0-beta.2.0.20210302095941-59e4efeaeb47+incompatible h1:0B1CQlmaky9VEa1STBH/WM81wLOuFJ2Rmb5APHzPefU= -github.com/pingcap/br v4.0.0-beta.2.0.20210302095941-59e4efeaeb47+incompatible/go.mod h1:ymVmo50lQydxib0tmK5hHk4oteB7hZ0IMCArunwy3UQ= +github.com/pingcap/br v5.0.0-nightly.0.20210317100924-d95f9fdfcd29+incompatible h1:K3DXUdxw67vH8nehT2yYavJIgYbNxl3hw0zZIkQdoyw= +github.com/pingcap/br v5.0.0-nightly.0.20210317100924-d95f9fdfcd29+incompatible/go.mod h1:ymVmo50lQydxib0tmK5hHk4oteB7hZ0IMCArunwy3UQ= github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8/go.mod h1:B1+S9LNcuMyLH/4HMTViQOJevkGiik3wW2AN9zb2fNQ= github.com/pingcap/check v0.0.0-20191107115940-caf2b9e6ccf4/go.mod h1:PYMCGwN0JHjoqGr3HrZoD+b8Tgx8bKnArhSq8YVzUMc= github.com/pingcap/check v0.0.0-20191216031241-8a5a85928f12/go.mod h1:PYMCGwN0JHjoqGr3HrZoD+b8Tgx8bKnArhSq8YVzUMc= @@ -483,6 +537,8 @@ github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+Gx github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237 h1:HQagqIiBmr8YXawX/le3+O26N+vPPC1PtjaF3mwnook= github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk= +github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rivo/uniseg v0.1.0 h1:+2KBaVoUmb9XzDsrx/Ct0W/EYOSFf/nWTauy++DprtY= github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= @@ -520,6 +576,7 @@ github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4k github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= @@ -588,6 +645,13 @@ github.com/vmihailenco/msgpack/v5 v5.0.0-beta.1/go.mod h1:xlngVLeyQ/Qi05oQxhQ+oT github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xitongsys/parquet-go v1.5.1/go.mod h1:xUxwM8ELydxh4edHGegYq1pA8NnMKDx0K/GyB0o2bww= +github.com/xitongsys/parquet-go v1.6.0 h1:j6YrTVZdQx5yywJLIOklZcKVsCoSD1tqOVRXyTBFSjs= +github.com/xitongsys/parquet-go v1.6.0/go.mod h1:pheqtXeHQFzxJk45lRQ0UIGIivKnLXvialZSFWs81A8= +github.com/xitongsys/parquet-go-source v0.0.0-20190524061010-2b72cbee77d5/go.mod h1:xxCx7Wpym/3QCo6JhujJX51dzSXrwmb0oH6FQb39SEA= +github.com/xitongsys/parquet-go-source v0.0.0-20200817004010-026bad9b25d0/go.mod h1:HYhIKsdns7xz80OgkbgJYrtQY7FjHWHKH6cvN7+czGE= +github.com/xitongsys/parquet-go-source 
v0.0.0-20201108113611-f372b7d813be h1:33jqDHcXK6vfgtLossgwZmTXyLCdPZU3/KZ3988bk3Q= +github.com/xitongsys/parquet-go-source v0.0.0-20201108113611-f372b7d813be/go.mod h1:SQSSW1CBj/egoUhnaTXihUlDayvpp01Fn8qwuEpK5bY= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yookoala/realpath v1.0.0/go.mod h1:gJJMA9wuX7AcqLy1+ffPatSCySA1FQ2S8Ya9AIoYBpE= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -605,6 +669,8 @@ go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= @@ -631,6 +697,7 @@ go.uber.org/zap v1.12.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.16.0 h1:uFRZXykJGK9lLY4HtgSw44DnIcAM+kRBP7x5m+NpAOM= go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= +golang.org/x/crypto v0.0.0-20180723164146-c126467f60eb/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -640,6 +707,7 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191205180655-e7c4368fe9dd/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a h1:vclmkQCjlDX5OydZ9wv8rBCcS0QyQY66Mpf/7BZbInM= @@ -652,6 +720,10 @@ golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299 h1:zQpM52jfKHG6II1ISZY1ZcpygvuSFZpLwfluuF89XOg= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp 
v0.0.0-20200224162631-6cc2880d07d6 h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -664,11 +736,14 @@ golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367 h1:0IiAsCRByjO2QjX7ZPkw5oU9x+n1YqRL802rjC0c3Aw= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= @@ -694,8 +769,11 @@ golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= @@ -709,6 +787,8 @@ golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6 
h1:pE8b58s1HRDMi8RDc79m0HISf9D4TzseP40cEA6IGfs= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -739,14 +819,19 @@ golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -755,6 +840,7 @@ golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210309074719-68d13333faf2 h1:46ULzRKLh1CwgRq2dC5SlBzEqqNCi8rreOZnNrbqcIY= golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= @@ -765,6 +851,8 @@ golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -794,8 +882,16 @@ golang.org/x/tools v0.0.0-20191114200427-caa0b0f7d508/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200225230052-807dcd883420/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200527183253-8e7acdbce89d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -816,6 +912,9 @@ google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsb google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.15.0 h1:yzlyyDW/J0w8yNFJIhiAJy4kq74S+1DOLdawELNxFMA= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0 h1:TgDr+1inK2XVUKZx3BYAqQg/GwucGdBkzZjWaTg/I+A= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/appengine v1.1.0/go.mod 
h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -837,6 +936,12 @@ google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb h1:ADPHZzpzM4tk4V4S5cnCrr5SwzvlrPRmqqCuJDB8UTs= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63 h1:YzfoEYWbODU5Fbt37+h7X16BWQbad7Q4S6gclTKFXM8= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/grpc v0.0.0-20180607172857-7a6a684ca69e/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -847,6 +952,7 @@ google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= gopkg.in/alecthomas/gometalinter.v2 v2.0.12/go.mod h1:NDRytsqEZyolNuAgTzJkZMkSQM7FIKyzVzGhjB/qfYo= @@ -864,6 +970,11 @@ gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMy gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y= gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= +gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= +gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q= +gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4= +gopkg.in/jcmturner/gokrb5.v7 v7.3.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= +gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= @@ -888,10 +999,15 @@ honnef.co/go/tools 
v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.1.3 h1:qTakTkI6ni6LFD5sBwwsdSO+AQqbSIxOauHTTQKZ/7o= honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +modernc.org/mathutil v1.2.2 h1:+yFk8hBprV+4c0U9GjFtL+dV3N8hOJ8JCituQcMShFY= +modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0 h1:ucqkfpjg9WzSUubAO62csmucvxl4/JeW3F4I4909XkM= From e4bb9116b993295f5e31b556385f4d8095a8a5b5 Mon Sep 17 00:00:00 2001 From: Jack Yu Date: Thu, 18 Mar 2021 09:47:51 +0800 Subject: [PATCH 13/44] tests: format code (#23378) --- tests/globalkilltest/global_kill_test.go | 6 +++--- tests/graceshutdown/graceshutdown_test.go | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/globalkilltest/global_kill_test.go b/tests/globalkilltest/global_kill_test.go index 4bd8cae68b99a..4223ca4f01d29 100644 --- a/tests/globalkilltest/global_kill_test.go +++ b/tests/globalkilltest/global_kill_test.go @@ -29,7 +29,7 @@ import ( _ "github.com/go-sql-driver/mysql" "github.com/juju/errors" . 
"github.com/pingcap/check" - "github.com/pingcap/log" + "github.com/pingcap/log" zaplog "github.com/pingcap/log" "github.com/pingcap/tidb/util/logutil" "go.etcd.io/etcd/clientv3" @@ -343,7 +343,7 @@ func (s *TestGlobalKillSuite) TestWithoutPD(c *C) { db, err := s.connectTiDB(port) c.Assert(err, IsNil) - defer func(){ + defer func() { err := db.Close() c.Assert(err, IsNil) }() @@ -371,7 +371,7 @@ func (s *TestGlobalKillSuite) TestOneTiDB(c *C) { db, err := s.connectTiDB(port) c.Assert(err, IsNil) - defer func(){ + defer func() { err := db.Close() c.Assert(err, IsNil) }() diff --git a/tests/graceshutdown/graceshutdown_test.go b/tests/graceshutdown/graceshutdown_test.go index ca7f39093401b..66fd422c30493 100644 --- a/tests/graceshutdown/graceshutdown_test.go +++ b/tests/graceshutdown/graceshutdown_test.go @@ -134,7 +134,7 @@ func (s *TestGracefulShutdownSuite) TestGracefulShutdown(c *C) { db, err := s.connectTiDB(port) c.Assert(err, IsNil) - defer func(){ + defer func() { err := db.Close() c.Assert(err, IsNil) }() From 9f388deb62b7c8ee81a472b93f8fb69ab0b4c1a4 Mon Sep 17 00:00:00 2001 From: xhe Date: Thu, 18 Mar 2021 10:02:55 +0800 Subject: [PATCH 14/44] *: remove uselss reloadFunc (#23367) --- config/config.go | 2 +- tidb-server/main.go | 42 +----------------------------------------- 2 files changed, 2 insertions(+), 42 deletions(-) diff --git a/config/config.go b/config/config.go index 3078a6041ea19..8e1a4fe1d4214 100644 --- a/config/config.go +++ b/config/config.go @@ -730,7 +730,7 @@ var IsOOMActionSetByUser bool // The function enforceCmdArgs is used to merge the config file with command arguments: // For example, if you start TiDB by the command "./tidb-server --port=3000", the port number should be // overwritten to 3000 and ignore the port number in the config file. -func InitializeConfig(confPath string, configCheck, configStrict bool, reloadFunc ConfReloadFunc, enforceCmdArgs func(*Config)) { +func InitializeConfig(confPath string, configCheck, configStrict bool, enforceCmdArgs func(*Config)) { cfg := GetGlobalConfig() var err error if confPath != "" { diff --git a/tidb-server/main.go b/tidb-server/main.go index f9cb021a894e8..9c4e55e42fe32 100644 --- a/tidb-server/main.go +++ b/tidb-server/main.go @@ -51,7 +51,6 @@ import ( "github.com/pingcap/tidb/store/driver" "github.com/pingcap/tidb/store/mockstore" "github.com/pingcap/tidb/store/tikv" - "github.com/pingcap/tidb/store/tikv/storeutil" "github.com/pingcap/tidb/util" "github.com/pingcap/tidb/util/disk" "github.com/pingcap/tidb/util/domainutil" @@ -165,7 +164,7 @@ func main() { } registerStores() registerMetrics() - config.InitializeConfig(*configPath, *configCheck, *configStrict, reloadConfig, overrideConfig) + config.InitializeConfig(*configPath, *configCheck, *configStrict, overrideConfig) if config.GetGlobalConfig().OOMUseTmpStorage { config.GetGlobalConfig().UpdateTempStoragePath() err := disk.InitializeTempDir() @@ -365,45 +364,6 @@ func flagBoolean(name string, defaultVal bool, usage string) *bool { return flag.Bool(name, defaultVal, usage) } -func reloadConfig(nc, c *config.Config) { - // Just a part of config items need to be reload explicitly. - // Some of them like OOMAction are always used by getting from global config directly - // like config.GetGlobalConfig().OOMAction. - // These config items will become available naturally after the global config pointer - // is updated in function ReloadGlobalConfig. 
- if nc.Performance.ServerMemoryQuota != c.Performance.ServerMemoryQuota { - plannercore.PreparedPlanCacheMaxMemory.Store(nc.Performance.ServerMemoryQuota) - } - if nc.Performance.CrossJoin != c.Performance.CrossJoin { - plannercore.AllowCartesianProduct.Store(nc.Performance.CrossJoin) - } - if nc.Performance.FeedbackProbability != c.Performance.FeedbackProbability { - statistics.FeedbackProbability.Store(nc.Performance.FeedbackProbability) - } - if nc.Performance.QueryFeedbackLimit != c.Performance.QueryFeedbackLimit { - statistics.MaxQueryFeedbackCount.Store(int64(nc.Performance.QueryFeedbackLimit)) - } - if nc.Performance.PseudoEstimateRatio != c.Performance.PseudoEstimateRatio { - statistics.RatioOfPseudoEstimate.Store(nc.Performance.PseudoEstimateRatio) - } - if nc.Performance.MaxProcs != c.Performance.MaxProcs { - runtime.GOMAXPROCS(int(nc.Performance.MaxProcs)) - metrics.MaxProcs.Set(float64(runtime.GOMAXPROCS(0))) - } - if nc.TiKVClient.StoreLimit != c.TiKVClient.StoreLimit { - storeutil.StoreLimit.Store(nc.TiKVClient.StoreLimit) - } - - if nc.PreparedPlanCache.Enabled != c.PreparedPlanCache.Enabled { - plannercore.SetPreparedPlanCache(nc.PreparedPlanCache.Enabled) - } - if nc.Log.Level != c.Log.Level { - if err := logutil.SetLevel(nc.Log.Level); err != nil { - logutil.BgLogger().Error("update log level error", zap.Error(err)) - } - } -} - // overrideConfig considers command arguments and overrides some config items in the Config. func overrideConfig(cfg *config.Config) { actualFlags := make(map[string]bool) From 3cfb984211901c3416ec539658ac86d2cb64e81f Mon Sep 17 00:00:00 2001 From: Tjianke <34013484+Tjianke@users.noreply.github.com> Date: Thu, 18 Mar 2021 10:28:56 +0800 Subject: [PATCH 15/44] server: fix err check (#22999) --- server/conn_test.go | 27 ++++--- server/http_handler_test.go | 99 ++++++++++++++++------- server/server_test.go | 130 +++++++++++++++++++++--------- server/statistics_handler_test.go | 45 ++++++++--- server/tidb_test.go | 108 ++++++++++++++++++------- 5 files changed, 297 insertions(+), 112 deletions(-) diff --git a/server/conn_test.go b/server/conn_test.go index 320cc3e70a019..be3d6210e87f4 100644 --- a/server/conn_test.go +++ b/server/conn_test.go @@ -219,15 +219,19 @@ func (ts *ConnTestSuite) TestInitialHandshake(c *C) { c.Assert(err, IsNil) expected := new(bytes.Buffer) - expected.WriteByte(0x0a) // Protocol - expected.WriteString(mysql.ServerVersion) // Version - expected.WriteByte(0x00) // NULL - binary.Write(expected, binary.LittleEndian, uint32(1)) // Connection ID - expected.Write([]byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00}) // Salt - binary.Write(expected, binary.LittleEndian, uint16(defaultCapability&0xFFFF)) // Server Capability - expected.WriteByte(uint8(mysql.DefaultCollationID)) // Server Language - binary.Write(expected, binary.LittleEndian, mysql.ServerStatusAutocommit) // Server Status - binary.Write(expected, binary.LittleEndian, uint16((defaultCapability>>16)&0xFFFF)) // Extended Server Capability + expected.WriteByte(0x0a) // Protocol + expected.WriteString(mysql.ServerVersion) // Version + expected.WriteByte(0x00) // NULL + err = binary.Write(expected, binary.LittleEndian, uint32(1)) // Connection ID + c.Assert(err, IsNil) + expected.Write([]byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00}) // Salt + err = binary.Write(expected, binary.LittleEndian, uint16(defaultCapability&0xFFFF)) // Server Capability + c.Assert(err, IsNil) + expected.WriteByte(uint8(mysql.DefaultCollationID)) // Server Language + err = 
binary.Write(expected, binary.LittleEndian, mysql.ServerStatusAutocommit) // Server Status + c.Assert(err, IsNil) + err = binary.Write(expected, binary.LittleEndian, uint16((defaultCapability>>16)&0xFFFF)) // Extended Server Capability + c.Assert(err, IsNil) expected.WriteByte(0x15) // Authentication Plugin Length expected.Write([]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}) // Unused expected.Write([]byte{0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x00}) // Salt @@ -486,7 +490,10 @@ func (ts *ConnTestSuite) TestDispatchClientProtocol41(c *C) { func (ts *ConnTestSuite) testDispatch(c *C, inputs []dispatchInput, capability uint32) { store, err := mockstore.NewMockStore() c.Assert(err, IsNil) - defer store.Close() + defer func() { + err := store.Close() + c.Assert(err, IsNil) + }() dom, err := session.BootstrapSession(store) c.Assert(err, IsNil) defer dom.Close() diff --git a/server/http_handler_test.go b/server/http_handler_test.go index 734549b095391..5e5850178742c 100644 --- a/server/http_handler_test.go +++ b/server/http_handler_test.go @@ -483,7 +483,10 @@ func (ts *basicHTTPHandlerTestSuite) startServer(c *C) { ts.port = getPortFromTCPAddr(server.listener.Addr()) ts.statusPort = getPortFromTCPAddr(server.statusListener.Addr()) ts.server = server - go server.Run() + go func() { + err := server.Run() + c.Assert(err, IsNil) + }() ts.waitUntilServerOnline() } @@ -506,7 +509,10 @@ func (ts *basicHTTPHandlerTestSuite) stopServer(c *C) { func (ts *basicHTTPHandlerTestSuite) prepareData(c *C) { db, err := sql.Open("mysql", ts.getDSN()) c.Assert(err, IsNil, Commentf("Error connecting")) - defer db.Close() + defer func() { + err := db.Close() + c.Assert(err, IsNil) + }() dbt := &DBTest{c, db} dbt.mustExec("create database tidb;") @@ -536,9 +542,12 @@ partition by range (a) txn2, err := dbt.db.Begin() c.Assert(err, IsNil) - txn2.Exec("insert into tidb.pt values (42, '123')") - txn2.Exec("insert into tidb.pt values (256, 'b')") - txn2.Exec("insert into tidb.pt values (666, 'def')") + _, err = txn2.Exec("insert into tidb.pt values (42, '123')") + c.Assert(err, IsNil) + _, err = txn2.Exec("insert into tidb.pt values (256, 'b')") + c.Assert(err, IsNil) + _, err = txn2.Exec("insert into tidb.pt values (666, 'def')") + c.Assert(err, IsNil) err = txn2.Commit() c.Assert(err, IsNil) dbt.mustExec("drop table if exists t") @@ -673,7 +682,10 @@ func (ts *HTTPHandlerTestSuite) TestTiFlashReplica(c *C) { db, err := sql.Open("mysql", ts.getDSN()) c.Assert(err, IsNil, Commentf("Error connecting")) - defer db.Close() + defer func() { + err := db.Close() + c.Assert(err, IsNil) + }() dbt := &DBTest{c, db} defer func(originGC bool) { @@ -704,7 +716,11 @@ func (ts *HTTPHandlerTestSuite) TestTiFlashReplica(c *C) { c.Assert(len(data), Equals, 0) c.Assert(failpoint.Enable("github.com/pingcap/tidb/infoschema/mockTiFlashStoreCount", `return(true)`), IsNil) - defer failpoint.Disable("github.com/pingcap/tidb/infoschema/mockTiFlashStoreCount") + defer func() { + err = failpoint.Disable("github.com/pingcap/tidb/infoschema/mockTiFlashStoreCount") + c.Assert(err, IsNil) + }() + dbt.mustExec("use tidb") dbt.mustExec("alter table test set tiflash replica 2 location labels 'a','b';") @@ -740,7 +756,8 @@ func (ts *HTTPHandlerTestSuite) TestTiFlashReplica(c *C) { decoder = json.NewDecoder(resp.Body) err = decoder.Decode(&data) c.Assert(err, IsNil) - resp.Body.Close() + err = resp.Body.Close() + c.Assert(err, IsNil) c.Assert(len(data), Equals, 1) c.Assert(data[0].ReplicaCount, Equals, 
uint64(2)) c.Assert(strings.Join(data[0].LocationLabels, ","), Equals, "a,b") @@ -754,7 +771,8 @@ func (ts *HTTPHandlerTestSuite) TestTiFlashReplica(c *C) { decoder = json.NewDecoder(resp.Body) err = decoder.Decode(&data) c.Assert(err, IsNil) - resp.Body.Close() + err = resp.Body.Close() + c.Assert(err, IsNil) c.Assert(len(data), Equals, 1) c.Assert(data[0].ReplicaCount, Equals, uint64(2)) c.Assert(strings.Join(data[0].LocationLabels, ","), Equals, "a,b") @@ -781,7 +799,8 @@ func (ts *HTTPHandlerTestSuite) TestTiFlashReplica(c *C) { decoder = json.NewDecoder(resp.Body) err = decoder.Decode(&data) c.Assert(err, IsNil) - resp.Body.Close() + err = resp.Body.Close() + c.Assert(err, IsNil) c.Assert(len(data), Equals, 3) c.Assert(data[0].ReplicaCount, Equals, uint64(2)) c.Assert(strings.Join(data[0].LocationLabels, ","), Equals, "a,b") @@ -795,13 +814,15 @@ func (ts *HTTPHandlerTestSuite) TestTiFlashReplica(c *C) { req = fmt.Sprintf(`{"id":%d,"region_count":3,"flash_region_count":3}`, pid1) resp, err = ts.postStatus("/tiflash/replica", "application/json", bytes.NewBuffer([]byte(req))) c.Assert(err, IsNil) - resp.Body.Close() + err = resp.Body.Close() + c.Assert(err, IsNil) resp, err = ts.fetchStatus("/tiflash/replica") c.Assert(err, IsNil) decoder = json.NewDecoder(resp.Body) err = decoder.Decode(&data) c.Assert(err, IsNil) - resp.Body.Close() + err = resp.Body.Close() + c.Assert(err, IsNil) c.Assert(len(data), Equals, 3) c.Assert(data[0].Available, Equals, false) c.Assert(data[1].Available, Equals, true) @@ -811,18 +832,21 @@ func (ts *HTTPHandlerTestSuite) TestTiFlashReplica(c *C) { req = fmt.Sprintf(`{"id":%d,"region_count":3,"flash_region_count":3}`, pid0) resp, err = ts.postStatus("/tiflash/replica", "application/json", bytes.NewBuffer([]byte(req))) c.Assert(err, IsNil) - resp.Body.Close() + err = resp.Body.Close() + c.Assert(err, IsNil) req = fmt.Sprintf(`{"id":%d,"region_count":3,"flash_region_count":3}`, pid2) resp, err = ts.postStatus("/tiflash/replica", "application/json", bytes.NewBuffer([]byte(req))) c.Assert(err, IsNil) - resp.Body.Close() + err = resp.Body.Close() + c.Assert(err, IsNil) checkFunc = func() { resp, err = ts.fetchStatus("/tiflash/replica") c.Assert(err, IsNil) decoder = json.NewDecoder(resp.Body) err = decoder.Decode(&data) c.Assert(err, IsNil) - resp.Body.Close() + err = resp.Body.Close() + c.Assert(err, IsNil) c.Assert(len(data), Equals, 3) c.Assert(data[0].Available, Equals, true) c.Assert(data[1].Available, Equals, true) @@ -1050,7 +1074,10 @@ func (ts *HTTPHandlerTestSuite) TestGetSchema(c *C) { db, err := sql.Open("mysql", ts.getDSN()) c.Assert(err, IsNil, Commentf("Error connecting")) - defer db.Close() + defer func() { + err := db.Close() + c.Assert(err, IsNil) + }() dbt := &DBTest{c, db} dbt.mustExec("create database if not exists test;") @@ -1098,7 +1125,8 @@ func (ts *HTTPHandlerTestSuite) TestAllHistory(c *C) { store := domain.GetDomain(s.(sessionctx.Context)).Store() txn, _ := store.Begin() txnMeta := meta.NewMeta(txn) - txnMeta.GetAllHistoryDDLJobs() + _, err = txnMeta.GetAllHistoryDDLJobs() + c.Assert(err, IsNil) data, _ := txnMeta.GetAllHistoryDDLJobs() err = decoder.Decode(&jobs) @@ -1163,7 +1191,10 @@ func (ts *HTTPHandlerTestSuite) TestPostSettings(c *C) { // test check_mb4_value_in_utf8 db, err := sql.Open("mysql", ts.getDSN()) c.Assert(err, IsNil, Commentf("Error connecting")) - defer db.Close() + defer func() { + err := db.Close() + c.Assert(err, IsNil) + }() dbt := &DBTest{c, db} dbt.mustExec("create database tidb_test;") @@ -1179,7 +1210,8 @@ 
func (ts *HTTPHandlerTestSuite) TestPostSettings(c *C) { c.Assert(err, IsNil) _, err = txn1.Exec("insert t2 values (unhex('F0A48BAE'));") c.Assert(err, NotNil) - txn1.Commit() + err = txn1.Commit() + c.Assert(err, IsNil) // Disable CheckMb4ValueInUTF8. form = make(url.Values) @@ -1198,8 +1230,10 @@ func (ts *HTTPHandlerTestSuite) TestPprof(c *C) { for retry := 0; retry < retryTime; retry++ { resp, err := ts.fetchStatus("/debug/pprof/heap") if err == nil { - ioutil.ReadAll(resp.Body) - resp.Body.Close() + _, err = ioutil.ReadAll(resp.Body) + c.Assert(err, IsNil) + err = resp.Body.Close() + c.Assert(err, IsNil) return } time.Sleep(time.Millisecond * 10) @@ -1308,7 +1342,10 @@ func (ts *HTTPHandlerTestSuite) TestZipInfoForSQL(c *C) { db, err := sql.Open("mysql", ts.getDSN()) c.Assert(err, IsNil, Commentf("Error connecting")) - defer db.Close() + defer func() { + err := db.Close() + c.Assert(err, IsNil) + }() dbt := &DBTest{c, db} dbt.mustExec("use test") @@ -1398,31 +1435,37 @@ func (ts *HTTPHandlerTestSuite) TestTestHandler(c *C) { resp, err = ts.fetchStatus("/test/gc/gc") c.Assert(err, IsNil) - resp.Body.Close() + err = resp.Body.Close() + c.Assert(err, IsNil) c.Assert(resp.StatusCode, Equals, http.StatusBadRequest) resp, err = ts.fetchStatus("/test/gc/resolvelock") c.Assert(err, IsNil) - resp.Body.Close() + err = resp.Body.Close() + c.Assert(err, IsNil) c.Assert(resp.StatusCode, Equals, http.StatusBadRequest) resp, err = ts.fetchStatus("/test/gc/resolvelock?safepoint=a") c.Assert(err, IsNil) - resp.Body.Close() + err = resp.Body.Close() + c.Assert(err, IsNil) c.Assert(resp.StatusCode, Equals, http.StatusBadRequest) resp, err = ts.fetchStatus("/test/gc/resolvelock?physical=1") c.Assert(err, IsNil) - resp.Body.Close() + err = resp.Body.Close() + c.Assert(err, IsNil) c.Assert(resp.StatusCode, Equals, http.StatusBadRequest) resp, err = ts.fetchStatus("/test/gc/resolvelock?physical=true") c.Assert(err, IsNil) - resp.Body.Close() + err = resp.Body.Close() + c.Assert(err, IsNil) c.Assert(resp.StatusCode, Equals, http.StatusBadRequest) resp, err = ts.fetchStatus("/test/gc/resolvelock?safepoint=10000&physical=true") c.Assert(err, IsNil) - resp.Body.Close() + err = resp.Body.Close() + c.Assert(err, IsNil) c.Assert(resp.StatusCode, Equals, http.StatusOK) } diff --git a/server/server_test.go b/server/server_test.go index ad165e03eabcd..1d97e510c3770 100644 --- a/server/server_test.go +++ b/server/server_test.go @@ -60,7 +60,10 @@ func TestT(t *testing.T) { } CustomVerboseFlag = true logLevel := os.Getenv("log_level") - logutil.InitZapLogger(logutil.NewLogConfig(logLevel, logutil.DefaultLogFormat, "", logutil.EmptyFileLogConfig, false)) + err := logutil.InitZapLogger(logutil.NewLogConfig(logLevel, logutil.DefaultLogFormat, "", logutil.EmptyFileLogConfig, false)) + if err != nil { + t.Fatal(err) + } TestingT(t) } @@ -123,14 +126,19 @@ func (cli *testServerClient) getDSN(overriders ...configOverrider) string { func (cli *testServerClient) runTests(c *C, overrider configOverrider, tests ...func(dbt *DBTest)) { db, err := sql.Open("mysql", cli.getDSN(overrider)) c.Assert(err, IsNil, Commentf("Error connecting")) - defer db.Close() + defer func() { + err := db.Close() + c.Assert(err, IsNil) + }() - db.Exec("DROP TABLE IF EXISTS test") + _, err = db.Exec("DROP TABLE IF EXISTS test") + c.Assert(err, IsNil) dbt := &DBTest{c, db} for _, test := range tests { test(dbt) - dbt.db.Exec("DROP TABLE IF EXISTS test") + // fixed query error + _, _ = dbt.db.Exec("DROP TABLE IF EXISTS test") } } @@ -141,7 +149,10 @@ func 
(cli *testServerClient) runTestsOnNewDB(c *C, overrider configOverrider, db }) db, err := sql.Open("mysql", dsn) c.Assert(err, IsNil, Commentf("Error connecting")) - defer db.Close() + defer func() { + err := db.Close() + c.Assert(err, IsNil) + }() _, err = db.Exec(fmt.Sprintf("DROP DATABASE IF EXISTS `%s`;", dbName)) if err != nil { @@ -163,7 +174,8 @@ func (cli *testServerClient) runTestsOnNewDB(c *C, overrider configOverrider, db dbt := &DBTest{c, db} for _, test := range tests { test(dbt) - dbt.db.Exec("DROP TABLE IF EXISTS test") + // to fix : no db selected + _, _ = dbt.db.Exec("DROP TABLE IF EXISTS test") } } @@ -241,7 +253,8 @@ func (cli *testServerClient) runTestRegression(c *C, overrider configOverrider, // Read rows = dbt.mustQuery("SELECT val FROM test") if rows.Next() { - rows.Scan(&out) + err = rows.Scan(&out) + c.Assert(err, IsNil) dbt.Check(out, IsTrue) dbt.Check(rows.Next(), IsFalse, Commentf("unexpected data")) } else { @@ -258,7 +271,8 @@ func (cli *testServerClient) runTestRegression(c *C, overrider configOverrider, // Check Update rows = dbt.mustQuery("SELECT val FROM test") if rows.Next() { - rows.Scan(&out) + err = rows.Scan(&out) + c.Assert(err, IsNil) dbt.Check(out, IsFalse) dbt.Check(rows.Next(), IsFalse, Commentf("unexpected data")) } else { @@ -873,22 +887,26 @@ func (cli *testServerClient) runTestLoadData(c *C, server *Server) { dbt.Check(bb.String, DeepEquals, "") dbt.Check(cc, DeepEquals, 1) dbt.Check(rows.Next(), IsTrue, Commentf("unexpected data")) - rows.Scan(&a, &b, &cc) + err = rows.Scan(&a, &b, &cc) + c.Assert(err, IsNil) dbt.Check(a, DeepEquals, "xxx row2_col1") dbt.Check(b, DeepEquals, "- row2_col2") dbt.Check(cc, DeepEquals, 2) dbt.Check(rows.Next(), IsTrue, Commentf("unexpected data")) - rows.Scan(&a, &b, &cc) + err = rows.Scan(&a, &b, &cc) + c.Assert(err, IsNil) dbt.Check(a, DeepEquals, "xxxy row3_col1") dbt.Check(b, DeepEquals, "- row3_col2") dbt.Check(cc, DeepEquals, 3) dbt.Check(rows.Next(), IsTrue, Commentf("unexpected data")) - rows.Scan(&a, &b, &cc) + err = rows.Scan(&a, &b, &cc) + c.Assert(err, IsNil) dbt.Check(a, DeepEquals, "xxx row4_col1") dbt.Check(b, DeepEquals, "- ") dbt.Check(cc, DeepEquals, 4) dbt.Check(rows.Next(), IsTrue, Commentf("unexpected data")) - rows.Scan(&a, &b, &cc) + err = rows.Scan(&a, &b, &cc) + c.Assert(err, IsNil) dbt.Check(a, DeepEquals, "xxx row5_col1") dbt.Check(b, DeepEquals, "- ") dbt.Check(cc, DeepEquals, 5) @@ -908,22 +926,26 @@ func (cli *testServerClient) runTestLoadData(c *C, server *Server) { dbt.Assert(affectedRows, Equals, int64(4)) rows = dbt.mustQuery("select * from test") dbt.Check(rows.Next(), IsTrue, Commentf("unexpected data")) - rows.Scan(&a, &b, &cc) + err = rows.Scan(&a, &b, &cc) + c.Assert(err, IsNil) dbt.Check(a, DeepEquals, "row1_col1") dbt.Check(b, DeepEquals, "row1_col2\t1abc") dbt.Check(cc, DeepEquals, 6) dbt.Check(rows.Next(), IsTrue, Commentf("unexpected data")) - rows.Scan(&a, &b, &cc) + err = rows.Scan(&a, &b, &cc) + c.Assert(err, IsNil) dbt.Check(a, DeepEquals, "row2_col1") dbt.Check(b, DeepEquals, "row2_col2\t") dbt.Check(cc, DeepEquals, 7) dbt.Check(rows.Next(), IsTrue, Commentf("unexpected data")) - rows.Scan(&a, &b, &cc) + err = rows.Scan(&a, &b, &cc) + c.Assert(err, IsNil) dbt.Check(a, DeepEquals, "row4_col1") dbt.Check(b, DeepEquals, "\t\t900") dbt.Check(cc, DeepEquals, 8) dbt.Check(rows.Next(), IsTrue, Commentf("unexpected data")) - rows.Scan(&a, &b, &cc) + err = rows.Scan(&a, &b, &cc) + c.Assert(err, IsNil) dbt.Check(a, DeepEquals, "row5_col1") dbt.Check(b, DeepEquals, 
"\trow5_col3") dbt.Check(cc, DeepEquals, 9) @@ -995,11 +1017,13 @@ func (cli *testServerClient) runTestLoadData(c *C, server *Server) { dbt.Check(str, DeepEquals, "abc") dbt.Check(id, DeepEquals, 123) dbt.Check(rows.Next(), IsTrue, Commentf("unexpected data")) - rows.Scan(&str, &id) + err = rows.Scan(&str, &id) + c.Assert(err, IsNil) dbt.Check(str, DeepEquals, "def") dbt.Check(id, DeepEquals, 456) dbt.Check(rows.Next(), IsTrue, Commentf("unexpected data")) - rows.Scan(&str, &id) + err = rows.Scan(&str, &id) + c.Assert(err, IsNil) dbt.Check(str, DeepEquals, "hig") dbt.Check(id, DeepEquals, 789) dbt.Check(rows.Next(), IsFalse, Commentf("unexpected data")) @@ -1045,13 +1069,15 @@ func (cli *testServerClient) runTestLoadData(c *C, server *Server) { dbt.Check(c.String, Equals, "0000-00-00") dbt.Check(d.String, Equals, "0000-00-00") dbt.Check(rows.Next(), IsTrue, Commentf("unexpected data")) - rows.Scan(&a, &b, &c, &d) + err = rows.Scan(&a, &b, &c, &d) + dbt.Check(err, IsNil) dbt.Check(a.String, Equals, "0000-00-00") dbt.Check(b.String, Equals, "0000-00-00") dbt.Check(c.String, Equals, "0000-00-00") dbt.Check(d.String, Equals, "0000-00-00") dbt.Check(rows.Next(), IsTrue, Commentf("unexpected data")) - rows.Scan(&a, &b, &c, &d) + err = rows.Scan(&a, &b, &c, &d) + dbt.Check(err, IsNil) dbt.Check(a.String, Equals, "2003-03-03") dbt.Check(b.String, Equals, "2003-03-03") dbt.Check(c.String, Equals, "2003-03-03") @@ -1095,11 +1121,13 @@ func (cli *testServerClient) runTestLoadData(c *C, server *Server) { dbt.Check(a.String, Equals, "field1") dbt.Check(b.String, Equals, "field2") dbt.Check(rows.Next(), IsTrue, Commentf("unexpected data")) - rows.Scan(&a, &b) + err = rows.Scan(&a, &b) + c.Assert(err, IsNil) dbt.Check(a.String, Equals, `a"b`) dbt.Check(b.String, Equals, `cd"ef`) dbt.Check(rows.Next(), IsTrue, Commentf("unexpected data")) - rows.Scan(&a, &b) + err = rows.Scan(&a, &b) + c.Assert(err, IsNil) dbt.Check(a.String, Equals, `a"b`) dbt.Check(b.String, Equals, `c"d"e`) dbt.Check(rows.Next(), IsFalse, Commentf("unexpected data")) @@ -1497,7 +1525,8 @@ func (cli *testServerClient) runTestAuth(c *C) { c.Assert(err, IsNil) _, err = db.Query("USE information_schema;") c.Assert(err, NotNil, Commentf("Wrong password should be failed")) - db.Close() + err = db.Close() + c.Assert(err, IsNil) // Test for loading active roles. db, err = sql.Open("mysql", cli.getDSN(func(config *mysql.Config) { @@ -1512,7 +1541,8 @@ func (cli *testServerClient) runTestAuth(c *C) { err = rows.Scan(&outA) c.Assert(err, IsNil) c.Assert(outA, Equals, "`authtest_r1`@`%`") - db.Close() + err = db.Close() + c.Assert(err, IsNil) // Test login use IP that not exists in mysql.user. cli.runTests(c, nil, func(dbt *DBTest) { @@ -1532,7 +1562,10 @@ func (cli *testServerClient) runTestIssue3662(c *C) { config.DBName = "non_existing_schema" })) c.Assert(err, IsNil) - defer db.Close() + go func() { + err := db.Close() + c.Assert(err, IsNil) + }() // According to documentation, "Open may just validate its arguments without // creating a connection to the database. To verify that the data source name @@ -1547,7 +1580,10 @@ func (cli *testServerClient) runTestIssue3680(c *C) { config.User = "non_existing_user" })) c.Assert(err, IsNil) - defer db.Close() + go func() { + err := db.Close() + c.Assert(err, IsNil) + }() // According to documentation, "Open may just validate its arguments without // creating a connection to the database. 
To verify that the data source name @@ -1591,7 +1627,10 @@ func (cli *testServerClient) runTestIssue3682(c *C) { config.DBName = "non_existing_schema" })) c.Assert(err, IsNil) - defer db.Close() + go func() { + err := db.Close() + c.Assert(err, IsNil) + }() err = db.Ping() c.Assert(err, NotNil) c.Assert(err.Error(), Equals, "Error 1045: Access denied for user 'issue3682'@'127.0.0.1' (using password: YES)") @@ -1659,7 +1698,8 @@ func (cli *testServerClient) runFailedTestMultiStatements(c *C) { var out int rows = dbt.mustQuery("SELECT value FROM test WHERE id=1;") if rows.Next() { - rows.Scan(&out) + err = rows.Scan(&out) + c.Assert(err, IsNil) c.Assert(out, Equals, 5) if rows.Next() { @@ -1683,7 +1723,8 @@ func (cli *testServerClient) runFailedTestMultiStatements(c *C) { c.Assert(count, Equals, int64(1)) rows = dbt.mustQuery("SELECT value FROM test WHERE id=1;") if rows.Next() { - rows.Scan(&out) + err = rows.Scan(&out) + c.Assert(err, IsNil) c.Assert(out, Equals, 5) if rows.Next() { @@ -1720,7 +1761,8 @@ func (cli *testServerClient) runTestMultiStatements(c *C) { var out int rows := dbt.mustQuery("SELECT value FROM test WHERE id=1;") if rows.Next() { - rows.Scan(&out) + err = rows.Scan(&out) + c.Assert(err, IsNil) c.Assert(out, Equals, 5) if rows.Next() { @@ -1771,7 +1813,10 @@ func (cli *testServerClient) runTestTLSConnection(t *C, overrider configOverride dsn := cli.getDSN(overrider) db, err := sql.Open("mysql", dsn) t.Assert(err, IsNil) - defer db.Close() + go func() { + err := db.Close() + t.Assert(err, IsNil) + }() _, err = db.Exec("USE test") if err != nil { return errors.Annotate(err, "dsn:"+dsn) @@ -1782,7 +1827,10 @@ func (cli *testServerClient) runTestTLSConnection(t *C, overrider configOverride func (cli *testServerClient) runReloadTLS(t *C, overrider configOverrider, errorNoRollback bool) error { db, err := sql.Open("mysql", cli.getDSN(overrider)) t.Assert(err, IsNil) - defer db.Close() + go func() { + err := db.Close() + t.Assert(err, IsNil) + }() sql := "alter instance reload tls" if errorNoRollback { sql += " no rollback on error" @@ -1818,7 +1866,8 @@ func (cli *testServerClient) getMetrics(t *C) []byte { t.Assert(err, IsNil) content, err := ioutil.ReadAll(resp.Body) t.Assert(err, IsNil) - resp.Body.Close() + err = resp.Body.Close() + t.Assert(err, IsNil) return content } @@ -1842,7 +1891,10 @@ func (cli *testServerClient) waitUntilServerOnline() { time.Sleep(time.Millisecond * 10) db, err := sql.Open("mysql", cli.getDSN()) if err == nil { - db.Close() + err = db.Close() + if err != nil { + panic(err) + } break } } @@ -1854,8 +1906,14 @@ func (cli *testServerClient) waitUntilServerOnline() { // fetch http status resp, err := cli.fetchStatus("/status") if err == nil { - ioutil.ReadAll(resp.Body) - resp.Body.Close() + _, err = ioutil.ReadAll(resp.Body) + if err != nil { + panic(err) + } + err = resp.Body.Close() + if err != nil { + panic(err) + } break } time.Sleep(time.Millisecond * 10) diff --git a/server/statistics_handler_test.go b/server/statistics_handler_test.go index 3391c6e756e38..fadff5106eabb 100644 --- a/server/statistics_handler_test.go +++ b/server/statistics_handler_test.go @@ -62,7 +62,10 @@ func (ds *testDumpStatsSuite) startServer(c *C) { ds.port = getPortFromTCPAddr(server.listener.Addr()) ds.statusPort = getPortFromTCPAddr(server.statusListener.Addr()) ds.server = server - go server.Run() + go func() { + err := server.Run() + c.Assert(err, IsNil) + }() ds.waitUntilServerOnline() do, err := session.GetDomain(ds.store) @@ -105,7 +108,8 @@ func (ds 
*testDumpStatsSuite) TestDumpStatsAPI(c *C) { js, err := ioutil.ReadAll(resp.Body) c.Assert(err, IsNil) - fp.Write(js) + _, err = fp.Write(js) + c.Assert(err, IsNil) ds.checkData(c, path) ds.checkCorrelation(c) @@ -137,21 +141,26 @@ func (ds *testDumpStatsSuite) TestDumpStatsAPI(c *C) { js, err = ioutil.ReadAll(resp1.Body) c.Assert(err, IsNil) - fp1.Write(js) + _, err = fp1.Write(js) + c.Assert(err, IsNil) ds.checkData(c, path1) } func (ds *testDumpStatsSuite) prepareData(c *C) { db, err := sql.Open("mysql", ds.getDSN()) c.Assert(err, IsNil, Commentf("Error connecting")) - defer db.Close() + defer func() { + err := db.Close() + c.Assert(err, IsNil) + }() dbt := &DBTest{c, db} h := ds.sh.do.StatsHandle() dbt.mustExec("create database tidb") dbt.mustExec("use tidb") dbt.mustExec("create table test (a int, b varchar(20))") - h.HandleDDLEvent(<-h.DDLEventCh()) + err = h.HandleDDLEvent(<-h.DDLEventCh()) + c.Assert(err, IsNil) dbt.mustExec("create index c on test (a, b)") dbt.mustExec("insert test values (1, 's')") c.Assert(h.DumpStatsDeltaToKV(handle.DumpAll), IsNil) @@ -165,7 +174,10 @@ func (ds *testDumpStatsSuite) prepareData(c *C) { func (ds *testDumpStatsSuite) prepare4DumpHistoryStats(c *C) { db, err := sql.Open("mysql", ds.getDSN()) c.Assert(err, IsNil, Commentf("Error connecting")) - defer db.Close() + defer func() { + err := db.Close() + c.Assert(err, IsNil) + }() dbt := &DBTest{c, db} @@ -185,13 +197,17 @@ func (ds *testDumpStatsSuite) checkCorrelation(c *C) { db, err := sql.Open("mysql", ds.getDSN()) c.Assert(err, IsNil, Commentf("Error connecting")) dbt := &DBTest{c, db} - defer db.Close() + defer func() { + err := db.Close() + c.Assert(err, IsNil) + }() dbt.mustExec("use tidb") rows := dbt.mustQuery("SELECT tidb_table_id FROM information_schema.tables WHERE table_name = 'test' AND table_schema = 'tidb'") var tableID int64 if rows.Next() { - rows.Scan(&tableID) + err = rows.Scan(&tableID) + c.Assert(err, IsNil) dbt.Check(rows.Next(), IsFalse, Commentf("unexpected data")) } else { dbt.Error("no data") @@ -200,7 +216,8 @@ func (ds *testDumpStatsSuite) checkCorrelation(c *C) { rows = dbt.mustQuery("select correlation from mysql.stats_histograms where table_id = ? 
and hist_id = 1 and is_index = 0", tableID) if rows.Next() { var corr float64 - rows.Scan(&corr) + err = rows.Scan(&corr) + c.Assert(err, IsNil) dbt.Check(corr, Equals, float64(1)) dbt.Check(rows.Next(), IsFalse, Commentf("unexpected data")) } else { @@ -216,7 +233,10 @@ func (ds *testDumpStatsSuite) checkData(c *C, path string) { })) c.Assert(err, IsNil, Commentf("Error connecting")) dbt := &DBTest{c, db} - defer db.Close() + defer func() { + err := db.Close() + c.Assert(err, IsNil) + }() dbt.mustExec("use tidb") dbt.mustExec("drop stats test") @@ -239,7 +259,10 @@ func (ds *testDumpStatsSuite) checkData(c *C, path string) { func (ds *testDumpStatsSuite) clearData(c *C, path string) { db, err := sql.Open("mysql", ds.getDSN()) c.Assert(err, IsNil, Commentf("Error connecting")) - defer db.Close() + defer func() { + err := db.Close() + c.Assert(err, IsNil) + }() dbt := &DBTest{c, db} dbt.mustExec("drop database tidb") diff --git a/server/tidb_test.go b/server/tidb_test.go index 2a727d2d30d3a..19fab0036f51f 100644 --- a/server/tidb_test.go +++ b/server/tidb_test.go @@ -98,7 +98,10 @@ func (ts *tidbTestSuiteBase) SetUpSuite(c *C) { ts.port = getPortFromTCPAddr(server.listener.Addr()) ts.statusPort = getPortFromTCPAddr(server.statusListener.Addr()) ts.server = server - go ts.server.Run() + go func() { + err := ts.server.Run() + c.Assert(err, IsNil) + }() ts.waitUntilServerOnline() } @@ -258,7 +261,10 @@ func (ts *tidbTestSuite) TestStatusAPIWithTLS(c *C) { c.Assert(err, IsNil) cli.port = getPortFromTCPAddr(server.listener.Addr()) cli.statusPort = getPortFromTCPAddr(server.statusListener.Addr()) - go server.Run() + go func() { + err := server.Run() + c.Assert(err, IsNil) + }() time.Sleep(time.Millisecond * 100) // https connection should work. @@ -307,7 +313,10 @@ func (ts *tidbTestSuite) TestStatusAPIWithTLSCNCheck(c *C) { c.Assert(err, IsNil) cli.port = getPortFromTCPAddr(server.listener.Addr()) cli.statusPort = getPortFromTCPAddr(server.statusListener.Addr()) - go server.Run() + go func() { + err := server.Run() + c.Assert(err, IsNil) + }() time.Sleep(time.Millisecond * 100) hc := newTLSHttpClient(c, caPath, @@ -358,7 +367,10 @@ func (ts *tidbTestSuite) TestSocketForwarding(c *C) { server, err := NewServer(cfg, ts.tidbdrv) c.Assert(err, IsNil) cli.port = getPortFromTCPAddr(server.listener.Addr()) - go server.Run() + go func() { + err := server.Run() + c.Assert(err, IsNil) + }() time.Sleep(time.Millisecond * 100) defer server.Close() @@ -381,7 +393,10 @@ func (ts *tidbTestSuite) TestSocket(c *C) { server, err := NewServer(cfg, ts.tidbdrv) c.Assert(err, IsNil) - go server.Run() + go func() { + err := server.Run() + c.Assert(err, IsNil) + }() time.Sleep(time.Millisecond * 100) defer server.Close() @@ -449,15 +464,27 @@ func generateCert(sn int, commonName string, parentCert *x509.Certificate, paren if err != nil { return nil, nil, errors.Trace(err) } - pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) - certOut.Close() + err = pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) + if err != nil { + return nil, nil, errors.Trace(err) + } + err = certOut.Close() + if err != nil { + return nil, nil, errors.Trace(err) + } keyOut, err := os.OpenFile(outKeyFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) if err != nil { return nil, nil, errors.Trace(err) } - pem.Encode(keyOut, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(privateKey)}) - keyOut.Close() + err = pem.Encode(keyOut, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: 
x509.MarshalPKCS1PrivateKey(privateKey)}) + if err != nil { + return nil, nil, errors.Trace(err) + } + err = keyOut.Close() + if err != nil { + return nil, nil, errors.Trace(err) + } return cert, privateKey, nil } @@ -485,8 +512,7 @@ func registerTLSConfig(configName string, caCertPath string, clientCertPath stri ServerName: serverName, InsecureSkipVerify: !verifyServer, } - mysql.RegisterTLSConfig(configName, tlsConfig) - return nil + return mysql.RegisterTLSConfig(configName, tlsConfig) } func (ts *tidbTestSuite) TestSystemTimeZone(c *C) { @@ -514,12 +540,18 @@ func (ts *tidbTestSerialSuite) TestTLS(c *C) { c.Assert(err, IsNil) defer func() { - os.Remove("/tmp/ca-key.pem") - os.Remove("/tmp/ca-cert.pem") - os.Remove("/tmp/server-key.pem") - os.Remove("/tmp/server-cert.pem") - os.Remove("/tmp/client-key.pem") - os.Remove("/tmp/client-cert.pem") + err := os.Remove("/tmp/ca-key.pem") + c.Assert(err, IsNil) + err = os.Remove("/tmp/ca-cert.pem") + c.Assert(err, IsNil) + err = os.Remove("/tmp/server-key.pem") + c.Assert(err, IsNil) + err = os.Remove("/tmp/server-cert.pem") + c.Assert(err, IsNil) + err = os.Remove("/tmp/client-key.pem") + c.Assert(err, IsNil) + err = os.Remove("/tmp/client-cert.pem") + c.Assert(err, IsNil) }() // Start the server without TLS. @@ -533,7 +565,10 @@ func (ts *tidbTestSerialSuite) TestTLS(c *C) { server, err := NewServer(cfg, ts.tidbdrv) c.Assert(err, IsNil) cli.port = getPortFromTCPAddr(server.listener.Addr()) - go server.Run() + go func() { + err := server.Run() + c.Assert(err, IsNil) + }() time.Sleep(time.Millisecond * 100) err = cli.runTestTLSConnection(c, connOverrider) // We should get ErrNoTLS. c.Assert(err, NotNil) @@ -555,7 +590,10 @@ func (ts *tidbTestSerialSuite) TestTLS(c *C) { server, err = NewServer(cfg, ts.tidbdrv) c.Assert(err, IsNil) cli.port = getPortFromTCPAddr(server.listener.Addr()) - go server.Run() + go func() { + err := server.Run() + c.Assert(err, IsNil) + }() time.Sleep(time.Millisecond * 100) err = cli.runTestTLSConnection(c, connOverrider) // We should establish connection successfully. c.Assert(err, IsNil) @@ -582,7 +620,10 @@ func (ts *tidbTestSerialSuite) TestTLS(c *C) { server, err = NewServer(cfg, ts.tidbdrv) c.Assert(err, IsNil) cli.port = getPortFromTCPAddr(server.listener.Addr()) - go server.Run() + go func() { + err := server.Run() + c.Assert(err, IsNil) + }() time.Sleep(time.Millisecond * 100) // The client does not provide a certificate, the connection should succeed. err = cli.runTestTLSConnection(c, nil) @@ -641,7 +682,10 @@ func (ts *tidbTestSerialSuite) TestReloadTLS(c *C) { server, err := NewServer(cfg, ts.tidbdrv) c.Assert(err, IsNil) cli.port = getPortFromTCPAddr(server.listener.Addr()) - go server.Run() + go func() { + err := server.Run() + c.Assert(err, IsNil) + }() time.Sleep(time.Millisecond * 100) // The client provides a valid certificate. 
 	connOverrider := func(config *mysql.Config) {
@@ -660,8 +704,10 @@ func (ts *tidbTestSerialSuite) TestReloadTLS(c *C) {
 		c.NotAfter = time.Now().Add(1 * time.Hour).UTC()
 	})
 	c.Assert(err, IsNil)
-	os.Rename("/tmp/server-key-reload2.pem", "/tmp/server-key-reload.pem")
-	os.Rename("/tmp/server-cert-reload2.pem", "/tmp/server-cert-reload.pem")
+	err = os.Rename("/tmp/server-key-reload2.pem", "/tmp/server-key-reload.pem")
+	c.Assert(err, IsNil)
+	err = os.Rename("/tmp/server-cert-reload2.pem", "/tmp/server-cert-reload.pem")
+	c.Assert(err, IsNil)
 	connOverrider = func(config *mysql.Config) {
 		config.TLSConfig = "skip-verify"
 	}
@@ -685,8 +731,10 @@ func (ts *tidbTestSerialSuite) TestReloadTLS(c *C) {
 		c.NotAfter = c.NotBefore.Add(1 * time.Hour).UTC()
 	})
 	c.Assert(err, IsNil)
-	os.Rename("/tmp/server-key-reload3.pem", "/tmp/server-key-reload.pem")
-	os.Rename("/tmp/server-cert-reload3.pem", "/tmp/server-cert-reload.pem")
+	err = os.Rename("/tmp/server-key-reload3.pem", "/tmp/server-key-reload.pem")
+	c.Assert(err, IsNil)
+	err = os.Rename("/tmp/server-cert-reload3.pem", "/tmp/server-cert-reload.pem")
+	c.Assert(err, IsNil)
 	connOverrider = func(config *mysql.Config) {
 		config.TLSConfig = "skip-verify"
 	}
@@ -745,7 +793,10 @@ func (ts *tidbTestSerialSuite) TestErrorNoRollback(c *C) {
 	server, err := NewServer(cfg, ts.tidbdrv)
 	c.Assert(err, IsNil)
 	cli.port = getPortFromTCPAddr(server.listener.Addr())
-	go server.Run()
+	go func() {
+		err := server.Run()
+		c.Assert(err, IsNil)
+	}()
 	time.Sleep(time.Millisecond * 100)
 	connOverrider := func(config *mysql.Config) {
 		config.TLSConfig = "client-cert-rollback-test"
@@ -1008,7 +1059,10 @@ func (ts *tidbTestSuite) TestGracefulShutdown(c *C) {
 	c.Assert(server, NotNil)
 	cli.port = getPortFromTCPAddr(server.listener.Addr())
 	cli.statusPort = getPortFromTCPAddr(server.statusListener.Addr())
-	go server.Run()
+	go func() {
+		err := server.Run()
+		c.Assert(err, IsNil)
+	}()
 	time.Sleep(time.Millisecond * 100)
 
 	_, err = cli.fetchStatus("/status") // server is up

From da0b6eb20c950d623f0f5d1a4a10e0d6ed613682 Mon Sep 17 00:00:00 2001
From: Jack Yu
Date: Thu, 18 Mar 2021 11:30:55 +0800
Subject: [PATCH 16/44] *: fix processlist.txnstart when tidb_snapshot is set
 (#23381)

---
 session/session.go | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/session/session.go b/session/session.go
index e37d3bcbac962..5c0f4ea71d4fd 100644
--- a/session/session.go
+++ b/session/session.go
@@ -1089,6 +1089,11 @@ func (s *session) SetProcessInfo(sql string, t time.Time, command byte, maxExecu
 	if command != mysql.ComSleep || s.GetSessionVars().InTxn() {
 		curTxnStartTS = s.sessionVars.TxnCtx.StartTS
 	}
+	// Set curTxnStartTS to SnapshotTS directly when the session is doing a historic read.
+	// This avoids the session hitting the GC-lifetime-too-short error.
+ if s.GetSessionVars().SnapshotTS != 0 { + curTxnStartTS = s.GetSessionVars().SnapshotTS + } p := s.currentPlan if explain, ok := p.(*plannercore.Explain); ok && explain.Analyze && explain.TargetPlan != nil { p = explain.TargetPlan From 1ab3b489f468ec4f85716d178acbd6dd625d1fbb Mon Sep 17 00:00:00 2001 From: cfzjywxk Date: Thu, 18 Mar 2021 12:52:55 +0800 Subject: [PATCH 17/44] txn: fix the ttlmanager and cleanup logic for 1pc and async commit (#23342) --- session/pessimistic_test.go | 32 ++++++++++++++++++++++++++++++++ store/tikv/2pc.go | 28 +++++++++++++++++++++------- store/tikv/lock_resolver.go | 3 ++- store/tikv/txn.go | 7 +------ 4 files changed, 56 insertions(+), 14 deletions(-) diff --git a/session/pessimistic_test.go b/session/pessimistic_test.go index 8cafb18621eb2..a49b01c348a3a 100644 --- a/session/pessimistic_test.go +++ b/session/pessimistic_test.go @@ -2458,3 +2458,35 @@ func (s *testPessimisticSuite) TestIssue21498(c *C) { tk.MustQuery("select * from t1").Check(testkit.Rows("5 12 100")) } } + +func (s *testPessimisticSuite) TestAsyncCommitCalTSFail(c *C) { + atomic.StoreUint64(&tikv.ManagedLockTTL, 5000) + defer func() { + atomic.StoreUint64(&tikv.ManagedLockTTL, 300) + }() + defer config.RestoreFunc()() + config.UpdateGlobal(func(conf *config.Config) { + conf.TiKVClient.AsyncCommit.SafeWindow = time.Second + conf.TiKVClient.AsyncCommit.AllowedClockDrift = 0 + }) + + tk := s.newAsyncCommitTestKitWithInit(c) + tk2 := s.newAsyncCommitTestKitWithInit(c) + + tk.MustExec("drop table if exists tk") + tk.MustExec("create table tk (c1 int primary key, c2 int)") + tk.MustExec("insert into tk values (1, 1)") + + tk.MustExec("set tidb_enable_1pc = true") + tk.MustExec("begin pessimistic") + tk.MustQuery("select * from tk for update").Check(testkit.Rows("1 1")) + c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/tikv/failCheckSchemaValid", "return"), IsNil) + c.Assert(tk.ExecToErr("commit"), NotNil) + c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/tikv/failCheckSchemaValid"), IsNil) + + // The lock should not be blocked. 
+ tk2.MustExec("set innodb_lock_wait_timeout = 5") + tk2.MustExec("begin pessimistic") + tk2.MustExec("update tk set c2 = c2 + 1") + tk2.MustExec("commit") +} diff --git a/store/tikv/2pc.go b/store/tikv/2pc.go index c2e75d436bf76..1ee8398576a2b 100644 --- a/store/tikv/2pc.go +++ b/store/tikv/2pc.go @@ -899,15 +899,21 @@ func (c *twoPhaseCommitter) cleanup(ctx context.Context) { }) cleanupKeysCtx := context.WithValue(context.Background(), TxnStartKey, ctx.Value(TxnStartKey)) - err := c.cleanupMutations(NewBackofferWithVars(cleanupKeysCtx, cleanupMaxBackoff, c.txn.vars), c.mutations) + var err error + if !c.isOnePC() { + err = c.cleanupMutations(NewBackofferWithVars(cleanupKeysCtx, cleanupMaxBackoff, c.txn.vars), c.mutations) + } else if c.isPessimistic { + err = c.pessimisticRollbackMutations(NewBackofferWithVars(cleanupKeysCtx, cleanupMaxBackoff, c.txn.vars), c.mutations) + } + if err != nil { metrics.SecondaryLockCleanupFailureCounterRollback.Inc() - logutil.Logger(ctx).Info("2PC cleanup failed", - zap.Error(err), - zap.Uint64("txnStartTS", c.startTS)) + logutil.Logger(ctx).Info("2PC cleanup failed", zap.Error(err), zap.Uint64("txnStartTS", c.startTS), + zap.Bool("isPessimistic", c.isPessimistic), zap.Bool("isOnePC", c.isOnePC())) } else { logutil.Logger(ctx).Info("2PC clean up done", - zap.Uint64("txnStartTS", c.startTS)) + zap.Uint64("txnStartTS", c.startTS), zap.Bool("isPessimistic", c.isPessimistic), + zap.Bool("isOnePC", c.isOnePC())) } c.cleanWg.Done() }() @@ -920,6 +926,9 @@ func (c *twoPhaseCommitter) execute(ctx context.Context) (err error) { if c.isOnePC() { // The error means the 1PC transaction failed. if err != nil { + if c.getUndeterminedErr() == nil { + c.cleanup(ctx) + } metrics.OnePCTxnCounterError.Inc() } else { metrics.OnePCTxnCounterOk.Inc() @@ -1162,7 +1171,6 @@ func (c *twoPhaseCommitter) execute(ctx context.Context) (err error) { zap.Uint64("startTS", c.startTS), zap.Uint64("commitTS", c.commitTS), zap.Uint64("sessionID", c.sessionID)) go func() { - defer c.ttlManager.close() failpoint.Inject("asyncCommitDoNothing", func() { failpoint.Return() }) @@ -1376,6 +1384,12 @@ func (c *twoPhaseCommitter) getCommitTS(ctx context.Context, commitDetail *execd // this transaction using the related schema changes. 
 func (c *twoPhaseCommitter) checkSchemaValid(ctx context.Context, checkTS uint64, startInfoSchema SchemaVer, tryAmend bool) (*RelatedSchemaChange, bool, error) {
+	failpoint.Inject("failCheckSchemaValid", func() {
+		logutil.Logger(ctx).Info("[failpoint] injected fail schema check",
+			zap.Uint64("txnStartTS", c.startTS))
+		err := errors.Errorf("mock check schema valid failure")
+		failpoint.Return(nil, false, err)
+	})
 	checker, ok := c.txn.us.GetOption(kv.SchemaChecker).(schemaLeaseChecker)
 	if !ok {
 		if c.sessionID > 0 {
@@ -1411,7 +1425,7 @@ func (c *twoPhaseCommitter) calculateMaxCommitTS(ctx context.Context) error {
 	currentTS := oracle.EncodeTSO(int64(time.Since(c.txn.startTime)/time.Millisecond)) + c.startTS
 	_, _, err := c.checkSchemaValid(ctx, currentTS, c.txn.txnInfoSchema, true)
 	if err != nil {
-		logutil.Logger(ctx).Error("Schema changed for async commit txn",
+		logutil.Logger(ctx).Info("Schema changed for async commit txn",
 			zap.Error(err),
 			zap.Uint64("startTS", c.startTS))
 		return errors.Trace(err)
diff --git a/store/tikv/lock_resolver.go b/store/tikv/lock_resolver.go
index 5279b74beeba7..af40548ebf970 100644
--- a/store/tikv/lock_resolver.go
+++ b/store/tikv/lock_resolver.go
@@ -171,7 +171,8 @@ func (l *Lock) String() string {
 	prettyWriteKey(buf, l.Key)
 	buf.WriteString(", primary: ")
 	prettyWriteKey(buf, l.Primary)
-	return fmt.Sprintf("%s, txnStartTS: %d, lockForUpdateTS:%d, minCommitTs:%d, ttl: %d, type: %s", buf.String(), l.TxnID, l.LockForUpdateTS, l.MinCommitTS, l.TTL, l.LockType)
+	return fmt.Sprintf("%s, txnStartTS: %d, lockForUpdateTS:%d, minCommitTs:%d, ttl: %d, type: %s, UseAsyncCommit: %t",
+		buf.String(), l.TxnID, l.LockForUpdateTS, l.MinCommitTS, l.TTL, l.LockType, l.UseAsyncCommit)
 }
 
 // NewLock creates a new *Lock.
diff --git a/store/tikv/txn.go b/store/tikv/txn.go
index d0d4b0703880b..90393147c1059 100644
--- a/store/tikv/txn.go
+++ b/store/tikv/txn.go
@@ -248,12 +248,7 @@ func (txn *KVTxn) Commit(ctx context.Context) error {
 		}
 		txn.committer = committer
 	}
-	defer func() {
-		// For async commit transactions, the ttl manager will be closed in the asynchronous commit goroutine.
-		if !committer.isAsyncCommit() {
-			committer.ttlManager.close()
-		}
-	}()
+	defer committer.ttlManager.close()
 
 	initRegion := trace.StartRegion(ctx, "InitKeys")
 	err = committer.initKeysAndMutations()

From 389bc5fe608b6e35db1c8e4f05e40809917843ab Mon Sep 17 00:00:00 2001
From: Kenan Yao
Date: Thu, 18 Mar 2021 14:03:36 +0800
Subject: [PATCH 18/44] sessionctx: hide extended stats variable in SHOW VARIABLES temporarily (#23345)

---
 sessionctx/variable/tidb_vars.go |  1 +
 statistics/handle/handle_test.go | 11 +++++++++++
 2 files changed, 12 insertions(+)

diff --git a/sessionctx/variable/tidb_vars.go b/sessionctx/variable/tidb_vars.go
index 5400eac9f944d..8acc5841bd1a6 100644
--- a/sessionctx/variable/tidb_vars.go
+++ b/sessionctx/variable/tidb_vars.go
@@ -713,6 +713,7 @@ var FeatureSwitchVariables = []string{
 	TiDBAnalyzeVersion,
 	TiDBPartitionPruneMode,
 	TiDBIntPrimaryKeyDefaultAsClustered,
+	TiDBEnableExtendedStats,
 }
 
 // FilterImplicitFeatureSwitch is used to filter the result of SHOW VARIABLES; these switches should be invisible to users.
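
The mechanism behind this one-line change: when SHOW VARIABLES builds its result, every variable named in FeatureSwitchVariables is filtered out of the rows before they reach the client, so listing TiDBEnableExtendedStats is enough to hide the switch. The Go sketch below illustrates only that filtering step; it is a minimal, hypothetical rendering, and featureSwitches and filterFeatureSwitches are illustrative names rather than the actual TiDB identifiers.

	package main

	import (
		"fmt"
		"strings"
	)

	// featureSwitches stands in for FeatureSwitchVariables: names of
	// not-yet-GA switches that SHOW VARIABLES should not reveal.
	var featureSwitches = map[string]struct{}{
		"tidb_enable_extended_stats": {},
	}

	// filterFeatureSwitches drops hidden switches from SHOW VARIABLES rows.
	func filterFeatureSwitches(rows [][2]string) [][2]string {
		kept := make([][2]string, 0, len(rows))
		for _, row := range rows {
			if _, hidden := featureSwitches[strings.ToLower(row[0])]; !hidden {
				kept = append(kept, row)
			}
		}
		return kept
	}

	func main() {
		rows := [][2]string{
			{"tidb_enable_extended_stats", "OFF"},
			{"max_execution_time", "0"},
		}
		// Prints only the max_execution_time row; the switch stays hidden.
		fmt.Println(filterFeatureSwitches(rows))
	}

The TestHideExtendedStatsSwitch case added in the next hunk asserts the same behavior from the SQL side, both for a full SHOW VARIABLES scan and for a LIKE pattern targeting the hidden name.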
diff --git a/statistics/handle/handle_test.go b/statistics/handle/handle_test.go
index 20d837c3a956c..b4a2baca5da94 100644
--- a/statistics/handle/handle_test.go
+++ b/statistics/handle/handle_test.go
@@ -2091,3 +2091,14 @@ func (s *testStatsSuite) TestExtendedStatsPartitionTable(c *C) {
 	err = tk.ExecToErr("alter table t2 add stats_extended s1 correlation(b,c)")
 	c.Assert(err.Error(), Equals, "Extended statistics on partitioned tables are not supported now")
 }
+
+func (s *testStatsSuite) TestHideExtendedStatsSwitch(c *C) {
+	// NOTICE: remove this test once the extended-stats feature reaches GA.
+	defer cleanEnv(c, s.store, s.do)
+	tk := testkit.NewTestKit(c, s.store)
+	rs := tk.MustQuery("show variables").Rows()
+	for _, r := range rs {
+		c.Assert(strings.ToLower(r[0].(string)), Not(Equals), "tidb_enable_extended_stats")
+	}
+	tk.MustQuery("show variables like 'tidb_enable_extended_stats'").Check(testkit.Rows())
+}

From 9f8888755c30a2c34514428fec0c20762ca44f3f Mon Sep 17 00:00:00 2001
From: qupeng
Date: Thu, 18 Mar 2021 15:01:36 +0800
Subject: [PATCH 19/44] gc: central GC mode is deprecated (#23389)

---
 store/gcworker/gc_worker.go      | 44 ++++++++++++--------------------
 store/gcworker/gc_worker_test.go | 26 ++++++++-----------
 2 files changed, 27 insertions(+), 43 deletions(-)

diff --git a/store/gcworker/gc_worker.go b/store/gcworker/gc_worker.go
index b85095850d183..130d4908578c2 100644
--- a/store/gcworker/gc_worker.go
+++ b/store/gcworker/gc_worker.go
@@ -636,16 +636,7 @@ func (w *GCWorker) runGCJob(ctx context.Context, safePoint uint64, concurrency i
 		return errors.Trace(err)
 	}
 
-	useDistributedGC, err := w.checkUseDistributedGC()
-	if err != nil {
-		logutil.Logger(ctx).Error("[gc worker] failed to load gc mode, fall back to central mode.",
-			zap.String("uuid", w.uuid),
-			zap.Error(err))
-		metrics.GCJobFailureCounter.WithLabelValues("check_gc_mode").Inc()
-		useDistributedGC = false
-	}
-
-	if useDistributedGC {
+	if w.checkUseDistributedGC() {
 		err = w.uploadSafePointToPD(ctx, safePoint)
 		if err != nil {
 			logutil.Logger(ctx).Error("[gc worker] failed to upload safe point to PD",
@@ -949,27 +940,24 @@ func (w *GCWorker) loadGCConcurrencyWithDefault() (int, error) {
 	return jobConcurrency, nil
 }
 
-func (w *GCWorker) checkUseDistributedGC() (bool, error) {
-	str, err := w.loadValueFromSysTable(gcModeKey)
-	if err != nil {
-		return false, errors.Trace(err)
-	}
-	if str == "" {
+// Central mode is deprecated in v5.0. This function will always return true.
+func (w *GCWorker) checkUseDistributedGC() bool { + mode, err := w.loadValueFromSysTable(gcModeKey) + if err == nil && mode == "" { err = w.saveValueToSysTable(gcModeKey, gcModeDefault) - if err != nil { - return false, errors.Trace(err) - } - str = gcModeDefault } - if strings.EqualFold(str, gcModeDistributed) { - return true, nil - } - if strings.EqualFold(str, gcModeCentral) { - return false, nil + if err != nil { + logutil.BgLogger().Error("[gc worker] failed to load gc mode, fall back to distributed mode", + zap.String("uuid", w.uuid), + zap.Error(err)) + metrics.GCJobFailureCounter.WithLabelValues("check_gc_mode").Inc() + } else if strings.EqualFold(mode, gcModeCentral) { + logutil.BgLogger().Warn("[gc worker] distributed mode will be used as central mode is deprecated") + } else if !strings.EqualFold(mode, gcModeDistributed) { + logutil.BgLogger().Warn("[gc worker] distributed mode will be used", + zap.String("invalid gc mode", mode)) } - logutil.BgLogger().Warn("[gc worker] distributed mode will be used", - zap.String("invalid gc mode", str)) - return true, nil + return true } func (w *GCWorker) checkUsePhysicalScanLock() (bool, error) { diff --git a/store/gcworker/gc_worker_test.go b/store/gcworker/gc_worker_test.go index c4006fa3f31aa..8273801102a29 100644 --- a/store/gcworker/gc_worker_test.go +++ b/store/gcworker/gc_worker_test.go @@ -487,30 +487,28 @@ func (s *testGCWorkerSuite) TestDoGC(c *C) { } func (s *testGCWorkerSuite) TestCheckGCMode(c *C) { - useDistributedGC, err := s.gcWorker.checkUseDistributedGC() - c.Assert(err, IsNil) + useDistributedGC := s.gcWorker.checkUseDistributedGC() c.Assert(useDistributedGC, Equals, true) // Now the row must be set to the default value. str, err := s.gcWorker.loadValueFromSysTable(gcModeKey) c.Assert(err, IsNil) c.Assert(str, Equals, gcModeDistributed) + // Central mode is deprecated in v5.0. err = s.gcWorker.saveValueToSysTable(gcModeKey, gcModeCentral) c.Assert(err, IsNil) - useDistributedGC, err = s.gcWorker.checkUseDistributedGC() + useDistributedGC = s.gcWorker.checkUseDistributedGC() c.Assert(err, IsNil) - c.Assert(useDistributedGC, Equals, false) + c.Assert(useDistributedGC, Equals, true) err = s.gcWorker.saveValueToSysTable(gcModeKey, gcModeDistributed) c.Assert(err, IsNil) - useDistributedGC, err = s.gcWorker.checkUseDistributedGC() - c.Assert(err, IsNil) + useDistributedGC = s.gcWorker.checkUseDistributedGC() c.Assert(useDistributedGC, Equals, true) err = s.gcWorker.saveValueToSysTable(gcModeKey, "invalid_mode") c.Assert(err, IsNil) - useDistributedGC, err = s.gcWorker.checkUseDistributedGC() - c.Assert(err, IsNil) + useDistributedGC = s.gcWorker.checkUseDistributedGC() c.Assert(useDistributedGC, Equals, true) } @@ -987,11 +985,10 @@ func (s *testGCWorkerSuite) TestRunGCJob(c *C) { gcSafePointCacheInterval = 0 // Test distributed mode - useDistributedGC, err := s.gcWorker.checkUseDistributedGC() - c.Assert(err, IsNil) + useDistributedGC := s.gcWorker.checkUseDistributedGC() c.Assert(useDistributedGC, IsTrue) safePoint := s.mustAllocTs(c) - err = s.gcWorker.runGCJob(context.Background(), safePoint, 1) + err := s.gcWorker.runGCJob(context.Background(), safePoint, 1) c.Assert(err, IsNil) pdSafePoint := s.mustGetSafePointFromPd(c) @@ -1004,12 +1001,11 @@ func (s *testGCWorkerSuite) TestRunGCJob(c *C) { err = s.gcWorker.runGCJob(context.Background(), safePoint-1, 1) c.Assert(err, NotNil) - // Test central mode + // Central mode is deprecated in v5.0, fallback to distributed mode if it's set. 
err = s.gcWorker.saveValueToSysTable(gcModeKey, gcModeCentral) c.Assert(err, IsNil) - useDistributedGC, err = s.gcWorker.checkUseDistributedGC() - c.Assert(err, IsNil) - c.Assert(useDistributedGC, IsFalse) + useDistributedGC = s.gcWorker.checkUseDistributedGC() + c.Assert(useDistributedGC, IsTrue) p := s.createGCProbe(c, "k1") safePoint = s.mustAllocTs(c) From 411dba25b165f1d9baa6ab11895788b530dd7984 Mon Sep 17 00:00:00 2001 From: tangenta Date: Thu, 18 Mar 2021 15:17:36 +0800 Subject: [PATCH 20/44] *: deprecate alter-primary-key configuration (#23270) --- cmd/explaintest/r/clustered_index.result | 22 ++- cmd/explaintest/t/clustered_index.test | 22 ++- config/config.go | 1 + config/config.toml.example | 5 - config/config_test.go | 2 - ddl/db_change_test.go | 18 +- ddl/db_integration_test.go | 32 ++-- ddl/db_test.go | 48 ++---- ddl/ddl_api.go | 73 ++++---- ddl/ddl_test.go | 2 - ddl/serial_test.go | 203 ++++++++++------------- executor/analyze_test.go | 2 +- executor/ddl_test.go | 43 +++-- executor/infoschema_reader.go | 2 +- executor/infoschema_reader_test.go | 4 +- executor/insert_test.go | 24 +-- executor/join_test.go | 6 +- executor/seqtest/seq_executor_test.go | 16 +- executor/show_test.go | 6 +- expression/integration_test.go | 5 +- infoschema/tables_test.go | 7 +- meta/autoid/errors.go | 4 +- session/clustered_index_test.go | 77 +++++---- sessionctx/binloginfo/binloginfo_test.go | 13 +- sessionctx/variable/sysvar.go | 2 +- statistics/selectivity_test.go | 5 +- util/testutil/testutil.go | 29 +--- 27 files changed, 264 insertions(+), 409 deletions(-) diff --git a/cmd/explaintest/r/clustered_index.result b/cmd/explaintest/r/clustered_index.result index 2dded83f272bd..5fc18247bdad4 100644 --- a/cmd/explaintest/r/clustered_index.result +++ b/cmd/explaintest/r/clustered_index.result @@ -3,24 +3,22 @@ create database with_cluster_index; drop database if exists wout_cluster_index; create database wout_cluster_index; use with_cluster_index; -set @@tidb_enable_clustered_index = 1; -create table tbl_0 ( col_0 decimal not null , col_1 blob(207) , col_2 text , col_3 datetime default '1986-07-01' , col_4 bigint unsigned default 1504335725690712365 , primary key idx_0 ( col_3,col_2(1),col_1(6) ) , key idx_1 ( col_3 ) , unique key idx_2 ( col_3 ) , unique key idx_3 ( col_0 ) , key idx_4 ( col_1(1),col_2(1) ) , key idx_5 ( col_2(1) ) ) ; -create table tbl_1 ( col_5 char(135) , col_6 bit(17) default 50609 not null , col_7 char(202) default 'IoQWYoGdbbgBDlxpDHQ' , col_8 char(213) , col_9 time not null , primary key idx_6 ( col_6 ) , unique key idx_7 ( col_5 ) ) ; -create table tbl_2 ( col_10 datetime default '1976-05-11' , col_11 datetime , col_12 float , col_13 double(56,29) default 18.0118 , col_14 char not null , primary key idx_8 ( col_14,col_13,col_10 ) , key idx_9 ( col_11 ) ) ; -create table tbl_3 ( col_15 tinyint default -91 not null , col_16 bit(61) default 990141831018971350 not null , col_17 double(244,22) default 3985 not null , col_18 binary(32) default 'kxMlWqvpxXNBlxoU' , col_19 text(401) , primary key idx_10 ( col_18,col_19(4) ) , key idx_11 ( col_17,col_18,col_19(2),col_15,col_16 ) , unique key idx_12 ( col_17 ) ) ; -create table tbl_4 ( col_20 double(230,16) default 8.49 not null , col_21 int unsigned not null , col_22 enum('Alice','Bob','Charlie','David') not null , col_23 float default 3066.13040283622 , col_24 datetime default '1980-10-27' not null , primary key idx_13 ( col_22,col_24 ) , key idx_14 ( col_23,col_20 ) , key idx_15 ( col_24 ) , key idx_16 ( col_20 ) , unique key 
idx_17 ( col_24 ) , key idx_18 ( col_21 ) ) ; +create table tbl_0 ( col_0 decimal not null , col_1 blob(207) , col_2 text , col_3 datetime default '1986-07-01' , col_4 bigint unsigned default 1504335725690712365 , primary key idx_0 ( col_3,col_2(1),col_1(6) ) clustered, key idx_1 ( col_3 ), unique key idx_2 ( col_3 ) , unique key idx_3 ( col_0 ) , key idx_4 ( col_1(1),col_2(1) ) , key idx_5 ( col_2(1) ) ) ; +create table tbl_1 ( col_5 char(135) , col_6 bit(17) default 50609 not null , col_7 char(202) default 'IoQWYoGdbbgBDlxpDHQ' , col_8 char(213) , col_9 time not null , primary key idx_6 ( col_6 ) clustered, unique key idx_7 ( col_5 ) ) ; +create table tbl_2 ( col_10 datetime default '1976-05-11' , col_11 datetime , col_12 float , col_13 double(56,29) default 18.0118 , col_14 char not null , primary key idx_8 ( col_14,col_13,col_10 ) clustered, key idx_9 ( col_11 ) ) ; +create table tbl_3 ( col_15 tinyint default -91 not null , col_16 bit(61) default 990141831018971350 not null , col_17 double(244,22) default 3985 not null , col_18 binary(32) default 'kxMlWqvpxXNBlxoU' , col_19 text(401) , primary key idx_10 ( col_18,col_19(4) ) clustered, key idx_11 ( col_17,col_18,col_19(2),col_15,col_16 ) , unique key idx_12 ( col_17 ) ) ; +create table tbl_4 ( col_20 double(230,16) default 8.49 not null , col_21 int unsigned not null , col_22 enum('Alice','Bob','Charlie','David') not null , col_23 float default 3066.13040283622 , col_24 datetime default '1980-10-27' not null , primary key idx_13 ( col_22,col_24 ) clustered, key idx_14 ( col_23,col_20 ) , key idx_15 ( col_24 ) , key idx_16 ( col_20 ) , unique key idx_17 ( col_24 ) , key idx_18 ( col_21 ) ) ; load stats 's/with_cluster_index_tbl_0.json'; load stats 's/with_cluster_index_tbl_1.json'; load stats 's/with_cluster_index_tbl_2.json'; load stats 's/with_cluster_index_tbl_3.json'; load stats 's/with_cluster_index_tbl_4.json'; use wout_cluster_index; -set @@tidb_enable_clustered_index = 0; -create table tbl_0 ( col_0 decimal not null , col_1 blob(207) , col_2 text , col_3 datetime default '1986-07-01' , col_4 bigint unsigned default 1504335725690712365 , primary key idx_0 ( col_3,col_2(1),col_1(6) ) , key idx_1 ( col_3 ) , unique key idx_2 ( col_3 ) , unique key idx_3 ( col_0 ) , key idx_4 ( col_1(1),col_2(1) ) , key idx_5 ( col_2(1) ) ) ; -create table tbl_1 ( col_5 char(135) , col_6 bit(17) default 50609 not null , col_7 char(202) default 'IoQWYoGdbbgBDlxpDHQ' , col_8 char(213) , col_9 time not null , primary key idx_6 ( col_6 ) , unique key idx_7 ( col_5 ) ) ; -create table tbl_2 ( col_10 datetime default '1976-05-11' , col_11 datetime , col_12 float , col_13 double(56,29) default 18.0118 , col_14 char not null , primary key idx_8 ( col_14,col_13,col_10 ) , key idx_9 ( col_11 ) ) ; -create table tbl_3 ( col_15 tinyint default -91 not null , col_16 bit(61) default 990141831018971350 not null , col_17 double(244,22) default 3985 not null , col_18 binary(32) default 'kxMlWqvpxXNBlxoU' , col_19 text(401) , primary key idx_10 ( col_18,col_19(4) ) , key idx_11 ( col_17,col_18,col_19(2),col_15,col_16 ) , unique key idx_12 ( col_17 ) ) ; -create table tbl_4 ( col_20 double(230,16) default 8.49 not null , col_21 int unsigned not null , col_22 enum('Alice','Bob','Charlie','David') not null , col_23 float default 3066.13040283622 , col_24 datetime default '1980-10-27' not null , primary key idx_13 ( col_22,col_24 ) , key idx_14 ( col_23,col_20 ) , key idx_15 ( col_24 ) , key idx_16 ( col_20 ) , unique key idx_17 ( col_24 ) , key idx_18 ( col_21 ) ) ; 
+create table tbl_0 ( col_0 decimal not null , col_1 blob(207) , col_2 text , col_3 datetime default '1986-07-01' , col_4 bigint unsigned default 1504335725690712365 , primary key idx_0 ( col_3,col_2(1),col_1(6) ) nonclustered, key idx_1 ( col_3 ) , unique key idx_2 ( col_3 ) , unique key idx_3 ( col_0 ) , key idx_4 ( col_1(1),col_2(1) ) , key idx_5 ( col_2(1) ) ) ; +create table tbl_1 ( col_5 char(135) , col_6 bit(17) default 50609 not null , col_7 char(202) default 'IoQWYoGdbbgBDlxpDHQ' , col_8 char(213) , col_9 time not null , primary key idx_6 ( col_6 ) nonclustered, unique key idx_7 ( col_5 ) ) ; +create table tbl_2 ( col_10 datetime default '1976-05-11' , col_11 datetime , col_12 float , col_13 double(56,29) default 18.0118 , col_14 char not null , primary key idx_8 ( col_14,col_13,col_10 ) nonclustered, key idx_9 ( col_11 ) ) ; +create table tbl_3 ( col_15 tinyint default -91 not null , col_16 bit(61) default 990141831018971350 not null , col_17 double(244,22) default 3985 not null , col_18 binary(32) default 'kxMlWqvpxXNBlxoU' , col_19 text(401) , primary key idx_10 ( col_18,col_19(4) ) nonclustered, key idx_11 ( col_17,col_18,col_19(2),col_15,col_16 ) , unique key idx_12 ( col_17 ) ) ; +create table tbl_4 ( col_20 double(230,16) default 8.49 not null , col_21 int unsigned not null , col_22 enum('Alice','Bob','Charlie','David') not null , col_23 float default 3066.13040283622 , col_24 datetime default '1980-10-27' not null , primary key idx_13 ( col_22,col_24 ) nonclustered, key idx_14 ( col_23,col_20 ) , key idx_15 ( col_24 ) , key idx_16 ( col_20 ) , unique key idx_17 ( col_24 ) , key idx_18 ( col_21 ) ) ; load stats 's/wout_cluster_index_tbl_0.json'; load stats 's/wout_cluster_index_tbl_1.json'; load stats 's/wout_cluster_index_tbl_2.json'; diff --git a/cmd/explaintest/t/clustered_index.test b/cmd/explaintest/t/clustered_index.test index 7c40b6ae6523d..9415781f7caf1 100644 --- a/cmd/explaintest/t/clustered_index.test +++ b/cmd/explaintest/t/clustered_index.test @@ -4,12 +4,11 @@ drop database if exists wout_cluster_index; create database wout_cluster_index; use with_cluster_index; -set @@tidb_enable_clustered_index = 1; -create table tbl_0 ( col_0 decimal not null , col_1 blob(207) , col_2 text , col_3 datetime default '1986-07-01' , col_4 bigint unsigned default 1504335725690712365 , primary key idx_0 ( col_3,col_2(1),col_1(6) ) , key idx_1 ( col_3 ) , unique key idx_2 ( col_3 ) , unique key idx_3 ( col_0 ) , key idx_4 ( col_1(1),col_2(1) ) , key idx_5 ( col_2(1) ) ) ; -create table tbl_1 ( col_5 char(135) , col_6 bit(17) default 50609 not null , col_7 char(202) default 'IoQWYoGdbbgBDlxpDHQ' , col_8 char(213) , col_9 time not null , primary key idx_6 ( col_6 ) , unique key idx_7 ( col_5 ) ) ; -create table tbl_2 ( col_10 datetime default '1976-05-11' , col_11 datetime , col_12 float , col_13 double(56,29) default 18.0118 , col_14 char not null , primary key idx_8 ( col_14,col_13,col_10 ) , key idx_9 ( col_11 ) ) ; -create table tbl_3 ( col_15 tinyint default -91 not null , col_16 bit(61) default 990141831018971350 not null , col_17 double(244,22) default 3985 not null , col_18 binary(32) default 'kxMlWqvpxXNBlxoU' , col_19 text(401) , primary key idx_10 ( col_18,col_19(4) ) , key idx_11 ( col_17,col_18,col_19(2),col_15,col_16 ) , unique key idx_12 ( col_17 ) ) ; -create table tbl_4 ( col_20 double(230,16) default 8.49 not null , col_21 int unsigned not null , col_22 enum('Alice','Bob','Charlie','David') not null , col_23 float default 3066.13040283622 , col_24 datetime default 
'1980-10-27' not null , primary key idx_13 ( col_22,col_24 ) , key idx_14 ( col_23,col_20 ) , key idx_15 ( col_24 ) , key idx_16 ( col_20 ) , unique key idx_17 ( col_24 ) , key idx_18 ( col_21 ) ) ; +create table tbl_0 ( col_0 decimal not null , col_1 blob(207) , col_2 text , col_3 datetime default '1986-07-01' , col_4 bigint unsigned default 1504335725690712365 , primary key idx_0 ( col_3,col_2(1),col_1(6) ) clustered, key idx_1 ( col_3 ), unique key idx_2 ( col_3 ) , unique key idx_3 ( col_0 ) , key idx_4 ( col_1(1),col_2(1) ) , key idx_5 ( col_2(1) ) ) ; +create table tbl_1 ( col_5 char(135) , col_6 bit(17) default 50609 not null , col_7 char(202) default 'IoQWYoGdbbgBDlxpDHQ' , col_8 char(213) , col_9 time not null , primary key idx_6 ( col_6 ) clustered, unique key idx_7 ( col_5 ) ) ; +create table tbl_2 ( col_10 datetime default '1976-05-11' , col_11 datetime , col_12 float , col_13 double(56,29) default 18.0118 , col_14 char not null , primary key idx_8 ( col_14,col_13,col_10 ) clustered, key idx_9 ( col_11 ) ) ; +create table tbl_3 ( col_15 tinyint default -91 not null , col_16 bit(61) default 990141831018971350 not null , col_17 double(244,22) default 3985 not null , col_18 binary(32) default 'kxMlWqvpxXNBlxoU' , col_19 text(401) , primary key idx_10 ( col_18,col_19(4) ) clustered, key idx_11 ( col_17,col_18,col_19(2),col_15,col_16 ) , unique key idx_12 ( col_17 ) ) ; +create table tbl_4 ( col_20 double(230,16) default 8.49 not null , col_21 int unsigned not null , col_22 enum('Alice','Bob','Charlie','David') not null , col_23 float default 3066.13040283622 , col_24 datetime default '1980-10-27' not null , primary key idx_13 ( col_22,col_24 ) clustered, key idx_14 ( col_23,col_20 ) , key idx_15 ( col_24 ) , key idx_16 ( col_20 ) , unique key idx_17 ( col_24 ) , key idx_18 ( col_21 ) ) ; load stats 's/with_cluster_index_tbl_0.json'; load stats 's/with_cluster_index_tbl_1.json'; load stats 's/with_cluster_index_tbl_2.json'; @@ -17,12 +16,11 @@ load stats 's/with_cluster_index_tbl_3.json'; load stats 's/with_cluster_index_tbl_4.json'; use wout_cluster_index; -set @@tidb_enable_clustered_index = 0; -create table tbl_0 ( col_0 decimal not null , col_1 blob(207) , col_2 text , col_3 datetime default '1986-07-01' , col_4 bigint unsigned default 1504335725690712365 , primary key idx_0 ( col_3,col_2(1),col_1(6) ) , key idx_1 ( col_3 ) , unique key idx_2 ( col_3 ) , unique key idx_3 ( col_0 ) , key idx_4 ( col_1(1),col_2(1) ) , key idx_5 ( col_2(1) ) ) ; -create table tbl_1 ( col_5 char(135) , col_6 bit(17) default 50609 not null , col_7 char(202) default 'IoQWYoGdbbgBDlxpDHQ' , col_8 char(213) , col_9 time not null , primary key idx_6 ( col_6 ) , unique key idx_7 ( col_5 ) ) ; -create table tbl_2 ( col_10 datetime default '1976-05-11' , col_11 datetime , col_12 float , col_13 double(56,29) default 18.0118 , col_14 char not null , primary key idx_8 ( col_14,col_13,col_10 ) , key idx_9 ( col_11 ) ) ; -create table tbl_3 ( col_15 tinyint default -91 not null , col_16 bit(61) default 990141831018971350 not null , col_17 double(244,22) default 3985 not null , col_18 binary(32) default 'kxMlWqvpxXNBlxoU' , col_19 text(401) , primary key idx_10 ( col_18,col_19(4) ) , key idx_11 ( col_17,col_18,col_19(2),col_15,col_16 ) , unique key idx_12 ( col_17 ) ) ; -create table tbl_4 ( col_20 double(230,16) default 8.49 not null , col_21 int unsigned not null , col_22 enum('Alice','Bob','Charlie','David') not null , col_23 float default 3066.13040283622 , col_24 datetime default '1980-10-27' not null , 
primary key idx_13 ( col_22,col_24 ) , key idx_14 ( col_23,col_20 ) , key idx_15 ( col_24 ) , key idx_16 ( col_20 ) , unique key idx_17 ( col_24 ) , key idx_18 ( col_21 ) ) ; +create table tbl_0 ( col_0 decimal not null , col_1 blob(207) , col_2 text , col_3 datetime default '1986-07-01' , col_4 bigint unsigned default 1504335725690712365 , primary key idx_0 ( col_3,col_2(1),col_1(6) ) nonclustered, key idx_1 ( col_3 ) , unique key idx_2 ( col_3 ) , unique key idx_3 ( col_0 ) , key idx_4 ( col_1(1),col_2(1) ) , key idx_5 ( col_2(1) ) ) ; +create table tbl_1 ( col_5 char(135) , col_6 bit(17) default 50609 not null , col_7 char(202) default 'IoQWYoGdbbgBDlxpDHQ' , col_8 char(213) , col_9 time not null , primary key idx_6 ( col_6 ) nonclustered, unique key idx_7 ( col_5 ) ) ; +create table tbl_2 ( col_10 datetime default '1976-05-11' , col_11 datetime , col_12 float , col_13 double(56,29) default 18.0118 , col_14 char not null , primary key idx_8 ( col_14,col_13,col_10 ) nonclustered, key idx_9 ( col_11 ) ) ; +create table tbl_3 ( col_15 tinyint default -91 not null , col_16 bit(61) default 990141831018971350 not null , col_17 double(244,22) default 3985 not null , col_18 binary(32) default 'kxMlWqvpxXNBlxoU' , col_19 text(401) , primary key idx_10 ( col_18,col_19(4) ) nonclustered, key idx_11 ( col_17,col_18,col_19(2),col_15,col_16 ) , unique key idx_12 ( col_17 ) ) ; +create table tbl_4 ( col_20 double(230,16) default 8.49 not null , col_21 int unsigned not null , col_22 enum('Alice','Bob','Charlie','David') not null , col_23 float default 3066.13040283622 , col_24 datetime default '1980-10-27' not null , primary key idx_13 ( col_22,col_24 ) nonclustered, key idx_14 ( col_23,col_20 ) , key idx_15 ( col_24 ) , key idx_16 ( col_20 ) , unique key idx_17 ( col_24 ) , key idx_18 ( col_21 ) ) ; load stats 's/wout_cluster_index_tbl_0.json'; load stats 's/wout_cluster_index_tbl_1.json'; load stats 's/wout_cluster_index_tbl_2.json'; diff --git a/config/config.go b/config/config.go index 8e1a4fe1d4214..123287b505dda 100644 --- a/config/config.go +++ b/config/config.go @@ -707,6 +707,7 @@ var deprecatedConfig = map[string]struct{}{ "experimental.allow-auto-random": {}, "enable-redact-log": {}, // use variable tidb_redact_log instead "tikv-client.copr-cache.enable": {}, + "alter-primary-key": {}, // use NONCLUSTERED keyword instead } func isAllDeprecatedConfigItems(items []string) bool { diff --git a/config/config.toml.example b/config/config.toml.example index 60e86c847f205..227836889f597 100644 --- a/config/config.toml.example +++ b/config/config.toml.example @@ -86,11 +86,6 @@ delay-clean-table-lock = 0 # Maximum number of the splitting region, which is used by the split region statement. split-region-max-num = 1000 -# alter-primary-key is used to control alter primary key feature. Default is false, indicate the alter primary key feature is disabled. -# If it is true, we can add the primary key by "alter table". However, if a table already exists before the switch is turned true and the data type of its primary key column is an integer, -# the primary key cannot be dropped. -alter-primary-key = false - # server-version is used to change the version string of TiDB in the following scenarios: # 1. the server version returned by builtin-function `VERSION()`. # 2. the server version filled in handshake packets of MySQL Connection Protocol, see https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::Handshake for more details. 
diff --git a/config/config_test.go b/config/config_test.go index 39bc1448b61a5..1a4c7762597fc 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -184,7 +184,6 @@ unrecognized-option-test = true _, err = f.WriteString(` token-limit = 0 enable-table-lock = true -alter-primary-key = true delay-clean-table-lock = 5 split-region-max-num=10000 enable-batch-dml = true @@ -243,7 +242,6 @@ spilled-file-encryption-method = "plaintext" // Test that the value will be overwritten by the config file. c.Assert(conf.Performance.TxnTotalSizeLimit, Equals, uint64(2000)) - c.Assert(conf.AlterPrimaryKey, Equals, true) c.Assert(conf.TiKVClient.CommitTimeout, Equals, "41s") c.Assert(conf.TiKVClient.AsyncCommit.KeysLimit, Equals, uint(123)) diff --git a/ddl/db_change_test.go b/ddl/db_change_test.go index fe01b8bdf3f16..99a2de6ee3ef5 100644 --- a/ddl/db_change_test.go +++ b/ddl/db_change_test.go @@ -580,16 +580,11 @@ func (s *serialTestStateChangeSuite) TestWriteReorgForModifyColumnWithPKIsHandle s.se.GetSessionVars().EnableChangeColumnType = true defer func() { s.se.GetSessionVars().EnableChangeColumnType = enableChangeColumnType - config.RestoreFunc()() }() - config.UpdateGlobal(func(conf *config.Config) { - conf.AlterPrimaryKey = false - }) - _, err := s.se.Execute(context.Background(), "use test_db_state") c.Assert(err, IsNil) - _, err = s.se.Execute(context.Background(), `create table tt (a int not null, b int default 1, c int not null default 0, unique index idx(c), primary key idx1(a), index idx2(a, c))`) + _, err = s.se.Execute(context.Background(), `create table tt (a int not null, b int default 1, c int not null default 0, unique index idx(c), primary key idx1(a) clustered, index idx2(a, c))`) c.Assert(err, IsNil) _, err = s.se.Execute(context.Background(), "insert into tt (a, c) values(-1, -11)") c.Assert(err, IsNil) @@ -935,7 +930,7 @@ func (s *testStateChangeSuiteBase) CheckResult(tk *testkit.TestKit, sql string, } func (s *testStateChangeSuite) TestShowIndex(c *C) { - _, err := s.se.Execute(context.Background(), `create table t(c1 int primary key, c2 int)`) + _, err := s.se.Execute(context.Background(), `create table t(c1 int primary key nonclustered, c2 int)`) c.Assert(err, IsNil) defer func() { _, err := s.se.Execute(context.Background(), "drop table t") @@ -1062,12 +1057,7 @@ func (s *testStateChangeSuite) TestParallelAlterModifyColumn(c *C) { s.testControlParallelExecSQL(c, sql, sql, f) } -func (s *serialTestStateChangeSuite) TestParallelAlterModifyColumnAndAddPK(c *C) { - defer config.RestoreFunc()() - config.UpdateGlobal(func(conf *config.Config) { - conf.AlterPrimaryKey = true - }) - +func (s *testStateChangeSuite) TestParallelAlterModifyColumnAndAddPK(c *C) { _, err := s.se.Execute(context.Background(), "set global tidb_enable_change_column_type = 1") c.Assert(err, IsNil) defer func() { @@ -1076,7 +1066,7 @@ func (s *serialTestStateChangeSuite) TestParallelAlterModifyColumnAndAddPK(c *C) }() domain.GetDomain(s.se).GetGlobalVarsCache().Disable() - sql1 := "ALTER TABLE t ADD PRIMARY KEY (b);" + sql1 := "ALTER TABLE t ADD PRIMARY KEY (b) NONCLUSTERED;" sql2 := "ALTER TABLE t MODIFY COLUMN b tinyint;" f := func(c *C, err1, err2 error) { c.Assert(err1, IsNil) diff --git a/ddl/db_integration_test.go b/ddl/db_integration_test.go index 8aa31771e7f8f..7e27382395dd3 100644 --- a/ddl/db_integration_test.go +++ b/ddl/db_integration_test.go @@ -127,6 +127,7 @@ type testIntegrationSuite5 struct{ *testIntegrationSuite } type testIntegrationSuite6 struct{ *testIntegrationSuite } type 
testIntegrationSuite7 struct{ *testIntegrationSuite }
 type testIntegrationSuite8 struct{ *testIntegrationSuite }
+type testIntegrationSuite9 struct{ *testIntegrationSuite }
 
 func (s *testIntegrationSuite5) TestNoZeroDateMode(c *C) {
 	tk := testkit.NewTestKit(c, s.store)
@@ -1262,7 +1263,7 @@ func (s *testIntegrationSuite3) TestMultiRegionGetTableEndHandle(c *C) {
 	tk.MustExec("create database test_get_endhandle")
 	tk.MustExec("use test_get_endhandle")
-	tk.MustExec("create table t(a bigint PRIMARY KEY, b int)")
+	tk.MustExec("create table t(a bigint PRIMARY KEY nonclustered, b int)")
 	for i := 0; i < 1000; i++ {
 		tk.MustExec(fmt.Sprintf("insert into t values(%v, %v)", i, i))
 	}
@@ -1572,7 +1573,7 @@ func (s *testIntegrationSuite3) TestAlterColumn(c *C) {
 	// The following tests whether adding constraints via change / modify column
 	// is forbidden as expected.
 	tk.MustExec("drop table if exists mc")
-	tk.MustExec("create table mc(a int key, b int, c int)")
+	tk.MustExec("create table mc(a int key nonclustered, b int, c int)")
 	_, err = tk.Exec("alter table mc modify column a int key") // Adds a new primary key
 	c.Assert(err, NotNil)
 	_, err = tk.Exec("alter table mc modify column c int unique") // Adds a new unique key
@@ -1584,7 +1585,7 @@ func (s *testIntegrationSuite3) TestAlterColumn(c *C) {
 	// Change / modify column should preserve index options.
 	tk.MustExec("drop table if exists mc")
-	tk.MustExec("create table mc(a int key, b int, c int unique)")
+	tk.MustExec("create table mc(a int key nonclustered, b int, c int unique)")
 	tk.MustExec("alter table mc modify column a bigint") // NOT NULL & PRIMARY KEY should be preserved
 	tk.MustExec("alter table mc modify column b bigint")
 	tk.MustExec("alter table mc modify column c bigint") // Unique should be preserved
@@ -1595,7 +1596,7 @@ func (s *testIntegrationSuite3) TestAlterColumn(c *C) {
 	// Dropping or keeping auto_increment is allowed, but adding it is not.
 	tk.MustExec("drop table if exists mc")
-	tk.MustExec("create table mc(a int key auto_increment, b int)")
+	tk.MustExec("create table mc(a int key nonclustered auto_increment, b int)")
 	tk.MustExec("alter table mc modify column a bigint auto_increment") // Keeps auto_increment
 	result = tk.MustQuery("show create table mc")
 	createSQL = result.Rows()[0][1]
@@ -2228,19 +2229,18 @@ func (s *testIntegrationSuite7) TestCreateExpressionIndexError(c *C) {
 	defer config.RestoreFunc()()
 	config.UpdateGlobal(func(conf *config.Config) {
 		conf.Experimental.AllowsExpressionIndex = true
-		conf.AlterPrimaryKey = true
 	})
 	tk := testkit.NewTestKit(c, s.store)
 	tk.MustExec("use test")
 	tk.MustExec("drop table if exists t;")
 	tk.MustExec("create table t (a int, b real);")
-	tk.MustGetErrCode("alter table t add primary key ((a+b));", errno.ErrFunctionalIndexPrimaryKey)
+	tk.MustGetErrCode("alter table t add primary key ((a+b)) nonclustered;", errno.ErrFunctionalIndexPrimaryKey)
 
 	// Test for error
 	tk.MustExec("drop table if exists t;")
 	tk.MustExec("create table t (a int, b real);")
-	tk.MustGetErrCode("alter table t add primary key ((a+b));", errno.ErrFunctionalIndexPrimaryKey)
+	tk.MustGetErrCode("alter table t add primary key ((a+b)) nonclustered;", errno.ErrFunctionalIndexPrimaryKey)
 	tk.MustGetErrCode("alter table t add index ((rand()));", errno.ErrFunctionalIndexFunctionIsNotAllowed)
 	tk.MustGetErrCode("alter table t add index ((now()+1));", errno.ErrFunctionalIndexFunctionIsNotAllowed)
 
@@ -2265,7 +2265,7 @@ func (s *testIntegrationSuite7) TestCreateExpressionIndexError(c *C) {
 	tk.MustExec("create table t (j json, key k ((j+1),(j+1)))")
 
 	tk.MustGetErrCode("create table t1 (col1 int, index ((concat(''))));", errno.ErrWrongKeyColumnFunctionalIndex)
-	tk.MustGetErrCode("CREATE TABLE t1 (col1 INT, PRIMARY KEY ((ABS(col1))));", errno.ErrFunctionalIndexPrimaryKey)
+	tk.MustGetErrCode("CREATE TABLE t1 (col1 INT, PRIMARY KEY ((ABS(col1))) NONCLUSTERED);", errno.ErrFunctionalIndexPrimaryKey)
 }
 
 func (s *testIntegrationSuite7) TestAddExpressionIndexOnPartition(c *C) {
@@ -2307,7 +2307,7 @@ func (s *testIntegrationSuite3) TestCreateTableWithAutoIdCache(c *C) {
 	tk.MustExec("drop table if exists t1;")
 
 	// Test primary key is handle.
-	tk.MustExec("create table t(a int auto_increment key) auto_id_cache 100")
+	tk.MustExec("create table t(a int auto_increment key clustered) auto_id_cache 100")
 	tblInfo, err := s.dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t"))
 	c.Assert(err, IsNil)
 	c.Assert(tblInfo.Meta().AutoIdCache, Equals, int64(100))
@@ -2371,13 +2371,15 @@ func (s *testIntegrationSuite3) TestCreateTableWithAutoIdCache(c *C) {
 	tk.MustExec("drop table if exists t;")
 	tk.MustExec("drop table if exists t1;")
-	tk.MustExec("create table t(a int auto_increment key) auto_id_cache 3")
+	tk.MustExec("create table t(a int auto_increment key clustered) auto_id_cache 3")
 	tblInfo, err = s.dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t"))
 	c.Assert(err, IsNil)
 	c.Assert(tblInfo.Meta().AutoIdCache, Equals, int64(3))
 
 	// Test insert batch size (4 here) greater than the customized autoid step (3 here).
-	tk.MustExec("insert into t(a) values(NULL),(NULL),(NULL),(NULL)")
+	tk.MustExec("insert into t(a) values(NULL),(NULL),(NULL)")
+	// Cache 3 more values. We can't merge these two lines because the batch allocation overrides the autoid step.
+ tk.MustExec("insert into t(a) values(NULL)") tk.MustQuery("select a from t").Check(testkit.Rows("1", "2", "3", "4")) tk.MustExec("delete from t") @@ -2522,17 +2524,12 @@ func (s *testIntegrationSuite5) TestDropLastVisibleColumns(c *C) { func (s *testIntegrationSuite7) TestAutoIncrementTableOption(c *C) { tk := testkit.NewTestKit(c, s.store) - defer config.RestoreFunc()() - config.UpdateGlobal(func(conf *config.Config) { - // Make sure the integer primary key is the handle(PkIsHandle). - conf.AlterPrimaryKey = false - }) tk.MustExec("drop database if exists test_auto_inc_table_opt;") tk.MustExec("create database test_auto_inc_table_opt;") tk.MustExec("use test_auto_inc_table_opt;") // Empty auto_inc allocator should not cause error. - tk.MustExec("create table t (a bigint primary key) auto_increment = 10;") + tk.MustExec("create table t (a bigint primary key clustered) auto_increment = 10;") tk.MustExec("alter table t auto_increment = 10;") tk.MustExec("alter table t auto_increment = 12345678901234567890;") @@ -2640,7 +2637,6 @@ func (s *testIntegrationSuite7) TestDuplicateErrorMessage(c *C) { restoreConfig := config.RestoreFunc() config.UpdateGlobal(func(conf *config.Config) { conf.EnableGlobalIndex = globalIndex - conf.AlterPrimaryKey = false }) for _, clusteredIndex := range []bool{false, true} { tk.Se.GetSessionVars().EnableClusteredIndex = clusteredIndex diff --git a/ddl/db_test.go b/ddl/db_test.go index 1f83765a48b47..bf9686e14423e 100644 --- a/ddl/db_test.go +++ b/ddl/db_test.go @@ -146,7 +146,7 @@ type testDBSuite6 struct{ *testDBSuite } type testDBSuite7 struct{ *testDBSuite } type testSerialDBSuite struct{ *testDBSuite } -func testAddIndexWithPK(tk *testkit.TestKit, s *testSerialDBSuite, c *C) { +func testAddIndexWithPK(tk *testkit.TestKit) { tk.MustExec("drop table if exists test_add_index_with_pk") tk.MustExec("create table test_add_index_with_pk(a int not null, b int not null default '0', primary key(a))") tk.MustExec("insert into test_add_index_with_pk values(1, 2)") @@ -173,17 +173,13 @@ func testAddIndexWithPK(tk *testkit.TestKit, s *testSerialDBSuite, c *C) { tk.MustExec("create index idx on t (a, b);") } -func (s *testSerialDBSuite) TestAddIndexWithPK(c *C) { +func (s *testDBSuite7) TestAddIndexWithPK(c *C) { tk := testkit.NewTestKit(c, s.store) tk.MustExec("use " + s.schemaName) - defer config.RestoreFunc()() - config.UpdateGlobal(func(conf *config.Config) { - conf.AlterPrimaryKey = false - }) - testAddIndexWithPK(tk, s, c) + testAddIndexWithPK(tk) tk.Se.GetSessionVars().EnableClusteredIndex = true - testAddIndexWithPK(tk, s, c) + testAddIndexWithPK(tk) } func (s *testDBSuite5) TestAddIndexWithDupIndex(c *C) { @@ -1140,11 +1136,7 @@ func (s *testDBSuite4) TestAddIndex4(c *C) { partition p4 values less than maxvalue)`, "") } -func (s *testSerialDBSuite) TestAddIndex5(c *C) { - defer config.RestoreFunc()() - config.UpdateGlobal(func(conf *config.Config) { - conf.AlterPrimaryKey = false - }) +func (s *testDBSuite5) TestAddIndex5(c *C) { testAddIndex(c, s.store, s.lease, testClusteredIndex, `create table test_add_index (c1 bigint, c2 bigint, c3 bigint, primary key(c2, c3))`, "") } @@ -1414,7 +1406,7 @@ func (s *testDBSuite1) TestCancelAddTableAndDropTablePartition(c *C) { func (s *testDBSuite1) TestDropPrimaryKey(c *C) { idxName := "primary" - createSQL := "create table test_drop_index (c1 int, c2 int, c3 int, unique key(c1), primary key(c3))" + createSQL := "create table test_drop_index (c1 int, c2 int, c3 int, unique key(c1), primary key(c3) nonclustered)" dropIdxSQL := 
"alter table test_drop_index drop primary key;" testDropIndex(c, s.store, s.lease, createSQL, dropIdxSQL, idxName) } @@ -1435,7 +1427,7 @@ func testDropIndex(c *C, store kv.Storage, lease time.Duration, createSQL, dropI tk.MustExec("delete from test_drop_index") num := 100 - // add some rows + // add some rows for i := 0; i < num; i++ { tk.MustExec("insert into test_drop_index values (?, ?, ?)", i, i, i) } @@ -1953,7 +1945,6 @@ func (s *testSerialDBSuite) TestAddGlobalIndex(c *C) { defer config.RestoreFunc()() config.UpdateGlobal(func(conf *config.Config) { conf.EnableGlobalIndex = true - conf.AlterPrimaryKey = true }) tk := testkit.NewTestKit(c, s.store) tk.MustExec("use test_db") @@ -1994,7 +1985,7 @@ func (s *testSerialDBSuite) TestAddGlobalIndex(c *C) { " (partition p0 values less than (10), " + " partition p1 values less than (maxvalue));") tk.MustExec("insert test_t2 values (1, 1)") - tk.MustExec("alter table test_t2 add primary key (a);") + tk.MustExec("alter table test_t2 add primary key (a) nonclustered;") tk.MustExec("insert test_t2 values (2, 11)") t = s.testGetTable(c, "test_t2") tblInfo = t.Meta() @@ -2432,6 +2423,7 @@ func (s *testDBSuite7) TestSelectInViewFromAnotherDB(c *C) { _, _ = s.s.Execute(context.Background(), "create database test_db2") tk := testkit.NewTestKit(c, s.store) tk.MustExec("use " + s.schemaName) + tk.MustExec("drop table if exists t;") tk.MustExec("create table t(a int)") tk.MustExec("use test_db2") tk.MustExec("create sql security invoker view v as select * from " + s.schemaName + ".t") @@ -2672,16 +2664,12 @@ func (s *testSerialDBSuite) TestRepairTable(c *C) { defer func() { c.Assert(failpoint.Disable("github.com/pingcap/tidb/infoschema/repairFetchCreateTable"), IsNil) }() - defer config.RestoreFunc()() - config.UpdateGlobal(func(conf *config.Config) { - conf.AlterPrimaryKey = true - }) tk := testkit.NewTestKit(c, s.store) tk.MustExec("use test") tk.MustExec("drop table if exists t, other_table, origin") // Test repair table when TiDB is not in repair mode. - tk.MustExec("CREATE TABLE t (a int primary key, b varchar(10));") + tk.MustExec("CREATE TABLE t (a int primary key nonclustered, b varchar(10));") _, err := tk.Exec("admin repair table t CREATE TABLE t (a float primary key, b varchar(5));") c.Assert(err, NotNil) c.Assert(err.Error(), Equals, "[ddl:8215]Failed to repair table: TiDB is not in REPAIR MODE") @@ -2757,7 +2745,7 @@ func (s *testSerialDBSuite) TestRepairTable(c *C) { turnRepairModeAndInit(true) defer turnRepairModeAndInit(false) // Domain reload the tableInfo and add it into repairInfo. - tk.MustExec("CREATE TABLE origin (a int primary key auto_increment, b varchar(10), c int);") + tk.MustExec("CREATE TABLE origin (a int primary key nonclustered auto_increment, b varchar(10), c int);") // Repaired tableInfo has been filtered by `domain.InfoSchema()`, so get it in repairInfo. originTableInfo, _ := domainutil.RepairInfo.GetRepairedTableInfoByTableName("test", "origin") @@ -2788,7 +2776,7 @@ func (s *testSerialDBSuite) TestRepairTable(c *C) { s.dom.DDL().(ddl.DDLForTest).SetHook(hook) // Exec the repair statement to override the tableInfo. - tk.MustExec("admin repair table origin CREATE TABLE origin (a int primary key auto_increment, b varchar(5), c int);") + tk.MustExec("admin repair table origin CREATE TABLE origin (a int primary key nonclustered auto_increment, b varchar(5), c int);") c.Assert(repairErr, IsNil) // Check the repaired tableInfo is exactly the same with old one in tableID, indexID, colID. 
@@ -4501,7 +4489,7 @@ func (s *testDBSuite4) TestIfExists(c *C) { tk.MustQuery("show warnings").Check(testutil.RowsWithSep("|", "Note|1507|Error in list of partitions to p1")) } -func testAddIndexForGeneratedColumn(tk *testkit.TestKit, s *testSerialDBSuite, c *C) { +func testAddIndexForGeneratedColumn(tk *testkit.TestKit, s *testDBSuite5, c *C) { tk.MustExec("use test_db") tk.MustExec("drop table if exists t") tk.MustExec("create table t(y year NOT NULL DEFAULT '2155')") @@ -4541,15 +4529,9 @@ func testAddIndexForGeneratedColumn(tk *testkit.TestKit, s *testSerialDBSuite, c tk.MustQuery("select id1 from gcai_table use index(idx1)").Check(testkit.Rows("6")) tk.MustExec("admin check table gcai_table") } -func (s *testSerialDBSuite) TestAddIndexForGeneratedColumn(c *C) { - tk := testkit.NewTestKit(c, s.store) - defer config.RestoreFunc()() - config.UpdateGlobal(func(conf *config.Config) { - conf.AlterPrimaryKey = false - }) - testAddIndexForGeneratedColumn(tk, s, c) - tk.Se.GetSessionVars().EnableClusteredIndex = true +func (s *testDBSuite5) TestAddIndexForGeneratedColumn(c *C) { + tk := testkit.NewTestKit(c, s.store) testAddIndexForGeneratedColumn(tk, s, c) } diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go index 1bb5700dfdfe9..57a8f203b6575 100644 --- a/ddl/ddl_api.go +++ b/ddl/ddl_api.go @@ -39,6 +39,7 @@ import ( field_types "github.com/pingcap/parser/types" "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/ddl/placement" + "github.com/pingcap/tidb/errno" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/kv" @@ -54,6 +55,7 @@ import ( "github.com/pingcap/tidb/util/chunk" "github.com/pingcap/tidb/util/codec" "github.com/pingcap/tidb/util/collate" + "github.com/pingcap/tidb/util/dbterror" "github.com/pingcap/tidb/util/domainutil" "github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tidb/util/mock" @@ -1405,6 +1407,12 @@ func buildTableInfo( tbInfo.Columns = append(tbInfo.Columns, hiddenCol) tblColumns = append(tblColumns, table.ToColumn(hiddenCol)) } + // Check clustered on non-primary key. 
+ if constr.Option != nil && constr.Option.PrimaryKeyTp != model.PrimaryKeyTypeDefault && + constr.Tp != ast.ConstraintPrimaryKey { + msg := mysql.Message("CLUSTERED/NONCLUSTERED keyword is only supported for primary key", nil) + return nil, dbterror.ClassDDL.NewStdErr(errno.ErrUnsupportedDDLOperation, msg) + } if constr.Tp == ast.ConstraintForeignKey { for _, fk := range tbInfo.ForeignKeys { if fk.Name.L == strings.ToLower(constr.Name) { @@ -1425,41 +1433,18 @@ func buildTableInfo( if err != nil { return nil, err } - pkTp := model.PrimaryKeyTypeDefault - if constr.Option != nil { - pkTp = constr.Option.PrimaryKeyTp - } - noBinlog := ctx.GetSessionVars().BinlogClient == nil - switch pkTp { - case model.PrimaryKeyTypeNonClustered: - break - case model.PrimaryKeyTypeClustered: - if isSingleIntPK(constr, lastCol) { + isSingleIntPK := isSingleIntPK(constr, lastCol) + if ShouldBuildClusteredIndex(ctx, constr.Option, isSingleIntPK) { + if isSingleIntPK { tbInfo.PKIsHandle = true } else { - tbInfo.IsCommonHandle = noBinlog - if tbInfo.IsCommonHandle { - tbInfo.CommonHandleVersion = 1 - } - if !noBinlog { - errMsg := "cannot build clustered index table because the binlog is ON" - ctx.GetSessionVars().StmtCtx.AppendWarning(errors.Errorf(errMsg)) - } - } - case model.PrimaryKeyTypeDefault: - // (AlterPrimaryKey = true) ----> all pk must be nonclustered - // (AlterPrimaryKey = false) + (EnableClusteredIndex = true) + noBinlog ---> all pk must be clustered - // (AlterPrimaryKey = false) + (EnableClusteredIndex = true) + HasBinlog ---> int pk must be clustered, other must be nonclustered - // (AlterPrimaryKey = false) + (EnableClusteredIndex = false) + (IntPrimaryKeyDefaultAsClustered = true) --> int pk must be clustered - // (AlterPrimaryKey = false) + (EnableClusteredIndex = false) + (IntPrimaryKeyDefaultAsClustered = false) --> all pk must be nonclustered [Default] - alterPKConf := config.GetGlobalConfig().AlterPrimaryKey - if isSingleIntPK(constr, lastCol) { - tbInfo.PKIsHandle = !alterPKConf && (ctx.GetSessionVars().EnableClusteredIndex || ctx.GetSessionVars().IntPrimaryKeyDefaultAsClustered) - } else { - tbInfo.IsCommonHandle = !alterPKConf && ctx.GetSessionVars().EnableClusteredIndex && noBinlog - if tbInfo.IsCommonHandle { - tbInfo.CommonHandleVersion = 1 + hasBinlog := ctx.GetSessionVars().BinlogClient != nil + if hasBinlog { + msg := mysql.Message("Cannot create clustered index table when the binlog is ON", nil) + return nil, dbterror.ClassDDL.NewStdErr(errno.ErrUnsupportedDDLOperation, msg) } + tbInfo.IsCommonHandle = true + tbInfo.CommonHandleVersion = 1 } } if tbInfo.PKIsHandle || tbInfo.IsCommonHandle { @@ -1535,6 +1520,15 @@ func isSingleIntPK(constr *ast.Constraint, lastCol *model.ColumnInfo) bool { return false } +// ShouldBuildClusteredIndex is used to determine whether the CREATE TABLE statement should build a clustered index table. +func ShouldBuildClusteredIndex(ctx sessionctx.Context, opt *ast.IndexOption, isSingleIntPK bool) bool { + if opt == nil || opt.PrimaryKeyTp == model.PrimaryKeyTypeDefault { + return ctx.GetSessionVars().EnableClusteredIndex || + (isSingleIntPK && ctx.GetSessionVars().IntPrimaryKeyDefaultAsClustered) + } + return opt.PrimaryKeyTp == model.PrimaryKeyTypeClustered +} + // checkTableInfoValidExtra is like checkTableInfoValid, but also assumes the // table info comes from untrusted source and performs further checks such as // name length and column count. 
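The removed comment block above encoded a four-way matrix over AlterPrimaryKey, EnableClusteredIndex, IntPrimaryKeyDefaultAsClustered, and the binlog state; with alter-primary-key gone, ShouldBuildClusteredIndex reduces it to two rules: an explicit CLUSTERED/NONCLUSTERED keyword always wins, otherwise the session variables decide. A standalone restatement for readability (local types only; this is a sketch mirroring the function above, not code from the patch):

    // pkKeyword mirrors model.PrimaryKeyType for illustration.
    type pkKeyword int

    const (
    	pkDefault pkKeyword = iota // no CLUSTERED/NONCLUSTERED keyword given
    	pkClustered
    	pkNonClustered
    )

    func shouldCluster(kw pkKeyword, enableClusteredIndex, intPKDefaultAsClustered, isSingleIntPK bool) bool {
    	if kw == pkDefault {
    		// With alter-primary-key removed, only the two session
    		// variables decide the default.
    		return enableClusteredIndex || (isSingleIntPK && intPKDefaultAsClustered)
    	}
    	// An explicit keyword always wins.
    	return kw == pkClustered
    }

Note the behavioral change visible in the same hunk: when binlog is ON, creating a common-handle clustered table is now a hard ddl:8200 error instead of a silent downgrade to nonclustered with a warning.
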
@@ -2176,7 +2170,7 @@ func handleTableOptions(options []*ast.TableOption, tbInfo *model.TableInfo) err case ast.TableOptionCompression: tbInfo.Compression = op.StrValue case ast.TableOptionShardRowID: - if op.UintValue > 0 && tbInfo.PKIsHandle { + if op.UintValue > 0 && (tbInfo.PKIsHandle || tbInfo.IsCommonHandle) { return errUnsupportedShardRowIDBits } tbInfo.ShardRowIDBits = op.UintValue @@ -2604,7 +2598,7 @@ func (d *ddl) ShardRowID(ctx sessionctx.Context, tableIdent ast.Ident, uVal uint // Nothing need to do. return nil } - if uVal > 0 && t.Meta().PKIsHandle { + if uVal > 0 && (t.Meta().PKIsHandle || t.Meta().IsCommonHandle) { return errUnsupportedShardRowIDBits } err = verifyNoOverflowShardBits(d.sessPool, t, uVal) @@ -4759,11 +4753,10 @@ func getAnonymousIndex(t table.Table, colName model.CIStr, idxName model.CIStr) func (d *ddl) CreatePrimaryKey(ctx sessionctx.Context, ti ast.Ident, indexName model.CIStr, indexPartSpecifications []*ast.IndexPartSpecification, indexOption *ast.IndexOption) error { - if !config.GetGlobalConfig().AlterPrimaryKey { - return ErrUnsupportedModifyPrimaryKey.GenWithStack("Unsupported add primary key, alter-primary-key is false. " + - "Please check the documentation for the tidb-server configuration files") + if indexOption != nil && indexOption.PrimaryKeyTp == model.PrimaryKeyTypeClustered { + return ErrUnsupportedModifyPrimaryKey.GenWithStack("Adding clustered primary key is not supported. " + + "Please consider adding NONCLUSTERED primary key instead") } - schema, t, err := d.getSchemaAndTableByIdent(ctx, ti) if err != nil { return errors.Trace(err) @@ -5173,10 +5166,6 @@ func (d *ddl) DropIndex(ctx sessionctx.Context, ti ast.Ident, indexName model.CI isPK = true } if isPK { - if !config.GetGlobalConfig().AlterPrimaryKey { - return ErrUnsupportedModifyPrimaryKey.GenWithStack("Unsupported drop primary key when alter-primary-key is false") - - } // If the table's PKIsHandle is true, we can't find the index from the table. So we check the value of PKIsHandle. if indexInfo == nil && !t.Meta().PKIsHandle { return ErrCantDropFieldOrKey.GenWithStack("Can't DROP 'PRIMARY'; check that column/key exists") diff --git a/ddl/ddl_test.go b/ddl/ddl_test.go index a2bca47ee382e..90a8a091fe3c8 100644 --- a/ddl/ddl_test.go +++ b/ddl/ddl_test.go @@ -79,8 +79,6 @@ func TestT(t *testing.T) { // Test for table lock. conf.EnableTableLock = true conf.Log.SlowThreshold = 10000 - // Test for add/drop primary key. - conf.AlterPrimaryKey = true conf.TiKVClient.AsyncCommit.SafeWindow = 0 conf.TiKVClient.AsyncCommit.AllowedClockDrift = 0 }) diff --git a/ddl/serial_test.go b/ddl/serial_test.go index e5d84c90776a9..43b4773cadc85 100644 --- a/ddl/serial_test.go +++ b/ddl/serial_test.go @@ -53,6 +53,9 @@ import ( // Make it serial because config is modified in test cases. var _ = SerialSuites(&testSerialSuite{}) +// TODO(tangenta): Move all the parallel tests out of this file. +var _ = Suite(&testIntegrationSuite9{&testIntegrationSuite{}}) + type testSerialSuite struct { CommonHandleSuite store kv.Storage @@ -64,8 +67,7 @@ func (s *testSerialSuite) SetUpSuite(c *C) { session.SetSchemaLease(200 * time.Millisecond) session.DisableStats4Test() config.UpdateGlobal(func(conf *config.Config) { - // Test for add/drop primary key. - conf.AlterPrimaryKey = false + // Update config here. 
}) ddl.SetWaitTimeWhenErrorOccurred(1 * time.Microsecond) @@ -110,81 +112,72 @@ func (s *testSerialSuite) TestChangeMaxIndexLength(c *C) { tk.MustExec("drop table t, t1") } -func (s *testSerialSuite) TestPrimaryKey(c *C) { +func (s *testIntegrationSuite9) TestPrimaryKey(c *C) { tk := testkit.NewTestKit(c, s.store) - tk.MustExec("use test") + tk.MustExec("drop database if exists test_primary_key;") + tk.MustExec("create database test_primary_key;") + tk.MustExec("use test_primary_key;") tk.Se.GetSessionVars().EnableClusteredIndex = false - tk.MustExec("create table primary_key_test (a int, b varchar(10))") - tk.MustExec("create table primary_key_test_1 (a int, b varchar(10), primary key(a))") - _, err := tk.Exec("alter table primary_key_test add primary key(a)") - c.Assert(ddl.ErrUnsupportedModifyPrimaryKey.Equal(err), IsTrue) - _, err = tk.Exec("alter table primary_key_test drop primary key") - c.Assert(err.Error(), Equals, "[ddl:8200]Unsupported drop primary key when alter-primary-key is false") - // for "drop index `primary` on ..." syntax - _, err = tk.Exec("drop index `primary` on primary_key_test") - c.Assert(err.Error(), Equals, "[ddl:8200]Unsupported drop primary key when alter-primary-key is false") - _, err = tk.Exec("drop index `primary` on primary_key_test_1") - c.Assert(err.Error(), Equals, "[ddl:8200]Unsupported drop primary key when alter-primary-key is false") - - // Change the value of AlterPrimaryKey. - tk.MustExec("create table primary_key_test1 (a int, b varchar(10), primary key(a))") - tk.MustExec("create table primary_key_test2 (a int, b varchar(10), primary key(b))") - tk.MustExec("create table primary_key_test3 (a int, b varchar(10))") - defer config.RestoreFunc()() - config.UpdateGlobal(func(conf *config.Config) { - conf.AlterPrimaryKey = true - }) - - _, err = tk.Exec("alter table primary_key_test2 add primary key(a)") - c.Assert(infoschema.ErrMultiplePriKey.Equal(err), IsTrue) - // We can't add a primary key when the table's pk_is_handle is true. - _, err = tk.Exec("alter table primary_key_test1 add primary key(a)") - c.Assert(infoschema.ErrMultiplePriKey.Equal(err), IsTrue) - _, err = tk.Exec("alter table primary_key_test1 add primary key(b)") - c.Assert(infoschema.ErrMultiplePriKey.Equal(err), IsTrue) - - _, err = tk.Exec("alter table primary_key_test1 drop primary key") - c.Assert(err.Error(), Equals, "[ddl:8200]Unsupported drop primary key when the table's pkIsHandle is true") - tk.MustExec("alter table primary_key_test2 drop primary key") - _, err = tk.Exec("alter table primary_key_test3 drop primary key") - c.Assert(err.Error(), Equals, "[ddl:1091]Can't DROP 'PRIMARY'; check that column/key exists") - - // for "drop index `primary` on ..." syntax - tk.MustExec("create table primary_key_test4 (a int, b varchar(10), primary key(a))") - config.UpdateGlobal(func(conf *config.Config) { - conf.AlterPrimaryKey = false - }) - _, err = tk.Exec("drop index `primary` on primary_key_test4") - c.Assert(err.Error(), Equals, "[ddl:8200]Unsupported drop primary key when alter-primary-key is false") - // for the index name is `primary` - tk.MustExec("create table tt(`primary` int);") - tk.MustExec("alter table tt add index (`primary`);") - _, err = tk.Exec("drop index `primary` on tt") - c.Assert(err.Error(), Equals, "[ddl:8200]Unsupported drop primary key when alter-primary-key is false") + // Test add/drop primary key on a plain table. 
+ tk.MustExec("drop table if exists t;") + tk.MustExec("create table t (a int, b varchar(10));") + tk.MustGetErrCode("alter table t add primary key(a) clustered;", errno.ErrUnsupportedDDLOperation) + tk.MustExec("alter table t add primary key(a) nonclustered;") + tk.MustExec("alter table t drop primary key;") + tk.MustExec("alter table t add primary key(a) nonclustered;") + tk.MustExec("drop index `primary` on t;") + tk.MustExec("alter table t add primary key(a);") // implicit nonclustered + tk.MustExec("drop index `primary` on t;") + tk.MustGetErrCode("drop index `primary` on t;", errno.ErrCantDropFieldOrKey) + + // Test add/drop primary key on a PKIsHandle table. + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t (a int, b varchar(10), primary key(a) clustered);") + tk.MustGetErrCode("alter table t drop primary key;", errno.ErrUnsupportedDDLOperation) + tk.MustGetErrCode("alter table t add primary key(a) clustered;", errno.ErrUnsupportedDDLOperation) + tk.MustGetErrCode("alter table t add primary key(a) nonclustered;", mysql.ErrMultiplePriKey) + tk.MustGetErrCode("alter table t add primary key(a);", errno.ErrMultiplePriKey) // implicit nonclustered + tk.MustGetErrCode("alter table t add primary key(b) clustered;", errno.ErrUnsupportedDDLOperation) + tk.MustGetErrCode("alter table t add primary key(b) nonclustered;", errno.ErrMultiplePriKey) + tk.MustGetErrCode("alter table t add primary key(b);", errno.ErrMultiplePriKey) // implicit nonclustered + + // Test add/drop primary key on a nonclustered primary key table. + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t (a int, b varchar(10), primary key(a) nonclustered);") + tk.MustGetErrCode("alter table t add primary key(a) clustered;", errno.ErrUnsupportedDDLOperation) + tk.MustGetErrCode("alter table t add primary key(a) nonclustered;", errno.ErrMultiplePriKey) + tk.MustGetErrCode("alter table t add primary key(a);", errno.ErrMultiplePriKey) // implicit nonclustered + tk.MustGetErrCode("alter table t add primary key(b) clustered;", errno.ErrUnsupportedDDLOperation) + tk.MustGetErrCode("alter table t add primary key(b) nonclustered;", errno.ErrMultiplePriKey) + tk.MustGetErrCode("alter table t add primary key(b);", errno.ErrMultiplePriKey) // implicit nonclustered + tk.MustExec("alter table t drop primary key;") + + // Test add/drop primary key on a CommonHandle key table. + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t (a int, b varchar(10), primary key(b) clustered);") + tk.MustGetErrCode("alter table t drop primary key;", errno.ErrUnsupportedDDLOperation) + tk.MustGetErrCode("alter table t add primary key(a) clustered;", errno.ErrUnsupportedDDLOperation) + tk.MustGetErrCode("alter table t add primary key(a) nonclustered;", errno.ErrMultiplePriKey) + tk.MustGetErrCode("alter table t add primary key(a);", errno.ErrMultiplePriKey) // implicit nonclustered + tk.MustGetErrCode("alter table t add primary key(b) clustered;", errno.ErrUnsupportedDDLOperation) + tk.MustGetErrCode("alter table t add primary key(b) nonclustered;", errno.ErrMultiplePriKey) + tk.MustGetErrCode("alter table t add primary key(b);", errno.ErrMultiplePriKey) // implicit nonclustered + + // Test add/drop primary key when the column&index name is `primary`. 
+ tk.MustExec("drop table if exists t;") + tk.MustExec("create table t (`primary` int);") + tk.MustExec("alter table t add index (`primary`);") + tk.MustGetErrCode("drop index `primary` on t;", errno.ErrCantDropFieldOrKey) // The primary key cannot be invisible, for the case pk_is_handle. - tk.MustExec("drop table if exists t1, t2;") - _, err = tk.Exec("create table t1(c1 int not null, primary key(c1) invisible);") - c.Assert(ddl.ErrPKIndexCantBeInvisible.Equal(err), IsTrue) - tk.MustExec("create table t2 (a int, b int not null, primary key(a), unique(b) invisible);") - - // Test drop clustered primary key. - config.UpdateGlobal(func(conf *config.Config) { - conf.AlterPrimaryKey = false - }) tk.MustExec("drop table if exists t;") - tk.Se.GetSessionVars().EnableClusteredIndex = true - tk.MustExec("create table t(a int, b varchar(64), primary key(b));") - tk.MustExec("insert into t values(1,'a'), (2, 'b');") - config.UpdateGlobal(func(conf *config.Config) { - conf.AlterPrimaryKey = true - }) - errMsg := "[ddl:8200]Unsupported drop primary key when the table is using clustered index" - tk.MustGetErrMsg("alter table t drop primary key;", errMsg) + tk.MustGetErrCode("create table t(c1 int not null, primary key(c1) invisible);", errno.ErrPKIndexCantBeInvisible) + tk.MustExec("create table t (a int, b int not null, primary key(a), unique(b) invisible);") + tk.MustExec("drop table t;") } -func (s *testSerialSuite) TestDropAutoIncrementIndex(c *C) { +func (s *testIntegrationSuite9) TestDropAutoIncrementIndex(c *C) { tk := testkit.NewTestKit(c, s.store) tk.MustExec("use test") tk.MustExec("drop table if exists t1") @@ -192,7 +185,7 @@ func (s *testSerialSuite) TestDropAutoIncrementIndex(c *C) { tk.MustExec("alter table t1 drop index a") } -func (s *testSerialSuite) TestMultiRegionGetTableEndHandle(c *C) { +func (s *testIntegrationSuite9) TestMultiRegionGetTableEndHandle(c *C) { tk := testkit.NewTestKit(c, s.store) tk.MustExec("drop database if exists test_get_endhandle") tk.MustExec("create database test_get_endhandle") @@ -235,7 +228,7 @@ func (s *testSerialSuite) TestMultiRegionGetTableEndHandle(c *C) { c.Assert(maxHandle, Equals, kv.IntHandle(10000)) } -func (s *testSerialSuite) TestGetTableEndHandle(c *C) { +func (s *testIntegrationSuite9) TestGetTableEndHandle(c *C) { // TestGetTableEndHandle test ddl.GetTableMaxHandle method, which will return the max row id of the table. 
tk := testkit.NewTestKit(c, s.store) tk.MustExec("drop database if exists test_get_endhandle") @@ -327,7 +320,7 @@ func (s *testSerialSuite) TestGetTableEndHandle(c *C) { c.Assert(emptyTable, IsFalse) } -func (s *testSerialSuite) TestMultiRegionGetTableEndCommonHandle(c *C) { +func (s *testIntegrationSuite9) TestMultiRegionGetTableEndCommonHandle(c *C) { tk := testkit.NewTestKit(c, s.store) tk.MustExec("drop database if exists test_get_endhandle") tk.MustExec("create database test_get_endhandle") @@ -371,7 +364,7 @@ func (s *testSerialSuite) TestMultiRegionGetTableEndCommonHandle(c *C) { c.Assert(maxHandle, HandleEquals, MustNewCommonHandle(c, "a", 1, 1)) } -func (s *testSerialSuite) TestGetTableEndCommonHandle(c *C) { +func (s *testIntegrationSuite9) TestGetTableEndCommonHandle(c *C) { tk := testkit.NewTestKit(c, s.store) tk.MustExec("drop database if exists test_get_endhandle") tk.MustExec("create database test_get_endhandle") @@ -987,9 +980,6 @@ func (s *testSerialSuite) TestAutoRandom(c *C) { tk.MustExec("drop table t") } - ConfigTestUtils.SetupAutoRandomTestConfig() - defer ConfigTestUtils.RestoreAutoRandomTestConfig() - // Only bigint column can set auto_random assertBigIntOnly("create table t (a char primary key auto_random(3), b int)", "char") assertBigIntOnly("create table t (a varchar(255) primary key auto_random(3), b int)", "varchar") @@ -1006,16 +996,10 @@ func (s *testSerialSuite) TestAutoRandom(c *C) { assertPKIsNotHandle("create table t (a bigint auto_random(3), b bigint, primary key (a, b))", "a") assertPKIsNotHandle("create table t (a bigint auto_random(3), b int, c char, primary key (a, c))", "a") - // PKIsNotHandle: table is created when alter-primary-key = true. - config.UpdateGlobal(func(conf *config.Config) { - conf.AlterPrimaryKey = true - }) - assertPKIsNotHandle("create table t (a bigint auto_random(3) primary key, b int)", "a") - assertPKIsNotHandle("create table t (a bigint auto_random(3) primary key, b int)", "a") - assertPKIsNotHandle("create table t (a int, b bigint auto_random(3) primary key)", "b") - config.UpdateGlobal(func(conf *config.Config) { - conf.AlterPrimaryKey = false - }) + // PKIsNotHandle: nonclustered integer primary key. + assertPKIsNotHandle("create table t (a bigint auto_random(3) primary key nonclustered, b int)", "a") + assertPKIsNotHandle("create table t (a bigint auto_random(3) primary key nonclustered, b int)", "a") + assertPKIsNotHandle("create table t (a int, b bigint auto_random(3) primary key nonclustered)", "b") // Can not set auto_random along with auto_increment. 
assertWithAutoInc("create table t (a bigint auto_random(3) primary key auto_increment)") @@ -1158,14 +1142,11 @@ func (s *testSerialSuite) TestAutoRandom(c *C) { }) } -func (s *testSerialSuite) TestAutoRandomExchangePartition(c *C) { +func (s *testIntegrationSuite9) TestAutoRandomExchangePartition(c *C) { tk := testkit.NewTestKit(c, s.store) tk.MustExec("create database if not exists auto_random_db") defer tk.MustExec("drop database if exists auto_random_db") - ConfigTestUtils.SetupAutoRandomTestConfig() - defer ConfigTestUtils.RestoreAutoRandomTestConfig() - tk.MustExec("use auto_random_db") tk.MustExec("set @@tidb_enable_exchange_partition=1") @@ -1173,7 +1154,7 @@ func (s *testSerialSuite) TestAutoRandomExchangePartition(c *C) { tk.MustExec("drop table if exists e1, e2, e3, e4;") - tk.MustExec("create table e1 (a bigint primary key auto_random(3)) partition by hash(a) partitions 1;") + tk.MustExec("create table e1 (a bigint primary key clustered auto_random(3)) partition by hash(a) partitions 1;") tk.MustExec("create table e2 (a bigint primary key);") tk.MustGetErrCode("alter table e1 exchange partition p0 with table e2;", errno.ErrTablesDifferentMetadata) @@ -1195,19 +1176,16 @@ func (s *testSerialSuite) TestAutoRandomExchangePartition(c *C) { tk.MustQuery("select count(*) from e4").Check(testkit.Rows("4")) } -func (s *testSerialSuite) TestAutoRandomIncBitsIncrementAndOffset(c *C) { +func (s *testIntegrationSuite9) TestAutoRandomIncBitsIncrementAndOffset(c *C) { tk := testkit.NewTestKit(c, s.store) tk.MustExec("create database if not exists auto_random_db") defer tk.MustExec("drop database if exists auto_random_db") tk.MustExec("use auto_random_db") tk.MustExec("drop table if exists t") - ConfigTestUtils.SetupAutoRandomTestConfig() - defer ConfigTestUtils.RestoreAutoRandomTestConfig() - recreateTable := func() { tk.MustExec("drop table if exists t") - tk.MustExec("create table t (a bigint auto_random(6) primary key)") + tk.MustExec("create table t (a bigint auto_random(6) primary key clustered)") } truncateTable := func() { _, _ = tk.Exec("delete from t") @@ -1262,15 +1240,13 @@ func (s *testSerialSuite) TestAutoRandomWithPreSplitRegion(c *C) { tk.MustExec("use auto_random_db;") tk.MustExec("drop table if exists t;") - ConfigTestUtils.SetupAutoRandomTestConfig() - defer ConfigTestUtils.RestoreAutoRandomTestConfig() origin := atomic.LoadUint32(&ddl.EnableSplitTableRegion) atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1) defer atomic.StoreUint32(&ddl.EnableSplitTableRegion, origin) tk.MustExec("set @@global.tidb_scatter_region=1;") // Test pre-split table region for auto_random table. 
- tk.MustExec("create table t (a bigint auto_random(2) primary key, b int) pre_split_regions=2;") + tk.MustExec("create table t (a bigint auto_random(2) primary key clustered, b int) pre_split_regions=2;") re := tk.MustQuery("show table t regions;") rows := re.Rows() c.Assert(len(rows), Equals, 4) @@ -1352,7 +1328,7 @@ func (s *testSerialSuite) TestForbidUnsupportedCollations(c *C) { // mustGetUnsupportedCollation("alter table t convert to collate utf8mb4_unicode_ci", "utf8mb4_unicode_ci") } -func (s *testSerialSuite) TestInvisibleIndex(c *C) { +func (s *testIntegrationSuite9) TestInvisibleIndex(c *C) { tk := testkit.NewTestKit(c, s.store) tk.MustExec("use test") @@ -1391,17 +1367,12 @@ func (s *testSerialSuite) TestInvisibleIndex(c *C) { tk.MustExec("insert into t values (11, 12)") tk.MustQuery("select * from t").Check(testkit.Rows("1 2", "3 4", "5 6", "7 8", "9 10", "11 12")) - defer config.RestoreFunc()() - config.UpdateGlobal(func(conf *config.Config) { - conf.AlterPrimaryKey = true - }) - // Limitation: Primary key cannot be invisible index - tk.MustGetErrCode("create table t1 (a int, primary key (a) invisible)", errno.ErrPKIndexCantBeInvisible) - tk.MustGetErrCode("create table t1 (a int, b int, primary key (a, b) invisible)", errno.ErrPKIndexCantBeInvisible) + tk.MustGetErrCode("create table t1 (a int, primary key (a) nonclustered invisible)", errno.ErrPKIndexCantBeInvisible) + tk.MustGetErrCode("create table t1 (a int, b int, primary key (a, b) nonclustered invisible)", errno.ErrPKIndexCantBeInvisible) tk.MustExec("create table t1 (a int, b int)") - tk.MustGetErrCode("alter table t1 add primary key(a) invisible", errno.ErrPKIndexCantBeInvisible) - tk.MustGetErrCode("alter table t1 add primary key(a, b) invisible", errno.ErrPKIndexCantBeInvisible) + tk.MustGetErrCode("alter table t1 add primary key(a) nonclustered invisible", errno.ErrPKIndexCantBeInvisible) + tk.MustGetErrCode("alter table t1 add primary key(a, b) nonclustered invisible", errno.ErrPKIndexCantBeInvisible) // Implicit primary key cannot be invisible index // Create a implicit primary key @@ -1423,7 +1394,7 @@ func (s *testSerialSuite) TestInvisibleIndex(c *C) { tk.MustGetErrCode("alter table t5 drop index a", errno.ErrPKIndexCantBeInvisible) tk.MustGetErrCode("alter table t5 modify column a int null", errno.ErrPKIndexCantBeInvisible) // If these is a explicit primary key, no key will become implicit primary key - tk.MustExec("create table t6 (a int not null, b int, unique (a) invisible, primary key(b))") + tk.MustExec("create table t6 (a int not null, b int, unique (a) invisible, primary key(b) nonclustered)") showIndexes = "select index_name, is_visible from information_schema.statistics where table_schema = 'test' and table_name = 't6'" tk.MustQuery(showIndexes).Check(testkit.Rows("a NO", "PRIMARY YES")) tk.MustExec("insert into t6 values (1, 2)") @@ -1433,7 +1404,7 @@ func (s *testSerialSuite) TestInvisibleIndex(c *C) { c.Check(len(res.Rows()), Equals, 1) } -func (s *testSerialSuite) TestCreateClusteredIndex(c *C) { +func (s *testIntegrationSuite9) TestCreateClusteredIndex(c *C) { tk := testkit.NewTestKitWithInit(c, s.store) tk.Se.GetSessionVars().EnableClusteredIndex = true tk.MustExec("CREATE TABLE t1 (a int primary key, b int)") @@ -1456,11 +1427,8 @@ func (s *testSerialSuite) TestCreateClusteredIndex(c *C) { c.Assert(err, IsNil) c.Assert(tbl.Meta().IsCommonHandle, IsFalse) - config.UpdateGlobal(func(conf *config.Config) { - conf.AlterPrimaryKey = true - }) - tk.MustExec("CREATE TABLE t5 (a varchar(255) primary 
key, b int)") - tk.MustExec("CREATE TABLE t6 (a int, b int, c int, primary key (a, b))") + tk.MustExec("CREATE TABLE t5 (a varchar(255) primary key nonclustered, b int)") + tk.MustExec("CREATE TABLE t6 (a int, b int, c int, primary key (a, b) nonclustered)") is = domain.GetDomain(ctx).InfoSchema() tbl, err = is.TableByName(model.NewCIStr("test"), model.NewCIStr("t5")) c.Assert(err, IsNil) @@ -1468,9 +1436,6 @@ func (s *testSerialSuite) TestCreateClusteredIndex(c *C) { tbl, err = is.TableByName(model.NewCIStr("test"), model.NewCIStr("t6")) c.Assert(err, IsNil) c.Assert(tbl.Meta().IsCommonHandle, IsFalse) - config.UpdateGlobal(func(conf *config.Config) { - conf.AlterPrimaryKey = false - }) tk.MustExec("CREATE TABLE t21 like t2") tk.MustExec("CREATE TABLE t31 like t3") diff --git a/executor/analyze_test.go b/executor/analyze_test.go index d535707738173..66b7e03f52549 100644 --- a/executor/analyze_test.go +++ b/executor/analyze_test.go @@ -731,7 +731,7 @@ func (s *testFastAnalyze) TestFastAnalyzeRetryRowCount(c *C) { c.Assert(row[5], Equals, "30") } -func (s *testSuite9) TestFailedAnalyzeRequest(c *C) { +func (s *testSuite10) TestFailedAnalyzeRequest(c *C) { tk := testkit.NewTestKit(c, s.store) tk.MustExec("use test") tk.MustExec("drop table if exists t") diff --git a/executor/ddl_test.go b/executor/ddl_test.go index bc848091998a9..5ea9171649c71 100644 --- a/executor/ddl_test.go +++ b/executor/ddl_test.go @@ -790,12 +790,17 @@ func (s *testSuite8) TestShardRowIDBits(c *C) { tk.MustExec("alter table auto shard_row_id_bits = 0") tk.MustExec("drop table auto") + errMsg := "[ddl:8200]Unsupported shard_row_id_bits for table with primary key as row id" + tk.MustGetErrMsg("create table auto (id varchar(255) primary key clustered, b int) shard_row_id_bits = 4;", errMsg) + tk.MustExec("create table auto (id varchar(255) primary key clustered, b int) shard_row_id_bits = 0;") + tk.MustGetErrMsg("alter table auto shard_row_id_bits = 5;", errMsg) + tk.MustExec("alter table auto shard_row_id_bits = 0;") + tk.MustExec("drop table if exists auto;") + // After PR 10759, shard_row_id_bits is not supported with pk_is_handle tables. 
- err = tk.ExecToErr("create table auto (id int not null auto_increment primary key, b int) shard_row_id_bits = 4") - c.Assert(err.Error(), Equals, "[ddl:8200]Unsupported shard_row_id_bits for table with primary key as row id") + tk.MustGetErrMsg("create table auto (id int not null auto_increment primary key, b int) shard_row_id_bits = 4", errMsg) tk.MustExec("create table auto (id int not null auto_increment primary key, b int) shard_row_id_bits = 0") - err = tk.ExecToErr("alter table auto shard_row_id_bits = 5") - c.Assert(err.Error(), Equals, "[ddl:8200]Unsupported shard_row_id_bits for table with primary key as row id") + tk.MustGetErrMsg("alter table auto shard_row_id_bits = 5", errMsg) tk.MustExec("alter table auto shard_row_id_bits = 0") // Hack an existing table with shard_row_id_bits and primary key as handle @@ -862,14 +867,6 @@ type testAutoRandomSuite struct { *baseTestSuite } -func (s *testAutoRandomSuite) SetUpTest(c *C) { - testutil.ConfigTestUtils.SetupAutoRandomTestConfig() -} - -func (s *testAutoRandomSuite) TearDownTest(c *C) { - testutil.ConfigTestUtils.RestoreAutoRandomTestConfig() -} - func (s *testAutoRandomSuite) TestAutoRandomBitsData(c *C) { tk := testkit.NewTestKit(c, s.store) @@ -886,7 +883,7 @@ func (s *testAutoRandomSuite) TestAutoRandomBitsData(c *C) { tk.MustExec("set @@allow_auto_random_explicit_insert = true") - tk.MustExec("create table t (a bigint primary key auto_random(15), b int)") + tk.MustExec("create table t (a bigint primary key clustered auto_random(15), b int)") for i := 0; i < 100; i++ { tk.MustExec("insert into t(b) values (?)", i) } @@ -902,7 +899,7 @@ func (s *testAutoRandomSuite) TestAutoRandomBitsData(c *C) { } c.Assert(allZero, IsFalse) // Test non-shard-bits part of auto random id is monotonic increasing and continuous. - orderedHandles := testutil.ConfigTestUtils.MaskSortHandles(allHandles, 15, mysql.TypeLonglong) + orderedHandles := testutil.MaskSortHandles(allHandles, 15, mysql.TypeLonglong) size := int64(len(allHandles)) for i := int64(1); i <= size; i++ { c.Assert(i, Equals, orderedHandles[i-1]) @@ -910,7 +907,7 @@ func (s *testAutoRandomSuite) TestAutoRandomBitsData(c *C) { // Test explicit insert. autoRandBitsUpperBound := 2<<47 - 1 - tk.MustExec("create table t (a bigint primary key auto_random(15), b int)") + tk.MustExec("create table t (a bigint primary key clustered auto_random(15), b int)") for i := -10; i < 10; i++ { tk.MustExec(fmt.Sprintf("insert into t values(%d, %d)", i+autoRandBitsUpperBound, i)) } @@ -944,7 +941,7 @@ func (s *testAutoRandomSuite) TestAutoRandomBitsData(c *C) { tk.MustExec("insert into t(a, b) values (?, ?)", -i, i) } // orderedHandles should be [-100, -99, ..., -2, -1, 1, 2, ..., 99, 100] - orderedHandles = testutil.ConfigTestUtils.MaskSortHandles(extractAllHandles(), 15, mysql.TypeLonglong) + orderedHandles = testutil.MaskSortHandles(extractAllHandles(), 15, mysql.TypeLonglong) size = int64(len(allHandles)) for i := int64(0); i < 100; i++ { c.Assert(orderedHandles[i], Equals, i-100) @@ -1015,7 +1012,7 @@ func (s *testAutoRandomSuite) TestAutoRandomTableOption(c *C) { c.Assert(err, IsNil) c.Assert(len(allHandles), Equals, 5) // Test non-shard-bits part of auto random id is monotonic increasing and continuous. 
- orderedHandles := testutil.ConfigTestUtils.MaskSortHandles(allHandles, 5, mysql.TypeLonglong) + orderedHandles := testutil.MaskSortHandles(allHandles, 5, mysql.TypeLonglong) size := int64(len(allHandles)) for i := int64(0); i < size; i++ { c.Assert(i+1000, Equals, orderedHandles[i]) @@ -1029,7 +1026,7 @@ func (s *testAutoRandomSuite) TestAutoRandomTableOption(c *C) { tk.MustExec("insert into alter_table_auto_random_option values(),(),(),(),()") allHandles, err = ddltestutil.ExtractAllTableHandles(tk.Se, "test", "alter_table_auto_random_option") c.Assert(err, IsNil) - orderedHandles = testutil.ConfigTestUtils.MaskSortHandles(allHandles, 5, mysql.TypeLonglong) + orderedHandles = testutil.MaskSortHandles(allHandles, 5, mysql.TypeLonglong) size = int64(len(allHandles)) for i := int64(0); i < size; i++ { c.Assert(orderedHandles[i], Equals, i+1) @@ -1047,7 +1044,7 @@ func (s *testAutoRandomSuite) TestAutoRandomTableOption(c *C) { tk.MustExec("insert into alter_table_auto_random_option values(),(),(),(),()") allHandles, err = ddltestutil.ExtractAllTableHandles(tk.Se, "test", "alter_table_auto_random_option") c.Assert(err, IsNil) - orderedHandles = testutil.ConfigTestUtils.MaskSortHandles(allHandles, 5, mysql.TypeLonglong) + orderedHandles = testutil.MaskSortHandles(allHandles, 5, mysql.TypeLonglong) size = int64(len(allHandles)) for i := int64(0); i < size; i++ { c.Assert(orderedHandles[i], Equals, i+3000000) @@ -1079,7 +1076,7 @@ func (s *testAutoRandomSuite) TestFilterDifferentAllocators(c *C) { allHandles, err := ddltestutil.ExtractAllTableHandles(tk.Se, "test", "t") c.Assert(err, IsNil) c.Assert(len(allHandles), Equals, 1) - orderedHandles := testutil.ConfigTestUtils.MaskSortHandles(allHandles, 5, mysql.TypeLonglong) + orderedHandles := testutil.MaskSortHandles(allHandles, 5, mysql.TypeLonglong) c.Assert(orderedHandles[0], Equals, int64(1)) tk.MustExec("delete from t") @@ -1090,7 +1087,7 @@ func (s *testAutoRandomSuite) TestFilterDifferentAllocators(c *C) { allHandles, err = ddltestutil.ExtractAllTableHandles(tk.Se, "test", "t") c.Assert(err, IsNil) c.Assert(len(allHandles), Equals, 1) - orderedHandles = testutil.ConfigTestUtils.MaskSortHandles(allHandles, 5, mysql.TypeLonglong) + orderedHandles = testutil.MaskSortHandles(allHandles, 5, mysql.TypeLonglong) c.Assert(orderedHandles[0], Equals, int64(2)) tk.MustExec("delete from t") @@ -1101,7 +1098,7 @@ func (s *testAutoRandomSuite) TestFilterDifferentAllocators(c *C) { allHandles, err = ddltestutil.ExtractAllTableHandles(tk.Se, "test", "t") c.Assert(err, IsNil) c.Assert(len(allHandles), Equals, 1) - orderedHandles = testutil.ConfigTestUtils.MaskSortHandles(allHandles, 5, mysql.TypeLonglong) + orderedHandles = testutil.MaskSortHandles(allHandles, 5, mysql.TypeLonglong) c.Assert(orderedHandles[0], Equals, int64(3000000)) tk.MustExec("delete from t") @@ -1115,7 +1112,7 @@ func (s *testAutoRandomSuite) TestFilterDifferentAllocators(c *C) { allHandles, err = ddltestutil.ExtractAllTableHandles(tk.Se, "test", "t1") c.Assert(err, IsNil) c.Assert(len(allHandles), Equals, 1) - orderedHandles = testutil.ConfigTestUtils.MaskSortHandles(allHandles, 5, mysql.TypeLonglong) + orderedHandles = testutil.MaskSortHandles(allHandles, 5, mysql.TypeLonglong) c.Assert(orderedHandles[0], Greater, int64(3000001)) } diff --git a/executor/infoschema_reader.go b/executor/infoschema_reader.go index 8d5f44c785ba2..4d9a5b19ec1ad 100644 --- a/executor/infoschema_reader.go +++ b/executor/infoschema_reader.go @@ -458,7 +458,7 @@ func (e *memtableRetriever) 
setDataFromTables(ctx sessionctx.Context, schemas [] if checker != nil && !checker.RequestVerification(ctx.GetSessionVars().ActiveRoles, schema.Name.L, table.Name.L, "", mysql.AllPrivMask) { continue } - pkType := "NON-CLUSTERED" + pkType := "NONCLUSTERED" if !table.IsView() { if table.GetPartitionInfo() != nil { createOptions = "partitioned" diff --git a/executor/infoschema_reader_test.go b/executor/infoschema_reader_test.go index 903d42a02df29..bb3497ac564a8 100644 --- a/executor/infoschema_reader_test.go +++ b/executor/infoschema_reader_test.go @@ -875,9 +875,9 @@ func (s *testInfoschemaTableSuite) TestTablesPKType(c *C) { tk.MustQuery("SELECT TIDB_PK_TYPE FROM information_schema.tables where table_schema = 'test' and table_name = 't_int'").Check(testkit.Rows("CLUSTERED")) tk.Se.GetSessionVars().EnableClusteredIndex = false tk.MustExec("create table t_implicit (a varchar(64) primary key, b int)") - tk.MustQuery("SELECT TIDB_PK_TYPE FROM information_schema.tables where table_schema = 'test' and table_name = 't_implicit'").Check(testkit.Rows("NON-CLUSTERED")) + tk.MustQuery("SELECT TIDB_PK_TYPE FROM information_schema.tables where table_schema = 'test' and table_name = 't_implicit'").Check(testkit.Rows("NONCLUSTERED")) tk.Se.GetSessionVars().EnableClusteredIndex = true tk.MustExec("create table t_common (a varchar(64) primary key, b int)") tk.MustQuery("SELECT TIDB_PK_TYPE FROM information_schema.tables where table_schema = 'test' and table_name = 't_common'").Check(testkit.Rows("CLUSTERED")) - tk.MustQuery("SELECT TIDB_PK_TYPE FROM information_schema.tables where table_schema = 'INFORMATION_SCHEMA' and table_name = 'TABLES'").Check(testkit.Rows("NON-CLUSTERED")) + tk.MustQuery("SELECT TIDB_PK_TYPE FROM information_schema.tables where table_schema = 'INFORMATION_SCHEMA' and table_name = 'TABLES'").Check(testkit.Rows("NONCLUSTERED")) } diff --git a/executor/insert_test.go b/executor/insert_test.go index 467e9e022c77e..891a74f495660 100644 --- a/executor/insert_test.go +++ b/executor/insert_test.go @@ -1126,20 +1126,17 @@ func (s *testSuite3) TestAutoIDIncrementAndOffset(c *C) { c.Assert(err.Error(), Equals, "[autoid:8060]Invalid auto_increment settings: auto_increment_increment: 65536, auto_increment_offset: 65536, both of them must be in range [1..65535]") } -var _ = SerialSuites(&testSuite9{&baseTestSuite{}}) +var _ = Suite(&testSuite9{&baseTestSuite{}}) type testSuite9 struct { *baseTestSuite } func (s *testSuite9) TestAutoRandomID(c *C) { - testutil.ConfigTestUtils.SetupAutoRandomTestConfig() - defer testutil.ConfigTestUtils.RestoreAutoRandomTestConfig() - tk := testkit.NewTestKit(c, s.store) tk.MustExec(`use test`) tk.MustExec(`drop table if exists ar`) - tk.MustExec(`create table ar (id bigint key auto_random, name char(10))`) + tk.MustExec(`create table ar (id bigint key clustered auto_random, name char(10))`) tk.MustExec(`insert into ar(id) values (null)`) rs := tk.MustQuery(`select id from ar`) @@ -1168,7 +1165,7 @@ func (s *testSuite9) TestAutoRandomID(c *C) { tk.MustQuery(`select last_insert_id()`).Check(testkit.Rows(fmt.Sprintf("%d", firstValue))) tk.MustExec(`drop table ar`) - tk.MustExec(`create table ar (id bigint key auto_random(15), name char(10))`) + tk.MustExec(`create table ar (id bigint key clustered auto_random(15), name char(10))`) overflowVal := 1 << (64 - 5) errMsg := fmt.Sprintf(autoid.AutoRandomRebaseOverflow, overflowVal, 1<<(64-16)-1) _, err = tk.Exec(fmt.Sprintf("alter table ar auto_random_base = %d", overflowVal)) @@ -1177,13 +1174,10 @@ func (s *testSuite9) 
TestAutoRandomID(c *C) { } func (s *testSuite9) TestMultiAutoRandomID(c *C) { - testutil.ConfigTestUtils.SetupAutoRandomTestConfig() - defer testutil.ConfigTestUtils.RestoreAutoRandomTestConfig() - tk := testkit.NewTestKit(c, s.store) tk.MustExec(`use test`) tk.MustExec(`drop table if exists ar`) - tk.MustExec(`create table ar (id bigint key auto_random, name char(10))`) + tk.MustExec(`create table ar (id bigint key clustered auto_random, name char(10))`) tk.MustExec(`insert into ar(id) values (null),(null),(null)`) rs := tk.MustQuery(`select id from ar order by id`) @@ -1221,13 +1215,10 @@ func (s *testSuite9) TestMultiAutoRandomID(c *C) { } func (s *testSuite9) TestAutoRandomIDAllowZero(c *C) { - testutil.ConfigTestUtils.SetupAutoRandomTestConfig() - defer testutil.ConfigTestUtils.RestoreAutoRandomTestConfig() - tk := testkit.NewTestKit(c, s.store) tk.MustExec(`use test`) tk.MustExec(`drop table if exists ar`) - tk.MustExec(`create table ar (id bigint key auto_random, name char(10))`) + tk.MustExec(`create table ar (id bigint key clustered auto_random, name char(10))`) rs := tk.MustQuery(`select @@session.sql_mode`) sqlMode := rs.Rows()[0][0].(string) @@ -1254,15 +1245,12 @@ func (s *testSuite9) TestAutoRandomIDAllowZero(c *C) { } func (s *testSuite9) TestAutoRandomIDExplicit(c *C) { - testutil.ConfigTestUtils.SetupAutoRandomTestConfig() - defer testutil.ConfigTestUtils.RestoreAutoRandomTestConfig() - tk := testkit.NewTestKit(c, s.store) tk.MustExec("set @@allow_auto_random_explicit_insert = true") tk.MustExec(`use test`) tk.MustExec(`drop table if exists ar`) - tk.MustExec(`create table ar (id bigint key auto_random, name char(10))`) + tk.MustExec(`create table ar (id bigint key clustered auto_random, name char(10))`) tk.MustExec(`insert into ar(id) values (1)`) tk.MustQuery(`select id from ar`).Check(testkit.Rows("1")) diff --git a/executor/join_test.go b/executor/join_test.go index 1edac17004362..76afe571f3d41 100644 --- a/executor/join_test.go +++ b/executor/join_test.go @@ -2292,7 +2292,7 @@ func (s *testSuiteJoin1) TestInvalidEnumVal(c *C) { rows.Check(testkit.Rows("a a", " ", " ", " ", " ")) } -func (s *testSuite9) TestIssue18572_1(c *C) { +func (s *testSuiteJoinSerial) TestIssue18572_1(c *C) { tk := testkit.NewTestKitWithInit(c, s.store) tk.MustExec("drop table if exists t1") tk.MustExec("create table t1(a int, b int, index idx(b));") @@ -2311,7 +2311,7 @@ func (s *testSuite9) TestIssue18572_1(c *C) { c.Assert(rs.Close(), IsNil) } -func (s *testSuite9) TestIssue18572_2(c *C) { +func (s *testSuiteJoinSerial) TestIssue18572_2(c *C) { tk := testkit.NewTestKitWithInit(c, s.store) tk.MustExec("drop table if exists t1") tk.MustExec("create table t1(a int, b int, index idx(b));") @@ -2330,7 +2330,7 @@ func (s *testSuite9) TestIssue18572_2(c *C) { c.Assert(rs.Close(), IsNil) } -func (s *testSuite9) TestIssue18572_3(c *C) { +func (s *testSuiteJoinSerial) TestIssue18572_3(c *C) { tk := testkit.NewTestKitWithInit(c, s.store) tk.MustExec("drop table if exists t1") tk.MustExec("create table t1(a int, b int, index idx(b));") diff --git a/executor/seqtest/seq_executor_test.go b/executor/seqtest/seq_executor_test.go index fa85f36adba38..46b04af26d131 100644 --- a/executor/seqtest/seq_executor_test.go +++ b/executor/seqtest/seq_executor_test.go @@ -848,12 +848,10 @@ func HelperTestAdminShowNextID(c *C, s *seqTestSuite, str string) { r.Check(testkit.Rows("test1 tt id 41 AUTO_INCREMENT")) tk.MustExec("drop table tt") - testutil.ConfigTestUtils.SetupAutoRandomTestConfig() - defer 
testutil.ConfigTestUtils.RestoreAutoRandomTestConfig() tk.MustExec("set @@allow_auto_random_explicit_insert = true") // Test for a table with auto_random primary key. - tk.MustExec("create table t3(id bigint primary key auto_random(5), c int)") + tk.MustExec("create table t3(id bigint primary key clustered auto_random(5), c int)") // Start handle is 1. r = tk.MustQuery(str + " t3 next_row_id") r.Check(testkit.Rows("test1 t3 id 1 AUTO_RANDOM")) @@ -1381,17 +1379,15 @@ func (s *seqTestSuite) TestInsertFromSelectConflictRetryAutoID(c *C) { func (s *seqTestSuite) TestAutoRandIDRetry(c *C) { tk := testkit.NewTestKitWithInit(c, s.store) - testutil.ConfigTestUtils.SetupAutoRandomTestConfig() - defer testutil.ConfigTestUtils.RestoreAutoRandomTestConfig() tk.MustExec("create database if not exists auto_random_retry") tk.MustExec("use auto_random_retry") tk.MustExec("drop table if exists t") - tk.MustExec("create table t (id bigint auto_random(3) primary key)") + tk.MustExec("create table t (id bigint auto_random(3) primary key clustered)") extractMaskedOrderedHandles := func() []int64 { handles, err := ddltestutil.ExtractAllTableHandles(tk.Se, "auto_random_retry", "t") c.Assert(err, IsNil) - return testutil.ConfigTestUtils.MaskSortHandles(handles, 3, mysql.TypeLong) + return testutil.MaskSortHandles(handles, 3, mysql.TypeLong) } tk.MustExec("set @@tidb_disable_txn_auto_retry = 0") @@ -1425,8 +1421,6 @@ func (s *seqTestSuite) TestAutoRandIDRetry(c *C) { func (s *seqTestSuite) TestAutoRandRecoverTable(c *C) { tk := testkit.NewTestKit(c, s.store) - testutil.ConfigTestUtils.SetupAutoRandomTestConfig() - defer testutil.ConfigTestUtils.RestoreAutoRandomTestConfig() tk.MustExec("create database if not exists test_recover") tk.MustExec("use test_recover") tk.MustExec("drop table if exists t_recover_auto_rand") @@ -1462,14 +1456,14 @@ func (s *seqTestSuite) TestAutoRandRecoverTable(c *C) { defer autoid.SetStep(stp) // Check rebase auto_random id. 
- tk.MustExec("create table t_recover_auto_rand (a bigint auto_random(5) primary key);") + tk.MustExec("create table t_recover_auto_rand (a bigint auto_random(5) primary key clustered);") tk.MustExec("insert into t_recover_auto_rand values (),(),()") tk.MustExec("drop table t_recover_auto_rand") tk.MustExec("recover table t_recover_auto_rand") tk.MustExec("insert into t_recover_auto_rand values (),(),()") hs, err := ddltestutil.ExtractAllTableHandles(tk.Se, "test_recover", "t_recover_auto_rand") c.Assert(err, IsNil) - ordered := testutil.ConfigTestUtils.MaskSortHandles(hs, 5, mysql.TypeLong) + ordered := testutil.MaskSortHandles(hs, 5, mysql.TypeLong) c.Assert(ordered, DeepEquals, []int64{1, 2, 3, autoRandIDStep + 1, autoRandIDStep + 2, autoRandIDStep + 3}) } diff --git a/executor/show_test.go b/executor/show_test.go index 600c47c7c0586..61ec2e5bd1ec1 100644 --- a/executor/show_test.go +++ b/executor/show_test.go @@ -1025,16 +1025,14 @@ func (s *testAutoRandomSuite) TestAutoRandomBase(c *C) { )) } -func (s *testSerialSuite) TestAutoRandomWithLargeSignedShowTableRegions(c *C) { +func (s *testSuite5) TestAutoRandomWithLargeSignedShowTableRegions(c *C) { tk := testkit.NewTestKit(c, s.store) tk.MustExec("create database if not exists auto_random_db;") defer tk.MustExec("drop database if exists auto_random_db;") tk.MustExec("use auto_random_db;") tk.MustExec("drop table if exists t;") - testutil.ConfigTestUtils.SetupAutoRandomTestConfig() - defer testutil.ConfigTestUtils.RestoreAutoRandomTestConfig() - tk.MustExec("create table t (a bigint unsigned auto_random primary key);") + tk.MustExec("create table t (a bigint unsigned auto_random primary key clustered);") tk.MustExec("set @@global.tidb_scatter_region=1;") // 18446744073709541615 is MaxUint64 - 10000. // 18446744073709551615 is the MaxUint64. 
diff --git a/expression/integration_test.go b/expression/integration_test.go index f16efa2c1095e..ad22742a891ad 100644 --- a/expression/integration_test.go +++ b/expression/integration_test.go @@ -7946,7 +7946,7 @@ func (s *testIntegrationSerialSuite) TestClusteredIndexAndNewCollationIndexEncod tk := testkit.NewTestKit(c, s.store) tk.MustExec("use test") tk.MustExec("drop table if exists t") - tk.MustExec("set @@tidb_enable_clustered_index=1;") + tk.Se.GetSessionVars().EnableClusteredIndex = true tk.MustExec("create table t(a int, b char(10) collate utf8mb4_bin, c char(10) collate utf8mb4_general_ci," + "d varchar(10) collate utf8mb4_bin, e varchar(10) collate utf8mb4_general_ci, f char(10) collate utf8mb4_unicode_ci, g varchar(10) collate utf8mb4_unicode_ci, " + "primary key(a, b, c, d, e, f, g), key a(a), unique key ua(a), key b(b), unique key ub(b), key c(c), unique key uc(c)," + @@ -8919,9 +8919,8 @@ func (s *testIntegrationSuite) TestClusteredIndexCorCol(c *C) { // For issue 23076 tk := testkit.NewTestKit(c, s.store) tk.MustExec("use test") - tk.MustExec("set @@tidb_enable_clustered_index=1;") tk.MustExec("drop table if exists t1, t2;") - tk.MustExec("create table t1 (c_int int, c_str varchar(40), primary key (c_int, c_str) , key(c_int) );") + tk.MustExec("create table t1 (c_int int, c_str varchar(40), primary key (c_int, c_str) clustered, key(c_int) );") tk.MustExec("create table t2 like t1 ;") tk.MustExec("insert into t1 values (1, 'crazy lumiere'), (10, 'goofy mestorf');") tk.MustExec("insert into t2 select * from t1 ;") diff --git a/infoschema/tables_test.go b/infoschema/tables_test.go index a952eff93db66..3858aece8d96b 100644 --- a/infoschema/tables_test.go +++ b/infoschema/tables_test.go @@ -641,13 +641,10 @@ func (s *testTableSuite) TestTableRowIDShardingInfo(c *C) { testFunc("performance_schema", nil) testFunc("uucc", "NOT_SHARDED") - testutil.ConfigTestUtils.SetupAutoRandomTestConfig() - defer testutil.ConfigTestUtils.RestoreAutoRandomTestConfig() - - tk.MustExec("CREATE TABLE `sharding_info_test_db`.`t4` (a bigint key auto_random)") + tk.MustExec("CREATE TABLE `sharding_info_test_db`.`t4` (a bigint key clustered auto_random)") assertShardingInfo("t4", "PK_AUTO_RANDOM_BITS=5") - tk.MustExec("CREATE TABLE `sharding_info_test_db`.`t5` (a bigint key auto_random(1))") + tk.MustExec("CREATE TABLE `sharding_info_test_db`.`t5` (a bigint key clustered auto_random(1))") assertShardingInfo("t5", "PK_AUTO_RANDOM_BITS=1") tk.MustExec("DROP DATABASE `sharding_info_test_db`") diff --git a/meta/autoid/errors.go b/meta/autoid/errors.go index ad0b1bcf5d12b..dbc279c0641a2 100644 --- a/meta/autoid/errors.go +++ b/meta/autoid/errors.go @@ -29,8 +29,8 @@ var ( ) const ( - // AutoRandomPKisNotHandleErrMsg indicates the auto_random column attribute is defined on a non-primary key column, or the table's primary key is not a single integer column. - AutoRandomPKisNotHandleErrMsg = "column %s is not the integer primary key, or table is created with alter-primary-key enabled" + // AutoRandomPKisNotHandleErrMsg indicates the auto_random column attribute is defined on a non-primary key column, or the primary key is nonclustered. + AutoRandomPKisNotHandleErrMsg = "column %s is not the integer primary key, or the primary key is nonclustered" // AutoRandomIncompatibleWithAutoIncErrMsg is reported when auto_random and auto_increment are specified on the same column. 
AutoRandomIncompatibleWithAutoIncErrMsg = "auto_random is incompatible with auto_increment" // AutoRandomIncompatibleWithDefaultValueErrMsg is reported when auto_random and default are specified on the same column. diff --git a/session/clustered_index_test.go b/session/clustered_index_test.go index 7e0479e22cce7..f9336b9f4991b 100644 --- a/session/clustered_index_test.go +++ b/session/clustered_index_test.go @@ -15,7 +15,6 @@ package session_test import ( . "github.com/pingcap/check" - "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/errno" "github.com/pingcap/tidb/store/tikv" "github.com/pingcap/tidb/util/collate" @@ -417,44 +416,52 @@ func (s *testClusteredSuite) TestClusteredIndexSelectWhereInNull(c *C) { tk.MustQuery("select * from t where a in (null);").Check(testkit.Rows( /* empty result */ )) } -func (s *testClusteredSerialSuite) TestClusteredIndexSyntax(c *C) { +func (s *testClusteredSuite) TestClusteredIndexSyntax(c *C) { tk := testkit.NewTestKitWithInit(c, s.store) const showPKType = `select tidb_pk_type from information_schema.tables where table_schema = 'test' and table_name = 't';` - const nonClustered, clustered = `NON-CLUSTERED`, `CLUSTERED` + const nonClustered, clustered = `NONCLUSTERED`, `CLUSTERED` assertPkType := func(sql string, pkType string) { tk.MustExec("drop table if exists t;") tk.MustExec(sql) tk.MustQuery(showPKType).Check(testkit.Rows(pkType)) } - defer config.RestoreFunc()() - for _, allowAlterPK := range []bool{true, false} { - config.UpdateGlobal(func(conf *config.Config) { - conf.AlterPrimaryKey = allowAlterPK - }) - // Test single integer column as the primary key. - clusteredDefault := clustered - if allowAlterPK { - clusteredDefault = nonClustered - } - assertPkType("create table t (a int primary key, b int);", clusteredDefault) - assertPkType("create table t (a int, b int, primary key(a) clustered);", clustered) - assertPkType("create table t (a int, b int, primary key(a) /*T![clustered_index] clustered */);", clustered) - assertPkType("create table t (a int, b int, primary key(a) nonclustered);", nonClustered) - assertPkType("create table t (a int, b int, primary key(a) /*T![clustered_index] nonclustered */);", nonClustered) - - // Test for clustered index. - tk.Se.GetSessionVars().EnableClusteredIndex = false - assertPkType("create table t (a int, b varchar(255), primary key(b, a));", nonClustered) - assertPkType("create table t (a int, b varchar(255), primary key(b, a) nonclustered);", nonClustered) - assertPkType("create table t (a int, b varchar(255), primary key(b, a) clustered);", clustered) - tk.Se.GetSessionVars().EnableClusteredIndex = true - assertPkType("create table t (a int, b varchar(255), primary key(b, a));", clusteredDefault) - assertPkType("create table t (a int, b varchar(255), primary key(b, a) nonclustered);", nonClustered) - assertPkType("create table t (a int, b varchar(255), primary key(b, a) /*T![clustered_index] nonclustered */);", nonClustered) - assertPkType("create table t (a int, b varchar(255), primary key(b, a) clustered);", clustered) - assertPkType("create table t (a int, b varchar(255), primary key(b, a) /*T![clustered_index] clustered */);", clustered) - } + // Test single integer column as the primary key. 
+ clusteredDefault := clustered + assertPkType("create table t (a int primary key, b int);", clusteredDefault) + assertPkType("create table t (a int, b int, primary key(a) clustered);", clustered) + assertPkType("create table t (a int, b int, primary key(a) /*T![clustered_index] clustered */);", clustered) + assertPkType("create table t (a int, b int, primary key(a) nonclustered);", nonClustered) + assertPkType("create table t (a int, b int, primary key(a) /*T![clustered_index] nonclustered */);", nonClustered) + + // Test for clustered index. + tk.Se.GetSessionVars().EnableClusteredIndex = false + assertPkType("create table t (a int, b varchar(255), primary key(b, a));", nonClustered) + assertPkType("create table t (a int, b varchar(255), primary key(b, a) nonclustered);", nonClustered) + assertPkType("create table t (a int, b varchar(255), primary key(b, a) clustered);", clustered) + tk.Se.GetSessionVars().EnableClusteredIndex = true + assertPkType("create table t (a int, b varchar(255), primary key(b, a));", clusteredDefault) + assertPkType("create table t (a int, b varchar(255), primary key(b, a) nonclustered);", nonClustered) + assertPkType("create table t (a int, b varchar(255), primary key(b, a) /*T![clustered_index] nonclustered */);", nonClustered) + assertPkType("create table t (a int, b varchar(255), primary key(b, a) clustered);", clustered) + assertPkType("create table t (a int, b varchar(255), primary key(b, a) /*T![clustered_index] clustered */);", clustered) + + tk.MustGetErrCode("create table t (a varchar(255) unique key clustered);", errno.ErrParse) + tk.MustGetErrCode("create table t (a varchar(255), foreign key (a) reference t1(a) clustered);", errno.ErrParse) + tk.MustGetErrCode("create table t (a varchar(255), foreign key (a) clustered reference t1(a));", errno.ErrParse) + tk.MustGetErrCode("create table t (a varchar(255) clustered);", errno.ErrParse) + + errMsg := "[ddl:8200]CLUSTERED/NONCLUSTERED keyword is only supported for primary key" + tk.MustGetErrMsg("create table t (a varchar(255), unique key(a) clustered);", errMsg) + tk.MustGetErrMsg("create table t (a varchar(255), unique key(a) nonclustered);", errMsg) + tk.MustGetErrMsg("create table t (a varchar(255), unique index(a) clustered);", errMsg) + tk.MustGetErrMsg("create table t (a varchar(255), unique index(a) nonclustered);", errMsg) + tk.MustGetErrMsg("create table t (a varchar(255), key(a) clustered);", errMsg) + tk.MustGetErrMsg("create table t (a varchar(255), key(a) nonclustered);", errMsg) + tk.MustGetErrMsg("create table t (a varchar(255), index(a) clustered);", errMsg) + tk.MustGetErrMsg("create table t (a varchar(255), index(a) nonclustered);", errMsg) + tk.MustGetErrMsg("create table t (a varchar(255), b decimal(5, 4), primary key (a, b) clustered, key (b) clustered)", errMsg) + tk.MustGetErrMsg("create table t (a varchar(255), b decimal(5, 4), primary key (a, b) clustered, key (b) nonclustered)", errMsg) } func (s *testClusteredSerialSuite) TestPrefixClusteredIndexAddIndexAndRecover(c *C) { @@ -478,10 +485,6 @@ func (s *testClusteredSerialSuite) TestPrefixClusteredIndexAddIndexAndRecover(c // https://github.com/pingcap/tidb/issues/23106 func (s *testClusteredSerialSuite) TestClusteredIndexDecodeRestoredDataV5(c *C) { tk := testkit.NewTestKitWithInit(c, s.store) - defer config.RestoreFunc()() - config.UpdateGlobal(func(conf *config.Config) { - conf.AlterPrimaryKey = false - }) defer collate.SetNewCollationEnabledForTest(false) collate.SetNewCollationEnabledForTest(true) tk.MustExec("use test") @@ 
-506,10 +509,6 @@ func (s *testClusteredSerialSuite) TestPrefixedClusteredIndexUniqueKeyWithNewCol defer collate.SetNewCollationEnabledForTest(false) collate.SetNewCollationEnabledForTest(true) tk := testkit.NewTestKitWithInit(c, s.store) - defer config.RestoreFunc()() - config.UpdateGlobal(func(conf *config.Config) { - conf.AlterPrimaryKey = false - }) tk.MustExec("use test;") tk.Se.GetSessionVars().EnableClusteredIndex = true tk.MustExec("create table t (a text collate utf8mb4_general_ci not null, b int(11) not null, " + diff --git a/sessionctx/binloginfo/binloginfo_test.go b/sessionctx/binloginfo/binloginfo_test.go index a6bb7a758a5e7..03a003543ada6 100644 --- a/sessionctx/binloginfo/binloginfo_test.go +++ b/sessionctx/binloginfo/binloginfo_test.go @@ -268,14 +268,15 @@ func (s *testBinlogSuite) TestBinlog(c *C) { binlog.MutationType_Insert, }) - // Test cannot build clustered index tables when binlog client exists. - tk.MustExec("create table local_clustered_index (c1 varchar(255) primary key clustered);") - warnMsg := "Warning 1105 cannot build clustered index table because the binlog is ON" - tk.MustQuery("show warnings;").Check(testkit.Rows(warnMsg)) + // Cannot create common clustered index table when binlog client exists. + errMsg := "[ddl:8200]Cannot create clustered index table when the binlog is ON" + tk.MustGetErrMsg("create table local_clustered_index (c1 varchar(255) primary key clustered);", errMsg) + // Create int clustered index table when binlog client exists. + tk.MustExec("create table local_clustered_index (c1 bigint primary key clustered);") tk.MustQuery("select tidb_pk_type from information_schema.tables where table_name = 'local_clustered_index' and table_schema = 'test';"). - Check(testkit.Rows("NON-CLUSTERED")) + Check(testkit.Rows("CLUSTERED")) tk.MustExec("drop table if exists local_clustered_index;") - // Test clustered index tables will not write binlog. + // Test common clustered index tables will not write binlog. tk.Se.GetSessionVars().BinlogClient = nil tk.MustExec("create table local_clustered_index (c1 varchar(255) primary key clustered);") tk.MustQuery("select tidb_pk_type from information_schema.tables where table_name = 'local_clustered_index' and table_schema = 'test';"). 
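The binloginfo hunk above replaces a silent downgrade (create the table non-clustered and raise a warning) with a hard DDL error: while a binlog client is attached, a common-handle (non-integer) clustered primary key is rejected outright, whereas an integer primary key still produces a clustered table. A minimal sketch of the resulting behavior, reusing the testkit helpers the patch itself calls; the function and table names are illustrative, and the suite setup (including the attached binlog client) is assumed:

package binloginfo_test

import "github.com/pingcap/tidb/util/testkit"

// checkClusteredIndexUnderBinlog assumes tk is a session whose session vars
// still carry a live binlog client, as arranged earlier in TestBinlog.
func checkClusteredIndexUnderBinlog(tk *testkit.TestKit) {
	// A varchar primary key needs a common handle, so with the binlog ON
	// the CREATE TABLE now fails instead of quietly falling back.
	tk.MustGetErrMsg(
		"create table t_common (c1 varchar(255) primary key clustered);",
		"[ddl:8200]Cannot create clustered index table when the binlog is ON")

	// An integer primary key is an int-handle clustered table and stays legal.
	tk.MustExec("create table t_int (c1 bigint primary key clustered);")
	tk.MustQuery("select tidb_pk_type from information_schema.tables "+
		"where table_name = 't_int' and table_schema = 'test';").
		Check(testkit.Rows("CLUSTERED"))
	tk.MustExec("drop table t_int;")
}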
diff --git a/sessionctx/variable/sysvar.go b/sessionctx/variable/sysvar.go index 4c0957adb7558..0a7c4fbb02c79 100644 --- a/sessionctx/variable/sysvar.go +++ b/sessionctx/variable/sysvar.go @@ -768,7 +768,7 @@ var defaultSysVars = []*SysVar{ {Scope: ScopeSession, Name: TiDBFoundInBinding, Value: BoolToOnOff(DefTiDBFoundInBinding), Type: TypeBool, ReadOnly: true}, {Scope: ScopeSession, Name: TiDBEnableCollectExecutionInfo, Value: BoolToOnOff(DefTiDBEnableCollectExecutionInfo), Type: TypeBool}, {Scope: ScopeGlobal | ScopeSession, Name: TiDBAllowAutoRandExplicitInsert, Value: BoolToOnOff(DefTiDBAllowAutoRandExplicitInsert), Type: TypeBool}, - {Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableClusteredIndex, Value: BoolToOnOff(DefTiDBEnableClusteredIndex), Type: TypeBool}, + {Scope: ScopeGlobal, Name: TiDBEnableClusteredIndex, Value: BoolToOnOff(DefTiDBEnableClusteredIndex), Type: TypeBool}, {Scope: ScopeGlobal | ScopeSession, Name: TiDBPartitionPruneMode, Value: string(Static), Type: TypeStr, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) { mode := PartitionPruneMode(normalizedValue).Update() if !mode.Valid() { diff --git a/statistics/selectivity_test.go b/statistics/selectivity_test.go index d5aca3cf8a6e0..f371d2189b0b7 100644 --- a/statistics/selectivity_test.go +++ b/statistics/selectivity_test.go @@ -607,14 +607,13 @@ func (s *testStatsSuite) TestStatsVer2(c *C) { testKit.MustExec("analyze table tprefix with 2 topn, 3 buckets") // test with clustered index - testKit.MustExec("set @@tidb_enable_clustered_index = 1") testKit.MustExec("drop table if exists ct1") - testKit.MustExec("create table ct1 (a int, pk varchar(10), primary key(pk))") + testKit.MustExec("create table ct1 (a int, pk varchar(10), primary key(pk) clustered)") testKit.MustExec("insert into ct1 values (1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5'), (6, '6'), (7, '7'), (8, '8')") testKit.MustExec("analyze table ct1 with 2 topn, 3 buckets") testKit.MustExec("drop table if exists ct2") - testKit.MustExec("create table ct2 (a int, b int, c int, primary key(a, b))") + testKit.MustExec("create table ct2 (a int, b int, c int, primary key(a, b) clustered)") testKit.MustExec("insert into ct2 values (1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4), (5, 5, 5), (6, 6, 6), (7, 7, 7), (8, 8, 8)") testKit.MustExec("analyze table ct2 with 2 topn, 3 buckets") diff --git a/util/testutil/testutil.go b/util/testutil/testutil.go index eb2c9ad565282..d1ecd061a5624 100644 --- a/util/testutil/testutil.go +++ b/util/testutil/testutil.go @@ -32,7 +32,6 @@ import ( "github.com/pingcap/check" "github.com/pingcap/errors" "github.com/pingcap/parser/mysql" - "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/sessionctx/stmtctx" "github.com/pingcap/tidb/types" @@ -409,34 +408,8 @@ func (t *TestData) GenerateOutputIfNeeded() error { return err } -// ConfigTestUtils contains a set of set-up/restore methods related to config used in tests. -var ConfigTestUtils configTestUtils - -type configTestUtils struct { - autoRandom -} - -type autoRandom struct { - originAlterPrimaryKey bool -} - -// SetupAutoRandomTestConfig set alter-primary-key to false and save its origin values. -// This method should only be used for the tests in SerialSuite. 
-func (a *autoRandom) SetupAutoRandomTestConfig() { - globalCfg := config.GetGlobalConfig() - a.originAlterPrimaryKey = globalCfg.AlterPrimaryKey - globalCfg.AlterPrimaryKey = false -} - -// RestoreAutoRandomTestConfig restore the values had been saved in SetupTestConfig. -// This method should only be used for the tests in SerialSuite. -func (a *autoRandom) RestoreAutoRandomTestConfig() { - globalCfg := config.GetGlobalConfig() - globalCfg.AlterPrimaryKey = a.originAlterPrimaryKey -} - // MaskSortHandles sorts the handles by lowest (fieldTypeBits - 1 - shardBitsCount) bits. -func (a *autoRandom) MaskSortHandles(handles []int64, shardBitsCount int, fieldType byte) []int64 { +func MaskSortHandles(handles []int64, shardBitsCount int, fieldType byte) []int64 { typeBitsLength := mysql.DefaultLengthOfMysqlTypes[fieldType] * 8 const signBitCount = 1 shiftBitsCount := 64 - typeBitsLength + shardBitsCount + signBitCount From 28c3748496b673e6ed5c9c455c190fac09f5f0ca Mon Sep 17 00:00:00 2001 From: Yuanjia Zhang Date: Thu, 18 Mar 2021 15:35:11 +0800 Subject: [PATCH 21/44] planner: remove some risky cache operations in the plan builder (#23354) --- cmd/explaintest/r/explain_easy.result | 122 +++++++++--------- cmd/explaintest/r/explain_easy_stats.result | 22 ++-- cmd/explaintest/r/select.result | 18 +-- cmd/explaintest/r/subquery.result | 6 +- cmd/explaintest/r/tpch.result | 4 +- executor/executor_test.go | 4 +- executor/testdata/agg_suite_out.json | 12 +- .../testdata/integration_suite_out.json | 64 ++++----- .../cascades/testdata/stringer_suite_out.json | 24 ++-- planner/core/integration_test.go | 8 ++ planner/core/logical_plan_builder.go | 34 +---- planner/core/planbuilder.go | 19 +-- planner/core/testdata/analyze_suite_out.json | 10 +- .../core/testdata/integration_suite_out.json | 6 +- planner/core/testdata/plan_suite_out.json | 4 +- .../testdata/plan_suite_unexported_out.json | 12 +- planner/core/testdata/stats_suite_out.json | 38 +++--- util/ranger/testdata/ranger_suite_out.json | 6 +- 18 files changed, 196 insertions(+), 217 deletions(-) diff --git a/cmd/explaintest/r/explain_easy.result b/cmd/explaintest/r/explain_easy.result index 96f66065effb7..edcda6e95fde0 100644 --- a/cmd/explaintest/r/explain_easy.result +++ b/cmd/explaintest/r/explain_easy.result @@ -90,8 +90,8 @@ Selection 0.33 root gt(test.t1.c2, 1) └─Point_Get 1.00 root table:t1 handle:1 explain format = 'brief' select sum(t1.c1 in (select c1 from t2)) from t1; id estRows task access object operator info -StreamAgg 1.00 root funcs:sum(Column#10)->Column#8 -└─Projection 10000.00 root cast(Column#7, decimal(65,0) BINARY)->Column#10 +StreamAgg 1.00 root funcs:sum(Column#13)->Column#11 +└─Projection 10000.00 root cast(Column#10, decimal(65,0) BINARY)->Column#13 └─HashJoin 10000.00 root CARTESIAN left outer semi join, other cond:eq(test.t1.c1, test.t2.c1) ├─IndexReader(Build) 10000.00 root index:IndexFullScan │ └─IndexFullScan 10000.00 cop[tikv] table:t2, index:c1(c1) keep order:false, stats:pseudo @@ -108,9 +108,9 @@ HashJoin 9990.00 root inner join, equal:[eq(test.t1.c1, test.t2.c2)] └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo explain format = 'brief' select (select count(1) k from t1 s where s.c1 = t1.c1 having k != 0) from t1; id estRows task access object operator info -Projection 10000.00 root ifnull(Column#7, 0)->Column#7 +Projection 10000.00 root ifnull(Column#10, 0)->Column#10 └─MergeJoin 10000.00 root left outer join, left key:test.t1.c1, right key:test.t1.c1 - ├─Projection(Build) 8000.00 root 
1->Column#7, test.t1.c1 + ├─Projection(Build) 8000.00 root 1->Column#10, test.t1.c1 │ └─TableReader 10000.00 root data:TableFullScan │ └─TableFullScan 10000.00 cop[tikv] table:s keep order:true, stats:pseudo └─TableReader(Probe) 10000.00 root data:TableFullScan @@ -120,7 +120,7 @@ id estRows task access object operator info MemTableScan 10000.00 root table:COLUMNS explain format = 'brief' select c2 = (select c2 from t2 where t1.c1 = t2.c1 order by c1 limit 1) from t1; id estRows task access object operator info -Projection 10000.00 root eq(test.t1.c2, test.t2.c2)->Column#8 +Projection 10000.00 root eq(test.t1.c2, test.t2.c2)->Column#11 └─Apply 10000.00 root CARTESIAN left outer join ├─TableReader(Build) 10000.00 root data:TableFullScan │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo @@ -222,8 +222,8 @@ StreamAgg 1.00 root funcs:count(1)->Column#5 set @@session.tidb_opt_insubq_to_join_and_agg=0; explain format = 'brief' select sum(t1.c1 in (select c1 from t2)) from t1; id estRows task access object operator info -StreamAgg 1.00 root funcs:sum(Column#10)->Column#8 -└─Projection 10000.00 root cast(Column#7, decimal(65,0) BINARY)->Column#10 +StreamAgg 1.00 root funcs:sum(Column#13)->Column#11 +└─Projection 10000.00 root cast(Column#10, decimal(65,0) BINARY)->Column#13 └─HashJoin 10000.00 root CARTESIAN left outer semi join, other cond:eq(test.t1.c1, test.t2.c1) ├─IndexReader(Build) 10000.00 root index:IndexFullScan │ └─IndexFullScan 10000.00 cop[tikv] table:t2, index:c1(c1) keep order:false, stats:pseudo @@ -238,8 +238,8 @@ HashJoin 10000.00 root CARTESIAN left outer semi join, other cond:eq(1, test.t2 └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo explain format = 'brief' select sum(6 in (select c2 from t2)) from t1; id estRows task access object operator info -StreamAgg 1.00 root funcs:sum(Column#10)->Column#8 -└─Projection 10000.00 root cast(Column#7, decimal(65,0) BINARY)->Column#10 +StreamAgg 1.00 root funcs:sum(Column#13)->Column#11 +└─Projection 10000.00 root cast(Column#10, decimal(65,0) BINARY)->Column#13 └─HashJoin 10000.00 root CARTESIAN left outer semi join, other cond:eq(6, test.t2.c2) ├─TableReader(Build) 10000.00 root data:TableFullScan │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo @@ -248,57 +248,57 @@ StreamAgg 1.00 root funcs:sum(Column#10)->Column#8 explain format="dot" select sum(t1.c1 in (select c1 from t2)) from t1; dot contents -digraph StreamAgg_9 { -subgraph cluster9{ +digraph StreamAgg_10 { +subgraph cluster10{ node [style=filled, color=lightgrey] color=black label = "root" -"StreamAgg_9" -> "Projection_20" -"Projection_20" -> "HashJoin_19" -"HashJoin_19" -> "TableReader_12" -"HashJoin_19" -> "IndexReader_18" +"StreamAgg_10" -> "Projection_21" +"Projection_21" -> "HashJoin_20" +"HashJoin_20" -> "TableReader_13" +"HashJoin_20" -> "IndexReader_19" } -subgraph cluster11{ +subgraph cluster12{ node [style=filled, color=lightgrey] color=black label = "cop" -"TableFullScan_11" +"TableFullScan_12" } -subgraph cluster17{ +subgraph cluster18{ node [style=filled, color=lightgrey] color=black label = "cop" -"IndexFullScan_17" +"IndexFullScan_18" } -"TableReader_12" -> "TableFullScan_11" -"IndexReader_18" -> "IndexFullScan_17" +"TableReader_13" -> "TableFullScan_12" +"IndexReader_19" -> "IndexFullScan_18" } explain format="dot" select 1 in (select c2 from t2) from t1; dot contents -digraph HashJoin_7 { -subgraph cluster7{ +digraph HashJoin_8 { +subgraph cluster8{ node [style=filled, 
color=lightgrey] color=black label = "root" -"HashJoin_7" -> "TableReader_9" -"HashJoin_7" -> "TableReader_13" +"HashJoin_8" -> "TableReader_10" +"HashJoin_8" -> "TableReader_14" } -subgraph cluster8{ +subgraph cluster9{ node [style=filled, color=lightgrey] color=black label = "cop" -"TableFullScan_8" +"TableFullScan_9" } -subgraph cluster12{ +subgraph cluster13{ node [style=filled, color=lightgrey] color=black label = "cop" -"TableFullScan_12" +"TableFullScan_13" } -"TableReader_9" -> "TableFullScan_8" -"TableReader_13" -> "TableFullScan_12" +"TableReader_10" -> "TableFullScan_9" +"TableReader_14" -> "TableFullScan_13" } drop table if exists t1, t2, t3, t4; @@ -306,11 +306,11 @@ drop table if exists t; create table t(a int primary key, b int, c int, index idx(b)); explain format = 'brief' select t.c in (select count(*) from t s ignore index(idx), t t1 where s.a = t.a and s.a = t1.a) from t; id estRows task access object operator info -Projection 10000.00 root Column#11 -└─Apply 10000.00 root CARTESIAN left outer semi join, other cond:eq(test.t.c, Column#10) +Projection 10000.00 root Column#17 +└─Apply 10000.00 root CARTESIAN left outer semi join, other cond:eq(test.t.c, Column#16) ├─TableReader(Build) 10000.00 root data:TableFullScan │ └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo - └─StreamAgg(Probe) 1.00 root funcs:count(1)->Column#10 + └─StreamAgg(Probe) 1.00 root funcs:count(1)->Column#16 └─MergeJoin 12.50 root inner join, left key:test.t.a, right key:test.t.a ├─TableReader(Build) 1.00 root data:TableRangeScan │ └─TableRangeScan 1.00 cop[tikv] table:t1 range: decided by [eq(test.t.a, test.t.a)], keep order:true, stats:pseudo @@ -318,11 +318,11 @@ Projection 10000.00 root Column#11 └─TableRangeScan 1.00 cop[tikv] table:s range: decided by [eq(test.t.a, test.t.a)], keep order:true, stats:pseudo explain format = 'brief' select t.c in (select count(*) from t s use index(idx), t t1 where s.b = t.a and s.a = t1.a) from t; id estRows task access object operator info -Projection 10000.00 root Column#11 -└─Apply 10000.00 root CARTESIAN left outer semi join, other cond:eq(test.t.c, Column#10) +Projection 10000.00 root Column#17 +└─Apply 10000.00 root CARTESIAN left outer semi join, other cond:eq(test.t.c, Column#16) ├─TableReader(Build) 10000.00 root data:TableFullScan │ └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo - └─StreamAgg(Probe) 1.00 root funcs:count(1)->Column#10 + └─StreamAgg(Probe) 1.00 root funcs:count(1)->Column#16 └─IndexJoin 12.50 root inner join, inner:TableReader, outer key:test.t.a, inner key:test.t.a, equal cond:eq(test.t.a, test.t.a) ├─IndexReader(Build) 10.00 root index:IndexRangeScan │ └─IndexRangeScan 10.00 cop[tikv] table:s, index:idx(b) range: decided by [eq(test.t.b, test.t.a)], keep order:false, stats:pseudo @@ -330,11 +330,11 @@ Projection 10000.00 root Column#11 └─TableRangeScan 1.00 cop[tikv] table:t1 range: decided by [test.t.a], keep order:false, stats:pseudo explain format = 'brief' select t.c in (select count(*) from t s use index(idx), t t1 where s.b = t.a and s.c = t1.a) from t; id estRows task access object operator info -Projection 10000.00 root Column#11 -└─Apply 10000.00 root CARTESIAN left outer semi join, other cond:eq(test.t.c, Column#10) +Projection 10000.00 root Column#17 +└─Apply 10000.00 root CARTESIAN left outer semi join, other cond:eq(test.t.c, Column#16) ├─TableReader(Build) 10000.00 root data:TableFullScan │ └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo - 
└─StreamAgg(Probe) 1.00 root funcs:count(1)->Column#10 + └─StreamAgg(Probe) 1.00 root funcs:count(1)->Column#16 └─IndexJoin 12.49 root inner join, inner:TableReader, outer key:test.t.c, inner key:test.t.a, equal cond:eq(test.t.c, test.t.a) ├─IndexLookUp(Build) 9.99 root │ ├─IndexRangeScan(Build) 10.00 cop[tikv] table:s, index:idx(b) range: decided by [eq(test.t.b, test.t.a)], keep order:false, stats:pseudo @@ -346,11 +346,11 @@ insert into t values(1, 1, 1), (2, 2 ,2), (3, 3, 3), (4, 3, 4),(5,3,5); analyze table t; explain format = 'brief' select t.c in (select count(*) from t s, t t1 where s.b = t.a and s.b = 3 and s.a = t1.a) from t; id estRows task access object operator info -Projection 5.00 root Column#11 -└─Apply 5.00 root CARTESIAN left outer semi join, other cond:eq(test.t.c, Column#10) +Projection 5.00 root Column#17 +└─Apply 5.00 root CARTESIAN left outer semi join, other cond:eq(test.t.c, Column#16) ├─TableReader(Build) 5.00 root data:TableFullScan │ └─TableFullScan 5.00 cop[tikv] table:t keep order:false - └─StreamAgg(Probe) 1.00 root funcs:count(1)->Column#10 + └─StreamAgg(Probe) 1.00 root funcs:count(1)->Column#16 └─MergeJoin 2.40 root inner join, left key:test.t.a, right key:test.t.a ├─TableReader(Build) 4.00 root data:Selection │ └─Selection 4.00 cop[tikv] eq(3, test.t.a) @@ -360,11 +360,11 @@ Projection 5.00 root Column#11 └─IndexRangeScan 3.00 cop[tikv] table:s, index:idx(b) range:[3,3], keep order:true explain format = 'brief' select t.c in (select count(*) from t s left join t t1 on s.a = t1.a where 3 = t.a and s.b = 3) from t; id estRows task access object operator info -Projection 5.00 root Column#11 -└─Apply 5.00 root CARTESIAN left outer semi join, other cond:eq(test.t.c, Column#10) +Projection 5.00 root Column#17 +└─Apply 5.00 root CARTESIAN left outer semi join, other cond:eq(test.t.c, Column#16) ├─TableReader(Build) 5.00 root data:TableFullScan │ └─TableFullScan 5.00 cop[tikv] table:t keep order:false - └─StreamAgg(Probe) 1.00 root funcs:count(1)->Column#10 + └─StreamAgg(Probe) 1.00 root funcs:count(1)->Column#16 └─MergeJoin 2.40 root left outer join, left key:test.t.a, right key:test.t.a ├─TableReader(Build) 4.00 root data:Selection │ └─Selection 4.00 cop[tikv] eq(3, test.t.a) @@ -374,11 +374,11 @@ Projection 5.00 root Column#11 └─IndexRangeScan 3.00 cop[tikv] table:s, index:idx(b) range:[3,3], keep order:true explain format = 'brief' select t.c in (select count(*) from t s right join t t1 on s.a = t1.a where 3 = t.a and t1.b = 3) from t; id estRows task access object operator info -Projection 5.00 root Column#11 -└─Apply 5.00 root CARTESIAN left outer semi join, other cond:eq(test.t.c, Column#10) +Projection 5.00 root Column#17 +└─Apply 5.00 root CARTESIAN left outer semi join, other cond:eq(test.t.c, Column#16) ├─TableReader(Build) 5.00 root data:TableFullScan │ └─TableFullScan 5.00 cop[tikv] table:t keep order:false - └─StreamAgg(Probe) 1.00 root funcs:count(1)->Column#10 + └─StreamAgg(Probe) 1.00 root funcs:count(1)->Column#16 └─MergeJoin 2.40 root right outer join, left key:test.t.a, right key:test.t.a ├─TableReader(Build) 4.00 root data:Selection │ └─Selection 4.00 cop[tikv] eq(3, test.t.a) @@ -579,15 +579,15 @@ HashJoin 2773.61 root inner join, equal:[eq(test.t.nb, test.t.nb)] └─TableFullScan 10000.00 cop[tikv] table:tb keep order:false, stats:pseudo explain format = 'brief' select ifnull(t.nc, 1) in (select count(*) from t s , t t1 where s.a = t.a and s.a = t1.a) from t; id estRows task access object operator info -Projection 10000.00 root Column#14 
-└─Apply 10000.00 root left outer semi join, equal:[eq(test.t.nc, Column#13)] +Projection 10000.00 root Column#22 +└─Apply 10000.00 root left outer semi join, equal:[eq(test.t.nc, Column#21)] ├─TableReader(Build) 10000.00 root data:TableFullScan │ └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo - └─HashAgg(Probe) 1.00 root funcs:count(Column#15)->Column#13 + └─HashAgg(Probe) 1.00 root funcs:count(Column#23)->Column#21 └─HashJoin 9.99 root inner join, equal:[eq(test.t.a, test.t.a)] - ├─HashAgg(Build) 7.99 root group by:test.t.a, funcs:count(Column#16)->Column#15, funcs:firstrow(test.t.a)->test.t.a + ├─HashAgg(Build) 7.99 root group by:test.t.a, funcs:count(Column#24)->Column#23, funcs:firstrow(test.t.a)->test.t.a │ └─TableReader 7.99 root data:HashAgg - │ └─HashAgg 7.99 cop[tikv] group by:test.t.a, funcs:count(1)->Column#16 + │ └─HashAgg 7.99 cop[tikv] group by:test.t.a, funcs:count(1)->Column#24 │ └─Selection 9.99 cop[tikv] eq(test.t.a, test.t.a), not(isnull(test.t.a)) │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo └─TableReader(Probe) 9.99 root data:Selection @@ -612,16 +612,16 @@ HashJoin 8002.00 root right outer join, equal:[eq(test.t.nb, test.t.nb)] └─TableFullScan 10000.00 cop[tikv] table:tb keep order:false, stats:pseudo explain format = 'brief' select ifnull(t.a, 1) in (select count(*) from t s , t t1 where s.a = t.a and s.a = t1.a) from t; id estRows task access object operator info -Projection 10000.00 root Column#14 -└─Apply 10000.00 root left outer semi join, equal:[eq(Column#15, Column#13)] - ├─Projection(Build) 10000.00 root test.t.a, ifnull(test.t.a, 1)->Column#15 +Projection 10000.00 root Column#22 +└─Apply 10000.00 root left outer semi join, equal:[eq(Column#23, Column#21)] + ├─Projection(Build) 10000.00 root test.t.a, ifnull(test.t.a, 1)->Column#23 │ └─TableReader 10000.00 root data:TableFullScan │ └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo - └─HashAgg(Probe) 1.00 root funcs:count(Column#17)->Column#13 + └─HashAgg(Probe) 1.00 root funcs:count(Column#25)->Column#21 └─HashJoin 9.99 root inner join, equal:[eq(test.t.a, test.t.a)] - ├─HashAgg(Build) 7.99 root group by:test.t.a, funcs:count(Column#18)->Column#17, funcs:firstrow(test.t.a)->test.t.a + ├─HashAgg(Build) 7.99 root group by:test.t.a, funcs:count(Column#26)->Column#25, funcs:firstrow(test.t.a)->test.t.a │ └─TableReader 7.99 root data:HashAgg - │ └─HashAgg 7.99 cop[tikv] group by:test.t.a, funcs:count(1)->Column#18 + │ └─HashAgg 7.99 cop[tikv] group by:test.t.a, funcs:count(1)->Column#26 │ └─Selection 9.99 cop[tikv] eq(test.t.a, test.t.a), not(isnull(test.t.a)) │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo └─TableReader(Probe) 9.99 root data:Selection @@ -788,7 +788,7 @@ drop table if exists t; create table t(a int, b int); explain format = 'brief' select (select count(n.a) from t) from t n; id estRows task access object operator info -Projection 1.00 root Column#8 +Projection 1.00 root Column#11 └─Apply 1.00 root CARTESIAN left outer join ├─StreamAgg(Build) 1.00 root funcs:count(test.t.a)->Column#7 │ └─TableReader 10000.00 root data:TableFullScan diff --git a/cmd/explaintest/r/explain_easy_stats.result b/cmd/explaintest/r/explain_easy_stats.result index 92eee10bfc47c..d31bcf4f4174f 100644 --- a/cmd/explaintest/r/explain_easy_stats.result +++ b/cmd/explaintest/r/explain_easy_stats.result @@ -102,7 +102,7 @@ id estRows task access object operator info MemTableScan 10000.00 root table:COLUMNS explain 
format = 'brief' select c2 = (select c2 from t2 where t1.c1 = t2.c1 order by c1 limit 1) from t1; id estRows task access object operator info -Projection 1999.00 root eq(test.t1.c2, test.t2.c2)->Column#8 +Projection 1999.00 root eq(test.t1.c2, test.t2.c2)->Column#11 └─Apply 1999.00 root CARTESIAN left outer join ├─TableReader(Build) 1999.00 root data:TableFullScan │ └─TableFullScan 1999.00 cop[tikv] table:t1 keep order:false @@ -128,28 +128,28 @@ HashJoin 1999.00 root CARTESIAN left outer semi join, other cond:eq(1, test.t2. explain format="dot" select 1 in (select c2 from t2) from t1; dot contents -digraph HashJoin_7 { -subgraph cluster7{ +digraph HashJoin_8 { +subgraph cluster8{ node [style=filled, color=lightgrey] color=black label = "root" -"HashJoin_7" -> "TableReader_9" -"HashJoin_7" -> "TableReader_13" +"HashJoin_8" -> "TableReader_10" +"HashJoin_8" -> "TableReader_14" } -subgraph cluster8{ +subgraph cluster9{ node [style=filled, color=lightgrey] color=black label = "cop" -"TableFullScan_8" +"TableFullScan_9" } -subgraph cluster12{ +subgraph cluster13{ node [style=filled, color=lightgrey] color=black label = "cop" -"TableFullScan_12" +"TableFullScan_13" } -"TableReader_9" -> "TableFullScan_8" -"TableReader_13" -> "TableFullScan_12" +"TableReader_10" -> "TableFullScan_9" +"TableReader_14" -> "TableFullScan_13" } explain format = 'brief' select * from index_prune WHERE a = 1010010404050976781 AND b = 26467085526790 LIMIT 1; diff --git a/cmd/explaintest/r/select.result b/cmd/explaintest/r/select.result index d4133abd647fc..0836e2461a2a3 100644 --- a/cmd/explaintest/r/select.result +++ b/cmd/explaintest/r/select.result @@ -374,20 +374,20 @@ drop table if exists t; create table t(a int, b int); explain format = 'brief' select a != any (select a from t t2) from t t1; id estRows task access object operator info -Projection 10000.00 root and(or(or(gt(Column#8, 1), ne(test.t.a, Column#7)), if(ne(Column#9, 0), , 0)), and(ne(Column#10, 0), if(isnull(test.t.a), , 1)))->Column#11 +Projection 10000.00 root and(or(or(gt(Column#11, 1), ne(test.t.a, Column#10)), if(ne(Column#12, 0), , 0)), and(ne(Column#13, 0), if(isnull(test.t.a), , 1)))->Column#14 └─HashJoin 10000.00 root CARTESIAN inner join - ├─StreamAgg(Build) 1.00 root funcs:max(Column#13)->Column#7, funcs:count(distinct Column#14)->Column#8, funcs:sum(Column#15)->Column#9, funcs:count(1)->Column#10 - │ └─Projection 10000.00 root test.t.a, test.t.a, cast(isnull(test.t.a), decimal(22,0) BINARY)->Column#15 + ├─StreamAgg(Build) 1.00 root funcs:max(Column#16)->Column#10, funcs:count(distinct Column#17)->Column#11, funcs:sum(Column#18)->Column#12, funcs:count(1)->Column#13 + │ └─Projection 10000.00 root test.t.a, test.t.a, cast(isnull(test.t.a), decimal(22,0) BINARY)->Column#18 │ └─TableReader 10000.00 root data:TableFullScan │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo └─TableReader(Probe) 10000.00 root data:TableFullScan └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo explain format = 'brief' select a = all (select a from t t2) from t t1; id estRows task access object operator info -Projection 10000.00 root or(and(and(le(Column#8, 1), eq(test.t.a, Column#7)), if(ne(Column#9, 0), , 1)), or(eq(Column#10, 0), if(isnull(test.t.a), , 0)))->Column#11 +Projection 10000.00 root or(and(and(le(Column#11, 1), eq(test.t.a, Column#10)), if(ne(Column#12, 0), , 1)), or(eq(Column#13, 0), if(isnull(test.t.a), , 0)))->Column#14 └─HashJoin 10000.00 root CARTESIAN inner join - ├─StreamAgg(Build) 1.00 root 
funcs:firstrow(Column#13)->Column#7, funcs:count(distinct Column#14)->Column#8, funcs:sum(Column#15)->Column#9, funcs:count(1)->Column#10 - │ └─Projection 10000.00 root test.t.a, test.t.a, cast(isnull(test.t.a), decimal(22,0) BINARY)->Column#15 + ├─StreamAgg(Build) 1.00 root funcs:firstrow(Column#16)->Column#10, funcs:count(distinct Column#17)->Column#11, funcs:sum(Column#18)->Column#12, funcs:count(1)->Column#13 + │ └─Projection 10000.00 root test.t.a, test.t.a, cast(isnull(test.t.a), decimal(22,0) BINARY)->Column#18 │ └─TableReader 10000.00 root data:TableFullScan │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo └─TableReader(Probe) 10000.00 root data:TableFullScan @@ -398,11 +398,11 @@ drop table if exists s; create table s(a varchar(20), b varchar(20)); explain format = 'brief' select a in (select a from s where s.b = t.b) from t; id estRows task access object operator info -HashJoin 10000.00 root left outer semi join, equal:[eq(Column#8, Column#9)], other cond:eq(cast(test.t.a, double BINARY), cast(test.s.a, double BINARY)) -├─Projection(Build) 10000.00 root test.s.a, cast(test.s.b, double BINARY)->Column#9 +HashJoin 10000.00 root left outer semi join, equal:[eq(Column#11, Column#12)], other cond:eq(cast(test.t.a, double BINARY), cast(test.s.a, double BINARY)) +├─Projection(Build) 10000.00 root test.s.a, cast(test.s.b, double BINARY)->Column#12 │ └─TableReader 10000.00 root data:TableFullScan │ └─TableFullScan 10000.00 cop[tikv] table:s keep order:false, stats:pseudo -└─Projection(Probe) 10000.00 root test.t.a, cast(test.t.b, double BINARY)->Column#8 +└─Projection(Probe) 10000.00 root test.t.a, cast(test.t.b, double BINARY)->Column#11 └─TableReader 10000.00 root data:TableFullScan └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo explain format = 'brief' select a in (select a+b from t t2 where t2.b = t1.b) from t t1; diff --git a/cmd/explaintest/r/subquery.result b/cmd/explaintest/r/subquery.result index 50c83dc1800f6..0b054d45ba2a3 100644 --- a/cmd/explaintest/r/subquery.result +++ b/cmd/explaintest/r/subquery.result @@ -17,11 +17,11 @@ insert into t values(1,1,1,1),(2,2,2,2),(3,2,2,2),(4,2,2,2),(5,2,2,2); analyze table t; explain format = 'brief' select t.c in (select count(*) from t s use index(idx), t t1 where s.b = 1 and s.c = 1 and s.d = t.a and s.a = t1.a) from t; id estRows task access object operator info -Projection 5.00 root Column#14 -└─Apply 5.00 root CARTESIAN left outer semi join, other cond:eq(test.t.c, Column#13) +Projection 5.00 root Column#22 +└─Apply 5.00 root CARTESIAN left outer semi join, other cond:eq(test.t.c, Column#21) ├─TableReader(Build) 5.00 root data:TableFullScan │ └─TableFullScan 5.00 cop[tikv] table:t keep order:false - └─StreamAgg(Probe) 1.00 root funcs:count(1)->Column#13 + └─StreamAgg(Probe) 1.00 root funcs:count(1)->Column#21 └─IndexJoin 0.50 root inner join, inner:TableReader, outer key:test.t.a, inner key:test.t.a, equal cond:eq(test.t.a, test.t.a) ├─IndexReader(Build) 1.00 root index:IndexRangeScan │ └─IndexRangeScan 1.00 cop[tikv] table:s, index:idx(b, c, d) range: decided by [eq(test.t.b, 1) eq(test.t.c, 1) eq(test.t.d, test.t.a)], keep order:false diff --git a/cmd/explaintest/r/tpch.result b/cmd/explaintest/r/tpch.result index addf03e16b5b8..319713d946aa2 100644 --- a/cmd/explaintest/r/tpch.result +++ b/cmd/explaintest/r/tpch.result @@ -719,8 +719,8 @@ id estRows task access object operator info Projection 1304801.67 root tpch.partsupp.ps_partkey, Column#35 └─Sort 1304801.67 root 
Column#35:desc └─Selection 1304801.67 root gt(Column#35, NULL) - └─HashAgg 1631002.09 root group by:Column#44, funcs:sum(Column#42)->Column#35, funcs:firstrow(Column#43)->tpch.partsupp.ps_partkey - └─Projection 1631002.09 root mul(tpch.partsupp.ps_supplycost, cast(tpch.partsupp.ps_availqty, decimal(20,0) BINARY))->Column#42, tpch.partsupp.ps_partkey, tpch.partsupp.ps_partkey + └─HashAgg 1631002.09 root group by:Column#61, funcs:sum(Column#59)->Column#35, funcs:firstrow(Column#60)->tpch.partsupp.ps_partkey + └─Projection 1631002.09 root mul(tpch.partsupp.ps_supplycost, cast(tpch.partsupp.ps_availqty, decimal(20,0) BINARY))->Column#59, tpch.partsupp.ps_partkey, tpch.partsupp.ps_partkey └─HashJoin 1631002.09 root inner join, equal:[eq(tpch.supplier.s_suppkey, tpch.partsupp.ps_suppkey)] ├─HashJoin(Build) 20000.00 root inner join, equal:[eq(tpch.nation.n_nationkey, tpch.supplier.s_nationkey)] │ ├─TableReader(Build) 1.00 root data:Selection diff --git a/executor/executor_test.go b/executor/executor_test.go index 51cf16173365b..027255952060e 100644 --- a/executor/executor_test.go +++ b/executor/executor_test.go @@ -6643,7 +6643,7 @@ func (s *testSuiteP2) TestApplyCache(c *C) { tk.MustExec("insert into t values (1),(1),(1),(1),(1),(1),(1),(1),(1);") tk.MustExec("analyze table t;") result := tk.MustQuery("explain analyze SELECT count(1) FROM (SELECT (SELECT min(a) FROM t as t2 WHERE t2.a > t1.a) AS a from t as t1) t;") - c.Assert(result.Rows()[1][0], Equals, "└─Apply_38") + c.Assert(result.Rows()[1][0], Equals, "└─Apply_39") var ( ind int flag bool @@ -6663,7 +6663,7 @@ func (s *testSuiteP2) TestApplyCache(c *C) { tk.MustExec("insert into t values (1),(2),(3),(4),(5),(6),(7),(8),(9);") tk.MustExec("analyze table t;") result = tk.MustQuery("explain analyze SELECT count(1) FROM (SELECT (SELECT min(a) FROM t as t2 WHERE t2.a > t1.a) AS a from t as t1) t;") - c.Assert(result.Rows()[1][0], Equals, "└─Apply_38") + c.Assert(result.Rows()[1][0], Equals, "└─Apply_39") flag = false value = (result.Rows()[1][5]).(string) for ind = 0; ind < len(value)-5; ind++ { diff --git a/executor/testdata/agg_suite_out.json b/executor/testdata/agg_suite_out.json index 325f9dd48972c..abfb0f9d102ef 100644 --- a/executor/testdata/agg_suite_out.json +++ b/executor/testdata/agg_suite_out.json @@ -49,27 +49,27 @@ "Name": "TestIssue12759HashAggCalledByApply", "Cases": [ [ - "Projection 1.00 root Column#9, Column#10, Column#11, Column#12", + "Projection 1.00 root Column#9, Column#12, Column#15, Column#18", "└─Apply 1.00 root CARTESIAN left outer join", " ├─Apply(Build) 1.00 root CARTESIAN left outer join", " │ ├─Apply(Build) 1.00 root CARTESIAN left outer join", - " │ │ ├─HashAgg(Build) 1.00 root funcs:sum(Column#22)->Column#9, funcs:firstrow(Column#23)->test.test.a", - " │ │ │ └─Projection 10000.00 root cast(test.test.a, decimal(32,0) BINARY)->Column#22, test.test.a", + " │ │ ├─HashAgg(Build) 1.00 root funcs:sum(Column#28)->Column#9, funcs:firstrow(Column#29)->test.test.a", + " │ │ │ └─Projection 10000.00 root cast(test.test.a, decimal(32,0) BINARY)->Column#28, test.test.a", " │ │ │ └─TableReader 10000.00 root data:TableFullScan", " │ │ │ └─TableFullScan 10000.00 cop[tikv] table:tt keep order:false, stats:pseudo", - " │ │ └─Projection(Probe) 1.00 root ->Column#10", + " │ │ └─Projection(Probe) 1.00 root ->Column#12", " │ │ └─Limit 1.00 root offset:0, count:1", " │ │ └─TableReader 1.00 root data:Limit", " │ │ └─Limit 1.00 cop[tikv] offset:0, count:1", " │ │ └─Selection 1.00 cop[tikv] eq(test.test.a, test.test.a)", " │ │ 
└─TableFullScan 1000.00 cop[tikv] table:test keep order:false, stats:pseudo", - " │ └─Projection(Probe) 1.00 root ->Column#11", + " │ └─Projection(Probe) 1.00 root ->Column#15", " │ └─Limit 1.00 root offset:0, count:1", " │ └─TableReader 1.00 root data:Limit", " │ └─Limit 1.00 cop[tikv] offset:0, count:1", " │ └─Selection 1.00 cop[tikv] eq(test.test.a, test.test.a)", " │ └─TableFullScan 1000.00 cop[tikv] table:test keep order:false, stats:pseudo", - " └─Projection(Probe) 1.00 root ->Column#12", + " └─Projection(Probe) 1.00 root ->Column#18", " └─Limit 1.00 root offset:0, count:1", " └─TableReader 1.00 root data:Limit", " └─Limit 1.00 cop[tikv] offset:0, count:1", diff --git a/planner/cascades/testdata/integration_suite_out.json b/planner/cascades/testdata/integration_suite_out.json index f7ac52e543f2b..cd3123e3ee0be 100644 --- a/planner/cascades/testdata/integration_suite_out.json +++ b/planner/cascades/testdata/integration_suite_out.json @@ -1026,17 +1026,17 @@ { "SQL": "select a = (select a from t2 where t1.b = t2.b order by a limit 1) from t1", "Plan": [ - "Projection_18 10000.00 root eq(test.t1.a, test.t2.a)->Column#5", - "└─Apply_20 10000.00 root CARTESIAN left outer join", - " ├─TableReader_21(Build) 10000.00 root data:TableFullScan_22", - " │ └─TableFullScan_22 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", - " └─MaxOneRow_23(Probe) 1.00 root ", - " └─Projection_24 1.00 root test.t2.a", - " └─Limit_26 1.00 root offset:0, count:1", - " └─TableReader_34 1.00 root data:Limit_35", - " └─Limit_35 1.00 cop[tikv] offset:0, count:1", - " └─Selection_32 1.00 cop[tikv] eq(test.t1.b, test.t2.b)", - " └─TableFullScan_33 1.00 cop[tikv] table:t2 keep order:true, stats:pseudo" + "Projection_19 10000.00 root eq(test.t1.a, test.t2.a)->Column#7", + "└─Apply_21 10000.00 root CARTESIAN left outer join", + " ├─TableReader_22(Build) 10000.00 root data:TableFullScan_23", + " │ └─TableFullScan_23 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", + " └─MaxOneRow_24(Probe) 1.00 root ", + " └─Projection_25 1.00 root test.t2.a", + " └─Limit_27 1.00 root offset:0, count:1", + " └─TableReader_35 1.00 root data:Limit_36", + " └─Limit_36 1.00 cop[tikv] offset:0, count:1", + " └─Selection_33 1.00 cop[tikv] eq(test.t1.b, test.t2.b)", + " └─TableFullScan_34 1.00 cop[tikv] table:t2 keep order:true, stats:pseudo" ], "Result": [ "1", @@ -1048,25 +1048,25 @@ { "SQL": "select sum(a), (select t1.a from t1 where t1.a = t2.a limit 1), (select t1.b from t1 where t1.b = t2.b limit 1) from t2", "Plan": [ - "Projection_30 1.00 root Column#7, test.t1.a, test.t1.b", - "└─Apply_32 1.00 root CARTESIAN left outer join", - " ├─Apply_34(Build) 1.00 root CARTESIAN left outer join", - " │ ├─HashAgg_39(Build) 1.00 root funcs:sum(Column#8)->Column#7, funcs:firstrow(Column#9)->test.t2.a, funcs:firstrow(Column#10)->test.t2.b", - " │ │ └─TableReader_40 1.00 root data:HashAgg_41", - " │ │ └─HashAgg_41 1.00 cop[tikv] funcs:sum(test.t2.a)->Column#8, funcs:firstrow(test.t2.a)->Column#9, funcs:firstrow(test.t2.b)->Column#10", - " │ │ └─TableFullScan_38 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", - " │ └─MaxOneRow_42(Probe) 1.00 root ", - " │ └─Limit_43 1.00 root offset:0, count:1", - " │ └─TableReader_44 1.00 root data:Limit_45", - " │ └─Limit_45 1.00 cop[tikv] offset:0, count:1", - " │ └─Selection_46 1.00 cop[tikv] eq(test.t1.a, test.t2.a)", - " │ └─TableFullScan_47 1.00 cop[tikv] table:t1 keep order:false, stats:pseudo", - " └─MaxOneRow_48(Probe) 1.00 root ", - " └─Limit_49 1.00 root offset:0, count:1", 
- " └─TableReader_50 1.00 root data:Limit_51", - " └─Limit_51 1.00 cop[tikv] offset:0, count:1", - " └─Selection_52 1.00 cop[tikv] eq(test.t1.b, test.t2.b)", - " └─TableFullScan_53 1.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + "Projection_32 1.00 root Column#7, test.t1.a, test.t1.b", + "└─Apply_34 1.00 root CARTESIAN left outer join", + " ├─Apply_36(Build) 1.00 root CARTESIAN left outer join", + " │ ├─HashAgg_41(Build) 1.00 root funcs:sum(Column#12)->Column#7, funcs:firstrow(Column#13)->test.t2.a, funcs:firstrow(Column#14)->test.t2.b", + " │ │ └─TableReader_42 1.00 root data:HashAgg_43", + " │ │ └─HashAgg_43 1.00 cop[tikv] funcs:sum(test.t2.a)->Column#12, funcs:firstrow(test.t2.a)->Column#13, funcs:firstrow(test.t2.b)->Column#14", + " │ │ └─TableFullScan_40 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + " │ └─MaxOneRow_44(Probe) 1.00 root ", + " │ └─Limit_45 1.00 root offset:0, count:1", + " │ └─TableReader_46 1.00 root data:Limit_47", + " │ └─Limit_47 1.00 cop[tikv] offset:0, count:1", + " │ └─Selection_48 1.00 cop[tikv] eq(test.t1.a, test.t2.a)", + " │ └─TableFullScan_49 1.00 cop[tikv] table:t1 keep order:false, stats:pseudo", + " └─MaxOneRow_50(Probe) 1.00 root ", + " └─Limit_51 1.00 root offset:0, count:1", + " └─TableReader_52 1.00 root data:Limit_53", + " └─Limit_53 1.00 cop[tikv] offset:0, count:1", + " └─Selection_54 1.00 cop[tikv] eq(test.t1.b, test.t2.b)", + " └─TableFullScan_55 1.00 cop[tikv] table:t1 keep order:false, stats:pseudo" ], "Result": [ "6 1 11" @@ -1280,7 +1280,7 @@ { "SQL": "select 1 from (select /*+ HASH_JOIN(t1) */ t1.a in (select t2.a from t2) from t1) x;", "Plan": [ - "Projection 10000.00 root 1->Column#8", + "Projection 10000.00 root 1->Column#11", "└─HashJoin 10000.00 root CARTESIAN left outer semi join, other cond:eq(test.t1.a, test.t2.a)", " ├─IndexReader(Build) 10000.00 root index:IndexFullScan", " │ └─IndexFullScan 10000.00 cop[tikv] table:t2, index:idx_a(a) keep order:false, stats:pseudo", @@ -1295,7 +1295,7 @@ { "SQL": "select 1 from (select /*+ HASH_JOIN(t1) */ t1.a not in (select t2.a from t2) from t1) x;", "Plan": [ - "Projection 10000.00 root 1->Column#8", + "Projection 10000.00 root 1->Column#11", "└─HashJoin 10000.00 root CARTESIAN anti left outer semi join, other cond:eq(test.t1.a, test.t2.a)", " ├─IndexReader(Build) 10000.00 root index:IndexFullScan", " │ └─IndexFullScan 10000.00 cop[tikv] table:t2, index:idx_a(a) keep order:false, stats:pseudo", diff --git a/planner/cascades/testdata/stringer_suite_out.json b/planner/cascades/testdata/stringer_suite_out.json index 4a3335854ac88..4890084817ea2 100644 --- a/planner/cascades/testdata/stringer_suite_out.json +++ b/planner/cascades/testdata/stringer_suite_out.json @@ -289,28 +289,28 @@ { "SQL": "select a = (select a from t t2 where t1.b = t2.b order by a limit 1) from t t1", "Result": [ - "Group#0 Schema:[Column#25]", - " Projection_3 input:[Group#1], eq(test.t.a, test.t.a)->Column#25", + "Group#0 Schema:[Column#37]", + " Projection_3 input:[Group#1], eq(test.t.a, test.t.a)->Column#37", "Group#1 Schema:[test.t.a,test.t.b,test.t.a]", - " Apply_9 input:[Group#2,Group#3], left outer join", + " Apply_10 input:[Group#2,Group#3], left outer join", "Group#2 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", - " TiKVSingleGather_11 input:[Group#4], table:t1", + " TiKVSingleGather_12 input:[Group#4], table:t1", "Group#4 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", - " TableScan_10 table:t1, pk col:test.t.a", + " TableScan_11 table:t1, pk col:test.t.a", "Group#3 
Schema:[test.t.a], UniqueKey:[test.t.a]", - " MaxOneRow_8 input:[Group#5]", + " MaxOneRow_9 input:[Group#5]", "Group#5 Schema:[test.t.a], UniqueKey:[test.t.a]", - " Limit_7 input:[Group#6], offset:0, count:1", + " Limit_8 input:[Group#6], offset:0, count:1", "Group#6 Schema:[test.t.a], UniqueKey:[test.t.a]", - " Sort_6 input:[Group#7], test.t.a", + " Sort_7 input:[Group#7], test.t.a", "Group#7 Schema:[test.t.a], UniqueKey:[test.t.a]", - " Projection_5 input:[Group#8], test.t.a", + " Projection_6 input:[Group#8], test.t.a", "Group#8 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", - " TiKVSingleGather_13 input:[Group#9], table:t2", + " TiKVSingleGather_14 input:[Group#9], table:t2", "Group#9 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", - " Selection_14 input:[Group#10], eq(test.t.b, test.t.b)", + " Selection_15 input:[Group#10], eq(test.t.b, test.t.b)", "Group#10 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", - " TableScan_12 table:t2, pk col:test.t.a" + " TableScan_13 table:t2, pk col:test.t.a" ] } ] diff --git a/planner/core/integration_test.go b/planner/core/integration_test.go index 99dd359fdbb8b..e0985788f5f60 100644 --- a/planner/core/integration_test.go +++ b/planner/core/integration_test.go @@ -2882,6 +2882,14 @@ func (s *testIntegrationSuite) TestIndexMergeTableFilter(c *C) { )) } +func (s *testIntegrationSuite) TestIssue22850(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t1") + tk.MustExec("CREATE TABLE t1 (a int(11))") + tk.MustQuery("SELECT @v:=(SELECT 1 FROM t1 t2 LEFT JOIN t1 ON t1.a GROUP BY t1.a) FROM t1").Check(testkit.Rows()) // work fine +} + // #22949: test HexLiteral Used in GetVar expr func (s *testIntegrationSuite) TestGetVarExprWithHexLiteral(c *C) { tk := testkit.NewTestKit(c, s.store) diff --git a/planner/core/logical_plan_builder.go b/planner/core/logical_plan_builder.go index 33b5abd46e06a..a887538deed16 100644 --- a/planner/core/logical_plan_builder.go +++ b/planner/core/logical_plan_builder.go @@ -300,32 +300,12 @@ func (b *PlanBuilder) buildAggregation(ctx context.Context, p LogicalPlan, aggFu return plan4Agg, aggIndexMap, nil } -func (b *PlanBuilder) buildTableRefsWithCache(ctx context.Context, from *ast.TableRefsClause) (p LogicalPlan, err error) { - return b.buildTableRefs(ctx, from, true) -} - -func (b *PlanBuilder) buildTableRefs(ctx context.Context, from *ast.TableRefsClause, useCache bool) (p LogicalPlan, err error) { +func (b *PlanBuilder) buildTableRefs(ctx context.Context, from *ast.TableRefsClause) (p LogicalPlan, err error) { if from == nil { p = b.buildTableDual() return } - if !useCache { - return b.buildResultSetNode(ctx, from.TableRefs) - } - var ok bool - p, ok = b.cachedResultSetNodes[from.TableRefs] - if ok { - m := b.cachedHandleHelperMap[from.TableRefs] - b.handleHelper.pushMap(m) - return - } - p, err = b.buildResultSetNode(ctx, from.TableRefs) - if err != nil { - return nil, err - } - b.cachedResultSetNodes[from.TableRefs] = p - b.cachedHandleHelperMap[from.TableRefs] = b.handleHelper.tailMap() - return + return b.buildResultSetNode(ctx, from.TableRefs) } func (b *PlanBuilder) buildResultSetNode(ctx context.Context, node ast.ResultSetNode) (p LogicalPlan, err error) { @@ -2261,17 +2241,13 @@ func (r *correlatedAggregateResolver) Enter(n ast.Node) (ast.Node, bool) { // Finally it restore the original SELECT stmt. 
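// A concrete instance (it appears in the explain_easy results above):
// in `select (select count(n.a) from t) from t n`, the aggregate count(n.a)
// refers to the outer table n, so per the correlated-aggregate rules it must
// be evaluated in the outer query; any plan already built for the inner FROM
// clause at this point is provisional and is rebuilt after resolution.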
func (r *correlatedAggregateResolver) resolveSelect(sel *ast.SelectStmt) (err error) { // collect correlated aggregate from sub-queries inside FROM clause. - useCache, err := r.collectFromTableRefs(r.ctx, sel.From) + _, err = r.collectFromTableRefs(r.ctx, sel.From) if err != nil { return err } - // do not use cache when for update read - if isForUpdateReadSelectLock(sel.LockInfo) { - useCache = false - } // we cannot use cache if there are correlated aggregates inside FROM clause, // since the plan we are building now is not correct and need to be rebuild later. - p, err := r.b.buildTableRefs(r.ctx, sel.From, useCache) + p, err := r.b.buildTableRefs(r.ctx, sel.From) if err != nil { return err } @@ -3339,7 +3315,7 @@ func (b *PlanBuilder) buildSelect(ctx context.Context, sel *ast.SelectStmt) (p L // For sub-queries, the FROM clause may have already been built in outer query when resolving correlated aggregates. // If the ResultSetNode inside FROM clause has nothing to do with correlated aggregates, we can simply get the // existing ResultSetNode from the cache. - p, err = b.buildTableRefsWithCache(ctx, sel.From) + p, err = b.buildTableRefs(ctx, sel.From) if err != nil { return nil, err } diff --git a/planner/core/planbuilder.go b/planner/core/planbuilder.go index 0a7b73beb482e..ed1777031d43c 100644 --- a/planner/core/planbuilder.go +++ b/planner/core/planbuilder.go @@ -467,9 +467,6 @@ type PlanBuilder struct { // correlatedAggMapper stores columns for correlated aggregates which should be evaluated in outer query. correlatedAggMapper map[*ast.AggregateFuncExpr]*expression.CorrelatedColumn - // cache ResultSetNodes and HandleHelperMap to avoid rebuilding. - cachedResultSetNodes map[*ast.Join]LogicalPlan - cachedHandleHelperMap map[*ast.Join]map[int64][]HandleCols // isForUpdateRead should be true in either of the following situations // 1. use `inside insert`, `update`, `delete` or `select for update` statement // 2. 
isolation level is RC @@ -579,15 +576,13 @@ func NewPlanBuilder(sctx sessionctx.Context, is infoschema.InfoSchema, processor sctx.GetSessionVars().PlannerSelectBlockAsName = make([]ast.HintTable, processor.MaxSelectStmtOffset()+1) } return &PlanBuilder{ - ctx: sctx, - is: is, - colMapper: make(map[*ast.ColumnNameExpr]int), - handleHelper: &handleColHelper{id2HandleMapStack: make([]map[int64][]HandleCols, 0)}, - hintProcessor: processor, - correlatedAggMapper: make(map[*ast.AggregateFuncExpr]*expression.CorrelatedColumn), - cachedResultSetNodes: make(map[*ast.Join]LogicalPlan), - cachedHandleHelperMap: make(map[*ast.Join]map[int64][]HandleCols), - isForUpdateRead: sctx.GetSessionVars().IsPessimisticReadConsistency(), + ctx: sctx, + is: is, + colMapper: make(map[*ast.ColumnNameExpr]int), + handleHelper: &handleColHelper{id2HandleMapStack: make([]map[int64][]HandleCols, 0)}, + hintProcessor: processor, + correlatedAggMapper: make(map[*ast.AggregateFuncExpr]*expression.CorrelatedColumn), + isForUpdateRead: sctx.GetSessionVars().IsPessimisticReadConsistency(), }, savedBlockNames } diff --git a/planner/core/testdata/analyze_suite_out.json b/planner/core/testdata/analyze_suite_out.json index d902165db2dfe..7a803b145210a 100644 --- a/planner/core/testdata/analyze_suite_out.json +++ b/planner/core/testdata/analyze_suite_out.json @@ -264,11 +264,11 @@ "Name": "TestCorrelatedEstimation", "Cases": [ [ - "Projection 10.00 root Column#14", - "└─Apply 10.00 root CARTESIAN left outer semi join, other cond:eq(test.t.c, Column#13)", + "Projection 10.00 root Column#22", + "└─Apply 10.00 root CARTESIAN left outer semi join, other cond:eq(test.t.c, Column#21)", " ├─TableReader(Build) 10.00 root data:TableFullScan", " │ └─TableFullScan 10.00 cop[tikv] table:t keep order:false", - " └─StreamAgg(Probe) 1.00 root funcs:count(1)->Column#13", + " └─StreamAgg(Probe) 1.00 root funcs:count(1)->Column#21", " └─HashJoin 1.00 root inner join, equal:[eq(test.t.a, test.t.a)]", " ├─TableReader(Build) 1.00 root data:Selection", " │ └─Selection 1.00 cop[tikv] eq(test.t.a, test.t.a), not(isnull(test.t.a))", @@ -278,12 +278,12 @@ " └─TableFullScan 10.00 cop[tikv] table:s keep order:false" ], [ - "Projection 10.00 root Column#9", + "Projection 10.00 root Column#13", "└─Apply 10.00 root CARTESIAN left outer join", " ├─TableReader(Build) 10.00 root data:TableFullScan", " │ └─TableFullScan 10.00 cop[tikv] table:t keep order:false", " └─MaxOneRow(Probe) 1.00 root ", - " └─Projection 0.10 root concat(cast(test.t.a, var_string(20)), ,, cast(test.t.b, var_string(20)))->Column#9", + " └─Projection 0.10 root concat(cast(test.t.a, var_string(20)), ,, cast(test.t.b, var_string(20)))->Column#13", " └─IndexReader 0.10 root index:Selection", " └─Selection 0.10 cop[tikv] eq(test.t.a, test.t.a)", " └─IndexRangeScan 1.00 cop[tikv] table:t1, index:idx(c, b, a) range: decided by [eq(test.t.c, test.t.c)], keep order:false" diff --git a/planner/core/testdata/integration_suite_out.json b/planner/core/testdata/integration_suite_out.json index 5cda7110658b0..f2af5bf1ae1ab 100644 --- a/planner/core/testdata/integration_suite_out.json +++ b/planner/core/testdata/integration_suite_out.json @@ -338,9 +338,9 @@ "SQL": "desc format = 'brief' select t1.a from t t1 order by (t1.b = 1 and exists (select 1 from t t2 where t1.b = t2.b)) limit 1", "Plan": [ "Projection 1.00 root test.t.a", - "└─Projection 1.00 root test.t.a, test.t.b, Column#8", - " └─TopN 1.00 root Column#10, offset:0, count:1", - " └─Projection 10000.00 root test.t.a, test.t.b, Column#8, 
and(eq(test.t.b, 1), Column#8)->Column#10", + "└─Projection 1.00 root test.t.a, test.t.b, Column#11", + " └─TopN 1.00 root Column#13, offset:0, count:1", + " └─Projection 10000.00 root test.t.a, test.t.b, Column#11, and(eq(test.t.b, 1), Column#11)->Column#13", " └─HashJoin 10000.00 root left outer semi join, equal:[eq(test.t.b, test.t.b)]", " ├─TableReader(Build) 10000.00 root data:TableFullScan", " │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", diff --git a/planner/core/testdata/plan_suite_out.json b/planner/core/testdata/plan_suite_out.json index 2677b074a7ee7..8b3507b4d79eb 100644 --- a/planner/core/testdata/plan_suite_out.json +++ b/planner/core/testdata/plan_suite_out.json @@ -2144,12 +2144,12 @@ { "SQL": "select 1 from (select /*+ HASH_JOIN(t1) */ t1.a in (select t2.a from t2) from t1) x;", "Plan": "LeftHashJoin{IndexReader(Index(t1.idx_a)[[NULL,+inf]])->IndexReader(Index(t2.idx_a)[[NULL,+inf]])}->Projection", - "Hints": "use_index(@`sel_2` `test`.`t1` `idx_a`), use_index(@`sel_2` `test`.`t2` `idx_a`), hash_join(@`sel_2` `test`.`t1`)" + "Hints": "use_index(@`sel_2` `test`.`t1` `idx_a`), use_index(@`sel_3` `test`.`t2` `idx_a`), hash_join(@`sel_2` `test`.`t1`)" }, { "SQL": "select 1 from (select /*+ HASH_JOIN(t1) */ t1.a not in (select t2.a from t2) from t1) x;", "Plan": "LeftHashJoin{IndexReader(Index(t1.idx_a)[[NULL,+inf]])->IndexReader(Index(t2.idx_a)[[NULL,+inf]])}->Projection", - "Hints": "use_index(@`sel_2` `test`.`t1` `idx_a`), use_index(@`sel_2` `test`.`t2` `idx_a`), hash_join(@`sel_2` `test`.`t1`)" + "Hints": "use_index(@`sel_2` `test`.`t1` `idx_a`), use_index(@`sel_3` `test`.`t2` `idx_a`), hash_join(@`sel_2` `test`.`t1`)" }, { "SQL": "select /*+ INL_JOIN(t1) */ t1.b, t2.b from t1 inner join t2 on t1.a = t2.a;", diff --git a/planner/core/testdata/plan_suite_unexported_out.json b/planner/core/testdata/plan_suite_unexported_out.json index fb45c07f644e6..204972182d411 100644 --- a/planner/core/testdata/plan_suite_unexported_out.json +++ b/planner/core/testdata/plan_suite_unexported_out.json @@ -136,7 +136,7 @@ "Join{DataScan(t)->Limit->DataScan(s)}(test.t.a,test.t.a)->Limit->Projection", "Join{DataScan(t)->TopN([test.t.a],0,5)->DataScan(s)}(test.t.a,test.t.a)->TopN([test.t.a],0,5)->Projection", "Join{DataScan(t)->TopN([test.t.a],0,5)->DataScan(s)}(test.t.a,test.t.a)->TopN([test.t.a],0,5)->Projection", - "Join{DataScan(t)->DataScan(s)}(test.t.a,test.t.a)->TopN([Column#25],0,5)->Projection", + "Join{DataScan(t)->DataScan(s)}(test.t.a,test.t.a)->TopN([Column#37],0,5)->Projection", "Join{DataScan(t)->DataScan(s)}(test.t.a,test.t.a)->TopN([test.t.a],0,5)->Projection", "Join{DataScan(t)->DataScan(s)->TopN([test.t.a],0,5)}(test.t.a,test.t.a)->TopN([test.t.a],0,5)->Projection", "Join{DataScan(t)->DataScan(s)}(test.t.a,test.t.a)->TopN([test.t.a test.t.b],0,5)->Projection", @@ -194,7 +194,7 @@ "TableReader(Table(t))->Window(sum(cast(test.t.a, decimal(65,0) BINARY))->Column#14 over())->Sort->Projection", "TableReader(Table(t))->Window(sum(cast(test.t.a, decimal(65,0) BINARY))->Column#14 over(partition by test.t.a))->Sort->Projection", "TableReader(Table(t)->StreamAgg)->StreamAgg->Window(sum(Column#13)->Column#15 over())->Sort->Projection", - "Apply{IndexReader(Index(t.f)[[NULL,+inf]])->IndexReader(Index(t.f)[[NULL,+inf]]->Sel([gt(test.t.a, test.t.a)]))->Window(sum(cast(test.t.a, decimal(65,0) BINARY))->Column#26 over())->MaxOneRow->Sel([Column#26])}->Projection", + 
"Apply{IndexReader(Index(t.f)[[NULL,+inf]])->IndexReader(Index(t.f)[[NULL,+inf]]->Sel([gt(test.t.a, test.t.a)]))->Window(sum(cast(test.t.a, decimal(65,0) BINARY))->Column#38 over())->MaxOneRow->Sel([Column#38])}->Projection", "[planner:3594]You cannot use the alias 'w' of an expression containing a window function in this context.'", "[planner:1247]Reference 'sum_a' not supported (reference to window function)", "[planner:3579]Window name 'w2' is not defined.", @@ -267,7 +267,7 @@ "TableReader(Table(t))->Window(sum(cast(test.t.a, decimal(65,0) BINARY))->Column#14 over())->Sort->Projection", "TableReader(Table(t))->Window(sum(cast(test.t.a, decimal(65,0) BINARY))->Column#14 over(partition by test.t.a))->Sort->Projection", "TableReader(Table(t)->StreamAgg)->StreamAgg->Window(sum(Column#13)->Column#15 over())->Sort->Projection", - "Apply{IndexReader(Index(t.f)[[NULL,+inf]])->IndexReader(Index(t.f)[[NULL,+inf]]->Sel([gt(test.t.a, test.t.a)]))->Window(sum(cast(test.t.a, decimal(65,0) BINARY))->Column#26 over())->MaxOneRow->Sel([Column#26])}->Projection", + "Apply{IndexReader(Index(t.f)[[NULL,+inf]])->IndexReader(Index(t.f)[[NULL,+inf]]->Sel([gt(test.t.a, test.t.a)]))->Window(sum(cast(test.t.a, decimal(65,0) BINARY))->Column#38 over())->MaxOneRow->Sel([Column#38])}->Projection", "[planner:3594]You cannot use the alias 'w' of an expression containing a window function in this context.'", "[planner:1247]Reference 'sum_a' not supported (reference to window function)", "[planner:3579]Window name 'w2' is not defined.", @@ -650,7 +650,7 @@ "1": [ "test.t.a" ], - "2": [ + "4": [ "test.t.a", "test.t.b" ] @@ -665,7 +665,7 @@ "test.t.a", "test.t.b" ], - "2": [ + "4": [ "test.t.b" ] }, @@ -673,7 +673,7 @@ "1": [ "test.t.a" ], - "2": [ + "4": [ "test.t.b" ] }, diff --git a/planner/core/testdata/stats_suite_out.json b/planner/core/testdata/stats_suite_out.json index ff1abdaac8e95..c782b4658d542 100644 --- a/planner/core/testdata/stats_suite_out.json +++ b/planner/core/testdata/stats_suite_out.json @@ -49,12 +49,12 @@ }, { "SQL": "select count(tmp.cmp) from (select t1.a as a, t1.b as b, (t1.b in (select t2.b from t2 where t2.a = t1.a limit 3)) as cmp from t1) tmp group by tmp.a, tmp.b", - "AggInput": "[{[53 54] 4}]", + "AggInput": "[{[56 57] 4}]", "JoinInput": "" }, { "SQL": "select count(tmp.cmp) from (select t1.a as a, t1.b as b, (t1.b not in (select t2.b from t2 where t2.a = t1.a limit 3)) as cmp from t1) tmp group by tmp.a, tmp.b", - "AggInput": "[{[61 62] 4}]", + "AggInput": "[{[67 68] 4}]", "JoinInput": "" }, { @@ -74,8 +74,8 @@ }, { "SQL": "select count(1) from t1 left join t2 on t1.a = t2.a group by t1.a, t1.b", - "AggInput": "[{[90 91] 4}]", - "JoinInput": "[{[90 91] 4}];[]" + "AggInput": "[{[99 100] 4}]", + "JoinInput": "[{[99 100] 4}];[]" }, { "SQL": "select count(1) from t1 left join t2 on t1.a = t2.a group by t2.a, t2.b", @@ -89,18 +89,18 @@ }, { "SQL": "select count(1) from t1 right join t2 on t1.a = t2.a group by t2.a, t2.b", - "AggInput": "[{[114 115] 9}]", - "JoinInput": "[];[{[114 115] 9}]" + "AggInput": "[{[123 124] 9}]", + "JoinInput": "[];[{[123 124] 9}]" }, { "SQL": "select count(tmp.cmp) from (select t1.a as a, t1.b as b, (t1.b in (select t2.b from t2 where t2.a > t1.a)) as cmp from t1) tmp group by tmp.a, tmp.b", - "AggInput": "[{[118 119] 4}]", - "JoinInput": "[{[118 119] 4}];[]" + "AggInput": "[{[127 128] 4}]", + "JoinInput": "[{[127 128] 4}];[]" }, { "SQL": "select count(tmp.cmp) from (select t1.a as a, t1.b as b, (t1.b not in (select t2.b from t2 where t2.a > t1.a)) as cmp from t1) 
tmp group by tmp.a, tmp.b", - "AggInput": "[{[126 127] 4}]", - "JoinInput": "[{[126 127] 4}];[]" + "AggInput": "[{[138 139] 4}]", + "JoinInput": "[{[138 139] 4}];[]" }, { "SQL": "select count(1) from (select t1.a as a, t1.b as b from t1 where t1.b in (select t2.b from t2 where t2.a > t1.a)) tmp group by tmp.a, tmp.b", @@ -114,8 +114,8 @@ }, { "SQL": "select * from t1 left join (select t2.a as a, t2.b as b, count(1) as cnt from t2 group by t2.a, t2.b) as tmp on t1.a = tmp.a and t1.b = tmp.b", - "AggInput": "[{[151 152] 9}]", - "JoinInput": "[{[148 149] 4}];[{[151 152] 9}]" + "AggInput": "[{[166 167] 9}]", + "JoinInput": "[{[163 164] 4}];[{[166 167] 9}]" }, { "SQL": "select count(1) from (select t1.a as a, t1.b as b from t1 limit 3) tmp group by tmp.a, tmp.b", @@ -124,7 +124,7 @@ }, { "SQL": "select count(tmp.a_sum) from (select t1.a as a, t1.b as b, sum(a) over() as a_sum from t1) tmp group by tmp.a, tmp.b", - "AggInput": "[{[159 160] 4}]", + "AggInput": "[{[174 175] 4}]", "JoinInput": "" } ] @@ -179,8 +179,8 @@ { "SQL": "select count(tmp.cmp) from (select t1.a as a, t1.b as b, (t1.b > (select t2.b from t2 where t2.a = t1.a)) as cmp from t1) tmp group by tmp.a, tmp.b", "Plan": [ - "StreamAgg 4.00 root group by:Column#11, Column#12, funcs:count(Column#10)->Column#8", - "└─Projection 4.00 root gt(test.t1.b, test.t2.b)->Column#10, test.t1.a, test.t1.b", + "StreamAgg 4.00 root group by:Column#14, Column#15, funcs:count(Column#13)->Column#11", + "└─Projection 4.00 root gt(test.t1.b, test.t2.b)->Column#13, test.t1.a, test.t1.b", " └─Apply 4.00 root CARTESIAN left outer join", " ├─IndexReader(Build) 4.00 root index:IndexFullScan", " │ └─IndexFullScan 4.00 cop[tikv] table:t1, index:a(a, b) keep order:true", @@ -192,7 +192,7 @@ { "SQL": "select count(tmp.cmp) from (select t1.a as a, t1.b as b, (t1.b in (select t2.b from t2 where t2.a = t1.a limit 3)) as cmp from t1) tmp group by tmp.a, tmp.b", "Plan": [ - "StreamAgg 4.00 root group by:test.t1.a, test.t1.b, funcs:count(Column#7)->Column#8", + "StreamAgg 4.00 root group by:test.t1.a, test.t1.b, funcs:count(Column#10)->Column#11", "└─Apply 4.00 root left outer semi join, equal:[eq(test.t1.b, test.t2.b)]", " ├─IndexReader(Build) 4.00 root index:IndexFullScan", " │ └─IndexFullScan 4.00 cop[tikv] table:t1, index:a(a, b) keep order:true", @@ -205,7 +205,7 @@ { "SQL": "select count(tmp.cmp) from (select t1.a as a, t1.b as b, (t1.b not in (select t2.b from t2 where t2.a = t1.a limit 3)) as cmp from t1) tmp group by tmp.a, tmp.b", "Plan": [ - "StreamAgg 4.00 root group by:test.t1.a, test.t1.b, funcs:count(Column#7)->Column#8", + "StreamAgg 4.00 root group by:test.t1.a, test.t1.b, funcs:count(Column#10)->Column#11", "└─Apply 4.00 root anti left outer semi join, equal:[eq(test.t1.b, test.t2.b)]", " ├─IndexReader(Build) 4.00 root index:IndexFullScan", " │ └─IndexFullScan 4.00 cop[tikv] table:t1, index:a(a, b) keep order:true", @@ -240,7 +240,7 @@ { "SQL": "select count(tmp.cmp) from (select t1.a as a, t1.b as b, (t1.b in (select t2.b from t2 where t2.a > t1.a)) as cmp from t1) tmp group by tmp.a, tmp.b", "Plan": [ - "HashAgg 4.00 root group by:test.t1.a, test.t1.b, funcs:count(Column#7)->Column#8", + "HashAgg 4.00 root group by:test.t1.a, test.t1.b, funcs:count(Column#10)->Column#11", "└─HashJoin 4.00 root left outer semi join, equal:[eq(test.t1.b, test.t2.b)], other cond:gt(test.t2.a, test.t1.a)", " ├─TableReader(Build) 9.00 root data:TableFullScan", " │ └─TableFullScan 9.00 cop[tikv] table:t2 keep order:false", @@ -251,7 +251,7 @@ { "SQL": "select 
count(tmp.cmp) from (select t1.a as a, t1.b as b, (t1.b not in (select t2.b from t2 where t2.a > t1.a)) as cmp from t1) tmp group by tmp.a, tmp.b", "Plan": [ - "HashAgg 4.00 root group by:test.t1.a, test.t1.b, funcs:count(Column#7)->Column#8", + "HashAgg 4.00 root group by:test.t1.a, test.t1.b, funcs:count(Column#10)->Column#11", "└─HashJoin 4.00 root anti left outer semi join, equal:[eq(test.t1.b, test.t2.b)], other cond:gt(test.t2.a, test.t1.a)", " ├─TableReader(Build) 9.00 root data:TableFullScan", " │ └─TableFullScan 9.00 cop[tikv] table:t2 keep order:false", diff --git a/util/ranger/testdata/ranger_suite_out.json b/util/ranger/testdata/ranger_suite_out.json index b7ad880d894d7..965dcf5fe2d18 100644 --- a/util/ranger/testdata/ranger_suite_out.json +++ b/util/ranger/testdata/ranger_suite_out.json @@ -5,11 +5,11 @@ { "SQL": "explain format = 'brief' select t.e in (select count(*) from t s use index(idx), t t1 where s.b = 1 and s.c in (1, 2) and s.d = t.a and s.a = t1.a) from t", "Result": [ - "Projection 2.00 root Column#17", - "└─Apply 2.00 root CARTESIAN left outer semi join, other cond:eq(test.t.e, Column#16)", + "Projection 2.00 root Column#27", + "└─Apply 2.00 root CARTESIAN left outer semi join, other cond:eq(test.t.e, Column#26)", " ├─TableReader(Build) 2.00 root data:TableFullScan", " │ └─TableFullScan 2.00 cop[tikv] table:t keep order:false", - " └─StreamAgg(Probe) 1.00 root funcs:count(1)->Column#16", + " └─StreamAgg(Probe) 1.00 root funcs:count(1)->Column#26", " └─HashJoin 2.00 root inner join, equal:[eq(test.t.a, test.t.a)]", " ├─TableReader(Build) 2.00 root data:TableFullScan", " │ └─TableFullScan 2.00 cop[tikv] table:t1 keep order:false", From 4baa3d34fbcf41e1556f8055658ef6a09740c9f5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Patrick=20Jiang=28=E7=99=BD=E6=B3=BD=29?= Date: Thu, 18 Mar 2021 16:00:57 +0800 Subject: [PATCH 22/44] planner: fix statement-optimize not work in `TryFastPlan` (#20905) --- planner/core/point_get_plan_test.go | 12 ++++++++++++ planner/optimize.go | 28 ++++++++++++++-------------- 2 files changed, 26 insertions(+), 14 deletions(-) diff --git a/planner/core/point_get_plan_test.go b/planner/core/point_get_plan_test.go index ed079646f5d8e..643267b24a3c8 100644 --- a/planner/core/point_get_plan_test.go +++ b/planner/core/point_get_plan_test.go @@ -598,3 +598,15 @@ func (s *testPointGetSuite) TestCBOShouldNotUsePointGet(c *C) { res.Check(testkit.Rows(output[i].Res...)) } } + +func (s *testPointGetSuite) TestIssue18042(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a int, b int, c int, primary key(a), index ab(a, b));") + tk.MustExec("insert into t values (1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4)") + tk.MustExec("SELECT /*+ MAX_EXECUTION_TIME(100), MEMORY_QUOTA(1 MB) */ * FROM t where a = 1;") + c.Assert(tk.Se.GetSessionVars().StmtCtx.MemQuotaQuery, Equals, int64(1<<20)) + c.Assert(tk.Se.GetSessionVars().StmtCtx.MaxExecutionTime, Equals, uint64(100)) + tk.MustExec("drop table t") +} diff --git a/planner/optimize.go b/planner/optimize.go index b8bac7a8c2cd8..3f511360fe54e 100644 --- a/planner/optimize.go +++ b/planner/optimize.go @@ -88,6 +88,20 @@ func Optimize(ctx context.Context, sctx sessionctx.Context, node ast.Node, is in }() } + tableHints := hint.ExtractTableHintsFromStmtNode(node, sctx) + stmtHints, warns := handleStmtHints(tableHints) + sessVars.StmtCtx.StmtHints = stmtHints + for _, warn := range warns { + sctx.GetSessionVars().StmtCtx.AppendWarning(warn) 
+ } + warns = warns[:0] + for name, val := range stmtHints.SetVars { + err := variable.SetStmtVar(sessVars, name, val) + if err != nil { + sctx.GetSessionVars().StmtCtx.AppendWarning(err) + } + } + if _, isolationReadContainTiKV := sessVars.IsolationReadEngines[kv.TiKV]; isolationReadContainTiKV { var fp plannercore.Plan if fpv, ok := sctx.Value(plannercore.PointPlanKey).(plannercore.PointPlanVal); ok { @@ -106,20 +120,6 @@ func Optimize(ctx context.Context, sctx sessionctx.Context, node ast.Node, is in sctx.PrepareTSFuture(ctx) - tableHints := hint.ExtractTableHintsFromStmtNode(node, sctx) - stmtHints, warns := handleStmtHints(tableHints) - sessVars.StmtCtx.StmtHints = stmtHints - for _, warn := range warns { - sctx.GetSessionVars().StmtCtx.AppendWarning(warn) - } - warns = warns[:0] - for name, val := range stmtHints.SetVars { - err := variable.SetStmtVar(sessVars, name, val) - if err != nil { - sctx.GetSessionVars().StmtCtx.AppendWarning(err) - } - } - bestPlan, names, _, err := optimize(ctx, sctx, node, is) if err != nil { return nil, nil, err From f34afc4e287454791b125d2c77030b905ec52916 Mon Sep 17 00:00:00 2001 From: dongjunduo Date: Thu, 18 Mar 2021 16:17:36 +0800 Subject: [PATCH 23/44] executor: fix linter --enable=deadcode check error in executor(#22979) (#23111) --- executor/adapter.go | 19 ------------------- executor/batch_checker.go | 18 ------------------ executor/benchmark_test.go | 4 ---- executor/distsql.go | 8 -------- executor/partition_table.go | 5 ----- 5 files changed, 54 deletions(-) diff --git a/executor/adapter.go b/executor/adapter.go index f56271de9845e..3e64c9d5b320b 100644 --- a/executor/adapter.go +++ b/executor/adapter.go @@ -19,7 +19,6 @@ import ( "fmt" "math" "runtime/trace" - "strconv" "strings" "sync/atomic" "time" @@ -699,24 +698,6 @@ func (a *ExecStmt) handlePessimisticLockError(ctx context.Context, err error) (E return e, nil } -func extractConflictCommitTS(errStr string) uint64 { - strs := strings.Split(errStr, "conflictCommitTS=") - if len(strs) != 2 { - return 0 - } - tsPart := strs[1] - length := strings.IndexByte(tsPart, ',') - if length < 0 { - return 0 - } - tsStr := tsPart[:length] - ts, err := strconv.ParseUint(tsStr, 10, 64) - if err != nil { - return 0 - } - return ts -} - type pessimisticTxn interface { kv.Transaction // KeysNeedToLock returns the keys need to be locked. diff --git a/executor/batch_checker.go b/executor/batch_checker.go index 58ae81d32feb3..713496a6cfb6c 100644 --- a/executor/batch_checker.go +++ b/executor/batch_checker.go @@ -49,24 +49,6 @@ type toBeCheckedRow struct { ignored bool } -// encodeNewRow encodes a new row to value. -func encodeNewRow(ctx sessionctx.Context, t table.Table, row []types.Datum) ([]byte, error) { - colIDs := make([]int64, 0, len(row)) - skimmedRow := make([]types.Datum, 0, len(row)) - for _, col := range t.Cols() { - if !tables.CanSkip(t.Meta(), col, &row[col.Offset]) { - colIDs = append(colIDs, col.ID) - skimmedRow = append(skimmedRow, row[col.Offset]) - } - } - sctx, rd := ctx.GetSessionVars().StmtCtx, &ctx.GetSessionVars().RowEncoder - newRowValue, err := tablecodec.EncodeRow(sctx, skimmedRow, colIDs, nil, nil, rd) - if err != nil { - return nil, err - } - return newRowValue, nil -} - // getKeysNeedCheck gets keys converted from to-be-insert rows to record keys and unique index keys, // which need to be checked whether they are duplicate keys. 
 func getKeysNeedCheck(ctx context.Context, sctx sessionctx.Context, t table.Table, rows [][]types.Datum) ([]toBeCheckedRow, error) {
diff --git a/executor/benchmark_test.go b/executor/benchmark_test.go
index 2bc89bda9f737..75c0f84bd4ecc 100644
--- a/executor/benchmark_test.go
+++ b/executor/benchmark_test.go
@@ -1615,10 +1615,6 @@ func prepare4MergeJoin(tc *mergeJoinTestCase, innerDS, outerDS *mockDataSource,
 	return e
 }
 
-func defaultMergeJoinTestCase() *mergeJoinTestCase {
-	return &mergeJoinTestCase{*defaultIndexJoinTestCase(), nil}
-}
-
 func newMergeJoinBenchmark(numOuterRows, numInnerDup, numInnerRedundant int) (tc *mergeJoinTestCase, innerDS, outerDS *mockDataSource) {
 	ctx := mock.NewContext()
 	ctx.GetSessionVars().InitChunkSize = variable.DefInitChunkSize
diff --git a/executor/distsql.go b/executor/distsql.go
index 9f95de0b8775a..2957c0031aa55 100644
--- a/executor/distsql.go
+++ b/executor/distsql.go
@@ -135,14 +135,6 @@ func closeAll(objs ...Closeable) error {
 	return nil
 }
 
-// handleIsExtra checks whether this column is a extra handle column generated during plan building phase.
-func handleIsExtra(col *expression.Column) bool {
-	if col != nil && col.ID == model.ExtraHandleID {
-		return true
-	}
-	return false
-}
-
 // rebuildIndexRanges will be called if there's correlated column in access conditions. We will rebuild the range
 // by substitute correlated column with the constant.
 func rebuildIndexRanges(ctx sessionctx.Context, is *plannercore.PhysicalIndexScan, idxCols []*expression.Column, colLens []int) (ranges []*ranger.Range, err error) {
diff --git a/executor/partition_table.go b/executor/partition_table.go
index 806e7b116e9fe..b1224c80e5983 100644
--- a/executor/partition_table.go
+++ b/executor/partition_table.go
@@ -47,11 +47,6 @@ type innerPartitionInfo struct {
 	nextRange map[int64][]*ranger.Range
 }
 
-type innerNextPartition interface {
-	nextPartition
-	GetInnerPartitionInfo() *innerPartitionInfo
-}
-
 type nextPartitionForTableReader struct {
 	*innerPartitionInfo
 	rangeBuilders map[int64]kvRangeBuilder

From f4b9da59c11d4873dde8dcb421f155bb202457ab Mon Sep 17 00:00:00 2001
From: Zhuomin Liu
Date: Thu, 18 Mar 2021 16:35:36 +0800
Subject: [PATCH 24/44] expression: fix refine compare constant (#23339)

---
 expression/builtin_compare.go | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/expression/builtin_compare.go b/expression/builtin_compare.go
index 78db731613e39..3fd9b7a188ee8 100644
--- a/expression/builtin_compare.go
+++ b/expression/builtin_compare.go
@@ -1309,12 +1309,20 @@ func RefineComparedConstant(ctx sessionctx.Context, targetFieldType types.FieldT
 		// We try to convert the string constant to double.
 		// If the double result equals the int result, we can return the int result;
 		// otherwise, the compare function will be false.
+		// **note**
+		// 1. We compare `doubleDatum` with the `integral part of doubleDatum` rather than intDatum to handle the
+		//    case when `targetFieldType.Tp` is `TypeYear`.
+		// 2. When `targetFieldType.Tp` is `TypeYear`, we can not compare `doubleDatum` with `intDatum` directly,
+		//    because we'll convert values in the ranges '0' to '69' and '70' to '99' to YEAR values in the ranges
+		//    2000 to 2069 and 1970 to 1999.
+		// 3. Suppose the value of `con` is 2, when `targetFieldType.Tp` is `TypeYear`, the value of `doubleDatum`
+		//    will be 2.0 and the value of `intDatum` will be 2002 in this case.
var doubleDatum types.Datum doubleDatum, err = dt.ConvertTo(sc, types.NewFieldType(mysql.TypeDouble)) if err != nil { return con, false } - if doubleDatum.GetFloat64() > math.Trunc(doubleDatum.GetFloat64()) { + if doubleDatum.GetFloat64() != math.Trunc(doubleDatum.GetFloat64()) { return con, true } return &Constant{ From 901891ae1f44f2dcbd27508eb679ab641b0287a0 Mon Sep 17 00:00:00 2001 From: HuaiyuXu <391585975@qq.com> Date: Thu, 18 Mar 2021 17:09:53 +0800 Subject: [PATCH 25/44] *: hide the config `global-kill` and session var `tidb_enable_index_merge_join` (#23395) --- config/config.go | 2 +- config/config.toml.example | 2 -- executor/show_test.go | 11 +++++++++++ sessionctx/variable/tidb_vars.go | 1 + 4 files changed, 13 insertions(+), 3 deletions(-) diff --git a/config/config.go b/config/config.go index 123287b505dda..322294b987827 100644 --- a/config/config.go +++ b/config/config.go @@ -534,7 +534,7 @@ type Experimental struct { // Whether enable creating expression index. AllowsExpressionIndex bool `toml:"allow-expression-index" json:"allow-expression-index"` // Whether enable global kill. - EnableGlobalKill bool `toml:"enable-global-kill" json:"enable-global-kill"` + EnableGlobalKill bool `toml:"enable-global-kill" json:"-"` } var defTiKVCfg = tikvcfg.DefaultConfig() diff --git a/config/config.toml.example b/config/config.toml.example index 227836889f597..940936845abf3 100644 --- a/config/config.toml.example +++ b/config/config.toml.example @@ -463,8 +463,6 @@ history-size = 24 [experimental] # enable creating expression index. allow-expression-index = false -# enable global kill. -enable-global-kill = false # server level isolation read by engines and labels [isolation-read] diff --git a/executor/show_test.go b/executor/show_test.go index 61ec2e5bd1ec1..032c4d08839c8 100644 --- a/executor/show_test.go +++ b/executor/show_test.go @@ -1131,6 +1131,17 @@ func (s *testSuite5) TestInvisibleCoprCacheConfig(c *C) { c.Assert(strings.Contains(configValue, coprCacheVal), Equals, true) } +func (s *testSuite5) TestInvisibleGlobalKillConfig(c *C) { + se1, err := session.CreateSession(s.store) + c.Assert(err, IsNil) + tk := testkit.NewTestKitWithSession(c, s.store, se1) + rows := tk.MustQuery("show variables like '%config%'").Rows() + c.Assert(len(rows), Equals, 1) + configValue := rows[0][1].(string) + globalKillVal := "global-kill" + c.Assert(strings.Contains(configValue, globalKillVal), Equals, false) +} + func (s *testSerialSuite1) TestShowCreateTableWithIntegerDisplayLengthWarnings(c *C) { parsertypes.TiDBStrictIntegerDisplayWidth = true defer func() { diff --git a/sessionctx/variable/tidb_vars.go b/sessionctx/variable/tidb_vars.go index 8acc5841bd1a6..dcedbd56affc6 100644 --- a/sessionctx/variable/tidb_vars.go +++ b/sessionctx/variable/tidb_vars.go @@ -714,6 +714,7 @@ var FeatureSwitchVariables = []string{ TiDBPartitionPruneMode, TiDBIntPrimaryKeyDefaultAsClustered, TiDBEnableExtendedStats, + TiDBEnableIndexMergeJoin, } // FilterImplicitFeatureSwitch is used to filter result of show variables, these switches should be turn blind to users. 
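The note added to RefineComparedConstant in PATCH 24 above is easier to follow with concrete numbers. Below is a minimal runnable sketch of the YEAR rule it describes; the yearFromInt helper is an invented illustration, not code from the patch:

package main

import (
	"fmt"
	"math"
)

// yearFromInt mimics MySQL's two-digit YEAR mapping the comment describes:
// 0..69 map to 2000..2069 and 70..99 map to 1970..1999. Illustrative only.
func yearFromInt(v int64) int64 {
	switch {
	case v >= 0 && v <= 69:
		return 2000 + v
	case v >= 70 && v <= 99:
		return 1900 + v
	default:
		return v
	}
}

func main() {
	// Comparing a YEAR column against the string constant "2":
	// converted to YEAR the constant becomes 2002, but converted to
	// double it stays 2.0.
	con := int64(2)
	intDatum := yearFromInt(con) // 2002
	doubleDatum := float64(con)  // 2.0

	// If the code compared doubleDatum (2.0) against intDatum (2002), it
	// would wrongly conclude the conversion lost precision. Comparing
	// doubleDatum with its own integral part avoids that.
	hasFraction := doubleDatum != math.Trunc(doubleDatum)
	fmt.Println(intDatum, doubleDatum, hasFraction) // 2002 2 false
}

This is exactly why the hunk above switches the truncation test from `>` to `!=` against math.Trunc of the same double value.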
From 6ba51c8eacbd468ce3f9a33186499a65bfae1d07 Mon Sep 17 00:00:00 2001
From: Zhuhe Fang
Date: Thu, 18 Mar 2021 17:27:36 +0800
Subject: [PATCH 26/44] plan: setting not null flag for extra pk (#23237)

---
 .../r/access_path_selection.result   |  2 +-
 executor/revoke_test.go              |  2 +-
 go.mod                               |  2 +-
 go.sum                               |  4 +--
 planner/core/logical_plan_builder.go |  4 ++-
 planner/core/logical_plan_test.go    | 17 ++++++++++-
 planner/core/mock.go                 | 28 +++++++++++++++++++
 7 files changed, 52 insertions(+), 7 deletions(-)

diff --git a/cmd/explaintest/r/access_path_selection.result b/cmd/explaintest/r/access_path_selection.result
index b31f7ce896668..bbf5dcb8627a2 100644
--- a/cmd/explaintest/r/access_path_selection.result
+++ b/cmd/explaintest/r/access_path_selection.result
@@ -35,7 +35,7 @@ StreamAgg 1.00 root funcs:max(test.access_path_selection._tidb_rowid)->Column#4
 └─Limit 1.00 root offset:0, count:1
   └─TableReader 1.00 root data:Limit
     └─Limit 1.00 cop[tikv] offset:0, count:1
-      └─TableFullScan 1.25 cop[tikv] table:access_path_selection keep order:true, desc, stats:pseudo
+      └─TableFullScan 1.00 cop[tikv] table:access_path_selection keep order:true, desc, stats:pseudo
 explain format = 'brief' select count(1) from access_path_selection;
 id estRows task access object operator info
 StreamAgg 1.00 root funcs:count(Column#18)->Column#4
diff --git a/executor/revoke_test.go b/executor/revoke_test.go
index 71cf1d68fd713..21e2d4e155357 100644
--- a/executor/revoke_test.go
+++ b/executor/revoke_test.go
@@ -82,7 +82,7 @@ func (s *testSuite1) TestRevokeTableScope(c *C) {
 
 	// Make sure all the table privs for new user is Y.
 	res := tk.MustQuery(`SELECT Table_priv FROM mysql.tables_priv WHERE User="testTblRevoke" and host="localhost" and db="test" and Table_name="test1"`)
-	res.Check(testkit.Rows("Select,Insert,Update,Delete,Create,Drop,Index,Alter"))
+	res.Check(testkit.Rows("Select,Insert,Update,Delete,Create,Drop,Index,Alter,Show View"))
 
 	// Revoke each priv from the user.
for _, v := range mysql.AllTablePrivs { diff --git a/go.mod b/go.mod index 96a7691ba76d8..6d991e86f8ad9 100644 --- a/go.mod +++ b/go.mod @@ -50,7 +50,7 @@ require ( github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989 github.com/pingcap/kvproto v0.0.0-20210308063835-39b884695fb8 github.com/pingcap/log v0.0.0-20201112100606-8f1e84a3abc8 - github.com/pingcap/parser v0.0.0-20210310110710-c7333a4927e6 + github.com/pingcap/parser v0.0.0-20210311132237-9841cb715606 github.com/pingcap/sysutil v0.0.0-20210221112134-a07bda3bde99 github.com/pingcap/tidb-tools v4.0.9-0.20201127090955-2707c97b3853+incompatible github.com/pingcap/tipb v0.0.0-20210309080453-72c4feaa6da7 diff --git a/go.sum b/go.sum index ed705264b9304..ed4d46c091bab 100644 --- a/go.sum +++ b/go.sum @@ -493,8 +493,8 @@ github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9/go.mod h1:4rbK1p9ILyIf github.com/pingcap/log v0.0.0-20200511115504-543df19646ad/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= github.com/pingcap/log v0.0.0-20201112100606-8f1e84a3abc8 h1:M+DNpOu/I3uDmwee6vcnoPd6GgSMqND4gxvDQ/W584U= github.com/pingcap/log v0.0.0-20201112100606-8f1e84a3abc8/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= -github.com/pingcap/parser v0.0.0-20210310110710-c7333a4927e6 h1:V/6ioJmVUN4q6/aUpNdnT6OOPc48R3tnojcVfTrt4QU= -github.com/pingcap/parser v0.0.0-20210310110710-c7333a4927e6/go.mod h1:GbEr2PgY72/4XqPZzmzstlOU/+il/wrjeTNFs6ihsSE= +github.com/pingcap/parser v0.0.0-20210311132237-9841cb715606 h1:/d3CdGzpfCRbdKn38gYH4FGEXgTJCzfI8yroEfKcwbA= +github.com/pingcap/parser v0.0.0-20210311132237-9841cb715606/go.mod h1:GbEr2PgY72/4XqPZzmzstlOU/+il/wrjeTNFs6ihsSE= github.com/pingcap/sysutil v0.0.0-20200206130906-2bfa6dc40bcd/go.mod h1:EB/852NMQ+aRKioCpToQ94Wl7fktV+FNnxf3CX/TTXI= github.com/pingcap/sysutil v0.0.0-20210221112134-a07bda3bde99 h1:/ogXgm4guJzow4UafiyXZ6ciAIPzxImaXYiFvTpKzKY= github.com/pingcap/sysutil v0.0.0-20210221112134-a07bda3bde99/go.mod h1:EB/852NMQ+aRKioCpToQ94Wl7fktV+FNnxf3CX/TTXI= diff --git a/planner/core/logical_plan_builder.go b/planner/core/logical_plan_builder.go index a887538deed16..9b490455127ab 100644 --- a/planner/core/logical_plan_builder.go +++ b/planner/core/logical_plan_builder.go @@ -3511,8 +3511,10 @@ func (b *PlanBuilder) buildTableDual() *LogicalTableDual { } func (ds *DataSource) newExtraHandleSchemaCol() *expression.Column { + tp := types.NewFieldType(mysql.TypeLonglong) + tp.Flag = mysql.NotNullFlag | mysql.PriKeyFlag return &expression.Column{ - RetType: types.NewFieldType(mysql.TypeLonglong), + RetType: tp, UniqueID: ds.ctx.GetSessionVars().AllocPlanColumnID(), ID: model.ExtraHandleID, OrigName: fmt.Sprintf("%v.%v.%v", ds.DBName, ds.tableInfo.Name, model.ExtraHandleName), diff --git a/planner/core/logical_plan_test.go b/planner/core/logical_plan_test.go index d350477bddfb1..3d14cd0df65b0 100644 --- a/planner/core/logical_plan_test.go +++ b/planner/core/logical_plan_test.go @@ -57,7 +57,7 @@ type testPlanSuite struct { } func (s *testPlanSuite) SetUpSuite(c *C) { - s.is = infoschema.MockInfoSchema([]*model.TableInfo{MockSignedTable(), MockUnsignedTable(), MockView()}) + s.is = infoschema.MockInfoSchema([]*model.TableInfo{MockSignedTable(), MockUnsignedTable(), MockView(), MockNoPKTable()}) s.ctx = MockContext() s.ctx.GetSessionVars().EnableWindowFunction = true s.Parser = parser.New() @@ -280,6 +280,21 @@ func (s *testPlanSuite) TestDeriveNotNullConds(c *C) { } } +func (s *testPlanSuite) TestExtraPKNotNullFlag(c *C) { + defer testleak.AfterTest(c)() + sql := "select count(*) from 
t3" + ctx := context.Background() + comment := Commentf("for %s", sql) + stmt, err := s.ParseOneStmt(sql, "", "") + c.Assert(err, IsNil, comment) + p, _, err := BuildLogicalPlan(ctx, s.ctx, stmt, s.is) + c.Assert(err, IsNil, comment) + ds := p.(*LogicalProjection).children[0].(*LogicalAggregation).children[0].(*DataSource) + c.Assert(ds.Columns[2].Name.L, Equals, "_tidb_rowid") + c.Assert(ds.Columns[2].Flag, Equals, mysql.PriKeyFlag|mysql.NotNullFlag) + c.Assert(ds.schema.Columns[2].RetType.Flag, Equals, mysql.PriKeyFlag|mysql.NotNullFlag) +} + func buildLogicPlan4GroupBy(s *testPlanSuite, c *C, sql string) (Plan, error) { sqlMode := s.ctx.GetSessionVars().SQLMode mockedTableInfo := MockSignedTable() diff --git a/planner/core/mock.go b/planner/core/mock.go index 8eee3c8647b68..42e6141980e90 100644 --- a/planner/core/mock.go +++ b/planner/core/mock.go @@ -334,6 +334,34 @@ func MockUnsignedTable() *model.TableInfo { return table } +// MockNoPKTable is only used for plan related tests. +func MockNoPKTable() *model.TableInfo { + // column: a, b + col0 := &model.ColumnInfo{ + State: model.StatePublic, + Offset: 1, + Name: model.NewCIStr("a"), + FieldType: newLongType(), + ID: 2, + } + col1 := &model.ColumnInfo{ + State: model.StatePublic, + Offset: 2, + Name: model.NewCIStr("b"), + FieldType: newLongType(), + ID: 3, + } + // Column 'a', 'b' is not null. + col0.Flag = mysql.NotNullFlag + col1.Flag = mysql.UnsignedFlag + table := &model.TableInfo{ + Columns: []*model.ColumnInfo{col0, col1}, + Name: model.NewCIStr("t3"), + PKIsHandle: true, + } + return table +} + // MockView is only used for plan related tests. func MockView() *model.TableInfo { selectStmt := "select b,c,d from t" From 6049ed78a124e5778463af7e9683bdf2bb2ee5b7 Mon Sep 17 00:00:00 2001 From: Zhuhe Fang Date: Thu, 18 Mar 2021 17:45:36 +0800 Subject: [PATCH 27/44] test: add testleak after checking and testing (#23324) --- executor/tiflash_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/executor/tiflash_test.go b/executor/tiflash_test.go index 78b745fb62dfe..89b7e48af3bc8 100644 --- a/executor/tiflash_test.go +++ b/executor/tiflash_test.go @@ -33,6 +33,7 @@ import ( "github.com/pingcap/tidb/store/mockstore/unistore" "github.com/pingcap/tidb/store/tikv/mockstore/cluster" "github.com/pingcap/tidb/util/testkit" + "github.com/pingcap/tidb/util/testleak" ) type tiflashTestSuite struct { @@ -296,6 +297,8 @@ func (s *tiflashTestSuite) TestMppEnum(c *C) { } func (s *tiflashTestSuite) TestCancelMppTasks(c *C) { + testleak.BeforeTest() + defer testleak.AfterTest(c)() var hang = "github.com/pingcap/tidb/store/mockstore/unistore/mppRecvHang" tk := testkit.NewTestKit(c, s.store) tk.MustExec("use test") From 6e00871551d8a43f02bd1f7764a529e211311f74 Mon Sep 17 00:00:00 2001 From: Howie Date: Thu, 18 Mar 2021 18:01:36 +0800 Subject: [PATCH 28/44] excutor: fix the date precision of `builtinCastDurationAsStringSig.vecEvalString` #23314 #23286 (#23332) --- executor/aggregate_test.go | 13 ++++++++++++- expression/builtin_cast_vec.go | 3 ++- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/executor/aggregate_test.go b/executor/aggregate_test.go index c7c55c2669e28..e967016ecfb13 100644 --- a/executor/aggregate_test.go +++ b/executor/aggregate_test.go @@ -769,7 +769,7 @@ func (s *testSuiteAgg) TestOnlyFullGroupBy(c *C) { tk.MustQuery("select max(a+b) from t") tk.MustQuery("select avg(a)+1 from t") tk.MustQuery("select count(c), 5 from t") - // test functinal depend on primary key + // test functional depend on primary key 
tk.MustQuery("select * from t group by a") // test functional depend on unique not null columns tk.MustQuery("select * from t group by b,d") @@ -1404,3 +1404,14 @@ func (s *testSuiteAgg) TestIssue23277(c *C) { tk.MustQuery("select avg(a) from t group by a").Sort().Check(testkit.Rows("-120.0000", "127.0000")) tk.MustExec("drop table t;") } + +// https://github.com/pingcap/tidb/issues/23314 +func (s *testSuiteAgg) TestIssue23314(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t1") + tk.MustExec("create table t1(col1 time(2) NOT NULL)") + tk.MustExec("insert into t1 values(\"16:40:20.01\")") + res := tk.MustQuery("select col1 from t1 group by col1") + res.Check(testkit.Rows("16:40:20.01")) +} diff --git a/expression/builtin_cast_vec.go b/expression/builtin_cast_vec.go index 4ec1d138f8f41..e350d6154ce78 100644 --- a/expression/builtin_cast_vec.go +++ b/expression/builtin_cast_vec.go @@ -1304,12 +1304,13 @@ func (b *builtinCastDurationAsStringSig) vecEvalString(input *chunk.Chunk, resul var isNull bool sc := b.ctx.GetSessionVars().StmtCtx result.ReserveString(n) + fsp := b.args[0].GetType().Decimal for i := 0; i < n; i++ { if buf.IsNull(i) { result.AppendNull() continue } - res, err = types.ProduceStrWithSpecifiedTp(buf.GetDuration(i, 0).String(), b.tp, sc, false) + res, err = types.ProduceStrWithSpecifiedTp(buf.GetDuration(i, fsp).String(), b.tp, sc, false) if err != nil { return err } From a3e8617a3cd434b0d3e851a27983d9ab4a5eef80 Mon Sep 17 00:00:00 2001 From: Morgan Tocker Date: Thu, 18 Mar 2021 05:19:36 -0600 Subject: [PATCH 29/44] sessionctx: move interactive_timeout to supported sysvars (#23384) --- sessionctx/variable/noop.go | 1 - sessionctx/variable/sysvar.go | 3 ++- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/sessionctx/variable/noop.go b/sessionctx/variable/noop.go index 28f98813daa28..11bb9885c3f03 100644 --- a/sessionctx/variable/noop.go +++ b/sessionctx/variable/noop.go @@ -146,7 +146,6 @@ var noopSysVars = []*SysVar{ {Scope: ScopeGlobal | ScopeSession, Name: BlockEncryptionMode, Value: "aes-128-ecb"}, {Scope: ScopeGlobal | ScopeSession, Name: "max_length_for_sort_data", Value: "1024", IsHintUpdatable: true}, {Scope: ScopeNone, Name: "character_set_system", Value: "utf8"}, - {Scope: ScopeGlobal | ScopeSession, Name: InteractiveTimeout, Value: "28800", Type: TypeUnsigned, MinValue: 1, MaxValue: secondsPerYear, AutoConvertOutOfRange: true}, {Scope: ScopeGlobal, Name: InnodbOptimizeFullTextOnly, Value: "0"}, {Scope: ScopeNone, Name: "character_sets_dir", Value: "/usr/local/mysql-5.6.25-osx10.8-x86_64/share/charsets/"}, {Scope: ScopeGlobal | ScopeSession, Name: QueryCacheType, Value: BoolOff, Type: TypeEnum, PossibleValues: []string{BoolOff, BoolOn, "DEMAND"}}, diff --git a/sessionctx/variable/sysvar.go b/sessionctx/variable/sysvar.go index 0a7c4fbb02c79..2cd2a0ac17182 100644 --- a/sessionctx/variable/sysvar.go +++ b/sessionctx/variable/sysvar.go @@ -542,7 +542,8 @@ var defaultSysVars = []*SysVar{ {Scope: ScopeGlobal | ScopeSession, Name: TransactionReadOnly, Value: "0"}, {Scope: ScopeGlobal, Name: MaxPreparedStmtCount, Value: strconv.FormatInt(DefMaxPreparedStmtCount, 10), Type: TypeInt, MinValue: -1, MaxValue: 1048576, AutoConvertOutOfRange: true}, {Scope: ScopeNone, Name: DataDir, Value: "/usr/local/mysql/data/"}, - {Scope: ScopeGlobal | ScopeSession, Name: WaitTimeout, Value: strconv.FormatInt(DefWaitTimeout, 10), Type: TypeUnsigned, MinValue: 0, MaxValue: 31536000, AutoConvertOutOfRange: true}, + 
{Scope: ScopeGlobal | ScopeSession, Name: WaitTimeout, Value: strconv.FormatInt(DefWaitTimeout, 10), Type: TypeUnsigned, MinValue: 0, MaxValue: secondsPerYear, AutoConvertOutOfRange: true}, + {Scope: ScopeGlobal | ScopeSession, Name: InteractiveTimeout, Value: "28800", Type: TypeUnsigned, MinValue: 1, MaxValue: secondsPerYear, AutoConvertOutOfRange: true}, {Scope: ScopeGlobal | ScopeSession, Name: InnodbLockWaitTimeout, Value: strconv.FormatInt(DefInnodbLockWaitTimeout, 10), Type: TypeUnsigned, MinValue: 1, MaxValue: 1073741824, AutoConvertOutOfRange: true}, {Scope: ScopeGlobal | ScopeSession, Name: GroupConcatMaxLen, Value: "1024", AutoConvertOutOfRange: true, IsHintUpdatable: true, Type: TypeUnsigned, MinValue: 4, MaxValue: math.MaxUint64, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) { // https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_group_concat_max_len From d370409faa0ddc3899859e34070e1ef236bde878 Mon Sep 17 00:00:00 2001 From: YIXIAO SHI Date: Thu, 18 Mar 2021 19:37:36 +0800 Subject: [PATCH 30/44] store/tikv: remove tidb/metrics from store/tikv (#23302) --- metrics/gprc.go | 27 --------------------------- metrics/metrics.go | 7 +++++-- store/tikv/client.go | 3 +-- store/tikv/client_batch.go | 7 +++---- store/tikv/metrics/metrics.go | 20 ++++++++++++++++++++ 5 files changed, 29 insertions(+), 35 deletions(-) delete mode 100644 metrics/gprc.go diff --git a/metrics/gprc.go b/metrics/gprc.go deleted file mode 100644 index 33875054b64a1..0000000000000 --- a/metrics/gprc.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2019 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package metrics - -import "github.com/prometheus/client_golang/prometheus" - -// Metrics to monitor gRPC service -var ( - GRPCConnTransientFailureCounter = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: "tidb", - Subsystem: "grpc", - Name: "connection_transient_failure_count", - Help: "Counter of gRPC connection transient failure", - }, []string{LblAddress, LblStore}) -) diff --git a/metrics/metrics.go b/metrics/metrics.go index daa5ffbc7bb14..29bc66852eca6 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -46,9 +46,12 @@ const ( opSucc = "ok" opFailed = "err" + TiDB = "tidb" LabelScope = "scope" ScopeGlobal = "global" ScopeSession = "session" + Server = "server" + TiKVClient = "tikvclient" ) // RetLabel returns "ok" when err == nil and "err" when err != nil. 
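The TiDB and TiKVClient constants above become the namespace and subsystem of every client-side metric: client_golang joins the three parts with underscores, which is why the metrics moved in this patch keep names of the form tidb_tikvclient_*. A minimal sketch of the composition; the demo_total metric is invented for illustration and is not one of the patch's metrics:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// With Namespace "tidb" and Subsystem "tikvclient", client_golang
	// exposes this counter as tidb_tikvclient_demo_total.
	c := prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: "tidb",
		Subsystem: "tikvclient",
		Name:      "demo_total", // hypothetical metric name
		Help:      "Demo counter showing how the full metric name is composed.",
	}, []string{"type"})
	c.WithLabelValues("ok").Inc()

	// BuildFQName performs the same join client_golang does internally.
	fmt.Println(prometheus.BuildFQName("tidb", "tikvclient", "demo_total"))
}

The same composition produces tidb_tikvclient_forward_request_counter, the series queried by the Grafana panel added in PATCH 31 below.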
@@ -139,7 +142,6 @@ func RegisterMetrics() { prometheus.MustRegister(TotalCopProcHistogram) prometheus.MustRegister(TotalCopWaitHistogram) prometheus.MustRegister(HandleSchemaValidate) - prometheus.MustRegister(GRPCConnTransientFailureCounter) prometheus.MustRegister(MaxProcs) prometheus.MustRegister(GOGC) prometheus.MustRegister(ConnIdleDurationHistogram) @@ -147,6 +149,7 @@ func RegisterMetrics() { prometheus.MustRegister(TokenGauge) prometheus.MustRegister(ConfigStatus) - tikvmetrics.InitMetrics("tidb", "tikvclient") + tikvmetrics.InitMetrics(TiDB, TiKVClient) tikvmetrics.RegisterMetrics() + tikvmetrics.TiKVPanicCounter = PanicCounter // reset tidb metrics for tikv metrics } diff --git a/store/tikv/client.go b/store/tikv/client.go index f79a8fb25c3a8..255093c9f8ba8 100644 --- a/store/tikv/client.go +++ b/store/tikv/client.go @@ -34,7 +34,6 @@ import ( "github.com/pingcap/kvproto/pkg/tikvpb" "github.com/pingcap/parser/terror" "github.com/pingcap/tidb/kv" - tidbmetrics "github.com/pingcap/tidb/metrics" "github.com/pingcap/tidb/store/tikv/config" "github.com/pingcap/tidb/store/tikv/logutil" "github.com/pingcap/tidb/store/tikv/metrics" @@ -384,7 +383,7 @@ func (c *RPCClient) SendRequest(ctx context.Context, addr string, req *tikvrpc.R clientConn := connArray.Get() if state := clientConn.GetState(); state == connectivity.TransientFailure { storeID := strconv.FormatUint(req.Context.GetPeer().GetStoreId(), 10) - tidbmetrics.GRPCConnTransientFailureCounter.WithLabelValues(addr, storeID).Inc() + metrics.TiKVGRPCConnTransientFailureCounter.WithLabelValues(addr, storeID).Inc() } if req.IsDebugReq() { diff --git a/store/tikv/client_batch.go b/store/tikv/client_batch.go index 5f36df341613e..dca4787de41d4 100644 --- a/store/tikv/client_batch.go +++ b/store/tikv/client_batch.go @@ -26,7 +26,6 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/kvproto/pkg/tikvpb" "github.com/pingcap/parser/terror" - tidbmetrics "github.com/pingcap/tidb/metrics" "github.com/pingcap/tidb/store/tikv/config" "github.com/pingcap/tidb/store/tikv/logutil" "github.com/pingcap/tidb/store/tikv/metrics" @@ -249,7 +248,7 @@ func (c *batchCommandsClient) send(request *tikvpb.BatchCommandsRequest, entries func (c *batchCommandsClient) recv() (resp *tikvpb.BatchCommandsResponse, err error) { defer func() { if r := recover(); r != nil { - tidbmetrics.PanicCounter.WithLabelValues(metrics.LabelBatchRecvLoop).Inc() + metrics.TiKVPanicCounter.WithLabelValues(metrics.LabelBatchRecvLoop).Inc() logutil.BgLogger().Error("batchCommandsClient.recv panic", zap.Reflect("r", r), zap.Stack("stack")) @@ -331,7 +330,7 @@ func (c *batchCommandsClient) reCreateStreamingClientOnce(perr error) error { func (c *batchCommandsClient) batchRecvLoop(cfg config.TiKVClient, tikvTransportLayerLoad *uint64) { defer func() { if r := recover(); r != nil { - tidbmetrics.PanicCounter.WithLabelValues(metrics.LabelBatchRecvLoop).Inc() + metrics.TiKVPanicCounter.WithLabelValues(metrics.LabelBatchRecvLoop).Inc() logutil.BgLogger().Error("batchRecvLoop", zap.Reflect("r", r), zap.Stack("stack")) @@ -448,7 +447,7 @@ func resetRequests(requests []*tikvpb.BatchCommandsRequest_Request) []*tikvpb.Ba func (a *batchConn) batchSendLoop(cfg config.TiKVClient) { defer func() { if r := recover(); r != nil { - tidbmetrics.PanicCounter.WithLabelValues(metrics.LabelBatchSendLoop).Inc() + metrics.TiKVPanicCounter.WithLabelValues(metrics.LabelBatchSendLoop).Inc() logutil.BgLogger().Error("batchSendLoop", zap.Reflect("r", r), zap.Stack("stack")) diff --git 
a/store/tikv/metrics/metrics.go b/store/tikv/metrics/metrics.go index 2af1abfcab912..158524bd38f85 100644 --- a/store/tikv/metrics/metrics.go +++ b/store/tikv/metrics/metrics.go @@ -51,6 +51,8 @@ var ( TiKVAsyncCommitTxnCounter *prometheus.CounterVec TiKVOnePCTxnCounter *prometheus.CounterVec TiKVStoreLimitErrorCounter *prometheus.CounterVec + TiKVGRPCConnTransientFailureCounter *prometheus.CounterVec + TiKVPanicCounter *prometheus.CounterVec ) // Label constants. @@ -355,6 +357,22 @@ func initMetrics(namespace, subsystem string) { Help: "store token is up to the limit, probably because one of the stores is the hotspot or unavailable", }, []string{LblAddress, LblStore}) + TiKVGRPCConnTransientFailureCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "connection_transient_failure_count", + Help: "Counter of gRPC connection transient failure", + }, []string{LblAddress, LblStore}) + + TiKVPanicCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "panic_total", + Help: "Counter of panic.", + }, []string{LblType}) + initShortcuts() } @@ -404,4 +422,6 @@ func RegisterMetrics() { prometheus.MustRegister(TiKVAsyncCommitTxnCounter) prometheus.MustRegister(TiKVOnePCTxnCounter) prometheus.MustRegister(TiKVStoreLimitErrorCounter) + prometheus.MustRegister(TiKVGRPCConnTransientFailureCounter) + prometheus.MustRegister(TiKVPanicCounter) } From ab19f061bd9816b7f495ef32165074a9b7580a64 Mon Sep 17 00:00:00 2001 From: MyonKeminta <9948422+MyonKeminta@users.noreply.github.com> Date: Thu, 18 Mar 2021 20:34:14 +0800 Subject: [PATCH 31/44] store/tikv: Support forwarding via follower when the region's leader is unreachable (#23244) Co-authored-by: MyonKeminta --- config/config.toml.example | 2 +- metrics/grafana/tidb.json | 93 +++++++ store/tikv/backoff.go | 3 +- store/tikv/config/client.go | 2 +- store/tikv/metrics/metrics.go | 12 + store/tikv/region_cache.go | 427 +++++++++++++++++++++++++----- store/tikv/region_cache_test.go | 52 ++++ store/tikv/region_request.go | 62 ++++- store/tikv/region_request_test.go | 134 ++++++++++ 9 files changed, 702 insertions(+), 85 deletions(-) diff --git a/config/config.toml.example b/config/config.toml.example index 940936845abf3..a2b55143c4f62 100644 --- a/config/config.toml.example +++ b/config/config.toml.example @@ -406,7 +406,7 @@ region-cache-ttl = 600 store-limit = 0 # store-liveness-timeout is used to control timeout for store liveness after sending request failed. -store-liveness-timeout = "5s" +store-liveness-timeout = "1s" # ttl-refreshed-txn-size decides whether a transaction should update its lock TTL. # If the size(in byte) of a transaction is large than `ttl-refreshed-txn-size`, it update the lock TTL during the 2PC. 
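store-liveness-timeout, like the other duration knobs in this file, is a string that must parse as a Go duration; this patch lowers the default from 5s to 1s, presumably so an unreachable store is noticed quickly enough for the new forwarding path to take over. A minimal sketch of decoding such a knob, assuming the BurntSushi/toml decoder and a hypothetical struct rather than TiDB's real config loader:

package main

import (
	"fmt"
	"time"

	"github.com/BurntSushi/toml"
)

// tikvClientConf is a hypothetical struct holding just the knob discussed above.
type tikvClientConf struct {
	StoreLivenessTimeout string `toml:"store-liveness-timeout"`
}

func main() {
	var conf tikvClientConf
	// Decode the TOML fragment, then parse the string into a time.Duration.
	if _, err := toml.Decode(`store-liveness-timeout = "1s"`, &conf); err != nil {
		panic(err)
	}
	d, err := time.ParseDuration(conf.StoreLivenessTimeout)
	if err != nil {
		panic(err)
	}
	fmt.Println(d) // 1s
}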
diff --git a/metrics/grafana/tidb.json b/metrics/grafana/tidb.json index c09e802892304..531bf04ff619e 100644 --- a/metrics/grafana/tidb.json +++ b/metrics/grafana/tidb.json @@ -7301,6 +7301,99 @@ "align": false, "alignLevel": null } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "description": "kv requests that's forwarded by different stores", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 15 + }, + "id": 219, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(tidb_tikvclient_forward_request_counter{tidb_cluster=\"$tidb_cluster\"}[1m])) by (from_store, to_store, result)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{from_store}}-to-{{to_store}}-{{result}}", + "refId": "A", + "step": 40 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "KV Request Forwarding OPS", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } } ], "repeat": null, diff --git a/store/tikv/backoff.go b/store/tikv/backoff.go index 71ecfc65465d0..9173758fb48dc 100644 --- a/store/tikv/backoff.go +++ b/store/tikv/backoff.go @@ -92,7 +92,8 @@ func NewBackoffFn(base, cap, jitter int) func(ctx context.Context, maxSleepMs in } logutil.BgLogger().Debug("backoff", zap.Int("base", base), - zap.Int("sleep", sleep)) + zap.Int("sleep", sleep), + zap.Int("attempts", attempts)) realSleep := sleep // when set maxSleepMs >= 0 in `tikv.BackoffWithMaxSleep` will force sleep maxSleepMs milliseconds. diff --git a/store/tikv/config/client.go b/store/tikv/config/client.go index 97dc281722093..8e9169301c2f8 100644 --- a/store/tikv/config/client.go +++ b/store/tikv/config/client.go @@ -22,7 +22,7 @@ import ( const ( // DefStoreLivenessTimeout is the default value for store liveness timeout. - DefStoreLivenessTimeout = "5s" + DefStoreLivenessTimeout = "1s" ) // TiKVClient is the config for tikv client. diff --git a/store/tikv/metrics/metrics.go b/store/tikv/metrics/metrics.go index 158524bd38f85..1dadf1957c965 100644 --- a/store/tikv/metrics/metrics.go +++ b/store/tikv/metrics/metrics.go @@ -53,6 +53,7 @@ var ( TiKVStoreLimitErrorCounter *prometheus.CounterVec TiKVGRPCConnTransientFailureCounter *prometheus.CounterVec TiKVPanicCounter *prometheus.CounterVec + TiKVForwardRequestCounter *prometheus.CounterVec ) // Label constants. 
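TiKVForwardRequestCounter declared above is a CounterVec keyed by the two store IDs and the request outcome, so each forwarded request bumps exactly one labeled child. A minimal sketch of the pattern; the label values are invented for illustration:

package main

import "github.com/prometheus/client_golang/prometheus"

// forwardRequestCounter mirrors the shape of the counter added by this patch.
var forwardRequestCounter = prometheus.NewCounterVec(prometheus.CounterOpts{
	Namespace: "tidb",
	Subsystem: "tikvclient",
	Name:      "forward_request_counter",
	Help:      "Counter of tikv request being forwarded through another node",
}, []string{"from_store", "to_store", "result"})

func main() {
	prometheus.MustRegister(forwardRequestCounter)
	// Record one forwarded request and its outcome; which store ID goes in
	// which label follows the caller's convention, and "2", "1", "ok" are
	// made-up values here.
	forwardRequestCounter.WithLabelValues("2", "1", "ok").Inc()
}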
@@ -69,6 +70,8 @@ const ( LabelBatchRecvLoop = "batch-recv-loop" LabelBatchSendLoop = "batch-send-loop" LblAddress = "address" + LblFromStore = "from_store" + LblToStore = "to_store" ) func initMetrics(namespace, subsystem string) { @@ -373,6 +376,14 @@ func initMetrics(namespace, subsystem string) { Help: "Counter of panic.", }, []string{LblType}) + TiKVForwardRequestCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "forward_request_counter", + Help: "Counter of tikv request being forwarded through another node", + }, []string{LblFromStore, LblToStore, LblResult}) + initShortcuts() } @@ -424,4 +435,5 @@ func RegisterMetrics() { prometheus.MustRegister(TiKVStoreLimitErrorCounter) prometheus.MustRegister(TiKVGRPCConnTransientFailureCounter) prometheus.MustRegister(TiKVPanicCounter) + prometheus.MustRegister(TiKVForwardRequestCounter) } diff --git a/store/tikv/region_cache.go b/store/tikv/region_cache.go index ae6832c18625e..b6e39c8c7b3bc 100644 --- a/store/tikv/region_cache.go +++ b/store/tikv/region_cache.go @@ -17,7 +17,6 @@ import ( "bytes" "context" "fmt" - "net/http" "sync" "sync/atomic" "time" @@ -29,6 +28,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/kvproto/pkg/metapb" + "github.com/pingcap/parser/terror" "github.com/pingcap/tidb/ddl/placement" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/store/tikv/config" @@ -38,6 +38,11 @@ import ( atomic2 "go.uber.org/atomic" "go.uber.org/zap" "golang.org/x/sync/singleflight" + "google.golang.org/grpc" + "google.golang.org/grpc/backoff" + "google.golang.org/grpc/credentials" + healthpb "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/keepalive" ) const ( @@ -69,6 +74,7 @@ type AccessIndex int // it will be store as unsafe.Pointer and be load at once type RegionStore struct { workTiKVIdx AccessIndex // point to current work peer in meta.Peers and work store in stores(same idx) for tikv peer + proxyTiKVIdx AccessIndex // point to the tikv peer that can forward requests to the leader. -1 means not using proxy workTiFlashIdx int32 // point to current work peer in meta.Peers and work store in stores(same idx) for tiflash peer stores []*Store // stores in this region storeEpochs []uint32 // snapshots of store's epoch, need reload when `storeEpochs[curr] != stores[cur].fail` @@ -89,6 +95,7 @@ func (r *RegionStore) clone() *RegionStore { storeEpochs := make([]uint32, len(r.stores)) rs := &RegionStore{ workTiFlashIdx: r.workTiFlashIdx, + proxyTiKVIdx: r.proxyTiKVIdx, workTiKVIdx: r.workTiKVIdx, stores: r.stores, storeEpochs: storeEpochs, @@ -150,6 +157,7 @@ func (r *Region) init(c *RegionCache) error { // to avoid acquire storeMu in later access. rs := &RegionStore{ workTiKVIdx: 0, + proxyTiKVIdx: -1, workTiFlashIdx: 0, stores: make([]*Store, 0, len(r.meta.Peers)), storeEpochs: make([]uint32, 0, len(r.meta.Peers)), @@ -221,8 +229,8 @@ func (r *Region) scheduleReload() { atomic.CompareAndSwapInt32(&r.syncFlag, oldValue, needSync) } -// needReload checks whether region need reload. -func (r *Region) needReload() bool { +// checkNeedReloadAndMarkUpdated returns whether the region need reload and marks the region to be updated. 
+func (r *Region) checkNeedReloadAndMarkUpdated() bool { oldValue := atomic.LoadInt32(&r.syncFlag) if oldValue == updated { return false @@ -230,9 +238,15 @@ func (r *Region) needReload() bool { return atomic.CompareAndSwapInt32(&r.syncFlag, oldValue, updated) } +func (r *Region) checkNeedReload() bool { + v := atomic.LoadInt32(&r.syncFlag) + return v != updated +} + // RegionCache caches Regions loaded from PD. type RegionCache struct { - pdClient pd.Client + pdClient pd.Client + enableForwarding bool mu struct { sync.RWMutex // mutex protect cached region @@ -245,6 +259,12 @@ type RegionCache struct { } notifyCheckCh chan struct{} closeCh chan struct{} + + testingKnobs struct { + // Replace the requestLiveness function for test purpose. Note that in unit tests, if this is not set, + // requestLiveness always returns unreachable. + mockRequestLiveness func(s *Store, bo *Backoffer) livenessState + } } // NewRegionCache creates a RegionCache. @@ -259,6 +279,7 @@ func NewRegionCache(pdClient pd.Client) *RegionCache { c.closeCh = make(chan struct{}) interval := config.GetGlobalConfig().StoresRefreshInterval go c.asyncCheckAndResolveLoop(time.Duration(interval) * time.Second) + c.enableForwarding = config.GetGlobalConfig().EnableForwarding return c } @@ -289,7 +310,8 @@ func (c *RegionCache) asyncCheckAndResolveLoop(interval time.Duration) { } c.storeMu.RUnlock() for _, store := range stores { - store.reResolve(c) + _, err := store.reResolve(c) + terror.Log(err) } } } @@ -317,19 +339,24 @@ func (c *RegionCache) checkAndResolve(needCheckStores []*Store) { c.storeMu.RUnlock() for _, store := range needCheckStores { - store.reResolve(c) + _, err := store.reResolve(c) + terror.Log(err) } } // RPCContext contains data that is needed to send RPC to a region. type RPCContext struct { - Region RegionVerID - Meta *metapb.Region - Peer *metapb.Peer - AccessIdx AccessIndex - Store *Store - Addr string - AccessMode AccessMode + Region RegionVerID + Meta *metapb.Region + Peer *metapb.Peer + AccessIdx AccessIndex + Store *Store + Addr string + AccessMode AccessMode + ProxyStore *Store // nil means proxy is not used + ProxyAccessIdx AccessIndex // valid when ProxyStore is not nil + ProxyAddr string // valid when ProxyStore is not nil + TiKVNum int // Number of TiKV nodes among the region's peers. Assuming non-TiKV peers are all TiFlash peers. } func (c *RPCContext) String() string { @@ -337,8 +364,12 @@ func (c *RPCContext) String() string { if c.Store != nil { runStoreType = c.Store.storeType.Name() } - return fmt.Sprintf("region ID: %d, meta: %s, peer: %s, addr: %s, idx: %d, reqStoreType: %s, runStoreType: %s", + res := fmt.Sprintf("region ID: %d, meta: %s, peer: %s, addr: %s, idx: %d, reqStoreType: %s, runStoreType: %s", c.Region.GetID(), c.Meta, c.Peer, c.Addr, c.AccessIdx, c.AccessMode, runStoreType) + if c.ProxyStore != nil { + res += fmt.Sprintf(", proxy store id: %d, proxy addr: %s, proxy idx: %d", c.ProxyStore.storeID, c.ProxyAddr, c.ProxyAccessIdx) + } + return res } type storeSelectorOp struct { @@ -365,6 +396,12 @@ func (c *RegionCache) GetTiKVRPCContext(bo *Backoffer, id RegionVerID, replicaRe return nil, nil } + if cachedRegion.checkNeedReload() { + // TODO: This may cause a fake EpochNotMatch error, and reload the region after a backoff. It's better to reload + // the region directly here. 
+ return nil, nil + } + if !cachedRegion.checkRegionCacheTTL(ts) { return nil, nil } @@ -395,12 +432,14 @@ func (c *RegionCache) GetTiKVRPCContext(bo *Backoffer, id RegionVerID, replicaRe } } }) + isLeaderReq := false switch replicaRead { case kv.ReplicaReadFollower: store, peer, accessIdx, storeIdx = cachedRegion.FollowerStorePeer(regionStore, followerStoreSeed, options) case kv.ReplicaReadMixed: store, peer, accessIdx, storeIdx = cachedRegion.AnyStorePeer(regionStore, followerStoreSeed, options) default: + isLeaderReq = true store, peer, accessIdx, storeIdx = cachedRegion.WorkStorePeer(regionStore) } addr, err := c.getStoreAddr(bo, cachedRegion, store, storeIdx) @@ -428,14 +467,41 @@ func (c *RegionCache) GetTiKVRPCContext(bo *Backoffer, id RegionVerID, replicaRe return nil, nil } + var ( + proxyStore *Store + proxyAddr string + proxyAccessIdx AccessIndex + proxyStoreIdx int + ) + if c.enableForwarding && isLeaderReq { + if atomic.LoadInt32(&store.needForwarding) == 0 { + regionStore.unsetProxyStoreIfNeeded(cachedRegion) + } else { + proxyStore, proxyAccessIdx, proxyStoreIdx, err = c.getProxyStore(cachedRegion, store, regionStore, accessIdx) + if err != nil { + return nil, err + } + if proxyStore != nil { + proxyAddr, err = c.getStoreAddr(bo, cachedRegion, proxyStore, proxyStoreIdx) + if err != nil { + return nil, err + } + } + } + } + return &RPCContext{ - Region: id, - Meta: cachedRegion.meta, - Peer: peer, - AccessIdx: accessIdx, - Store: store, - Addr: addr, - AccessMode: TiKVOnly, + Region: id, + Meta: cachedRegion.meta, + Peer: peer, + AccessIdx: accessIdx, + Store: store, + Addr: addr, + AccessMode: TiKVOnly, + ProxyStore: proxyStore, + ProxyAccessIdx: proxyAccessIdx, + ProxyAddr: proxyAddr, + TiKVNum: regionStore.accessStoreNum(TiKVOnly), }, nil } @@ -468,7 +534,8 @@ func (c *RegionCache) GetTiFlashRPCContext(bo *Backoffer, id RegionVerID) (*RPCC return nil, nil } if store.getResolveState() == needCheck { - store.reResolve(c) + _, err := store.reResolve(c) + terror.Log(err) } atomic.StoreInt32(®ionStore.workTiFlashIdx, int32(accessIdx)) peer := cachedRegion.meta.Peers[storeIdx] @@ -489,6 +556,7 @@ func (c *RegionCache) GetTiFlashRPCContext(bo *Backoffer, id RegionVerID) (*RPCC Store: store, Addr: addr, AccessMode: TiFlashOnly, + TiKVNum: regionStore.accessStoreNum(TiKVOnly), }, nil } @@ -550,7 +618,7 @@ func (c *RegionCache) findRegionByKey(bo *Backoffer, key []byte, isEndKey bool) c.mu.Lock() c.insertRegionToCache(r) c.mu.Unlock() - } else if r.needReload() { + } else if r.checkNeedReloadAndMarkUpdated() { // load region when it be marked as need reload. lr, err := c.loadRegion(bo, key, isEndKey) if err != nil { @@ -583,43 +651,74 @@ func (c *RegionCache) OnSendFail(bo *Backoffer, ctx *RPCContext, scheduleReload zap.Error(err)) return } + rs := r.getStore() + startForwarding := false + incEpochStoreIdx := -1 + if err != nil { storeIdx, s := rs.accessStore(ctx.AccessMode, ctx.AccessIdx) - followerRead := rs.workTiKVIdx != ctx.AccessIdx - - // send fail but store is reachable, keep retry current peer for replica leader request. - // but we still need switch peer for follower-read or learner-read(i.e. tiflash) - if ctx.Store.storeType == TiKV && !followerRead && s.requestLiveness(bo) == reachable { - return - } + leaderReq := ctx.Store.storeType == TiKV && rs.workTiKVIdx == ctx.AccessIdx + + // Mark the store as failure if it's not a redirection request because we + // can't know the status of the proxy store by it. 
+ if ctx.ProxyStore == nil { + // send fail but store is reachable, keep retry current peer for replica leader request. + // but we still need switch peer for follower-read or learner-read(i.e. tiflash) + if leaderReq { + if s.requestLiveness(bo, c) == reachable { + return + } else if c.enableForwarding { + s.startHealthCheckLoopIfNeeded(c) + startForwarding = true + } + } - // invalidate regions in store. - epoch := rs.storeEpochs[storeIdx] - if atomic.CompareAndSwapUint32(&s.epoch, epoch, epoch+1) { - logutil.BgLogger().Info("mark store's regions need be refill", zap.String("store", s.addr)) - metrics.RegionCacheCounterWithInvalidateStoreRegionsOK.Inc() + // invalidate regions in store. + epoch := rs.storeEpochs[storeIdx] + if atomic.CompareAndSwapUint32(&s.epoch, epoch, epoch+1) { + logutil.BgLogger().Info("mark store's regions need be refill", zap.String("store", s.addr)) + incEpochStoreIdx = storeIdx + metrics.RegionCacheCounterWithInvalidateStoreRegionsOK.Inc() + } + // schedule a store addr resolve. + s.markNeedCheck(c.notifyCheckCh) } - - // schedule a store addr resolve. - s.markNeedCheck(c.notifyCheckCh) } // try next peer to found new leader. if ctx.AccessMode == TiKVOnly { - rs.switchNextTiKVPeer(r, ctx.AccessIdx) + if startForwarding || ctx.ProxyStore != nil { + var currentProxyIdx AccessIndex = -1 + if ctx.ProxyStore != nil { + currentProxyIdx = ctx.ProxyAccessIdx + } + // In case the epoch of the store is increased, try to avoid reloading the current region by also + // increasing the epoch stored in `rs`. + rs.switchNextProxyStore(r, currentProxyIdx, incEpochStoreIdx) + logutil.Logger(bo.ctx).Info("switch region proxy peer to next due to send request fail", + zap.Stringer("current", ctx), + zap.Bool("needReload", scheduleReload), + zap.Error(err)) + } else { + rs.switchNextTiKVPeer(r, ctx.AccessIdx) + logutil.Logger(bo.ctx).Info("switch region peer to next due to send request fail", + zap.Stringer("current", ctx), + zap.Bool("needReload", scheduleReload), + zap.Error(err)) + } } else { rs.switchNextFlashPeer(r, ctx.AccessIdx) + logutil.Logger(bo.ctx).Info("switch region tiflash peer to next due to send request fail", + zap.Stringer("current", ctx), + zap.Bool("needReload", scheduleReload), + zap.Error(err)) } // force reload region when retry all known peers in region. if scheduleReload { r.scheduleReload() } - logutil.Logger(bo.ctx).Info("switch region peer to next due to send request fail", - zap.Stringer("current", ctx), - zap.Bool("needReload", scheduleReload), - zap.Error(err)) } } @@ -629,7 +728,7 @@ func (c *RegionCache) LocateRegionByID(bo *Backoffer, regionID uint64) (*KeyLoca r := c.getRegionByIDFromCache(regionID) c.mu.RUnlock() if r != nil { - if r.needReload() { + if r.checkNeedReloadAndMarkUpdated() { lr, err := c.loadRegionByID(bo, regionID) if err != nil { // ignore error and use old region info. 
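The reworked OnSendFail above now distinguishes a failed proxy hop, a still-reachable leader, and an unreachable leader with forwarding enabled. A condensed sketch of that decision order, using simplified names invented by the editor rather than the patch's actual signatures:

package main

import "fmt"

// livenessState mirrors the reachable/unknown/unreachable states used above.
type livenessState int

const (
	unknown livenessState = iota
	reachable
	unreachable
)

// onSendFailAction condenses the decision order in OnSendFail for a request
// that targeted the region's leader. Sketch only, not the real code.
func onSendFailAction(forwardingEnabled, wasProxied bool, liveness livenessState) string {
	if wasProxied {
		// The proxy hop itself failed; move to the next candidate proxy peer.
		return "switch to next proxy peer"
	}
	if liveness == reachable {
		// The store answered the liveness probe; keep retrying the same leader.
		return "retry current leader"
	}
	if forwardingEnabled {
		// Leader looks dead: start its health-check loop and begin forwarding
		// leader requests through another peer.
		return "start health check and pick a proxy peer"
	}
	// Classic behaviour: give up on this peer and try the next one.
	return "switch to next peer"
}

func main() {
	fmt.Println(onSendFailAction(true, false, unreachable))
}

In the real code the unreachable-leader branches additionally bump the store epoch and schedule an address re-resolve before switching, as the hunk above shows.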
@@ -1131,6 +1230,30 @@ func (c *RegionCache) getStoreAddr(bo *Backoffer, region *Region, store *Store, } } +func (c *RegionCache) getProxyStore(region *Region, store *Store, rs *RegionStore, workStoreIdx AccessIndex) (proxyStore *Store, proxyAccessIdx AccessIndex, proxyStoreIdx int, err error) { + if !c.enableForwarding || store.storeType != TiKV || atomic.LoadInt32(&store.needForwarding) == 0 { + return + } + + if rs.proxyTiKVIdx >= 0 { + storeIdx, proxyStore := rs.accessStore(TiKVOnly, rs.proxyTiKVIdx) + return proxyStore, rs.proxyTiKVIdx, storeIdx, err + } + + tikvNum := rs.accessStoreNum(TiKVOnly) + for index := 0; index < tikvNum; index++ { + // Skip work store which is the actual store to be accessed + if index == int(workStoreIdx) { + continue + } + storeIdx, store := rs.accessStore(TiKVOnly, AccessIndex(index)) + rs.setProxyStoreIdx(region, AccessIndex(index)) + return store, AccessIndex(index), storeIdx, nil + } + + return nil, 0, 0, errors.New("the region leader is disconnected and no store is available ") +} + func (c *RegionCache) changeToActiveStore(region *Region, store *Store, storeIdx int) (addr string) { c.storeMu.RLock() store = c.storeMu.stores[store.storeID] @@ -1345,6 +1468,11 @@ func (r *RegionVerID) GetConfVer() uint64 { return r.confVer } +// String formats the RegionVerID to string +func (r *RegionVerID) String() string { + return fmt.Sprintf("{ region id: %v, ver: %v, confVer: %v }", r.id, r.ver, r.confVer) +} + // VerID returns the Region's RegionVerID. func (r *Region) VerID() RegionVerID { return RegionVerID{ @@ -1405,6 +1533,44 @@ func (r *RegionStore) switchNextTiKVPeer(rr *Region, currentPeerIdx AccessIndex) rr.compareAndSwapStore(r, newRegionStore) } +// switchNextProxyStore switches the index of the peer that will forward requests to the leader to the next peer. +// If proxy is currently not used on this region, the value of `currentProxyIdx` should be -1, and it will be moved to +// the first peer that can be the proxy. +func (r *RegionStore) switchNextProxyStore(rr *Region, currentProxyIdx AccessIndex, incEpochStoreIdx int) { + if r.proxyTiKVIdx != currentProxyIdx { + return + } + nextIdx := (currentProxyIdx + 1) % AccessIndex(r.accessStoreNum(TiKVOnly)) + // skips the current workTiKVIdx + if nextIdx == r.workTiKVIdx { + nextIdx = (nextIdx + 1) % AccessIndex(r.accessStoreNum(TiKVOnly)) + } + newRegionStore := r.clone() + newRegionStore.proxyTiKVIdx = nextIdx + if incEpochStoreIdx >= 0 { + newRegionStore.storeEpochs[incEpochStoreIdx]++ + } + rr.compareAndSwapStore(r, newRegionStore) +} + +func (r *RegionStore) setProxyStoreIdx(rr *Region, idx AccessIndex) { + if r.proxyTiKVIdx == idx { + return + } + + newRegionStore := r.clone() + newRegionStore.proxyTiKVIdx = idx + success := rr.compareAndSwapStore(r, newRegionStore) + logutil.BgLogger().Debug("try set proxy store index", + zap.Uint64("region", rr.GetID()), + zap.Int("index", int(idx)), + zap.Bool("success", success)) +} + +func (r *RegionStore) unsetProxyStoreIfNeeded(rr *Region) { + r.setProxyStoreIdx(rr, -1) +} + func (r *Region) findElectableStoreID() uint64 { if len(r.meta.Peers) == 0 { return 0 @@ -1457,6 +1623,11 @@ type Store struct { epoch uint32 // store fail epoch, see RegionStore.storeEpochs storeType StoreType // type of the store tokenCount atomic2.Int64 // used store token count + + // whether the store is disconnected due to some reason, therefore requests to the store needs to be + // forwarded by other stores. 
this is also the flag that a checkUntilHealth goroutine is running for this store. + // this mechanism is currently only applicable for TiKV stores. + needForwarding int32 } type resolveState uint64 @@ -1517,8 +1688,9 @@ func (s *Store) initResolve(bo *Backoffer, c *RegionCache) (addr string, err err } } -// reResolve try to resolve addr for store that need check. -func (s *Store) reResolve(c *RegionCache) { +// reResolve try to resolve addr for store that need check. Returns false if the region is in tombstone state or is +// deleted. +func (s *Store) reResolve(c *RegionCache) (bool, error) { var addr string store, err := c.pdClient.GetStore(context.Background(), s.storeID) if err != nil { @@ -1529,7 +1701,7 @@ func (s *Store) reResolve(c *RegionCache) { if err != nil { logutil.BgLogger().Error("loadStore from PD failed", zap.Uint64("id", s.storeID), zap.Error(err)) // we cannot do backoff in reResolve loop but try check other store and wait tick. - return + return false, err } if store == nil || store.State == metapb.StoreState_Tombstone { // store has be removed in PD, we should invalidate all regions using those store. @@ -1537,7 +1709,7 @@ func (s *Store) reResolve(c *RegionCache) { zap.Uint64("store", s.storeID), zap.String("add", s.addr)) atomic.AddUint32(&s.epoch, 1) metrics.RegionCacheCounterWithInvalidateStoreRegionsOK.Inc() - return + return false, nil } storeType := GetStoreTypeByMeta(store) @@ -1553,23 +1725,24 @@ func (s *Store) reResolve(c *RegionCache) { // all region used those oldState := s.getResolveState() if oldState == deleted { - return + return false, nil } newState := deleted if !s.compareAndSwapState(oldState, newState) { goto retryMarkDel } - return + return false, nil } retryMarkResolved: oldState := s.getResolveState() if oldState != needCheck { - return + return true, nil } newState := resolved if !s.compareAndSwapState(oldState, newState) { goto retryMarkResolved } + return true, nil } func (s *Store) getResolveState() resolveState { @@ -1642,18 +1815,73 @@ const ( unreachable ) -func (s *Store) requestLiveness(bo *Backoffer) (l livenessState) { +func (s *Store) startHealthCheckLoopIfNeeded(c *RegionCache) { + // This mechanism doesn't support non-TiKV stores currently. + if s.storeType != TiKV { + logutil.BgLogger().Info("[health check] skip running health check loop for non-tikv store", + zap.Uint64("storeID", s.storeID), zap.String("addr", s.addr)) + return + } + + // It may be already started by another thread. + if atomic.CompareAndSwapInt32(&s.needForwarding, 0, 1) { + go s.checkUntilHealth(c) + } +} + +func (s *Store) checkUntilHealth(c *RegionCache) { + defer atomic.CompareAndSwapInt32(&s.needForwarding, 1, 0) + + ticker := time.NewTicker(time.Second) + lastCheckPDTime := time.Now() + + // TODO(MyonKeminta): Set a more proper ctx here so that it can be interrupted immediately when the RegionCache is + // shutdown. 
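+	// The loop below wakes up once per second. At most once every 30 seconds it
+	// also refreshes the store's meta from PD via reResolve, stopping for good
+	// if the store has been deleted there. Each tick probes liveness, and the
+	// loop exits once the store is reachable again, at which point the deferred
+	// CompareAndSwap clears needForwarding and direct access resumes.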
+ ctx := context.Background() + for { + select { + case <-c.closeCh: + return + case <-ticker.C: + if time.Since(lastCheckPDTime) > time.Second*30 { + lastCheckPDTime = time.Now() + + valid, err := s.reResolve(c) + if err != nil { + logutil.BgLogger().Warn("[health check] failed to re-resolve unhealthy store", zap.Error(err)) + } else if !valid { + logutil.BgLogger().Info("[health check] store meta deleted, stop checking", zap.Uint64("storeID", s.storeID), zap.String("addr", s.addr)) + return + } + } + + bo := NewNoopBackoff(ctx) + l := s.requestLiveness(bo, c) + if l == reachable { + logutil.BgLogger().Info("[health check] store became reachable", zap.Uint64("storeID", s.storeID)) + + return + } + } + } +} + +func (s *Store) requestLiveness(bo *Backoffer, c *RegionCache) (l livenessState) { + if c != nil && c.testingKnobs.mockRequestLiveness != nil { + return c.testingKnobs.mockRequestLiveness(s, bo) + } + if StoreLivenessTimeout == 0 { return unreachable } - saddr := s.saddr - if len(saddr) == 0 { + if s.getResolveState() != resolved { l = unknown return } - rsCh := livenessSf.DoChan(saddr, func() (interface{}, error) { - return invokeKVStatusAPI(saddr, StoreLivenessTimeout), nil + addr := s.addr + rsCh := livenessSf.DoChan(addr, func() (interface{}, error) { + return invokeKVStatusAPI(addr, StoreLivenessTimeout), nil }) var ctx context.Context if bo != nil { @@ -1671,7 +1899,7 @@ func (s *Store) requestLiveness(bo *Backoffer) (l livenessState) { return } -func invokeKVStatusAPI(saddr string, timeout time.Duration) (l livenessState) { +func invokeKVStatusAPI(addr string, timeout time.Duration) (l livenessState) { start := time.Now() defer func() { if l == reachable { @@ -1679,34 +1907,91 @@ func invokeKVStatusAPI(saddr string, timeout time.Duration) (l livenessState) { } else { metrics.StatusCountWithError.Inc() } - metrics.TiKVStatusDuration.WithLabelValues(saddr).Observe(time.Since(start).Seconds()) + metrics.TiKVStatusDuration.WithLabelValues(addr).Observe(time.Since(start).Seconds()) }() ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() - url := fmt.Sprintf("%s://%s/status", config.InternalHTTPSchema(), saddr) - req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + + conn, cli, err := createKVHealthClient(ctx, addr) if err != nil { - logutil.BgLogger().Info("[liveness] build kv status request fail", zap.String("store", saddr), zap.Error(err)) + logutil.BgLogger().Info("[health check] create grpc connection failed", zap.String("store", addr), zap.Error(err)) l = unreachable return } - resp, err := config.InternalHTTPClient().Do(req) + defer func() { + err := conn.Close() + if err != nil { + logutil.BgLogger().Info("[health check] failed to close the grpc connection for health check", zap.String("store", addr), zap.Error(err)) + } + }() + + req := &healthpb.HealthCheckRequest{} + resp, err := cli.Check(ctx, req) if err != nil { - logutil.BgLogger().Info("[liveness] request kv status fail", zap.String("store", saddr), zap.Error(err)) + logutil.BgLogger().Info("[health check] check health error", zap.String("store", addr), zap.Error(err)) l = unreachable return } - defer func() { - err1 := resp.Body.Close() - if err1 != nil { - logutil.BgLogger().Debug("[liveness] close kv status api body failed", zap.String("store", saddr), zap.Error(err)) - } - }() - if resp.StatusCode != http.StatusOK { - logutil.BgLogger().Info("[liveness] request kv status fail", zap.String("store", saddr), zap.String("status", resp.Status)) + + status := 
resp.GetStatus() + if status == healthpb.HealthCheckResponse_UNKNOWN { + logutil.BgLogger().Info("[health check] check health returns unknown", zap.String("store", addr)) + l = unknown + return + } + + if status != healthpb.HealthCheckResponse_SERVING { + logutil.BgLogger().Info("[health check] service not serving", zap.Stringer("status", status)) l = unreachable return } + l = reachable return } + +func createKVHealthClient(ctx context.Context, addr string) (*grpc.ClientConn, healthpb.HealthClient, error) { + // Temporarily directly load the config from the global config, however it's not a good idea to let RegionCache to + // access it. + // TODO: Pass the config in a better way, or use the connArray inner the client directly rather than creating new + // connection. + + cfg := config.GetGlobalConfig() + + opt := grpc.WithInsecure() + if len(cfg.Security.ClusterSSLCA) != 0 { + tlsConfig, err := cfg.Security.ToTLSConfig() + if err != nil { + return nil, nil, errors.Trace(err) + } + opt = grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)) + } + keepAlive := cfg.TiKVClient.GrpcKeepAliveTime + keepAliveTimeout := cfg.TiKVClient.GrpcKeepAliveTimeout + conn, err := grpc.DialContext( + ctx, + addr, + opt, + grpc.WithInitialWindowSize(grpcInitialWindowSize), + grpc.WithInitialConnWindowSize(grpcInitialConnWindowSize), + grpc.WithConnectParams(grpc.ConnectParams{ + Backoff: backoff.Config{ + BaseDelay: 100 * time.Millisecond, // Default was 1s. + Multiplier: 1.6, // Default + Jitter: 0.2, // Default + MaxDelay: 3 * time.Second, // Default was 120s. + }, + MinConnectTimeout: 5 * time.Second, + }), + grpc.WithKeepaliveParams(keepalive.ClientParameters{ + Time: time.Duration(keepAlive) * time.Second, + Timeout: time.Duration(keepAliveTimeout) * time.Second, + PermitWithoutStream: true, + }), + ) + if err != nil { + return nil, nil, errors.Trace(err) + } + cli := healthpb.NewHealthClient(conn) + return conn, cli, nil +} diff --git a/store/tikv/region_cache_test.go b/store/tikv/region_cache_test.go index 650650f1ee89a..5f7eecb3db9e4 100644 --- a/store/tikv/region_cache_test.go +++ b/store/tikv/region_cache_test.go @@ -519,6 +519,58 @@ func (s *testRegionCacheSuite) TestSendFailInvalidateRegionsInSameStore(c *C) { c.Assert(err, IsNil) } +func (s *testRegionCacheSuite) TestSendFailEnableForwarding(c *C) { + s.cache.enableForwarding = true + + // key range: ['' - 'm' - 'z'] + region2 := s.cluster.AllocID() + newPeers := s.cluster.AllocIDs(2) + s.cluster.Split(s.region1, region2, []byte("m"), newPeers, newPeers[0]) + + // Check the two regions. 
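+	// Keys below "m" (like "a") fall in region1; keys at or above "m" (like "x")
+	// fall in region2, so the lookups below should hit different regions.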
+ loc1, err := s.cache.LocateKey(s.bo, []byte("a")) + c.Assert(err, IsNil) + c.Assert(loc1.Region.id, Equals, s.region1) + + // Invoke OnSendFail so that the store will be marked as needForwarding + ctx, err := s.cache.GetTiKVRPCContext(s.bo, loc1.Region, kv.ReplicaReadLeader, 0) + c.Assert(err, IsNil) + c.Assert(ctx, NotNil) + s.cache.OnSendFail(s.bo, ctx, false, errors.New("test error")) + + // ...then on next retry, proxy will be used + ctx, err = s.cache.GetTiKVRPCContext(s.bo, loc1.Region, kv.ReplicaReadLeader, 0) + c.Assert(err, IsNil) + c.Assert(ctx, NotNil) + c.Assert(ctx.ProxyStore, NotNil) + c.Assert(ctx.ProxyStore.storeID, Equals, s.store2) + + // Proxy will be also applied to other regions whose leader is on the store + loc2, err := s.cache.LocateKey(s.bo, []byte("x")) + c.Assert(err, IsNil) + c.Assert(loc2.Region.id, Equals, region2) + ctx, err = s.cache.GetTiKVRPCContext(s.bo, loc2.Region, kv.ReplicaReadLeader, 0) + c.Assert(err, IsNil) + c.Assert(ctx, NotNil) + c.Assert(ctx.ProxyStore, NotNil) + c.Assert(ctx.ProxyStore.storeID, Equals, s.store2) + + // Recover the store + s.cache.testingKnobs.mockRequestLiveness = func(s *Store, bo *Backoffer) livenessState { + return reachable + } + // The proxy should be unset after several retries + for retry := 0; retry < 15; retry++ { + ctx, err = s.cache.GetTiKVRPCContext(s.bo, loc1.Region, kv.ReplicaReadLeader, 0) + c.Assert(err, IsNil) + if ctx.ProxyStore == nil { + break + } + time.Sleep(time.Millisecond * 200) + } + c.Assert(ctx.ProxyStore, IsNil) +} + func (s *testRegionCacheSuite) TestSendFailedInMultipleNode(c *C) { // 3 nodes and no.1 is leader. store3 := s.cluster.AllocID() diff --git a/store/tikv/region_request.go b/store/tikv/region_request.go index 7182bf23d9e91..244534dab432f 100644 --- a/store/tikv/region_request.go +++ b/store/tikv/region_request.go @@ -62,11 +62,12 @@ var ShuttingDown uint32 // errors, since region range have changed, the request may need to split, so we // simply return the error to caller. type RegionRequestSender struct { - regionCache *RegionCache - client Client - storeAddr string - rpcError error - failStoreIDs map[uint64]struct{} + regionCache *RegionCache + client Client + storeAddr string + rpcError error + failStoreIDs map[uint64]struct{} + failProxyStoreIDs map[uint64]struct{} RegionRequestRuntimeStats } @@ -226,7 +227,8 @@ func (s *RegionRequestSender) SendReqCtx( if span := opentracing.SpanFromContext(bo.ctx); span != nil && span.Tracer() != nil { span1 := span.Tracer().StartSpan("regionRequest.SendReqCtx", opentracing.ChildOf(span.Context())) defer span1.Finish() - bo = bo.Clone() + // TODO(MyonKeminta): Make sure trace works without cloning the backoffer. + // bo = bo.Clone() bo.ctx = opentracing.ContextWithSpan(bo.ctx, span1) } @@ -263,7 +265,7 @@ func (s *RegionRequestSender) SendReqCtx( tryTimes := 0 for { - if (tryTimes > 0) && (tryTimes%100000 == 0) { + if (tryTimes > 0) && (tryTimes%1000 == 0) { logutil.Logger(bo.ctx).Warn("retry get ", zap.Uint64("region = ", regionID.GetID()), zap.Int("times = ", tryTimes)) } @@ -286,6 +288,7 @@ func (s *RegionRequestSender) SendReqCtx( // TODO: Change the returned error to something like "region missing in cache", // and handle this error like EpochNotMatch, which means to re-split the request and retry. 
+ logutil.Logger(bo.ctx).Debug("throwing pseudo region error due to region not found in cache", zap.Stringer("region", ®ionID)) resp, err = tikvrpc.GenRegionErrorResp(req, &errorpb.Error{EpochNotMatch: &errorpb.EpochNotMatch{}}) return resp, nil, err } @@ -397,6 +400,16 @@ func (s *RegionRequestSender) sendReqToRegion(bo *Backoffer, rpcCtx *RPCContext, defer cancel() } + // sendToAddr is the first target address that will receive the request. If proxy is used, sendToAddr will point to + // the proxy that will forward the request to the final target. + sendToAddr := rpcCtx.Addr + if rpcCtx.ProxyStore == nil { + req.ForwardedHost = "" + } else { + req.ForwardedHost = rpcCtx.Addr + sendToAddr = rpcCtx.ProxyAddr + } + var sessionID uint64 if v := bo.ctx.Value(util.SessionID); v != nil { sessionID = v.(uint64) @@ -426,7 +439,7 @@ func (s *RegionRequestSender) sendReqToRegion(bo *Backoffer, rpcCtx *RPCContext, if !injectFailOnSend { start := time.Now() - resp, err = s.client.SendRequest(ctx, rpcCtx.Addr, req, timeout) + resp, err = s.client.SendRequest(ctx, sendToAddr, req, timeout) if s.Stats != nil { RecordRegionRequestRuntimeStats(s.Stats, req.Type, time.Since(start)) failpoint.Inject("tikvStoreRespResult", func(val failpoint.Value) { @@ -476,6 +489,16 @@ func (s *RegionRequestSender) sendReqToRegion(bo *Backoffer, rpcCtx *RPCContext, }) } + if rpcCtx.ProxyStore != nil { + fromStore := strconv.FormatUint(rpcCtx.ProxyStore.storeID, 10) + toStore := strconv.FormatUint(rpcCtx.Store.storeID, 10) + result := "ok" + if err != nil { + result = "fail" + } + metrics.TiKVForwardRequestCounter.WithLabelValues(fromStore, toStore, result).Inc() + } + if err != nil { s.rpcError = err @@ -526,7 +549,8 @@ func (s *RegionRequestSender) onSendFail(bo *Backoffer, ctx *RPCContext, err err if span := opentracing.SpanFromContext(bo.ctx); span != nil && span.Tracer() != nil { span1 := span.Tracer().StartSpan("regionRequest.onSendFail", opentracing.ChildOf(span.Context())) defer span1.Finish() - bo = bo.Clone() + // TODO(MyonKeminta): Make sure trace works without cloning the backoffer. + // bo = bo.Clone() bo.ctx = opentracing.ContextWithSpan(bo.ctx, span1) } // If it failed because the context is cancelled by ourself, don't retry. @@ -568,10 +592,25 @@ func (s *RegionRequestSender) NeedReloadRegion(ctx *RPCContext) (need bool) { if s.failStoreIDs == nil { s.failStoreIDs = make(map[uint64]struct{}) } + if s.failProxyStoreIDs == nil { + s.failProxyStoreIDs = make(map[uint64]struct{}) + } s.failStoreIDs[ctx.Store.storeID] = struct{}{} - need = len(s.failStoreIDs) == len(ctx.Meta.Peers) + if ctx.ProxyStore != nil { + s.failProxyStoreIDs[ctx.ProxyStore.storeID] = struct{}{} + } + + if ctx.AccessMode == TiKVOnly && len(s.failStoreIDs)+len(s.failProxyStoreIDs) >= ctx.TiKVNum { + need = true + } else if ctx.AccessMode == TiFlashOnly && len(s.failStoreIDs) >= len(ctx.Meta.Peers)-ctx.TiKVNum { + need = true + } else if len(s.failStoreIDs)+len(s.failProxyStoreIDs) >= len(ctx.Meta.Peers) { + need = true + } + if need { s.failStoreIDs = nil + s.failProxyStoreIDs = nil } return } @@ -599,7 +638,8 @@ func (s *RegionRequestSender) onRegionError(bo *Backoffer, ctx *RPCContext, seed if span := opentracing.SpanFromContext(bo.ctx); span != nil && span.Tracer() != nil { span1 := span.Tracer().StartSpan("tikv.onRegionError", opentracing.ChildOf(span.Context())) defer span1.Finish() - bo = bo.Clone() + // TODO(MyonKeminta): Make sure trace works without cloning the backoffer. 
+ // bo = bo.Clone() bo.ctx = opentracing.ContextWithSpan(bo.ctx, span1) } diff --git a/store/tikv/region_request_test.go b/store/tikv/region_request_test.go index db95239f53d63..bb62439af043c 100644 --- a/store/tikv/region_request_test.go +++ b/store/tikv/region_request_test.go @@ -18,6 +18,7 @@ import ( "fmt" "net" "sync" + "sync/atomic" "time" . "github.com/pingcap/check" @@ -48,6 +49,7 @@ type testRegionRequestToSingleStoreSuite struct { } type testRegionRequestToThreeStoresSuite struct { + OneByOneSuite cluster *mocktikv.Cluster storeIDs []uint64 peerIDs []uint64 @@ -569,3 +571,135 @@ func (s *testRegionRequestToSingleStoreSuite) TestOnMaxTimestampNotSyncedError(c c.Assert(resp, NotNil) }() } + +func (s *testRegionRequestToThreeStoresSuite) loadAndGetLeaderStore(c *C) (*Store, string) { + region, err := s.regionRequestSender.regionCache.findRegionByKey(s.bo, []byte("a"), false) + c.Assert(err, IsNil) + leaderStore, leaderPeer, _, leaderStoreIdx := region.WorkStorePeer(region.getStore()) + c.Assert(leaderPeer.Id, Equals, s.leaderPeer) + leaderAddr, err := s.regionRequestSender.regionCache.getStoreAddr(s.bo, region, leaderStore, leaderStoreIdx) + c.Assert(err, IsNil) + return leaderStore, leaderAddr +} + +func (s *testRegionRequestToThreeStoresSuite) TestForwarding(c *C) { + s.regionRequestSender.regionCache.enableForwarding = true + + // First get the leader's addr from region cache + leaderStore, leaderAddr := s.loadAndGetLeaderStore(c) + + bo := NewBackoffer(context.Background(), 10000) + + // Simulate that the leader is network-partitioned but can be accessed by forwarding via a follower + innerClient := s.regionRequestSender.client + s.regionRequestSender.client = &fnClient{fn: func(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error) { + if addr == leaderAddr { + return nil, errors.New("simulated rpc error") + } + // MockTiKV doesn't support forwarding. Simulate forwarding here. 
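+		// A real TiKV would relay the request itself; the mock just rewrites the
+		// target address to req.ForwardedHost before calling the inner client.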
+ if len(req.ForwardedHost) != 0 { + addr = req.ForwardedHost + } + return innerClient.SendRequest(ctx, addr, req, timeout) + }} + + loc, err := s.regionRequestSender.regionCache.LocateKey(bo, []byte("k")) + c.Assert(err, IsNil) + c.Assert(loc.Region.GetID(), Equals, s.regionID) + req := tikvrpc.NewRequest(tikvrpc.CmdRawPut, &kvrpcpb.RawPutRequest{ + Key: []byte("k"), + Value: []byte("v1"), + }) + resp, ctx, err := s.regionRequestSender.SendReqCtx(bo, req, loc.Region, time.Second, kv.TiKV) + c.Assert(err, IsNil) + regionErr, err := resp.GetRegionError() + c.Assert(err, IsNil) + c.Assert(regionErr, IsNil) + c.Assert(resp.Resp.(*kvrpcpb.RawPutResponse).Error, Equals, "") + c.Assert(ctx.Addr, Equals, leaderAddr) + c.Assert(ctx.ProxyStore, NotNil) + c.Assert(ctx.ProxyAddr, Not(Equals), leaderAddr) + c.Assert(ctx.ProxyAccessIdx, Not(Equals), ctx.AccessIdx) + c.Assert(err, IsNil) + + // Simulate recovering to normal + s.regionRequestSender.client = innerClient + s.regionRequestSender.regionCache.testingKnobs.mockRequestLiveness = func(s *Store, bo *Backoffer) livenessState { + return reachable + } + start := time.Now() + for { + if atomic.LoadInt32(&leaderStore.needForwarding) == 0 { + break + } + if time.Since(start) > 3*time.Second { + c.Fatal("store didn't recover to normal in time") + } + time.Sleep(time.Millisecond * 200) + } + s.regionRequestSender.regionCache.testingKnobs.mockRequestLiveness = nil + + req = tikvrpc.NewRequest(tikvrpc.CmdRawGet, &kvrpcpb.RawGetRequest{Key: []byte("k")}) + resp, ctx, err = s.regionRequestSender.SendReqCtx(bo, req, loc.Region, time.Second, kv.TiKV) + c.Assert(err, IsNil) + regionErr, err = resp.GetRegionError() + c.Assert(err, IsNil) + c.Assert(regionErr, IsNil) + c.Assert(resp.Resp.(*kvrpcpb.RawGetResponse).Value, BytesEquals, []byte("v1")) + c.Assert(ctx.ProxyStore, IsNil) + + // Simulate server down + s.regionRequestSender.client = &fnClient{fn: func(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error) { + if addr == leaderAddr || req.ForwardedHost == leaderAddr { + return nil, errors.New("simulated rpc error") + } + + // MockTiKV doesn't support forwarding. Simulate forwarding here. + if len(req.ForwardedHost) != 0 { + addr = req.ForwardedHost + } + return innerClient.SendRequest(ctx, addr, req, timeout) + }} + // The leader is changed after a store is down. + newLeaderPeerID := s.peerIDs[0] + if newLeaderPeerID == s.leaderPeer { + newLeaderPeerID = s.peerIDs[1] + } + + c.Assert(newLeaderPeerID, Not(Equals), s.leaderPeer) + s.cluster.ChangeLeader(s.regionID, newLeaderPeerID) + + req = tikvrpc.NewRequest(tikvrpc.CmdRawPut, &kvrpcpb.RawPutRequest{ + Key: []byte("k"), + Value: []byte("v2"), + }) + resp, ctx, err = s.regionRequestSender.SendReqCtx(bo, req, loc.Region, time.Second, kv.TiKV) + c.Assert(err, IsNil) + regionErr, err = resp.GetRegionError() + c.Assert(err, IsNil) + // After several retries, the region will be marked as needReload. + // Then SendReqCtx will throw a pseudo EpochNotMatch to tell the caller to reload the region. 
+ c.Assert(regionErr.EpochNotMatch, NotNil) + c.Assert(ctx, IsNil) + c.Assert(len(s.regionRequestSender.failStoreIDs), Equals, 0) + c.Assert(len(s.regionRequestSender.failProxyStoreIDs), Equals, 0) + region := s.regionRequestSender.regionCache.getCachedRegionWithRLock(loc.Region) + c.Assert(region, NotNil) + c.Assert(region.checkNeedReload(), IsTrue) + + loc, err = s.regionRequestSender.regionCache.LocateKey(bo, []byte("k")) + c.Assert(err, IsNil) + req = tikvrpc.NewRequest(tikvrpc.CmdRawPut, &kvrpcpb.RawPutRequest{ + Key: []byte("k"), + Value: []byte("v2"), + }) + resp, ctx, err = s.regionRequestSender.SendReqCtx(bo, req, loc.Region, time.Second, kv.TiKV) + c.Assert(err, IsNil) + regionErr, err = resp.GetRegionError() + c.Assert(err, IsNil) + c.Assert(regionErr, IsNil) + c.Assert(resp.Resp.(*kvrpcpb.RawPutResponse).Error, Equals, "") + // Leader changed + c.Assert(ctx.Store.storeID, Not(Equals), leaderStore.storeID) + c.Assert(ctx.ProxyStore, IsNil) +} From 915dde1f93df4e542bff1eb9a27147a3eede1954 Mon Sep 17 00:00:00 2001 From: lysu Date: Thu, 18 Mar 2021 21:28:35 +0800 Subject: [PATCH 32/44] tidb: fix parquet dep version (#23416) --- go.mod | 3 +-- go.sum | 73 +++------------------------------------------------------- 2 files changed, 4 insertions(+), 72 deletions(-) diff --git a/go.mod b/go.mod index 6d991e86f8ad9..efd86509ed37e 100644 --- a/go.mod +++ b/go.mod @@ -67,8 +67,7 @@ require ( github.com/uber-go/atomic v1.4.0 github.com/uber/jaeger-client-go v2.22.1+incompatible github.com/uber/jaeger-lib v2.4.0+incompatible // indirect - github.com/xitongsys/parquet-go v1.6.0 // indirect - github.com/xitongsys/parquet-go-source v0.0.0-20201108113611-f372b7d813be // indirect + github.com/xitongsys/parquet-go v1.5.5-0.20201110004701-b09c49d6d457 // indirect github.com/zhangjinpeng1987/raft v0.0.0-20200819064223-df31bb68a018 // indirect go.etcd.io/etcd v0.5.0-alpha.5.0.20200824191128-ae9734ed278b go.uber.org/atomic v1.7.0 diff --git a/go.sum b/go.sum index ed4d46c091bab..696df53f1e402 100644 --- a/go.sum +++ b/go.sum @@ -5,44 +5,26 @@ cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6A cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0 h1:0E3eE8MX426vUOs7aHfI7aN1BrIzzzf4ccKCSfSjGmc= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0 h1:MZQCQQaRwOrAcuKjiHWHrgKykt4fZyuwF2dtiG3fGW8= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0 h1:sAbMqjY1PEQKZBWfbu6Y6bsupJ9c4QdHnzg/VvYTLcE= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0 h1:xE3CPsOgttP4ACBePh79zTKALtXwn/Edhcr16R5hMWU= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/datastore v1.0.0 h1:Kt+gOPPp2LEPWp8CSfxhsM8ik9CcyE/gYu+0r+RnZvM= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0 h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ= cloud.google.com/go/datastore v1.1.0/go.mod 
h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0 h1:9/vpR43S4aJaROxqQHQ3nH9lfyKKV0dC3vOmnw8ebQQ= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0 h1:Lpy6hKgdcl7a3WGSfJIFmxmcdjSpP6OmBEfcOv1Y680= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0 h1:RPUcBvDeYgQFMfQu1eBMq6piD1SXmLH+vK3qjewZPus= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0 h1:UDpwYIwla4jHGzZJaEJYx1tOejbgSoNqsAfHAUYe2r8= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc= -github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= -github.com/Azure/azure-storage-blob-go v0.10.0/go.mod h1:ep1edmW+kNQx4UfWM9heESNmQdijykocJ0YOxmMX8SE= -github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= @@ -52,14 +34,11 @@ github.com/HdrHistogram/hdrhistogram-go v0.9.0 h1:dpujRju0R4M/QZzcnR1LH1qm+TVG3U github.com/HdrHistogram/hdrhistogram-go v0.9.0/go.mod h1:nxrse8/Tzg2tg3DZcZjm6qEclQKK70g0KxO61gFFZD4= github.com/Jeffail/gabs/v2 v2.5.1 h1:ANfZYjpMlfTTKebycu4X1AgkVWumFVDYQl7JwOr4mDk= github.com/Jeffail/gabs/v2 v2.5.1/go.mod h1:xCn81vdHKxFUuWWAaD5jCTQDNPBMh5pPs9IJ+NcziBI= -github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc= github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.1 
h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk= github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= @@ -129,10 +108,8 @@ github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfc github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/corona10/goimagehash v1.0.2/go.mod h1:/l9umBhvcHQXVtQO1V6Gp1yD20STawkhRnnX0D1bvVI= -github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -169,11 +146,9 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/color v1.10.0 h1:s36xzo75JdqLaaWoiEHk767eHiwo0598uUxyfiPkDsg= github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= -github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/frankban/quicktest v1.11.1 h1:stwUsXhUGliQs9t0ZS39BWCltFdOHgABiIlihop8AD4= @@ -182,7 +157,6 @@ github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsouza/fake-gcs-server v1.17.0 h1:OeH75kBZcZa3ZE+zz/mFdJ2btt9FgqfjI7gIh9+5fvk= github.com/fsouza/fake-gcs-server v1.17.0/go.mod h1:D1rTE4YCyHFNa99oyJJ5HyclvN/0uQR+pM/VdlL83bw= -github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gin-contrib/gzip v0.0.1/go.mod h1:fGBJBCdt6qCZuCAOwWuFhBB4OOq9EFqlo5dEaFhhu5w= github.com/gin-contrib/sse v0.0.0-20170109093832-22d885f9ecc7/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= @@ -205,19 +179,15 @@ 
github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI= github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= -github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= -github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/spec v0.19.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.19.4 h1:ixzUSnHTd6hCemgtAJgluaTSGYpLNpJY4mA2DIkdOAo= github.com/go-openapi/spec v0.19.4/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM= github.com/go-playground/overalls v0.0.0-20180201144345-22ec1a223b7c/go.mod h1:UqxAgEOt89sCiXlrc/ycnx00LVvUO/eS8tMUkWX4R7w= @@ -243,13 +213,11 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfU github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3 h1:GV+pQPG/EUUbkh47niozDcADz6go/dUwhVzdUQHIVRw= @@ -377,27 +345,21 @@ github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson 
v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e h1:hB2xlXdHp/pmPZq0y3QnmWAArdw9PqbmotexnWx/FU8= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= -github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= -github.com/mattn/go-isatty v0.0.11 h1:FxPOTFNqGkuDUGi3H/qkUbQO4ZiBa2brKq5r0l8TGeM= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.7 h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+twI54= github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.10 h1:CoZ3S2P7pvtP45xOtBw+/mDL2z0RKI576gSkzRRpdGg= github.com/mattn/go-runewidth v0.0.10/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= @@ -405,11 +367,8 @@ github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vq github.com/mattn/go-sqlite3 v2.0.1+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81 h1:QASJXOGm2RZ5Ardbc86qNFvby9AqkLDibfChMtAg5QM= github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg= -github.com/mgechev/revive v1.0.2 h1:v0NxxQ7fSFz/u1NQydPo6EGdq7va0J1BtsZmae6kzUg= github.com/mgechev/revive v1.0.2/go.mod h1:rb0dQy1LVAxW9SWy5R3LPUjevzUbUS316U5MFySA2lo= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/modern-go/concurrent 
v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -423,7 +382,6 @@ github.com/montanaflynn/stats v0.5.0/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFW github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/ncw/directio v1.0.4 h1:CojwI07mCEmRkajgx42Pf8jyCwTs1ji9/Ij9/PJG12k= github.com/ncw/directio v1.0.4/go.mod h1:CKGdcN7StAaqjT7Qack3lAXeX4pjnyc46YeqZH1yWVY= -github.com/ncw/swift v1.0.52/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= github.com/nfnt/resize v0.0.0-20160724205520-891127d8d1b5/go.mod h1:jpp1/29i3P1S/RLdc7JQKbRpFeM1dOBd8T9ki5s+AY8= github.com/ngaut/pools v0.0.0-20180318154953-b7bc8c42aac7 h1:7KAv7KMGTTqSmYZtNdcNTgsos+vFzULLwyElndwn+5c= github.com/ngaut/pools v0.0.0-20180318154953-b7bc8c42aac7/go.mod h1:iWMfgwqYW+e8n5lC/jjNEhwcjbRDpl5NT7n2h+4UNcI= @@ -437,7 +395,6 @@ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLA github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/oleiade/reflections v1.0.1/go.mod h1:rdFxbxq4QXVZWj0F+e9jqjDkc7dbp97vkRixKo2JR60= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/olekukonko/tablewriter v0.0.4 h1:vHD/YYe1Wolo78koG299f7V/VAS08c6IpCLn+Ejf/w8= github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= @@ -451,7 +408,6 @@ github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFSt github.com/pborman/getopt v0.0.0-20180729010549-6fdd0a2c7117/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.3.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= -github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ= github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2 h1:JhzVVoYvbOACxoUmOs6V/G4D5nPVUW73rKvXxP4XUJc= github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= @@ -535,11 +491,9 @@ github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDa github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237 h1:HQagqIiBmr8YXawX/le3+O26N+vPPC1PtjaF3mwnook= github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= -github.com/rivo/uniseg v0.1.0 h1:+2KBaVoUmb9XzDsrx/Ct0W/EYOSFf/nWTauy++DprtY= github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.2.0 
h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= @@ -547,14 +501,10 @@ github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6So github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= -github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/sasha-s/go-deadlock v0.2.0 h1:lMqc+fUb7RrFS3gQLtoQsJ7/6TV/pAIFvBsqX73DK8Y= github.com/sasha-s/go-deadlock v0.2.0/go.mod h1:StQn567HiB1fF2yJ44N9au7wOhrPS3iZqiDbRupzT10= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/sergi/go-diff v1.0.1-0.20180205163309-da645544ed44 h1:tB9NOR21++IjLyVx3/PCPhWMwqGNCMQEH96A6dMZ/gc= github.com/sergi/go-diff v1.0.1-0.20180205163309-da645544ed44/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shirou/gopsutil v2.19.10+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/gopsutil v3.20.12+incompatible h1:6VEGkOXP/eP4o2Ilk8cSsX0PhOEfX6leqAnD+urrp9M= @@ -563,7 +513,6 @@ github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 h1:bUGsEnyNbVPw06Bs80sCeARAlK8lhwqGyi6UT8ymuGk= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/httpgzip v0.0.0-20190720172056-320755c1c1b0/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q= -github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd h1:ug7PpSOB5RBPK1Kg6qskGBoP3Vnj/aNYFTznWvlkGo0= github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= @@ -604,7 +553,6 @@ github.com/swaggo/gin-swagger v1.2.0/go.mod h1:qlH2+W7zXGZkczuL+r2nEBR2JTT+/lX05 github.com/swaggo/http-swagger v0.0.0-20200308142732-58ac5e232fba/go.mod h1:O1lAbCgAAX/KZ80LM/OXwtWFI/5TvZlwxSg8Cq08PV0= github.com/swaggo/swag v1.5.1/go.mod h1:1Bl9F/ZBpVWh22nY0zmYyASPO1lI/zIwRDrpZU+tv8Y= github.com/swaggo/swag v1.6.3/go.mod h1:wcc83tB4Mb2aNiL/HP4MFeQdpHUrca+Rp/DRNgWAUio= -github.com/swaggo/swag v1.6.6-0.20200529100950-7c765ddd0476 h1:UjnSXdNPIG+5FJ6xLQODEdk7gSnJlMldu3sPAxxCO+4= github.com/swaggo/swag v1.6.6-0.20200529100950-7c765ddd0476/go.mod h1:xDhTyuFIujYiN3DKWC/H/83xcfHp+UE/IzWWampG7Zc= github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965 h1:1oFLiOyVl+W7bnBzGhf7BbIv9loSFQcieWWYIjLqcAw= github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA= @@ -635,9 +583,7 @@ github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljT github.com/ugorji/go/codec v1.1.5-pre/go.mod h1:tULtS6Gy1AE1yCENaw4Vb//HLH5njI2tfCQDUqRd8fI= github.com/ugorji/go/codec v1.1.7/go.mod 
h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/unrolled/render v1.0.1/go.mod h1:gN9T0NhL4Bfbwu8ann7Ry/TGHYfosul+J0obPf6NBdM= -github.com/urfave/cli v1.20.0 h1:fDqGv3UG/4jbVl/QkFwEdddtEDjh/5Ov6X+0B/3bPaw= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli/v2 v2.1.1 h1:Qt8FeAtxE/vfdrLmR3rxR6JRE0RoVmbXu8+6kZtYU4k= github.com/urfave/cli/v2 v2.1.1/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ= github.com/urfave/negroni v0.3.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= github.com/vmihailenco/msgpack/v4 v4.3.11/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= @@ -646,12 +592,11 @@ github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgq github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xitongsys/parquet-go v1.5.1/go.mod h1:xUxwM8ELydxh4edHGegYq1pA8NnMKDx0K/GyB0o2bww= -github.com/xitongsys/parquet-go v1.6.0 h1:j6YrTVZdQx5yywJLIOklZcKVsCoSD1tqOVRXyTBFSjs= -github.com/xitongsys/parquet-go v1.6.0/go.mod h1:pheqtXeHQFzxJk45lRQ0UIGIivKnLXvialZSFWs81A8= +github.com/xitongsys/parquet-go v1.5.5-0.20201110004701-b09c49d6d457 h1:tBbuFCtyJNKT+BFAv6qjvTFpVdy97IYNaBwGUXifIUs= +github.com/xitongsys/parquet-go v1.5.5-0.20201110004701-b09c49d6d457/go.mod h1:pheqtXeHQFzxJk45lRQ0UIGIivKnLXvialZSFWs81A8= github.com/xitongsys/parquet-go-source v0.0.0-20190524061010-2b72cbee77d5/go.mod h1:xxCx7Wpym/3QCo6JhujJX51dzSXrwmb0oH6FQb39SEA= +github.com/xitongsys/parquet-go-source v0.0.0-20200817004010-026bad9b25d0 h1:a742S4V5A15F93smuVxA60LQWsrCnN8bKeWDBARU1/k= github.com/xitongsys/parquet-go-source v0.0.0-20200817004010-026bad9b25d0/go.mod h1:HYhIKsdns7xz80OgkbgJYrtQY7FjHWHKH6cvN7+czGE= -github.com/xitongsys/parquet-go-source v0.0.0-20201108113611-f372b7d813be h1:33jqDHcXK6vfgtLossgwZmTXyLCdPZU3/KZ3988bk3Q= -github.com/xitongsys/parquet-go-source v0.0.0-20201108113611-f372b7d813be/go.mod h1:SQSSW1CBj/egoUhnaTXihUlDayvpp01Fn8qwuEpK5bY= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yookoala/realpath v1.0.0/go.mod h1:gJJMA9wuX7AcqLy1+ffPatSCySA1FQ2S8Ya9AIoYBpE= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -667,7 +612,6 @@ go.etcd.io/etcd v0.5.0-alpha.5.0.20200824191128-ae9734ed278b h1:3kC4J3eQF6p1UEfQ go.etcd.io/etcd v0.5.0-alpha.5.0.20200824191128-ae9734ed278b/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -707,7 +651,6 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191205180655-e7c4368fe9dd/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a h1:vclmkQCjlDX5OydZ9wv8rBCcS0QyQY66Mpf/7BZbInM= @@ -718,7 +661,6 @@ golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxT golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299 h1:zQpM52jfKHG6II1ISZY1ZcpygvuSFZpLwfluuF89XOg= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= @@ -734,7 +676,6 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367 h1:0IiAsCRByjO2QjX7ZPkw5oU9x+n1YqRL802rjC0c3Aw= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= @@ -769,7 +710,6 @@ golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -778,14 +718,12 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME= 
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6 h1:pE8b58s1HRDMi8RDc79m0HISf9D4TzseP40cEA6IGfs= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -823,7 +761,6 @@ golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -849,7 +786,6 @@ golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -910,7 +846,6 @@ google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEn google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0 h1:yzlyyDW/J0w8yNFJIhiAJy4kq74S+1DOLdawELNxFMA= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.18.0 h1:TgDr+1inK2XVUKZx3BYAqQg/GwucGdBkzZjWaTg/I+A= @@ -934,7 +869,6 @@ google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBr google.golang.org/genproto 
v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb h1:ADPHZzpzM4tk4V4S5cnCrr5SwzvlrPRmqqCuJDB8UTs= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= @@ -990,7 +924,6 @@ gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= From 3813da0140e0b7f14a3fea4d1dbd19296f6b2a3f Mon Sep 17 00:00:00 2001 From: Lei Zhao Date: Thu, 18 Mar 2021 23:41:36 +0800 Subject: [PATCH 33/44] store/tikv: forward requests by BatchCommands (#23243) --- store/tikv/client.go | 27 +- store/tikv/client_batch.go | 618 ++++++++++++++++++------------ store/tikv/client_fail_test.go | 85 ++++ store/tikv/client_test.go | 212 ++++++++-- store/tikv/region_cache_test.go | 10 +- store/tikv/region_request_test.go | 10 +- 6 files changed, 662 insertions(+), 300 deletions(-) diff --git a/store/tikv/client.go b/store/tikv/client.go index 255093c9f8ba8..e9db387f763a3 100644 --- a/store/tikv/client.go +++ b/store/tikv/client.go @@ -181,14 +181,16 @@ func (a *connArray) Init(addr string, security config.Security, idleNotify *uint if allowBatch { batchClient := &batchCommandsClient{ - target: a.target, - conn: conn, - batched: sync.Map{}, - idAlloc: 0, - closed: 0, - tikvClientCfg: cfg.TiKVClient, - tikvLoad: &a.tikvTransportLayerLoad, - dialTimeout: a.dialTimeout, + target: a.target, + conn: conn, + forwardedClients: make(map[string]*batchCommandsStream), + batched: sync.Map{}, + epoch: 0, + closed: 0, + tikvClientCfg: cfg.TiKVClient, + tikvLoad: &a.tikvTransportLayerLoad, + dialTimeout: a.dialTimeout, + tryLock: tryLock{sync.NewCond(new(sync.Mutex)), false}, } a.batchCommandsClients = append(a.batchCommandsClients, batchClient) } @@ -357,7 +359,6 @@ func (c *RPCClient) SendRequest(ctx context.Context, addr string, req *tikvrpc.R c.recycleMu.Unlock() } - // enableBatch means TiDB can send BatchCommands to the connection. It doesn't mean TiDB must do it. // TiDB will not send batch commands to TiFlash, to resolve the conflict with Batch Cop Request. 
enableBatch := req.StoreTp != kv.TiDB && req.StoreTp != kv.TiFlash c.recycleMu.RLock() @@ -367,16 +368,12 @@ func (c *RPCClient) SendRequest(ctx context.Context, addr string, req *tikvrpc.R return nil, errors.Trace(err) } - // TiDB uses [gRPC-metadata](https://github.com/grpc/grpc-go/blob/master/Documentation/grpc-metadata.md) to - // indicate a request needs forwarding. gRPC doesn't support setting a metadata for each request in a stream, - // so we don't use BatchCommands for forwarding for now. - canBatch := enableBatch && req.ForwardedHost == "" // TiDB RPC server supports batch RPC, but the batch connection sends heartbeats. It's not necessary since // requests to TiDB are not high frequency. - if config.GetGlobalConfig().TiKVClient.MaxBatchSize > 0 && canBatch { + if config.GetGlobalConfig().TiKVClient.MaxBatchSize > 0 && enableBatch { if batchReq := req.ToBatchCommandsRequest(); batchReq != nil { defer trace.StartRegion(ctx, req.Type.String()).End() - return sendBatchRequest(ctx, addr, connArray.batchConn, batchReq, timeout) + return sendBatchRequest(ctx, addr, req.ForwardedHost, connArray.batchConn, batchReq, timeout) } } diff --git a/store/tikv/client_batch.go b/store/tikv/client_batch.go index dca4787de41d4..39a669c56c68e 100644 --- a/store/tikv/client_batch.go +++ b/store/tikv/client_batch.go @@ -34,8 +34,126 @@ import ( "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/metadata" ) +type batchCommandsEntry struct { + ctx context.Context + req *tikvpb.BatchCommandsRequest_Request + res chan *tikvpb.BatchCommandsResponse_Response + // forwardedHost is the address of a store which will handle the request. + // It's different from the address the request is sent to. + forwardedHost string + // canceled indicates whether the request is canceled or not. + canceled int32 + err error +} + +func (b *batchCommandsEntry) isCanceled() bool { + return atomic.LoadInt32(&b.canceled) == 1 +} + +func (b *batchCommandsEntry) error(err error) { + b.err = err + close(b.res) +} + +// batchCommandsBuilder collects a batch of `batchCommandsEntry`s to build +// `BatchCommandsRequest`s. +type batchCommandsBuilder struct { + // Each BatchCommandsRequest_Request sent to a store has a unique identity to + // distinguish its response. + idAlloc uint64 + entries []*batchCommandsEntry + requests []*tikvpb.BatchCommandsRequest_Request + requestIDs []uint64 + // In most cases, there isn't any forwardingReq. + forwardingReqs map[string]*tikvpb.BatchCommandsRequest +} + +func (b *batchCommandsBuilder) len() int { + return len(b.entries) +} + +func (b *batchCommandsBuilder) push(entry *batchCommandsEntry) { + b.entries = append(b.entries, entry) +} + +// build builds BatchCommandsRequests and calls collect() for each valid entry. +// The first return value is the request that doesn't need forwarding. +// The second is a map that maps forwarded hosts to requests.
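// For illustration (an added sketch, not part of the original patch; the host
// below is hypothetical): given three queued entries where only the second
// carries a forwarded host,
//
//	e1{forwardedHost: ""}, e2{forwardedHost: "tikv2:20160"}, e3{forwardedHost: ""}
//
// build returns BatchCommandsRequest{Requests: [e1.req, e3.req], RequestIds: [0, 2]}
// together with the map {"tikv2:20160": {Requests: [e2.req], RequestIds: [1]}}.
// The IDs come from the shared idAlloc, so they stay unique across both return
// values and the receive loop can match responses from either path.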
+func (b *batchCommandsBuilder) build( + collect func(id uint64, e *batchCommandsEntry), +) (*tikvpb.BatchCommandsRequest, map[string]*tikvpb.BatchCommandsRequest) { + for _, e := range b.entries { + if e.isCanceled() { + continue + } + if collect != nil { + collect(b.idAlloc, e) + } + if e.forwardedHost == "" { + b.requestIDs = append(b.requestIDs, b.idAlloc) + b.requests = append(b.requests, e.req) + } else { + batchReq, ok := b.forwardingReqs[e.forwardedHost] + if !ok { + batchReq = &tikvpb.BatchCommandsRequest{} + b.forwardingReqs[e.forwardedHost] = batchReq + } + batchReq.RequestIds = append(batchReq.RequestIds, b.idAlloc) + batchReq.Requests = append(batchReq.Requests, e.req) + } + b.idAlloc++ + } + var req *tikvpb.BatchCommandsRequest + if len(b.requests) > 0 { + req = &tikvpb.BatchCommandsRequest{ + Requests: b.requests, + RequestIds: b.requestIDs, + } + } + return req, b.forwardingReqs +} + +func (b *batchCommandsBuilder) cancel(e error) { + for _, entry := range b.entries { + entry.error(e) + } +} + +// reset resets the builder to the initial state. +// Should call it before collecting a new batch. +func (b *batchCommandsBuilder) reset() { + // NOTE: We can't simply set entries = entries[:0] here. + // The data in the cap part of the slice would reference the prewrite keys whose + // underlying memory is borrowed from memdb. The reference prevents GC from releasing + // the memdb, leading to serious memory leak problems in the large transaction case. + for i := 0; i < len(b.entries); i++ { + b.entries[i] = nil + } + b.entries = b.entries[:0] + for i := 0; i < len(b.requests); i++ { + b.requests[i] = nil + } + b.requests = b.requests[:0] + b.requestIDs = b.requestIDs[:0] + + for k := range b.forwardingReqs { + delete(b.forwardingReqs, k) + } +} + +func newBatchCommandsBuilder(maxBatchSize uint) *batchCommandsBuilder { + return &batchCommandsBuilder{ + idAlloc: 0, + entries: make([]*batchCommandsEntry, 0, maxBatchSize), + requests: make([]*tikvpb.BatchCommandsRequest_Request, 0, maxBatchSize), + requestIDs: make([]uint64, 0, maxBatchSize), + forwardingReqs: make(map[string]*tikvpb.BatchCommandsRequest), + } +} + type batchConn struct { // An atomic flag indicates whether the batch is idle or not. // 0 for busy, others for idle. @@ -47,6 +165,8 @@ type batchConn struct { tikvTransportLayerLoad uint64 closed chan struct{} + reqBuilder *batchCommandsBuilder + // Notify rpcClient to check the idle flag idleNotify *uint32 idleDetect *time.Timer @@ -63,9 +183,9 @@ func newBatchConn(connCount, maxBatchSize uint, idleNotify *uint32) *batchConn { batchCommandsClients: make([]*batchCommandsClient, 0, connCount), tikvTransportLayerLoad: 0, closed: make(chan struct{}), - - idleNotify: idleNotify, - idleDetect: time.NewTimer(idleTimeout), + reqBuilder: newBatchCommandsBuilder(maxBatchSize), + idleNotify: idleNotify, + idleDetect: time.NewTimer(idleTimeout), } } @@ -76,8 +196,6 @@ func (a *batchConn) isIdle() bool { // fetchAllPendingRequests fetches all pending requests from the channel. func (a *batchConn) fetchAllPendingRequests( maxBatchSize int, - entries *[]*batchCommandsEntry, - requests *[]*tikvpb.BatchCommandsRequest_Request, ) time.Time { // Block on the first element. var headEntry *batchCommandsEntry @@ -100,18 +218,16 @@ func (a *batchConn) fetchAllPendingRequests( return time.Now() } ts := time.Now() - *entries = append(*entries, headEntry) - *requests = append(*requests, headEntry.req) + a.reqBuilder.push(headEntry) // This loop tries its best to collect more requests.
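// For illustration (added note, not from the patch): the loop below is the
// standard Go "drain without blocking" idiom: a select with a default arm
// returns as soon as the channel is empty, so the batch is dispatched
// immediately once no more requests are pending.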
- for len(*entries) < maxBatchSize { + for a.reqBuilder.len() < maxBatchSize { select { case entry := <-a.batchCommandsCh: if entry == nil { return ts } - *entries = append(*entries, entry) - *requests = append(*requests, entry.req) + a.reqBuilder.push(entry) default: return ts } @@ -120,24 +236,20 @@ func (a *batchConn) fetchAllPendingRequests( } // fetchMorePendingRequests fetches more pending requests from the channel. -func fetchMorePendingRequests( - ch chan *batchCommandsEntry, +func (a *batchConn) fetchMorePendingRequests( maxBatchSize int, batchWaitSize int, maxWaitTime time.Duration, - entries *[]*batchCommandsEntry, - requests *[]*tikvpb.BatchCommandsRequest_Request, ) { // Try to collect `batchWaitSize` requests, or wait `maxWaitTime`. after := time.NewTimer(maxWaitTime) - for len(*entries) < batchWaitSize { + for a.reqBuilder.len() < batchWaitSize { select { - case entry := <-ch: + case entry := <-a.batchCommandsCh: if entry == nil { return } - *entries = append(*entries, entry) - *requests = append(*requests, entry.req) + a.reqBuilder.push(entry) case <-after.C: return } @@ -147,64 +259,209 @@ func fetchMorePendingRequests( // Do an additional non-blocking try. Here we test the length with `maxBatchSize` instead // of `batchWaitSize` because trying our best to fetch more requests is necessary so that // we can adjust the `batchWaitSize` dynamically. - for len(*entries) < maxBatchSize { + for a.reqBuilder.len() < maxBatchSize { select { - case entry := <-ch: + case entry := <-a.batchCommandsCh: if entry == nil { return } - *entries = append(*entries, entry) - *requests = append(*requests, entry.req) + a.reqBuilder.push(entry) default: return } } } +const idleTimeout = 3 * time.Minute + +func (a *batchConn) batchSendLoop(cfg config.TiKVClient) { + defer func() { + if r := recover(); r != nil { + metrics.TiKVPanicCounter.WithLabelValues(metrics.LabelBatchSendLoop).Inc() + logutil.BgLogger().Error("batchSendLoop", + zap.Reflect("r", r), + zap.Stack("stack")) + logutil.BgLogger().Info("restart batchSendLoop") + go a.batchSendLoop(cfg) + } + }() + + bestBatchWaitSize := cfg.BatchWaitSize + for { + a.reqBuilder.reset() + + start := a.fetchAllPendingRequests(int(cfg.MaxBatchSize)) + a.pendingRequests.Observe(float64(len(a.batchCommandsCh))) + a.batchSize.Observe(float64(a.reqBuilder.len())) + + // curl -XPUT -d 'return(true)' http://0.0.0.0:10080/fail/github.com/pingcap/tidb/store/tikv/mockBlockOnBatchClient + failpoint.Inject("mockBlockOnBatchClient", func(val failpoint.Value) { + if val.(bool) { + time.Sleep(1 * time.Hour) + } + }) + + if a.reqBuilder.len() < int(cfg.MaxBatchSize) && cfg.MaxBatchWaitTime > 0 { + // If the target TiKV is overloaded, wait a while to collect more requests. + if atomic.LoadUint64(&a.tikvTransportLayerLoad) >= uint64(cfg.OverloadThreshold) { + metrics.TiKvBatchWaitOverLoad.Add(1) + a.fetchMorePendingRequests(int(cfg.MaxBatchSize), int(bestBatchWaitSize), cfg.MaxBatchWaitTime) + } + } + length := a.reqBuilder.len() + if uint(length) == 0 { + // The batch command channel is closed. + return + } else if uint(length) < bestBatchWaitSize && bestBatchWaitSize > 1 { + // Waited too long to collect requests, so reduce the target batch size. + bestBatchWaitSize-- + } else if uint(length) > bestBatchWaitSize+4 && bestBatchWaitSize < cfg.MaxBatchSize { + bestBatchWaitSize++ + } + + a.getClientAndSend() + metrics.TiKVBatchSendLatency.Observe(float64(time.Since(start))) + } +} + +func (a *batchConn) getClientAndSend() { + // Choose a connection by round-robin.
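// For illustration (added note, not from the patch): with three clients and
// index starting at 0, successive batches probe clients 1, 2, 0, 1, ...
// A client whose stream is being re-created fails tryLockForSend and is
// skipped, so a single reconnecting connection doesn't stall the send loop.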
+ var ( + cli *batchCommandsClient + target string + ) + for i := 0; i < len(a.batchCommandsClients); i++ { + a.index = (a.index + 1) % uint32(len(a.batchCommandsClients)) + target = a.batchCommandsClients[a.index].target + // The lock protects the batchCommandsClient from being closed while it's in use. + if a.batchCommandsClients[a.index].tryLockForSend() { + cli = a.batchCommandsClients[a.index] + break + } + } + if cli == nil { + logutil.BgLogger().Warn("no available connections", zap.String("target", target)) + metrics.TiKVNoAvailableConnectionCounter.Inc() + + // Please ensure the error is handled in region cache correctly. + a.reqBuilder.cancel(errors.New("no available connections")) + return + } + defer cli.unlockForSend() + + req, forwardingReqs := a.reqBuilder.build(func(id uint64, e *batchCommandsEntry) { + cli.batched.Store(id, e) + if trace.IsEnabled() { + trace.Log(e.ctx, "rpc", "send") + } + }) + if req != nil { + cli.send("", req) + } + for forwardedHost, req := range forwardingReqs { + cli.send(forwardedHost, req) + } +} + type tryLock struct { - sync.RWMutex + *sync.Cond reCreating bool } func (l *tryLock) tryLockForSend() bool { - l.RLock() + l.L.Lock() if l.reCreating { - l.RUnlock() + l.L.Unlock() return false } return true } func (l *tryLock) unlockForSend() { - l.RUnlock() + l.L.Unlock() } func (l *tryLock) lockForRecreate() { - l.Lock() + l.L.Lock() + for l.reCreating { + l.Wait() + } l.reCreating = true - l.Unlock() - + l.L.Unlock() } func (l *tryLock) unlockForRecreate() { - l.Lock() + l.L.Lock() l.reCreating = false - l.Unlock() + l.Broadcast() + l.L.Unlock() +} + +type batchCommandsStream struct { + tikvpb.Tikv_BatchCommandsClient + forwardedHost string +} + +func (s *batchCommandsStream) recv() (resp *tikvpb.BatchCommandsResponse, err error) { + defer func() { + if r := recover(); r != nil { + metrics.TiKVPanicCounter.WithLabelValues(metrics.LabelBatchRecvLoop).Inc() + logutil.BgLogger().Error("batchCommandsClient.recv panic", + zap.Reflect("r", r), + zap.Stack("stack")) + err = errors.SuspendStack(errors.New("batch conn recv paniced")) + } + }() + failpoint.Inject("gotErrorInRecvLoop", func(_ failpoint.Value) (resp *tikvpb.BatchCommandsResponse, err error) { + err = errors.New("injected error in batchRecvLoop") + return + }) + // When `conn.Close()` is called, `client.Recv()` will return an error. + resp, err = s.Recv() + return +} + +// recreate creates a new BatchCommands stream. The conn should be ready for work. +func (s *batchCommandsStream) recreate(conn *grpc.ClientConn) error { + tikvClient := tikvpb.NewTikvClient(conn) + ctx := context.TODO() + // Set metadata for forwarding stream. + if s.forwardedHost != "" { + ctx = metadata.AppendToOutgoingContext(ctx, forwardMetadataKey, s.forwardedHost) + } + streamClient, err := tikvClient.BatchCommands(ctx) + if err != nil { + return errors.Trace(err) + } + s.Tikv_BatchCommandsClient = streamClient + return nil } type batchCommandsClient struct { // The target host. target string - conn *grpc.ClientConn - client tikvpb.Tikv_BatchCommandsClient - batched sync.Map - idAlloc uint64 + conn *grpc.ClientConn + // client and forwardedClients are protected by tryLock. + // + // client is the stream that doesn't need forwarding. + client *batchCommandsStream + // TiDB uses [gRPC-metadata](https://github.com/grpc/grpc-go/blob/master/Documentation/grpc-metadata.md) to + // indicate a request needs forwarding.
gRPC doesn't support setting a metadata for each request in a stream, + // so we need to create a stream for each forwarded host. + // + // forwardedClients are clients that need forwarding. It's a map that maps forwarded hosts to streams. + forwardedClients map[string]*batchCommandsStream + batched sync.Map tikvClientCfg config.TiKVClient tikvLoad *uint64 dialTimeout time.Duration + // Increased on each reconnection. + // It's used to prevent the connection from reconnecting multiple times + // due to one failure because there may be more than one `batchRecvLoop`. + epoch uint64 // closed indicates the batch client is closed explicitly or not. closed int32 // tryLock protects client when re-creating the streaming. @@ -215,64 +472,43 @@ func (c *batchCommandsClient) isStopped() bool { return atomic.LoadInt32(&c.closed) != 0 } -func (c *batchCommandsClient) send(request *tikvpb.BatchCommandsRequest, entries []*batchCommandsEntry) { - for i, requestID := range request.RequestIds { - c.batched.Store(requestID, entries[i]) - if trace.IsEnabled() { - trace.Log(entries[i].ctx, "rpc", "send") - } - } - - err := c.initBatchClient() +func (c *batchCommandsClient) send(forwardedHost string, req *tikvpb.BatchCommandsRequest) { + err := c.initBatchClient(forwardedHost) if err != nil { logutil.BgLogger().Warn( "init create streaming fail", zap.String("target", c.target), + zap.String("forwardedHost", forwardedHost), zap.Error(err), ) c.failPendingRequests(err) return } - if err := c.client.Send(request); err != nil { + client := c.client + if forwardedHost != "" { + client = c.forwardedClients[forwardedHost] + } + if err := client.Send(req); err != nil { logutil.BgLogger().Info( "sending batch commands meets error", zap.String("target", c.target), - zap.Uint64s("requestIDs", request.RequestIds), + zap.String("forwardedHost", forwardedHost), + zap.Uint64s("requestIDs", req.RequestIds), zap.Error(err), ) c.failPendingRequests(err) } } -func (c *batchCommandsClient) recv() (resp *tikvpb.BatchCommandsResponse, err error) { - defer func() { - if r := recover(); r != nil { - metrics.TiKVPanicCounter.WithLabelValues(metrics.LabelBatchRecvLoop).Inc() - logutil.BgLogger().Error("batchCommandsClient.recv panic", - zap.Reflect("r", r), - zap.Stack("stack")) - err = errors.SuspendStack(errors.New("batch conn recv paniced")) - } - }() - failpoint.Inject("gotErrorInRecvLoop", func(_ failpoint.Value) (resp *tikvpb.BatchCommandsResponse, err error) { - err = errors.New("injected error in batchRecvLoop") - return - }) - // When `conn.Close()` is called, `client.Recv()` will return an error. - resp, err = c.client.Recv() - return -} - // `failPendingRequests` must be called in locked contexts in order to avoid double closing channels. func (c *batchCommandsClient) failPendingRequests(err error) { failpoint.Inject("panicInFailPendingRequests", nil) c.batched.Range(func(key, value interface{}) bool { id, _ := key.(uint64) entry, _ := value.(*batchCommandsEntry) - entry.err = err c.batched.Delete(id) - close(entry.res) + entry.error(err) return true }) } @@ -301,33 +537,30 @@ func (c *batchCommandsClient) waitConnReady() (err error) { return } -func (c *batchCommandsClient) reCreateStreamingClientOnce(perr error) error { - c.failPendingRequests(perr) // fail all pending requests. - +func (c *batchCommandsClient) recreateStreamingClientOnce(streamClient *batchCommandsStream) error { err := c.waitConnReady() // Re-establish an application layer stream. TCP layer is handled by gRPC.
if err == nil { - tikvClient := tikvpb.NewTikvClient(c.conn) - var streamClient tikvpb.Tikv_BatchCommandsClient - streamClient, err = tikvClient.BatchCommands(context.TODO()) + err := streamClient.recreate(c.conn) if err == nil { logutil.BgLogger().Info( "batchRecvLoop re-create streaming success", zap.String("target", c.target), + zap.String("forwardedHost", streamClient.forwardedHost), ) - c.client = streamClient return nil } } logutil.BgLogger().Info( "batchRecvLoop re-create streaming fail", zap.String("target", c.target), + zap.String("forwardedHost", streamClient.forwardedHost), zap.Error(err), ) return err } -func (c *batchCommandsClient) batchRecvLoop(cfg config.TiKVClient, tikvTransportLayerLoad *uint64) { +func (c *batchCommandsClient) batchRecvLoop(cfg config.TiKVClient, tikvTransportLayerLoad *uint64, streamClient *batchCommandsStream) { defer func() { if r := recover(); r != nil { metrics.TiKVPanicCounter.WithLabelValues(metrics.LabelBatchRecvLoop).Inc() @@ -335,12 +568,13 @@ func (c *batchCommandsClient) batchRecvLoop(cfg config.TiKVClient, tikvTransport zap.Reflect("r", r), zap.Stack("stack")) logutil.BgLogger().Info("restart batchRecvLoop") - go c.batchRecvLoop(cfg, tikvTransportLayerLoad) + go c.batchRecvLoop(cfg, tikvTransportLayerLoad, streamClient) } }() + epoch := atomic.LoadUint64(&c.epoch) for { - resp, err := c.recv() + resp, err := streamClient.recv() if err != nil { if c.isStopped() { return @@ -348,11 +582,12 @@ func (c *batchCommandsClient) batchRecvLoop(cfg config.TiKVClient, tikvTransport logutil.BgLogger().Info( "batchRecvLoop fails when receiving, needs to reconnect", zap.String("target", c.target), + zap.String("forwardedHost", streamClient.forwardedHost), zap.Error(err), ) now := time.Now() - if stopped := c.reCreateStreamingClient(err); stopped { + if stopped := c.recreateStreamingClient(err, streamClient, &epoch); stopped { return } metrics.TiKVBatchClientUnavailable.Observe(time.Since(now).Seconds()) @@ -365,7 +600,7 @@ func (c *batchCommandsClient) batchRecvLoop(cfg config.TiKVClient, tikvTransport if !ok { // this may be caused by batchCommandsClient#send meeting an ambiguous error where the request has been sent to TiKV but an error is still reported. // then TiKV will send the response back through the stream and it reaches here. - logutil.BgLogger().Warn("batchRecvLoop receives outdated response", zap.Uint64("requestID", requestID)) + logutil.BgLogger().Warn("batchRecvLoop receives outdated response", zap.Uint64("requestID", requestID), zap.String("forwardedHost", streamClient.forwardedHost)) continue } entry := value.(*batchCommandsEntry) @@ -389,17 +624,50 @@ func (c *batchCommandsClient) batchRecvLoop(cfg config.TiKVClient, tikvTransport } } -func (c *batchCommandsClient) reCreateStreamingClient(err error) (stopped bool) { - // Forbids the batchSendLoop using the old client. +func (c *batchCommandsClient) recreateStreamingClient(err error, streamClient *batchCommandsStream, epoch *uint64) (stopped bool) { + // Forbids the batchSendLoop from using the old client and + // blocks other streams trying to recreate. c.lockForRecreate() defer c.unlockForRecreate() + // Each batchCommandsStream has a batchRecvLoop. There is only one stream waiting for + // the connection to be ready in every epoch to prevent the connection from reconnecting + // multiple times due to one failure. + // + // Check it in the locked scope to prevent the stream which gets the token from + // reconnecting too late, i.e.
+ // goroutine 1 | goroutine 2 + // CAS success | + // | CAS failure + // | lockForRecreate + // | recreate error + // | unlockForRecreate + // lockForRecreate | + // waitConnReady | + // recreate | + // unlockForRecreate | + waitConnReady := atomic.CompareAndSwapUint64(&c.epoch, *epoch, *epoch+1) + if !waitConnReady { + *epoch = atomic.LoadUint64(&c.epoch) + if err := streamClient.recreate(c.conn); err != nil { + logutil.BgLogger().Info( + "batchRecvLoop re-create streaming fail", + zap.String("target", c.target), + zap.String("forwardedHost", streamClient.forwardedHost), + zap.Error(err), + ) + } + return c.isStopped() + } + *epoch++ + + c.failPendingRequests(err) // fail all pending requests. b := NewBackofferWithVars(context.Background(), math.MaxInt32, nil) for { // try to re-create the streaming in the loop. if c.isStopped() { return true } - err1 := c.reCreateStreamingClientOnce(err) + err1 := c.recreateStreamingClientOnce(streamClient) if err1 == nil { break } @@ -412,149 +680,19 @@ func (c *batchCommandsClient) reCreateStreamingClient(err error) (stopped bool) return false } -type batchCommandsEntry struct { - ctx context.Context - req *tikvpb.BatchCommandsRequest_Request - res chan *tikvpb.BatchCommandsResponse_Response - - // canceled indicated the request is canceled or not. - canceled int32 - err error -} - -func (b *batchCommandsEntry) isCanceled() bool { - return atomic.LoadInt32(&b.canceled) == 1 -} - -const idleTimeout = 3 * time.Minute - -func resetEntries(entries []*batchCommandsEntry) []*batchCommandsEntry { - for i := 0; i < len(entries); i++ { - entries[i] = nil +func (c *batchCommandsClient) newBatchStream(forwardedHost string) (*batchCommandsStream, error) { + batchStream := &batchCommandsStream{forwardedHost: forwardedHost} + if err := batchStream.recreate(c.conn); err != nil { + return nil, errors.Trace(err) } - entries = entries[:0] - return entries + return batchStream, nil } -func resetRequests(requests []*tikvpb.BatchCommandsRequest_Request) []*tikvpb.BatchCommandsRequest_Request { - for i := 0; i < len(requests); i++ { - requests[i] = nil - } - requests = requests[:0] - return requests -} - -func (a *batchConn) batchSendLoop(cfg config.TiKVClient) { - defer func() { - if r := recover(); r != nil { - metrics.TiKVPanicCounter.WithLabelValues(metrics.LabelBatchSendLoop).Inc() - logutil.BgLogger().Error("batchSendLoop", - zap.Reflect("r", r), - zap.Stack("stack")) - logutil.BgLogger().Info("restart batchSendLoop") - go a.batchSendLoop(cfg) - } - }() - - entries := make([]*batchCommandsEntry, 0, cfg.MaxBatchSize) - requests := make([]*tikvpb.BatchCommandsRequest_Request, 0, cfg.MaxBatchSize) - requestIDs := make([]uint64, 0, cfg.MaxBatchSize) - - var bestBatchWaitSize = cfg.BatchWaitSize - for { - // NOTE: We can't simply set entries = entries[:0] here. - // The data in the cap part of the slice would reference the prewrite keys whose - // underlying memory is borrowed from memdb. The reference cause GC can't release - // the memdb, leading to serious memory leak problems in the large transaction case. 
- entries = resetEntries(entries) - requests = resetRequests(requests) - requestIDs = requestIDs[:0] - - start := a.fetchAllPendingRequests(int(cfg.MaxBatchSize), &entries, &requests) - a.pendingRequests.Observe(float64(len(a.batchCommandsCh))) - a.batchSize.Observe(float64(len(requests))) - - // curl -XPUT -d 'return(true)' http://0.0.0.0:10080/fail/github.com/pingcap/tidb/store/tikv/mockBlockOnBatchClient - failpoint.Inject("mockBlockOnBatchClient", func(val failpoint.Value) { - if val.(bool) { - time.Sleep(1 * time.Hour) - } - }) - - if len(entries) < int(cfg.MaxBatchSize) && cfg.MaxBatchWaitTime > 0 { - // If the target TiKV is overload, wait a while to collect more requests. - if atomic.LoadUint64(&a.tikvTransportLayerLoad) >= uint64(cfg.OverloadThreshold) { - metrics.TiKvBatchWaitOverLoad.Add(1) - fetchMorePendingRequests( - a.batchCommandsCh, int(cfg.MaxBatchSize), int(bestBatchWaitSize), - cfg.MaxBatchWaitTime, &entries, &requests, - ) - } - } - length := len(requests) - if uint(length) == 0 { - // The batch command channel is closed. - return - } else if uint(length) < bestBatchWaitSize && bestBatchWaitSize > 1 { - // Waits too long to collect requests, reduce the target batch size. - bestBatchWaitSize-- - } else if uint(length) > bestBatchWaitSize+4 && bestBatchWaitSize < cfg.MaxBatchSize { - bestBatchWaitSize++ - } - - entries, requests = removeCanceledRequests(entries, requests) - if len(entries) == 0 { - continue // All requests are canceled. - } - - a.getClientAndSend(entries, requests, requestIDs) - metrics.TiKVBatchSendLatency.Observe(float64(time.Since(start))) - } -} - -func (a *batchConn) getClientAndSend(entries []*batchCommandsEntry, requests []*tikvpb.BatchCommandsRequest_Request, requestIDs []uint64) { - // Choose a connection by round-robbin. - var ( - cli *batchCommandsClient - target string - ) - for i := 0; i < len(a.batchCommandsClients); i++ { - a.index = (a.index + 1) % uint32(len(a.batchCommandsClients)) - target = a.batchCommandsClients[a.index].target - // The lock protects the batchCommandsClient from been closed while it's inuse. - if a.batchCommandsClients[a.index].tryLockForSend() { - cli = a.batchCommandsClients[a.index] - break - } - } - if cli == nil { - logutil.BgLogger().Warn("no available connections", zap.String("target", target)) - metrics.TiKVNoAvailableConnectionCounter.Inc() - - for _, entry := range entries { - // Please ensure the error is handled in region cache correctly. - entry.err = errors.New("no available connections") - close(entry.res) - } - return - } - defer cli.unlockForSend() - - maxBatchID := atomic.AddUint64(&cli.idAlloc, uint64(len(requests))) - for i := 0; i < len(requests); i++ { - requestID := uint64(i) + maxBatchID - uint64(len(requests)) - requestIDs = append(requestIDs, requestID) - } - req := &tikvpb.BatchCommandsRequest{ - Requests: requests, - RequestIds: requestIDs, +func (c *batchCommandsClient) initBatchClient(forwardedHost string) error { + if forwardedHost == "" && c.client != nil { + return nil } - - cli.send(req, entries) -} - -func (c *batchCommandsClient) initBatchClient() error { - if c.client != nil { + if _, ok := c.forwardedClients[forwardedHost]; ok { return nil } @@ -562,14 +700,16 @@ func (c *batchCommandsClient) initBatchClient() error { return err } - // Initialize batch streaming clients. 
- tikvClient := tikvpb.NewTikvClient(c.conn) - streamClient, err := tikvClient.BatchCommands(context.TODO()) + streamClient, err := c.newBatchStream(forwardedHost) if err != nil { return errors.Trace(err) } - c.client = streamClient - go c.batchRecvLoop(c.tikvClientCfg, c.tikvLoad) + if forwardedHost == "" { + c.client = streamClient + } else { + c.forwardedClients[forwardedHost] = streamClient + } + go c.batchRecvLoop(c.tikvClientCfg, c.tikvLoad, streamClient) return nil } @@ -585,33 +725,21 @@ func (a *batchConn) Close() { close(a.closed) } -// removeCanceledRequests removes canceled requests before sending. -func removeCanceledRequests(entries []*batchCommandsEntry, - requests []*tikvpb.BatchCommandsRequest_Request) ([]*batchCommandsEntry, []*tikvpb.BatchCommandsRequest_Request) { - validEntries := entries[:0] - validRequests := requests[:0] - for _, e := range entries { - if !e.isCanceled() { - validEntries = append(validEntries, e) - validRequests = append(validRequests, e.req) - } - } - return validEntries, validRequests -} - func sendBatchRequest( ctx context.Context, addr string, + forwardedHost string, batchConn *batchConn, req *tikvpb.BatchCommandsRequest_Request, timeout time.Duration, ) (*tikvrpc.Response, error) { entry := &batchCommandsEntry{ - ctx: ctx, - req: req, - res: make(chan *tikvpb.BatchCommandsResponse_Response, 1), - canceled: 0, - err: nil, + ctx: ctx, + req: req, + res: make(chan *tikvpb.BatchCommandsResponse_Response, 1), + forwardedHost: forwardedHost, + canceled: 0, + err: nil, } timer := time.NewTimer(timeout) defer timer.Stop() diff --git a/store/tikv/client_fail_test.go b/store/tikv/client_fail_test.go index cadf1dbe06f6e..25b7e36d2c95b 100644 --- a/store/tikv/client_fail_test.go +++ b/store/tikv/client_fail_test.go @@ -16,10 +16,12 @@ package tikv import ( "context" "fmt" + "sync/atomic" "time" . "github.com/pingcap/check" "github.com/pingcap/failpoint" + "github.com/pingcap/kvproto/pkg/kvrpcpb" "github.com/pingcap/kvproto/pkg/tikvpb" "github.com/pingcap/tidb/store/tikv/config" "github.com/pingcap/tidb/store/tikv/tikvrpc" @@ -67,3 +69,86 @@ func (s *testClientFailSuite) TestPanicInRecvLoop(c *C) { _, err = rpcClient.SendRequest(context.Background(), addr, req, time.Second*4) c.Assert(err, IsNil) } + +func (s *testClientFailSuite) TestRecvErrorInMultipleRecvLoops(c *C) { + server, port := startMockTikvService() + c.Assert(port > 0, IsTrue) + defer server.Stop() + addr := fmt.Sprintf("%s:%d", "127.0.0.1", port) + + // Enable batch and limit the connection count to 1 so that + // there is only one BatchCommands stream for each host or forwarded host. + defer config.UpdateGlobal(func(conf *config.Config) { + conf.TiKVClient.MaxBatchSize = 128 + conf.TiKVClient.GrpcConnectionCount = 1 + })() + rpcClient := NewRPCClient(config.Security{}) + defer rpcClient.closeConns() + + // Create 4 BatchCommands streams. 
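// NOTE (added for illustration, not part of the original patch): the empty
// string below stands for "no forwarding", so the four hosts exercise one
// plain stream plus three forwarded streams multiplexed on the same gRPC
// connection; the 127.0.0.1:6666/7777/8888 addresses are dummy values that
// only appear as metadata.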
+ prewriteReq := tikvrpc.NewRequest(tikvrpc.CmdPrewrite, &kvrpcpb.PrewriteRequest{}) + forwardedHosts := []string{"", "127.0.0.1:6666", "127.0.0.1:7777", "127.0.0.1:8888"} + for _, forwardedHost := range forwardedHosts { + prewriteReq.ForwardedHost = forwardedHost + _, err := rpcClient.SendRequest(context.Background(), addr, prewriteReq, 10*time.Second) + c.Assert(err, IsNil) + } + connArray, err := rpcClient.getConnArray(addr, true) + c.Assert(connArray, NotNil) + c.Assert(err, IsNil) + batchConn := connArray.batchConn + c.Assert(batchConn, NotNil) + c.Assert(len(batchConn.batchCommandsClients), Equals, 1) + batchClient := batchConn.batchCommandsClients[0] + c.Assert(batchClient.client, NotNil) + c.Assert(batchClient.client.forwardedHost, Equals, "") + c.Assert(len(batchClient.forwardedClients), Equals, 3) + for _, forwardedHosts := range forwardedHosts[1:] { + c.Assert(batchClient.forwardedClients[forwardedHosts].forwardedHost, Equals, forwardedHosts) + } + + // Save all streams + clientSave := batchClient.client.Tikv_BatchCommandsClient + forwardedClientsSave := make(map[string]tikvpb.Tikv_BatchCommandsClient) + for host, client := range batchClient.forwardedClients { + forwardedClientsSave[host] = client.Tikv_BatchCommandsClient + } + epoch := atomic.LoadUint64(&batchClient.epoch) + + fp := "github.com/pingcap/tidb/store/tikv/gotErrorInRecvLoop" + // Send a request to each stream to trigger reconnection. + for _, forwardedHost := range forwardedHosts { + c.Assert(failpoint.Enable(fp, `1*return("0")`), IsNil) + prewriteReq.ForwardedHost = forwardedHost + _, err := rpcClient.SendRequest(context.Background(), addr, prewriteReq, 10*time.Second) + c.Assert(err, IsNil) + time.Sleep(100 * time.Millisecond) + c.Assert(failpoint.Disable(fp), IsNil) + } + + // Wait for finishing reconnection. + for { + batchClient.lockForRecreate() + if atomic.LoadUint64(&batchClient.epoch) != epoch { + batchClient.unlockForRecreate() + break + } + batchClient.unlockForRecreate() + time.Sleep(time.Millisecond * 100) + } + + // send request after reconnection. + for _, forwardedHost := range forwardedHosts { + prewriteReq.ForwardedHost = forwardedHost + _, err := rpcClient.SendRequest(context.Background(), addr, prewriteReq, 10*time.Second) + c.Assert(err, IsNil) + } + // Should only reconnect once. + c.Assert(atomic.LoadUint64(&batchClient.epoch), Equals, epoch+1) + // All streams are refreshed. 
+ c.Assert(batchClient.client.Tikv_BatchCommandsClient, Not(Equals), clientSave) + c.Assert(len(batchClient.forwardedClients), Equals, len(forwardedClientsSave)) + for host, clientSave := range forwardedClientsSave { + c.Assert(batchClient.forwardedClients[host].Tikv_BatchCommandsClient, Not(Equals), clientSave) + } +} diff --git a/store/tikv/client_test.go b/store/tikv/client_test.go index fed26779f5196..3828422b02328 100644 --- a/store/tikv/client_test.go +++ b/store/tikv/client_test.go @@ -70,40 +70,16 @@ func (s *testClientSerialSuite) TestConn(c *C) { c.Assert(conn3, IsNil) } -func (s *testClientSuite) TestRemoveCanceledRequests(c *C) { - req := new(tikvpb.BatchCommandsRequest_Request) - entries := []*batchCommandsEntry{ - {canceled: 1, req: req}, - {canceled: 0, req: req}, - {canceled: 1, req: req}, - {canceled: 1, req: req}, - {canceled: 0, req: req}, - } - entryPtr := &entries[0] - requests := make([]*tikvpb.BatchCommandsRequest_Request, len(entries)) - for i := range entries { - requests[i] = entries[i].req - } - entries, requests = removeCanceledRequests(entries, requests) - c.Assert(len(entries), Equals, 2) - for _, e := range entries { - c.Assert(e.isCanceled(), IsFalse) - } - c.Assert(len(requests), Equals, 2) - newEntryPtr := &entries[0] - c.Assert(entryPtr, Equals, newEntryPtr) -} - func (s *testClientSuite) TestCancelTimeoutRetErr(c *C) { req := new(tikvpb.BatchCommandsRequest_Request) a := newBatchConn(1, 1, nil) ctx, cancel := context.WithCancel(context.TODO()) cancel() - _, err := sendBatchRequest(ctx, "", a, req, 2*time.Second) + _, err := sendBatchRequest(ctx, "", "", a, req, 2*time.Second) c.Assert(errors.Cause(err), Equals, context.Canceled) - _, err = sendBatchRequest(context.Background(), "", a, req, 0) + _, err = sendBatchRequest(context.Background(), "", "", a, req, 0) c.Assert(errors.Cause(err), Equals, context.DeadlineExceeded) } @@ -227,16 +203,15 @@ func (s *testClientSuite) TestCollapseResolveLock(c *C) { } } -func (s *testClientSuite) TestForwardMetadata(c *C) { +func (s *testClientSerialSuite) TestForwardMetadataByUnaryCall(c *C) { server, port := startMockTikvService() c.Assert(port > 0, IsTrue) defer server.Stop() addr := fmt.Sprintf("%s:%d", "127.0.0.1", port) - // Enable batch and limit the connection count to 1 so that - // there is only one BatchCommands stream. + // Disable batch. defer config.UpdateGlobal(func(conf *config.Config) { - conf.TiKVClient.MaxBatchSize = 128 + conf.TiKVClient.MaxBatchSize = 0 conf.TiKVClient.GrpcConnectionCount = 1 })() rpcClient := NewRPCClient(config.Security{}) @@ -261,14 +236,13 @@ func (s *testClientSuite) TestForwardMetadata(c *C) { _, err := rpcClient.SendRequest(context.Background(), addr, prewriteReq, 10*time.Second) c.Assert(err, IsNil) } - // checkCnt should be 1 because BatchCommands is a stream-stream call. - c.Assert(atomic.LoadUint64(&checkCnt), Equals, uint64(1)) + c.Assert(atomic.LoadUint64(&checkCnt), Equals, uint64(3)) // CopStream represents unary-stream call. 
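// NOTE (added for illustration, not part of the original patch): with
// MaxBatchSize = 0 every call in this test goes through a separate unary or
// unary-stream RPC, so the metadata checker now fires once per request; that
// is why the expected counts change from 1 and 2 to 3 and 4 below.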
copStreamReq := tikvrpc.NewRequest(tikvrpc.CmdCopStream, &coprocessor.Request{}) _, err := rpcClient.SendRequest(context.Background(), addr, copStreamReq, 10*time.Second) c.Assert(err, IsNil) - c.Assert(atomic.LoadUint64(&checkCnt), Equals, uint64(2)) + c.Assert(atomic.LoadUint64(&checkCnt), Equals, uint64(4)) checkCnt = 0 forwardedHost := "127.0.0.1:6666" @@ -296,3 +270,175 @@ c.Assert(err, IsNil) c.Assert(atomic.LoadUint64(&checkCnt), Equals, uint64(4)) } + +func (s *testClientSerialSuite) TestForwardMetadataByBatchCommands(c *C) { + server, port := startMockTikvService() + c.Assert(port > 0, IsTrue) + defer server.Stop() + addr := fmt.Sprintf("%s:%d", "127.0.0.1", port) + + // Enable batch and limit the connection count to 1 so that + // there is only one BatchCommands stream for each host or forwarded host. + defer config.UpdateGlobal(func(conf *config.Config) { + conf.TiKVClient.MaxBatchSize = 128 + conf.TiKVClient.GrpcConnectionCount = 1 + })() + rpcClient := NewRPCClient(config.Security{}) + defer rpcClient.closeConns() + + var checkCnt uint64 + setCheckHandler := func(forwardedHost string) { + server.setMetaChecker(func(ctx context.Context) error { + atomic.AddUint64(&checkCnt, 1) + md, ok := metadata.FromIncomingContext(ctx) + if forwardedHost == "" { + if ok { + vals := md.Get(forwardMetadataKey) + c.Assert(len(vals), Equals, 0) + } + } else { + c.Assert(ok, IsTrue) + vals := md.Get(forwardMetadataKey) + c.Assert(vals, DeepEquals, []string{forwardedHost}) + + } + return nil + }) + } + + prewriteReq := tikvrpc.NewRequest(tikvrpc.CmdPrewrite, &kvrpcpb.PrewriteRequest{}) + forwardedHosts := []string{"", "127.0.0.1:6666", "127.0.0.1:7777", "127.0.0.1:8888"} + for i, forwardedHost := range forwardedHosts { + setCheckHandler(forwardedHost) + prewriteReq.ForwardedHost = forwardedHost + for i := 0; i < 3; i++ { + _, err := rpcClient.SendRequest(context.Background(), addr, prewriteReq, 10*time.Second) + c.Assert(err, IsNil) + } + // checkCnt should be i+1 because there is a stream for each forwardedHost. + c.Assert(atomic.LoadUint64(&checkCnt), Equals, 1+uint64(i)) + } + + checkCnt = 0 + // CopStream is a unary-stream call which doesn't support batch. + copStreamReq := tikvrpc.NewRequest(tikvrpc.CmdCopStream, &coprocessor.Request{}) + // Check no corresponding metadata if forwardedHost is empty. + setCheckHandler("") + _, err := rpcClient.SendRequest(context.Background(), addr, copStreamReq, 10*time.Second) + c.Assert(err, IsNil) + c.Assert(atomic.LoadUint64(&checkCnt), Equals, uint64(1)) + + copStreamReq.ForwardedHost = "127.0.0.1:6666" + // Check the metadata exists. + setCheckHandler(copStreamReq.ForwardedHost) + _, err = rpcClient.SendRequest(context.Background(), addr, copStreamReq, 10*time.Second) + c.Assert(err, IsNil) + c.Assert(atomic.LoadUint64(&checkCnt), Equals, uint64(2)) +} + +func (s *testClientSuite) TestBatchCommandsBuilder(c *C) { + builder := newBatchCommandsBuilder(128) + + // Test no forwarding requests.
+ builder.reset() + req := new(tikvpb.BatchCommandsRequest_Request) + for i := 0; i < 10; i++ { + builder.push(&batchCommandsEntry{req: req}) + c.Assert(builder.len(), Equals, i+1) + } + entryMap := make(map[uint64]*batchCommandsEntry) + batchedReq, forwardingReqs := builder.build(func(id uint64, e *batchCommandsEntry) { + entryMap[id] = e + }) + c.Assert(len(batchedReq.GetRequests()), Equals, 10) + c.Assert(len(batchedReq.GetRequestIds()), Equals, 10) + c.Assert(len(entryMap), Equals, 10) + for i, id := range batchedReq.GetRequestIds() { + c.Assert(id, Equals, uint64(i)) + c.Assert(entryMap[id].req, Equals, batchedReq.GetRequests()[i]) + } + c.Assert(len(forwardingReqs), Equals, 0) + c.Assert(builder.idAlloc, Equals, uint64(10)) + + // Test collecting forwarding requests. + builder.reset() + forwardedHosts := []string{"", "127.0.0.1:6666", "127.0.0.1:7777", "127.0.0.1:8888"} + for i := range forwardedHosts { + for j, host := range forwardedHosts { + // Each forwarded host has incremental count of requests + // and interleaves with each other. + if i <= j { + builder.push(&batchCommandsEntry{req: req, forwardedHost: host}) + } + } + } + entryMap = make(map[uint64]*batchCommandsEntry) + batchedReq, forwardingReqs = builder.build(func(id uint64, e *batchCommandsEntry) { + entryMap[id] = e + }) + c.Assert(len(batchedReq.GetRequests()), Equals, 1) + c.Assert(len(batchedReq.GetRequestIds()), Equals, 1) + c.Assert(len(forwardingReqs), Equals, 3) + for i, host := range forwardedHosts[1:] { + c.Assert(len(forwardingReqs[host].GetRequests()), Equals, i+2) + c.Assert(len(forwardingReqs[host].GetRequestIds()), Equals, i+2) + } + c.Assert(builder.idAlloc, Equals, uint64(10+builder.len())) + c.Assert(len(entryMap), Equals, builder.len()) + for host, forwardingReq := range forwardingReqs { + for i, id := range forwardingReq.GetRequestIds() { + c.Assert(entryMap[id].req, Equals, forwardingReq.GetRequests()[i]) + c.Assert(entryMap[id].forwardedHost, Equals, host) + } + } + + // Test not collecting canceled requests + builder.reset() + entries := []*batchCommandsEntry{ + {canceled: 1, req: req}, + {canceled: 0, req: req}, + {canceled: 1, req: req}, + {canceled: 1, req: req}, + {canceled: 0, req: req}, + } + for _, entry := range entries { + builder.push(entry) + } + entryMap = make(map[uint64]*batchCommandsEntry) + batchedReq, forwardingReqs = builder.build(func(id uint64, e *batchCommandsEntry) { + entryMap[id] = e + }) + c.Assert(len(batchedReq.GetRequests()), Equals, 2) + c.Assert(len(batchedReq.GetRequestIds()), Equals, 2) + c.Assert(len(forwardingReqs), Equals, 0) + c.Assert(len(entryMap), Equals, 2) + for i, id := range batchedReq.GetRequestIds() { + c.Assert(entryMap[id].req, Equals, batchedReq.GetRequests()[i]) + c.Assert(entryMap[id].isCanceled(), IsFalse) + } + + // Test canceling all requests + builder.reset() + entries = entries[:0] + for i := 0; i < 3; i++ { + entry := &batchCommandsEntry{req: req, res: make(chan *tikvpb.BatchCommandsResponse_Response, 1)} + entries = append(entries, entry) + builder.push(entry) + } + err := errors.New("error") + builder.cancel(err) + for _, entry := range entries { + _, ok := <-entry.res + c.Assert(ok, IsFalse) + c.Assert(entry.err, Equals, err) + } + + // Test reset + builder.reset() + c.Assert(builder.len(), Equals, 0) + c.Assert(len(builder.entries), Equals, 0) + c.Assert(len(builder.requests), Equals, 0) + c.Assert(len(builder.requestIDs), Equals, 0) + c.Assert(len(builder.forwardingReqs), Equals, 0) + c.Assert(builder.idAlloc, Not(Equals), 0) +} diff 
--git a/store/tikv/region_cache_test.go b/store/tikv/region_cache_test.go index 5f7eecb3db9e4..80b89afc3cc83 100644 --- a/store/tikv/region_cache_test.go +++ b/store/tikv/region_cache_test.go @@ -18,6 +18,7 @@ import ( "errors" "fmt" "math/rand" + "sync/atomic" "testing" "time" "unsafe" @@ -527,6 +528,11 @@ func (s *testRegionCacheSuite) TestSendFailEnableForwarding(c *C) { newPeers := s.cluster.AllocIDs(2) s.cluster.Split(s.region1, region2, []byte("m"), newPeers, newPeers[0]) + var storeState uint32 = uint32(unreachable) + s.cache.testingKnobs.mockRequestLiveness = func(s *Store, bo *Backoffer) livenessState { + return livenessState(atomic.LoadUint32(&storeState)) + } + // Check the two regions. loc1, err := s.cache.LocateKey(s.bo, []byte("a")) c.Assert(err, IsNil) @@ -556,9 +562,7 @@ func (s *testRegionCacheSuite) TestSendFailEnableForwarding(c *C) { c.Assert(ctx.ProxyStore.storeID, Equals, s.store2) // Recover the store - s.cache.testingKnobs.mockRequestLiveness = func(s *Store, bo *Backoffer) livenessState { - return reachable - } + atomic.StoreUint32(&storeState, uint32(reachable)) // The proxy should be unset after several retries for retry := 0; retry < 15; retry++ { ctx, err = s.cache.GetTiKVRPCContext(s.bo, loc1.Region, kv.ReplicaReadLeader, 0) diff --git a/store/tikv/region_request_test.go b/store/tikv/region_request_test.go index bb62439af043c..a066c71518a37 100644 --- a/store/tikv/region_request_test.go +++ b/store/tikv/region_request_test.go @@ -602,6 +602,10 @@ func (s *testRegionRequestToThreeStoresSuite) TestForwarding(c *C) { } return innerClient.SendRequest(ctx, addr, req, timeout) }} + var storeState uint32 = uint32(unreachable) + s.regionRequestSender.regionCache.testingKnobs.mockRequestLiveness = func(s *Store, bo *Backoffer) livenessState { + return livenessState(atomic.LoadUint32(&storeState)) + } loc, err := s.regionRequestSender.regionCache.LocateKey(bo, []byte("k")) c.Assert(err, IsNil) @@ -624,9 +628,7 @@ func (s *testRegionRequestToThreeStoresSuite) TestForwarding(c *C) { // Simulate recovering to normal s.regionRequestSender.client = innerClient - s.regionRequestSender.regionCache.testingKnobs.mockRequestLiveness = func(s *Store, bo *Backoffer) livenessState { - return reachable - } + atomic.StoreUint32(&storeState, uint32(reachable)) start := time.Now() for { if atomic.LoadInt32(&leaderStore.needForwarding) == 0 { @@ -637,7 +639,7 @@ func (s *testRegionRequestToThreeStoresSuite) TestForwarding(c *C) { } time.Sleep(time.Millisecond * 200) } - s.regionRequestSender.regionCache.testingKnobs.mockRequestLiveness = nil + atomic.StoreUint32(&storeState, uint32(unreachable)) req = tikvrpc.NewRequest(tikvrpc.CmdRawGet, &kvrpcpb.RawGetRequest{Key: []byte("k")}) resp, ctx, err = s.regionRequestSender.SendReqCtx(bo, req, loc.Region, time.Second, kv.TiKV) From e79ac3d978cf81ffaf3d75edebc98bb03ea9e018 Mon Sep 17 00:00:00 2001 From: Yilin Chen Date: Fri, 19 Mar 2021 10:17:34 +0800 Subject: [PATCH 34/44] ddl: delay before changing column from null to not null (#23364) * ddl: delay before changing column from null to not null Signed-off-by: Yilin Chen Co-authored-by: Ti Chi Robot <71242396+ti-chi-bot@users.noreply.github.com> Co-authored-by: Arenatlx <314806019@qq.com> --- ddl/column.go | 11 +++++++++-- ddl/ddl.go | 11 +++++++++++ ddl/reorg.go | 9 +-------- store/tikv/2pc.go | 4 +++- 4 files changed, 24 insertions(+), 11 deletions(-) diff --git a/ddl/column.go b/ddl/column.go index 8932957593a6d..85a9b0d92f8a6 100644 --- a/ddl/column.go +++ b/ddl/column.go @@ -825,7 +825,7 @@ 
func (w *worker) onModifyColumn(d *ddlCtx, t *meta.Meta, job *model.Job) (ver in } if !needChangeColumnData(oldCol, jobParam.newCol) { - return w.doModifyColumn(t, job, dbInfo, tblInfo, jobParam.newCol, oldCol, jobParam.pos) + return w.doModifyColumn(d, t, job, dbInfo, tblInfo, jobParam.newCol, oldCol, jobParam.pos) } if jobParam.changingCol == nil { @@ -1384,11 +1384,18 @@ func updateChangingInfo(changingCol *model.ColumnInfo, changingIdxs []*model.Ind // doModifyColumn updates the column information and reorders all columns. It does not support modifying column data. func (w *worker) doModifyColumn( - t *meta.Meta, job *model.Job, dbInfo *model.DBInfo, tblInfo *model.TableInfo, + d *ddlCtx, t *meta.Meta, job *model.Job, dbInfo *model.DBInfo, tblInfo *model.TableInfo, newCol, oldCol *model.ColumnInfo, pos *ast.ColumnPosition) (ver int64, _ error) { // Column from null to not null. if !mysql.HasNotNullFlag(oldCol.Flag) && mysql.HasNotNullFlag(newCol.Flag) { noPreventNullFlag := !mysql.HasPreventNullInsertFlag(oldCol.Flag) + + // lease = 0 means it's in an integration test. In this case we don't delay so the test won't run too slowly. + // We need to check after the flag is set + if d.lease > 0 && !noPreventNullFlag { + delayForAsyncCommit() + } + // Introduce the `mysql.PreventNullInsertFlag` flag to prevent users from inserting or updating null values. err := modifyColsFromNull2NotNull(w, dbInfo, tblInfo, []*model.ColumnInfo{oldCol}, newCol.Name, oldCol.Tp != newCol.Tp) if err != nil { diff --git a/ddl/ddl.go b/ddl/ddl.go index 50eafa62e160b..ad41c3b53abac 100644 --- a/ddl/ddl.go +++ b/ddl/ddl.go @@ -668,6 +668,17 @@ type RecoverInfo struct { CurAutoRandID int64 } +// delayForAsyncCommit sleeps `SafeWindow + AllowedClockDrift` before a DDL job finishes. +// It should be called before any DDL that could break data consistency. +// This provides a safe window for async commit and 1PC to commit with an old schema. +func delayForAsyncCommit() { + cfg := config.GetGlobalConfig().TiKVClient.AsyncCommit + duration := cfg.SafeWindow + cfg.AllowedClockDrift + logutil.BgLogger().Info("sleep before DDL finishes to make async commit and 1PC safe", + zap.Duration("duration", duration)) + time.Sleep(duration) +} + var ( // RunInGoTest is used to identify whether ddl in running in the test. RunInGoTest bool diff --git a/ddl/reorg.go b/ddl/reorg.go index 2318dd6860081..1487ad6df334f 100644 --- a/ddl/reorg.go +++ b/ddl/reorg.go @@ -26,7 +26,6 @@ import ( "github.com/pingcap/parser/model" "github.com/pingcap/parser/mysql" "github.com/pingcap/parser/terror" - "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/distsql" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/meta" @@ -158,15 +157,9 @@ func (rc *reorgCtx) clean() { } func (w *worker) runReorgJob(t *meta.Meta, reorgInfo *reorgInfo, tblInfo *model.TableInfo, lease time.Duration, f func() error) error { - // Sleep for reorgDelay before doing reorganization. - // This provides a safe window for async commit and 1PC to commit with an old schema. // lease = 0 means it's in an integration test. In this case we don't delay so the test won't run too slowly. 
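// NOTE (added for illustration, not part of the original patch; assumes the
// default async-commit config of a 2s safe window and 500ms allowed clock
// drift): delayForAsyncCommit would sleep roughly 2.5s here, giving
// async-commit/1PC transactions that started under the old schema time to
// commit before the reorganization begins.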
if lease > 0 { - cfg := config.GetGlobalConfig().TiKVClient.AsyncCommit - reorgDelay := cfg.SafeWindow + cfg.AllowedClockDrift - logutil.BgLogger().Info("sleep before reorganization to make async commit safe", - zap.Duration("duration", reorgDelay)) - time.Sleep(reorgDelay) + delayForAsyncCommit() } job := reorgInfo.Job diff --git a/store/tikv/2pc.go b/store/tikv/2pc.go index 1ee8398576a2b..32a7bff64a2eb 100644 --- a/store/tikv/2pc.go +++ b/store/tikv/2pc.go @@ -1001,7 +1001,9 @@ func (c *twoPhaseCommitter) execute(ctx context.Context) (err error) { } } - failpoint.Inject("beforePrewrite", nil) + if c.sessionID > 0 { + failpoint.Inject("beforePrewrite", nil) + } c.prewriteStarted = true var binlogChan <-chan BinlogWriteResult From 96a0cda96ea06fa37885e8996b0fd549774d80e7 Mon Sep 17 00:00:00 2001 From: tangenta Date: Fri, 19 Mar 2021 15:45:36 +0800 Subject: [PATCH 35/44] go.mod: update BR dependency to the latest (#23429) --- config/config.go | 3 --- go.mod | 2 +- go.sum | 4 ++-- 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/config/config.go b/config/config.go index 322294b987827..1dfbb7ea88e0a 100644 --- a/config/config.go +++ b/config/config.go @@ -125,8 +125,6 @@ type Config struct { IndexLimit int `toml:"index-limit" json:"index-limit"` TableColumnCountLimit uint32 `toml:"table-column-count-limit" json:"table-column-count-limit"` GracefulWaitBeforeShutdown int `toml:"graceful-wait-before-shutdown" json:"graceful-wait-before-shutdown"` - // AlterPrimaryKey is used to control alter primary key feature. - AlterPrimaryKey bool `toml:"alter-primary-key" json:"alter-primary-key"` // TreatOldVersionUTF8AsUTF8MB4 is use to treat old version table/column UTF8 charset as UTF8MB4. This is for compatibility. // Currently not support dynamic modify, because this need to reload all old version schema. 
TreatOldVersionUTF8AsUTF8MB4 bool `toml:"treat-old-version-utf8-as-utf8mb4" json:"treat-old-version-utf8-as-utf8mb4"` @@ -560,7 +558,6 @@ var defaultConf = Config{ MaxIndexLength: 3072, IndexLimit: 64, TableColumnCountLimit: 1017, - AlterPrimaryKey: false, TreatOldVersionUTF8AsUTF8MB4: true, EnableTableLock: false, DelayCleanTableLock: 0, diff --git a/go.mod b/go.mod index efd86509ed37e..b4c75f5b838d5 100644 --- a/go.mod +++ b/go.mod @@ -42,7 +42,7 @@ require ( github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2 github.com/pierrec/lz4 v2.5.2+incompatible // indirect github.com/pingcap/badger v1.5.1-0.20200908111422-2e78ee155d19 - github.com/pingcap/br v5.0.0-nightly.0.20210317100924-d95f9fdfcd29+incompatible + github.com/pingcap/br v5.0.0-nightly.0.20210318140754-0b223bc5358c+incompatible github.com/pingcap/check v0.0.0-20200212061837-5e12011dc712 github.com/pingcap/errors v0.11.5-0.20201126102027-b0a155152ca3 github.com/pingcap/failpoint v0.0.0-20200702092429-9f69995143ce diff --git a/go.sum b/go.sum index 696df53f1e402..95676245bcffd 100644 --- a/go.sum +++ b/go.sum @@ -417,8 +417,8 @@ github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUM github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pingcap/badger v1.5.1-0.20200908111422-2e78ee155d19 h1:IXpGy7y9HyoShAFmzW2OPF0xCA5EOoSTyZHwsgYk9Ro= github.com/pingcap/badger v1.5.1-0.20200908111422-2e78ee155d19/go.mod h1:LyrqUOHZrUDf9oGi1yoz1+qw9ckSIhQb5eMa1acOLNQ= -github.com/pingcap/br v5.0.0-nightly.0.20210317100924-d95f9fdfcd29+incompatible h1:K3DXUdxw67vH8nehT2yYavJIgYbNxl3hw0zZIkQdoyw= -github.com/pingcap/br v5.0.0-nightly.0.20210317100924-d95f9fdfcd29+incompatible/go.mod h1:ymVmo50lQydxib0tmK5hHk4oteB7hZ0IMCArunwy3UQ= +github.com/pingcap/br v5.0.0-nightly.0.20210318140754-0b223bc5358c+incompatible h1:62AFEix4KGoKvrdyvWNkeFUoRsXsAMlgZJdOC6d9eZM= +github.com/pingcap/br v5.0.0-nightly.0.20210318140754-0b223bc5358c+incompatible/go.mod h1:ymVmo50lQydxib0tmK5hHk4oteB7hZ0IMCArunwy3UQ= github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8/go.mod h1:B1+S9LNcuMyLH/4HMTViQOJevkGiik3wW2AN9zb2fNQ= github.com/pingcap/check v0.0.0-20191107115940-caf2b9e6ccf4/go.mod h1:PYMCGwN0JHjoqGr3HrZoD+b8Tgx8bKnArhSq8YVzUMc= github.com/pingcap/check v0.0.0-20191216031241-8a5a85928f12/go.mod h1:PYMCGwN0JHjoqGr3HrZoD+b8Tgx8bKnArhSq8YVzUMc= From 92b1b8e0e9598952917ab16fff0e17bcdac74992 Mon Sep 17 00:00:00 2001 From: you06 Date: Fri, 19 Mar 2021 17:25:36 +0800 Subject: [PATCH 36/44] planner: check schema stale for plan cache when forUpdateRead (#22381) --- executor/prepared.go | 1 + planner/core/cache.go | 1 + planner/core/common_plans.go | 10 +++++- planner/core/optimizer.go | 4 +++ planner/core/planbuilder.go | 5 +++ planner/optimize.go | 14 ++++++++ session/pessimistic_test.go | 66 ++++++++++++++++++++++++++++++++++++ session/session.go | 16 +++++++-- 8 files changed, 114 insertions(+), 3 deletions(-) diff --git a/executor/prepared.go b/executor/prepared.go index cef541a757c91..970638dd68cc0 100644 --- a/executor/prepared.go +++ b/executor/prepared.go @@ -220,6 +220,7 @@ func (e *PrepareExec) Next(ctx context.Context, req *chunk.Chunk) error { VisitInfos: destBuilder.GetVisitInfo(), NormalizedSQL: normalized, SQLDigest: digest, + ForUpdateRead: destBuilder.GetIsForUpdateRead(), } return vars.AddPreparedStmt(e.ID, preparedObj) } diff --git a/planner/core/cache.go b/planner/core/cache.go index 1e87a984331b0..80cd27c930890 100644 --- a/planner/core/cache.go +++ 
b/planner/core/cache.go @@ -195,4 +195,5 @@ type CachedPrepareStmt struct { NormalizedPlan string SQLDigest string PlanDigest string + ForUpdateRead bool } diff --git a/planner/core/common_plans.go b/planner/core/common_plans.go index ffb42db8d87b2..cd398df35e9ee 100644 --- a/planner/core/common_plans.go +++ b/planner/core/common_plans.go @@ -297,7 +297,8 @@ func (e *Execute) setFoundInPlanCache(sctx sessionctx.Context, opt bool) error { } func (e *Execute) getPhysicalPlan(ctx context.Context, sctx sessionctx.Context, is infoschema.InfoSchema, preparedStmt *CachedPrepareStmt) error { - stmtCtx := sctx.GetSessionVars().StmtCtx + sessVars := sctx.GetSessionVars() + stmtCtx := sessVars.StmtCtx prepared := preparedStmt.PreparedAst stmtCtx.UseCache = prepared.UseCache var cacheKey kvcache.Key @@ -397,6 +398,12 @@ REBUILD: e.Plan = p _, isTableDual := p.(*PhysicalTableDual) if !isTableDual && prepared.UseCache && !stmtCtx.OptimDependOnMutableConst { + // rebuild the key to exclude kv.TiFlash when the stmt is not read-only + if _, isolationReadContainTiFlash := sessVars.IsolationReadEngines[kv.TiFlash]; isolationReadContainTiFlash && !IsReadOnly(stmt, sessVars) { + delete(sessVars.IsolationReadEngines, kv.TiFlash) + cacheKey = NewPSTMTPlanCacheKey(sctx.GetSessionVars(), e.ExecID, prepared.SchemaVersion) + sessVars.IsolationReadEngines[kv.TiFlash] = struct{}{} + } cached := NewPSTMTPlanCacheValue(p, names, stmtCtx.TblInfo2UnionScan, tps) preparedStmt.NormalizedPlan, preparedStmt.PlanDigest = NormalizePlan(p) stmtCtx.SetPlanDigest(preparedStmt.NormalizedPlan, preparedStmt.PlanDigest) @@ -1348,5 +1355,6 @@ func IsPointUpdateByAutoCommit(ctx sessionctx.Context, p Plan) (bool, error) { if _, isFastSel := updPlan.SelectPlan.(*PointGetPlan); isFastSel { return true, nil } + return false, nil } diff --git a/planner/core/optimizer.go b/planner/core/optimizer.go index a81b0f3896d18..67ef1639ef398 100644 --- a/planner/core/optimizer.go +++ b/planner/core/optimizer.go @@ -27,6 +27,7 @@ import ( "github.com/pingcap/tidb/planner/property" "github.com/pingcap/tidb/privilege" "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/types" utilhint "github.com/pingcap/tidb/util/hint" "github.com/pingcap/tidb/util/set" @@ -39,6 +40,9 @@ var OptimizeAstNode func(ctx context.Context, sctx sessionctx.Context, node ast. // AllowCartesianProduct means whether tidb allows cartesian join without equal conditions. var AllowCartesianProduct = atomic.NewBool(true) +// IsReadOnly checks whether the ast.Node is a read-only statement. +var IsReadOnly func(node ast.Node, vars *variable.SessionVars) bool + const ( flagGcSubstitute uint64 = 1 << iota flagPrunColumns diff --git a/planner/core/planbuilder.go b/planner/core/planbuilder.go index ed1777031d43c..1523f4fbf86ae 100644 --- a/planner/core/planbuilder.go +++ b/planner/core/planbuilder.go @@ -521,6 +521,11 @@ func (b *PlanBuilder) GetVisitInfo() []visitInfo { return b.visitInfo } +// GetIsForUpdateRead gets whether the PlanBuilder uses forUpdateRead. +func (b *PlanBuilder) GetIsForUpdateRead() bool { + return b.isForUpdateRead +} + // GetDBTableInfo gets the accessed dbs and tables info.
func (b *PlanBuilder) GetDBTableInfo() []stmtctx.TableEntry { var tables []stmtctx.TableEntry diff --git a/planner/optimize.go b/planner/optimize.go index 3f511360fe54e..ec9bfef67d0a7 100644 --- a/planner/optimize.go +++ b/planner/optimize.go @@ -30,6 +30,7 @@ import ( "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/metrics" "github.com/pingcap/tidb/planner/cascades" + "github.com/pingcap/tidb/planner/core" plannercore "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/privilege" "github.com/pingcap/tidb/sessionctx" @@ -236,6 +237,18 @@ func optimize(ctx context.Context, sctx sessionctx.Context, node ast.Node, is in } sctx.GetSessionVars().RewritePhaseInfo.DurationRewrite = time.Since(beginRewrite) + if execPlan, ok := p.(*plannercore.Execute); ok { + execID := execPlan.ExecID + if execPlan.Name != "" { + execID = sctx.GetSessionVars().PreparedStmtNameToID[execPlan.Name] + } + if preparedPointer, ok := sctx.GetSessionVars().PreparedStmts[execID]; ok { + if preparedObj, ok := preparedPointer.(*core.CachedPrepareStmt); ok && preparedObj.ForUpdateRead { + is = domain.GetDomain(sctx).InfoSchema() + } + } + } + sctx.GetSessionVars().StmtCtx.Tables = builder.GetDBTableInfo() activeRoles := sctx.GetSessionVars().ActiveRoles // Check privilege. Maybe it's better to move this to the Preprocess, but @@ -556,4 +569,5 @@ func setFoundInBinding(sctx sessionctx.Context, opt bool) error { func init() { plannercore.OptimizeAstNode = Optimize + plannercore.IsReadOnly = IsReadOnly } diff --git a/session/pessimistic_test.go b/session/pessimistic_test.go index a49b01c348a3a..90146de91594d 100644 --- a/session/pessimistic_test.go +++ b/session/pessimistic_test.go @@ -28,11 +28,13 @@ import ( "github.com/pingcap/parser/terror" "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/kv" + plannercore "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/session" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/store/tikv" "github.com/pingcap/tidb/store/tikv/oracle" "github.com/pingcap/tidb/tablecodec" + "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/codec" "github.com/pingcap/tidb/util/testkit" ) @@ -2459,6 +2461,70 @@ func (s *testPessimisticSuite) TestIssue21498(c *C) { } } +func (s *testPessimisticSuite) TestPlanCacheSchemaChange(c *C) { + orgEnable := plannercore.PreparedPlanCacheEnabled() + defer func() { + plannercore.SetPreparedPlanCache(orgEnable) + }() + plannercore.SetPreparedPlanCache(true) + + tk := testkit.NewTestKitWithInit(c, s.store) + tk2 := testkit.NewTestKitWithInit(c, s.store) + tk3 := testkit.NewTestKitWithInit(c, s.store) + ctx := context.Background() + + tk.MustExec("use test") + tk2.MustExec("use test") + tk3.MustExec("use test") + + tk.MustExec("drop table if exists t") + tk.MustExec("create table t (id int primary key, v int, unique index iv (v), vv int)") + tk.MustExec("insert into t values(1, 1, 1), (2, 2, 2), (4, 4, 4)") + + tk.MustExec("set tidb_enable_amend_pessimistic_txn = 1") + tk2.MustExec("set tidb_enable_amend_pessimistic_txn = 1") + + //generate plan cache + tk.MustExec("prepare update_stmt from 'update t set vv = vv + 1 where v = ?'") + tk.MustExec("set @v = 1") + tk.MustExec("execute update_stmt using @v") + + stmtID, _, _, err := tk2.Se.PrepareStmt("update t set vv = vv + 1 where v = ?") + c.Assert(err, IsNil) + _, err = tk2.Se.ExecutePreparedStmt(ctx, stmtID, []types.Datum{types.NewDatum(1)}) + c.Assert(err, IsNil) + + tk.MustExec("begin pessimistic") + tk2.MustExec("begin pessimistic") + + 
tk3.MustExec("alter table t drop index iv") + tk3.MustExec("update t set v = 3 where v = 2") + tk3.MustExec("update t set v = 5 where v = 4") + + tk.MustExec("set @v = 2") + tk.MustExec("execute update_stmt using @v") + tk.CheckExecResult(0, 0) + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0")) + tk.MustExec("set @v = 3") + tk.MustExec("execute update_stmt using @v") + tk.CheckExecResult(1, 0) + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) + + _, err = tk2.Se.ExecutePreparedStmt(ctx, stmtID, []types.Datum{types.NewDatum(4)}) + c.Assert(err, IsNil) + tk2.CheckExecResult(0, 0) + tk2.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0")) + _, err = tk2.Se.ExecutePreparedStmt(ctx, stmtID, []types.Datum{types.NewDatum(5)}) + c.Assert(err, IsNil) + tk2.CheckExecResult(1, 0) + tk2.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) + + tk.MustExec("commit") + tk2.MustExec("commit") + + tk.MustQuery("select * from t").Check(testkit.Rows("1 1 3", "2 3 3", "4 5 5")) +} + func (s *testPessimisticSuite) TestAsyncCommitCalTSFail(c *C) { atomic.StoreUint64(&tikv.ManagedLockTTL, 5000) defer func() { diff --git a/session/session.go b/session/session.go index 5c0f4ea71d4fd..65ae81fbad608 100644 --- a/session/session.go +++ b/session/session.go @@ -1623,7 +1623,12 @@ func (s *session) cachedPlanExec(ctx context.Context, stmtID uint32, prepareStmt *plannercore.CachedPrepareStmt, args []types.Datum) (sqlexec.RecordSet, error) { prepared := prepareStmt.PreparedAst // compile ExecStmt - is := infoschema.GetInfoSchema(s) + var is infoschema.InfoSchema + if prepareStmt.ForUpdateRead { + is = domain.GetDomain(s).InfoSchema() + } else { + is = infoschema.GetInfoSchema(s) + } execAst := &ast.ExecuteStmt{ExecID: stmtID} if err := executor.ResetContextOfStmt(s, execAst); err != nil { return nil, err @@ -1664,9 +1669,16 @@ func (s *session) cachedPlanExec(ctx context.Context, s.PrepareTSFuture(ctx) stmtCtx.Priority = kv.PriorityHigh resultSet, err = runStmt(ctx, s, stmt) + case nil: + // cache is invalid + if prepareStmt.ForUpdateRead { + s.PrepareTSFuture(ctx) + } + resultSet, err = runStmt(ctx, s, stmt) default: + err = errors.Errorf("invalid cached plan type %T", prepared.CachedPlan) prepared.CachedPlan = nil - return nil, errors.Errorf("invalid cached plan type") + return nil, err } return resultSet, err } From 514a4ee6715ad59b2636ae62fe1926d0cfe83360 Mon Sep 17 00:00:00 2001 From: Kenan Yao Date: Fri, 19 Mar 2021 17:59:36 +0800 Subject: [PATCH 37/44] statistics: remove existing deleted extended stats when add a new one (#23119) --- statistics/handle/handle.go | 36 ++++++++++++++++++++++------- statistics/handle/handle_test.go | 39 ++++++++++++++++++++++++++++++++ 2 files changed, 67 insertions(+), 8 deletions(-) diff --git a/statistics/handle/handle.go b/statistics/handle/handle.go index 6e8b3b3d3f607..8a3311a54cf72 100644 --- a/statistics/handle/handle.go +++ b/statistics/handle/handle.go @@ -31,7 +31,6 @@ import ( "github.com/pingcap/parser/mysql" "github.com/pingcap/tidb/ddl/util" "github.com/pingcap/tidb/infoschema" - "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/sessionctx/stmtctx" "github.com/pingcap/tidb/sessionctx/variable" @@ -910,7 +909,7 @@ func (h *Handle) extendedStatsFromStorage(reader *statsReader, table *statistics } else { table.ExtendedStats = statistics.NewExtendedStatsColl() } - rows, _, err := reader.read("select name, status, type, column_ids, stats, version from 
mysql.stats_extended where table_id = %? and status in (%?, %?) and version > %?", physicalID, StatsStatusAnalyzed, StatsStatusDeleted, lastVersion) + rows, _, err := reader.read("select name, status, type, column_ids, stats, version from mysql.stats_extended where table_id = %? and status in (%?, %?, %?) and version > %?", physicalID, StatsStatusInited, StatsStatusAnalyzed, StatsStatusDeleted, lastVersion) if err != nil || len(rows) == 0 { return table, nil } @@ -918,7 +917,7 @@ func (h *Handle) extendedStatsFromStorage(reader *statsReader, table *statistics lastVersion = mathutil.MaxUint64(lastVersion, row.GetUint64(5)) name := row.GetString(0) status := uint8(row.GetInt64(1)) - if status == StatsStatusDeleted { + if status == StatsStatusDeleted || status == StatsStatusInited { delete(table.ExtendedStats.Stats, name) } else { item := &statistics.ExtendedStatsItem{ @@ -1229,7 +1228,7 @@ func (h *Handle) InsertExtendedStats(statsName string, colIDs []int64, tp int, t strColIDs := string(bytes) h.mu.Lock() defer h.mu.Unlock() - ctx := context.TODO() + ctx := context.Background() exec := h.mu.ctx.(sqlexec.SQLExecutor) _, err = exec.ExecuteInternal(ctx, "begin pessimistic") if err != nil { @@ -1238,17 +1237,38 @@ func (h *Handle) InsertExtendedStats(statsName string, colIDs []int64, tp int, t defer func() { err = finishTransaction(ctx, exec, err) }() + // No need to use `exec.ExecuteInternal` since we have acquired the lock. + rows, _, err := h.execRestrictedSQL(ctx, "SELECT name FROM mysql.stats_extended WHERE name = %? and table_id = %? and status in (%?, %?)", statsName, tableID, StatsStatusInited, StatsStatusAnalyzed) + if err != nil { + return errors.Trace(err) + } + if len(rows) > 0 { + if ifNotExists { + return nil + } + return errors.New(fmt.Sprintf("extended statistics '%s' for the specified table already exists", statsName)) + } + // Remove the existing 'deleted' records. + if _, err = exec.ExecuteInternal(ctx, "DELETE FROM mysql.stats_extended WHERE name = %? and table_id = %?", statsName, tableID); err != nil { + return err + } + // Remove the cache item, which is necessary for cases like a cluster with 3 tidb instances, e.g, a, b and c. + // If tidb-a executes `alter table drop stats_extended` to mark the record as 'deleted', and before this operation + // is synchronized to other tidb instances, tidb-b executes `alter table add stats_extended`, which would delete + // the record from the table, tidb-b should delete the cached item synchronously. While for tidb-c, it has to wait for + // next `Update()` to remove the cached item then. + h.removeExtendedStatsItem(tableID, statsName) txn, err := h.mu.ctx.Txn(true) if err != nil { return errors.Trace(err) } version := txn.StartTS() const sql = "INSERT INTO mysql.stats_extended(name, type, table_id, column_ids, version, status) VALUES (%?, %?, %?, %?, %?, %?)" - _, err = exec.ExecuteInternal(ctx, sql, statsName, tp, tableID, strColIDs, version, StatsStatusInited) - // Key exists, but `if not exists` is specified, so we ignore this error. - if kv.ErrKeyExists.Equal(err) && ifNotExists { - err = nil + if _, err = exec.ExecuteInternal(ctx, sql, statsName, tp, tableID, strColIDs, version, StatsStatusInited); err != nil { + return err } + // Bump version in `mysql.stats_meta` to trigger stats cache refresh. + _, err = exec.ExecuteInternal(ctx, "UPDATE mysql.stats_meta SET version = %? 
WHERE table_id = %?", version, tableID) return } diff --git a/statistics/handle/handle_test.go b/statistics/handle/handle_test.go index b4a2baca5da94..db1ac09cc0ee2 100644 --- a/statistics/handle/handle_test.go +++ b/statistics/handle/handle_test.go @@ -2102,3 +2102,42 @@ func (s *testStatsSuite) TestHideExtendedStatsSwitch(c *C) { } tk.MustQuery("show variables like 'tidb_enable_extended_stats'").Check(testkit.Rows()) } + +func (s *testStatsSuite) TestRepetitiveAddDropExtendedStats(c *C) { + defer cleanEnv(c, s.store, s.do) + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("set session tidb_enable_extended_stats = on") + tk.MustExec("use test") + tk.MustExec("create table t(a int, b int)") + tk.MustExec("insert into t values(1,1),(2,2),(3,3)") + tk.MustExec("alter table t add stats_extended s1 correlation(a,b)") + tk.MustQuery("select name, status from mysql.stats_extended where name = 's1'").Sort().Check(testkit.Rows( + "s1 0", + )) + result := tk.MustQuery("show stats_extended where db_name = 'test' and table_name = 't'") + c.Assert(len(result.Rows()), Equals, 0) + tk.MustExec("analyze table t") + tk.MustQuery("select name, status from mysql.stats_extended where name = 's1'").Sort().Check(testkit.Rows( + "s1 1", + )) + result = tk.MustQuery("show stats_extended where db_name = 'test' and table_name = 't'") + c.Assert(len(result.Rows()), Equals, 1) + tk.MustExec("alter table t drop stats_extended s1") + tk.MustQuery("select name, status from mysql.stats_extended where name = 's1'").Sort().Check(testkit.Rows( + "s1 2", + )) + result = tk.MustQuery("show stats_extended where db_name = 'test' and table_name = 't'") + c.Assert(len(result.Rows()), Equals, 0) + tk.MustExec("alter table t add stats_extended s1 correlation(a,b)") + tk.MustQuery("select name, status from mysql.stats_extended where name = 's1'").Sort().Check(testkit.Rows( + "s1 0", + )) + result = tk.MustQuery("show stats_extended where db_name = 'test' and table_name = 't'") + c.Assert(len(result.Rows()), Equals, 0) + tk.MustExec("analyze table t") + tk.MustQuery("select name, status from mysql.stats_extended where name = 's1'").Sort().Check(testkit.Rows( + "s1 1", + )) + result = tk.MustQuery("show stats_extended where db_name = 'test' and table_name = 't'") + c.Assert(len(result.Rows()), Equals, 1) +} From 5d7bc9f239f9a1beefbfe3f2277d9048e5432058 Mon Sep 17 00:00:00 2001 From: MyonKeminta <9948422+MyonKeminta@users.noreply.github.com> Date: Fri, 19 Mar 2021 18:45:36 +0800 Subject: [PATCH 38/44] store/tikv: Distinguish req type in forwarding ops metrics (#23438) --- metrics/grafana/tidb.json | 93 +++++++++++++++++++++++++++++++++++ store/tikv/metrics/metrics.go | 2 +- store/tikv/region_request.go | 2 +- 3 files changed, 95 insertions(+), 2 deletions(-) diff --git a/metrics/grafana/tidb.json b/metrics/grafana/tidb.json index 531bf04ff619e..f50704f9f731c 100644 --- a/metrics/grafana/tidb.json +++ b/metrics/grafana/tidb.json @@ -7394,6 +7394,99 @@ "align": false, "alignLevel": null } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "description": "kv requests that's forwarded by different stores, grouped by request type", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 15 + }, + "id": 220, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, 
+ "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(tidb_tikvclient_forward_request_counter{tidb_cluster=\"$tidb_cluster\"}[1m])) by (type, result)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{type}}-{{result}}", + "refId": "A", + "step": 40 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "KV Request Forwarding OPS by Type", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } } ], "repeat": null, diff --git a/store/tikv/metrics/metrics.go b/store/tikv/metrics/metrics.go index 1dadf1957c965..b2fa582c5b69f 100644 --- a/store/tikv/metrics/metrics.go +++ b/store/tikv/metrics/metrics.go @@ -382,7 +382,7 @@ func initMetrics(namespace, subsystem string) { Subsystem: subsystem, Name: "forward_request_counter", Help: "Counter of tikv request being forwarded through another node", - }, []string{LblFromStore, LblToStore, LblResult}) + }, []string{LblFromStore, LblToStore, LblType, LblResult}) initShortcuts() } diff --git a/store/tikv/region_request.go b/store/tikv/region_request.go index 244534dab432f..030b4259789df 100644 --- a/store/tikv/region_request.go +++ b/store/tikv/region_request.go @@ -496,7 +496,7 @@ func (s *RegionRequestSender) sendReqToRegion(bo *Backoffer, rpcCtx *RPCContext, if err != nil { result = "fail" } - metrics.TiKVForwardRequestCounter.WithLabelValues(fromStore, toStore, result).Inc() + metrics.TiKVForwardRequestCounter.WithLabelValues(fromStore, toStore, req.Type.String(), result).Inc() } if err != nil { From 6f34626d92c144eb2a58e906bd2c265804b49374 Mon Sep 17 00:00:00 2001 From: jianzhiyao <739319867@qq.com> Date: Fri, 19 Mar 2021 19:03:36 +0800 Subject: [PATCH 39/44] config: add no delay option for tcp connection (#22757) --- config/config.go | 2 ++ config/config_test.go | 8 ++++++++ server/server.go | 3 +++ 3 files changed, 13 insertions(+) diff --git a/config/config.go b/config/config.go index 1dfbb7ea88e0a..7161b4c65a77b 100644 --- a/config/config.go +++ b/config/config.go @@ -411,6 +411,7 @@ type Performance struct { TxnEntrySizeLimit uint64 `toml:"txn-entry-size-limit" json:"txn-entry-size-limit"` TxnTotalSizeLimit uint64 `toml:"txn-total-size-limit" json:"txn-total-size-limit"` TCPKeepAlive bool `toml:"tcp-keep-alive" json:"tcp-keep-alive"` + TCPNoDelay bool `toml:"tcp-no-delay" json:"tcp-no-delay"` CrossJoin bool `toml:"cross-join" json:"cross-join"` RunAutoAnalyze bool `toml:"run-auto-analyze" json:"run-auto-analyze"` DistinctAggPushDown bool `toml:"distinct-agg-push-down" json:"agg-push-down-join"` @@ -596,6 +597,7 @@ var defaultConf = Config{ ServerMemoryQuota: 0, MemoryUsageAlarmRatio: 0.8, TCPKeepAlive: true, + TCPNoDelay: true, CrossJoin: true, StatsLease: "3s", RunAutoAnalyze: true, diff --git a/config/config_test.go b/config/config_test.go index 1a4c7762597fc..62cc87d2a713e 100644 --- 
a/config/config_test.go +++ b/config/config_test.go @@ -201,6 +201,7 @@ stores-refresh-interval = 30 enable-forwarding = true [performance] txn-total-size-limit=2000 +tcp-no-delay = false [tikv-client] commit-timeout="41s" max-batch-size=128 @@ -242,6 +243,7 @@ spilled-file-encryption-method = "plaintext" // Test that the value will be overwritten by the config file. c.Assert(conf.Performance.TxnTotalSizeLimit, Equals, uint64(2000)) + c.Assert(conf.Performance.TCPNoDelay, Equals, false) c.Assert(conf.TiKVClient.CommitTimeout, Equals, "41s") c.Assert(conf.TiKVClient.AsyncCommit.KeysLimit, Equals, uint(123)) @@ -586,3 +588,9 @@ func (s *testConfigSuite) TestSecurityValid(c *C) { c.Assert(c1.Valid() == nil, Equals, tt.valid) } } + +func (s *testConfigSuite) TestTcpNoDelay(c *C) { + c1 := NewConfig() + //check default value + c.Assert(c1.Performance.TCPNoDelay, Equals, true) +} diff --git a/server/server.go b/server/server.go index 3900e1d06441c..3ff25d46e5167 100644 --- a/server/server.go +++ b/server/server.go @@ -173,6 +173,9 @@ func (s *Server) newConn(conn net.Conn) *clientConn { if err := tcpConn.SetKeepAlive(s.cfg.Performance.TCPKeepAlive); err != nil { logutil.BgLogger().Error("failed to set tcp keep alive option", zap.Error(err)) } + if err := tcpConn.SetNoDelay(s.cfg.Performance.TCPNoDelay); err != nil { + logutil.BgLogger().Error("failed to set tcp no delay option", zap.Error(err)) + } } cc.setConn(conn) cc.salt = fastrand.Buf(20) From 02837f428ff9a5e6a89022c8447befac38ea19fc Mon Sep 17 00:00:00 2001 From: Morgan Tocker Date: Fri, 19 Mar 2021 05:19:36 -0600 Subject: [PATCH 40/44] docs: add proposal for Security Enhanced Mode (#23223) --- .../2021-03-09-security-enhanced-mode.md | 227 ++++++++++++++++++ 1 file changed, 227 insertions(+) create mode 100644 docs/design/2021-03-09-security-enhanced-mode.md diff --git a/docs/design/2021-03-09-security-enhanced-mode.md b/docs/design/2021-03-09-security-enhanced-mode.md new file mode 100644 index 0000000000000..2d7cdb1db1a4d --- /dev/null +++ b/docs/design/2021-03-09-security-enhanced-mode.md @@ -0,0 +1,227 @@ +# Proposal: + +- Author(s): [morgo](https://github.com/morgo) +- Last updated: March 9, 2021 +- Discussion at: N/A + +## Table of Contents + +* [Introduction](#introduction) +* [Motivation or Background](#motivation-or-background) +* [Detailed Design](#detailed-design) +* [Test Design](#test-design) + * [Functional Tests](#functional-tests) + * [Scenario Tests](#scenario-tests) + * [Compatibility Tests](#compatibility-tests) + * [Benchmark Tests](#benchmark-tests) +* [Impacts & Risks](#impacts--risks) +* [Investigation & Alternatives](#investigation--alternatives) +* [Unresolved Questions](#unresolved-questions) + +## Introduction + +This document was created to discuss the design of Security Enhanced Mode. It comes from the DBaaS requirement that `SUPER` users must not be able to perform certain actions that could compromise the system. + +### Terminology + +* **Configuration Option**: The name of a variable as set in a configuration file. +* **System Variable** (aka sysvar): The name of a variable that is set in a running TiDB server using the MySQL protocol. +* **Super**: The primary MySQL "admin" privilege, which is intended to be superseded by MySQL’s "dynamic" (fine-grained) privileges starting from MySQL 8.0. +## Motivation or Background + +Currently the MySQL `SUPER` privilege encapsulates a very large set of system capabilities.
It does not follow the best practices of allocating _fine-grained access_ to users based only on their system-access requirements. + +This is particularly problematic in a DBaaS scenario such as TiDB Cloud where the `SUPER` privilege has elements that are required by both end users (TiDB Cloud Customers) and system operations (PingCAP SREs). + +The design of Security Enhanced Mode (SEM) takes the approach of: + +1. Restricting `SUPER` to a set of capabilities that are safe for end users. +2. Implementing dynamic privileges ([issue #22439](https://github.com/pingcap/tidb/issues/22439)). + +This approach was requested by product management based on the broad "in the wild" association of `SUPER` as "the MySQL admin privilege". Thus, proposals to create a new lesser-`SUPER` privilege have already been discussed and rejected. + +The design and name of "Security Enhanced" is inspired by prior art with SELinux and AppArmor. + +## Detailed Design + +A boolean option called `EnableEnhancedSecurity` (default `FALSE`) will be added as a TiDB configuration option. The following subheadings describe the behavior when `EnableEnhancedSecurity` is set to `TRUE`. + +### System Variables + +The following system variables will be hidden: + +* variable.TiDBDDLSlowOprThreshold, +* variable.TiDBAllowRemoveAutoInc, +* variable.TiDBCheckMb4ValueInUTF8, +* variable.TiDBConfig, +* variable.TiDBEnableSlowLog, +* variable.TiDBEnableTelemetry, +* variable.TiDBExpensiveQueryTimeThreshold, +* variable.TiDBForcePriority, +* variable.TiDBGeneralLog, +* variable.TiDBMetricSchemaRangeDuration, +* variable.TiDBMetricSchemaStep, +* variable.TiDBOptWriteRowID, +* variable.TiDBPProfSQLCPU, +* variable.TiDBRecordPlanInSlowLog, +* variable.TiDBRowFormatVersion, +* variable.TiDBSlowQueryFile, +* variable.TiDBSlowLogThreshold, +* variable.TiDBEnableCollectExecutionInfo, +* variable.TiDBMemoryUsageAlarmRatio, +* variable.TiDBRedactLog + +The following system variables will be reset to defaults: + +* variable.Hostname + +### Status Variables + +The following status variables will be hidden: + +* tidb_gc_leader_desc + +### Information Schema Tables + +The following tables will be hidden: + +* cluster_config +* cluster_hardware +* cluster_load +* cluster_log +* cluster_systeminfo +* inspection_result +* inspection_rules +* inspection_summary +* metrics_summary +* metrics_summary_by_label +* metrics_tables +* tidb_hot_regions + +The following tables will be modified to hide columns: + +* tikv_store_status + * The address, capacity, available, start_ts and uptime columns will return NULL. +* tidb_servers_info + * The “IP” column will return NULL. +* cluster_* tables + * The “instance” column will show the server ID instead of the server IP address. + +### Performance Schema Tables + +The following tables will be hidden: + + * pd_profile_allocs + * pd_profile_block + * pd_profile_cpu + * pd_profile_goroutines + * pd_profile_memory + * pd_profile_mutex + * tidb_profile_allocs + * tidb_profile_block + * tidb_profile_cpu + * tidb_profile_goroutines + * tidb_profile_memory + * tidb_profile_mutex + * tikv_profile_cpu + +### System (mysql) Tables + +The following tables will be hidden: + +* expr_pushdown_blacklist +* gc_delete_range +* gc_delete_range_done +* opt_rule_blacklist +* tidb +* global_variables + +### Metrics Schema + +All tables will be hidden, including the schema itself. + +### Commands + +* `SHOW CONFIG` is disabled.
+* `SET CONFIG` is disabled by the `CONFIG` Privilege (no change necessary) +* The `BACKUP` and `RESTORE` commands prevent local backups and restores. +* The statement `SELECT .. INTO OUTFILE` is disabled (this is the only current usage of the `FILE` privilege, effectively disabling `FILE`. For compatibility `GRANT` and `REVOKE` of `FILE` will not be affected.) + +### Restricted Dynamic Privileges + +TiDB currently permits the `SUPER` privilege as a substitute for any dynamic privilege. This is not 100% MySQL compatible - MySQL accepts SUPER in most cases, but not in GRANT context. However, TiDB requires this extension because: + +* The visitInfo framework does not permit OR conditions +* GRANT ALL in TiDB does not actually grant each of the individual privileges (historical difference) + +When SEM is enabled, `SUPER` will no longer be permitted as a substitute for any `RESTRICTED_*` privilege. The distinction that this only applies when SEM is enabled helps continue to work around the current server limitations. + +## Test Design + +### Functional Tests + +The integration test suite will run with `EnableEnhancedSecurity=FALSE`, but new integration tests will be written to cover specific use cases. + +Unit tests will be added to cover the enabling and disabling of sysvars, and tables. + +Tests will need to check that invisible tables are both non-visible and non-grantable (it should work, since visibility can be plugged into the privilege manager directly). + +If the user with `SUPER` privilege grants privileges related to these tables to other users, for example, `GRANT SELECT, INSERT, UPDATE ON information_schema.cluster_config TO 'userA'@'%';` -- it should fail. A SQL sketch of these checks appears at the end of this Test Design section. + +### Scenario Tests + +It is important that users can still use TiDB with all connectors when `SEM` is enabled, and that the TiDB documentation makes sense for users with `SEM` enabled. + +It is not expected that any user scenarios are affected by `SEM`, but see "Impacts & Risks" for additional discussion behind design decisions. + +### Compatibility Tests + +We will need to consider the impact on tools. When SEM is disabled, no impact is expected. When SEM is enabled, it should be possible to make recommendations to the tools team so that they can still access the metadata required to operate in a DBaaS environment: + +* Lightning and BR will not work currently with SEM + https://github.com/pingcap/tidb/pull/21988 +* In 5.0 the recommended method for BR/Lightning to get TiKV GC stats should change. +* There is one PR still pending for obtaining statistics: https://github.com/pingcap/tidb/pull/22286 + +### Benchmark Tests + +No performance impact is expected. + +#### Documentation Plan + +Documentation is critically impacted by SEM, since it should be possible for a manual page to cover the use case of SEM both enabled and disabled. + +Supporting PRs will be required to modify both documentation and functionality so that system variables and/or tables that are hidden by SEM are not required. For example: + +* https://github.com/pingcap/tidb/pull/22286 +* https://github.com/pingcap/tidb/pull/21988 +* https://github.com/pingcap/docs/pull/4552 + +* A further change to move the `new_collation_enabled` variable from mysql.tidb to a status variable has been identified, as it appears on several manual pages. No PR has been created yet.
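+As a sketch, the client-visible behavior the functional tests above will assert looks roughly as follows (the statement and table names come from the lists in this proposal; the exact error responses are assumptions, since they depend on the final implementation): + +```sql +-- With EnableEnhancedSecurity = TRUE, even for a user holding SUPER: +SELECT * FROM information_schema.cluster_config; -- expected to fail: the table is hidden +GRANT SELECT ON information_schema.cluster_config TO 'userA'@'%'; -- expected to fail: hidden tables are also non-grantable +SHOW CONFIG; -- expected to fail: the statement is disabled under SEM +```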
+ +## Impacts & Risks + +`SEM` has an impact only when it is enabled, which is intended only for DBaaS (although users of on-premises installations of TiDB may also consider enabling it). + +The intention behind SEM is to reduce the impact on end users, who can continue to use `SUPER` as the de facto "admin" privilege (versus alternatives such as mentioned below). The larger impact will be on System Operators, who will need fine-grained privileges to replace the `SUPER` privilege. + +The largest risk with `SEM` enabled is application/MySQL compatibility. There are a number of SEM behaviors which have been discussed, with the following outcomes: + +| Suggestion | Observed Risk | Outcome | +| --------------- | --------------- | --------------- | +| Is it possible to make a system variable non-readable by a non-privileged user? | MySQL does not have a semantic where a sysvar would ever be non-readable. Non-settable however is fine. | Variables will either be invisible or visible. Never non-readable, although non-writeable is possible (example: sql_log_bin). | +| Is it possible to hide columns in information schema? | Users may depend on ordinality of information_schema table column order. This is particularly likely with tables with useful columns at the start. | Columns will appear with NULL values when they must be hidden. | +| Is it possible to hide sysvars such as hostname? | For MySQL-specific sysvars, there is an increased likelihood applications will read them, and result in an error if they are not present. | For a specific case like hostname, it is a requirement to return a placeholder value such as ‘localhost’, rather than hide the variable. | + +Users will also be able to observe if the system they are using has enhanced security mode enabled via the system variable, `tidb_enable_enhanced_security` (read-only); a short sketch of these semantics appears below. + +## Investigation & Alternatives + +The alternative to SEM is to implement fine-grained privileges for end users. This idea has been discussed and rejected. See "Motivation or Background" for context. + +Amazon RDS also uses the approach of not granting `SUPER` to users, and instead offering a set of custom stored procedures to support use-cases that would usually require `SUPER`. This idea has been rejected.
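+As promised above, a short sketch of the variable semantics a client session might observe (illustrative only; the outputs shown are assumptions that follow the outcomes table in Impacts & Risks): + +```sql +-- With SEM enabled: +SELECT @@hostname; -- returns the placeholder value 'localhost', not the real host +SELECT @@tidb_enable_enhanced_security; -- readable by any user, returns 1 +SET GLOBAL tidb_enable_enhanced_security = 0; -- expected to fail: the variable is read-only +```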
+ +## Unresolved Questions + +None + From 514a4ee6715ad59b2636ae62fe1926d0cfe83360 Mon Sep 17 00:00:00 2001 From: Morgan Tocker Date: Fri, 19 Mar 2021 05:35:36 -0600 Subject: [PATCH 41/44] docs: Add Proposal for dynamic privileges (#23224) --- docs/design/2021-03-09-dynamic-privileges.md | 359 +++++++++++++++++++ 1 file changed, 359 insertions(+) create mode 100644 docs/design/2021-03-09-dynamic-privileges.md diff --git a/docs/design/2021-03-09-dynamic-privileges.md b/docs/design/2021-03-09-dynamic-privileges.md new file mode 100644 index 0000000000000..13fe9688dcd93 --- /dev/null +++ b/docs/design/2021-03-09-dynamic-privileges.md @@ -0,0 +1,359 @@ +# Proposal: + +- Author(s): [morgo](https://github.com/morgo) +- Last updated: March 09, 2021 +- Discussion at: N/A + +## Table of Contents + +* [Introduction](#introduction) +* [Motivation or Background](#motivation-or-background) +* [Detailed Design](#detailed-design) +* [Test Design](#test-design) + * [Functional Tests](#functional-tests) + * [Scenario Tests](#scenario-tests) + * [Compatibility Tests](#compatibility-tests) + * [Benchmark Tests](#benchmark-tests) +* [Impacts & Risks](#impacts--risks) +* [Investigation & Alternatives](#investigation--alternatives) +* [Unresolved Questions](#unresolved-questions) + +## Introduction + +This document was created to discuss the design of Dynamic Privileges. It is intended to be implemented in combination with Security Enhanced Mode, but there are no interdependencies between the two features. + +## Motivation or Background + +MySQL 8.0 introduced the concept of “dynamic privileges” (see [WL#8131](https://dev.mysql.com/worklog/task/?id=8131)). The intention behind this functionality is that plugins can create new named privileges to suit their purposes, such as “Firewall Admin” or “Audit Admin” instead of requiring the `SUPER` privilege, which becomes overloaded and too coarse. + +Dynamic privileges are **not to be confused with** SQL Roles (RBAC). They work together just fine.
Consider the following test case, which demonstrates the features working together: + +```go + mustExec(c, rootSe, "CREATE USER notsuper") + mustExec(c, rootSe, "CREATE USER otheruser") + mustExec(c, rootSe, "CREATE ROLE anyrolename") + mustExec(c, rootSe, "SET tidb_enable_dynamic_privileges=1") + + se := newSession(c, s.store, s.dbName) + c.Assert(se.Auth(&auth.UserIdentity{Username: "notsuper", Hostname: "%"}, nil, nil), IsTrue) + mustExec(c, se, "SET tidb_enable_dynamic_privileges=1") + + // test SYSTEM_VARIABLES_ADMIN + _, err := se.ExecuteInternal(context.Background(), "SET GLOBAL wait_timeout = 86400") + c.Assert(err.Error(), Equals, "[planner:1227]Access denied; you need (at least one of) the SUPER or SYSTEM_VARIABLES_ADMIN privilege(s) for this operation") + mustExec(c, rootSe, "GRANT SYSTEM_VARIABLES_admin ON *.* TO notsuper") + mustExec(c, se, "SET GLOBAL wait_timeout = 86400") + + // test ROLE_ADMIN + _, err = se.ExecuteInternal(context.Background(), "GRANT anyrolename TO otheruser") + c.Assert(err.Error(), Equals, "[planner:1227]Access denied; you need (at least one of) the SUPER or ROLE_ADMIN privilege(s) for this operation") + mustExec(c, rootSe, "GRANT ROLE_ADMIN ON *.* TO notsuper") + mustExec(c, se, "GRANT anyrolename TO otheruser") + + // revoke SYSTEM_VARIABLES_ADMIN, confirm it is dropped + mustExec(c, rootSe, "REVOKE SYSTEM_VARIABLES_AdmIn ON *.* FROM notsuper") + _, err = se.ExecuteInternal(context.Background(), "SET GLOBAL wait_timeout = 86000") + c.Assert(err.Error(), Equals, "[planner:1227]Access denied; you need (at least one of) the SUPER or SYSTEM_VARIABLES_ADMIN privilege(s) for this operation") + + // grant super, confirm that it is also a substitute for SYSTEM_VARIABLES_ADMIN + mustExec(c, rootSe, "GRANT SUPER ON *.* TO notsuper") + mustExec(c, se, "SET GLOBAL wait_timeout = 86400") + + // revoke SUPER, assign SYSTEM_VARIABLES_ADMIN to anyrolename. + // confirm that a dynamic privilege can be inherited from a role. + mustExec(c, rootSe, "REVOKE SUPER ON *.* FROM notsuper") + mustExec(c, rootSe, "GRANT SYSTEM_VARIABLES_AdmIn ON *.* TO anyrolename") + mustExec(c, rootSe, "GRANT anyrolename TO notsuper") + + // It's not a default role, this should initially fail: + _, err = se.ExecuteInternal(context.Background(), "SET GLOBAL wait_timeout = 86400") + c.Assert(err.Error(), Equals, "[planner:1227]Access denied; you need (at least one of) the SUPER or SYSTEM_VARIABLES_ADMIN privilege(s) for this operation") + mustExec(c, se, "SET ROLE anyrolename") + mustExec(c, se, "SET GLOBAL wait_timeout = 87000") +``` + +Dynamic privileges are different from static privileges in the following ways: + +* The name of the privilege is “DYNAMIC”. A plugin registers it, and the server has no prior knowledge of its existence. +* Privileges can only be global-scoped. +* Privileges are stored in the table `mysql.global_grants` and not `mysql.user`. +* The `GRANT OPTION` is stored for each dynamic privilege (versus for the user as a whole). + +We have the same requirement for fine-grained privileges in TiDB, so it makes sense to adopt a similar implementation of dynamic privileges. This document describes both the implementation of the framework for dynamic privileges and an initial set of dynamic privileges that are required to be implemented. + +## Detailed Design + +Implementing Dynamic Privileges requires the following work to be completed.
+ +### Persistence + +For TiDB, we can use the same table structure as MySQL: + +```sql +CREATE TABLE `global_grants` ( + `USER` char(32) NOT NULL DEFAULT '', + `HOST` char(255) NOT NULL DEFAULT '', + `PRIV` char(32) NOT NULL DEFAULT '', + `WITH_GRANT_OPTION` enum('N','Y') NOT NULL DEFAULT 'N', + PRIMARY KEY (`USER`,`HOST`,`PRIV`) +); +``` + +There is an existing table called “global_priv” which initially looked like it provided similar functionality, except: +* The priv is expected to be a JSON-encoded string +* There is no column named `WITH_GRANT_OPTION`. + +I looked at repurposing this table (which stores TLS options), but because the `PRIV` value is the data and not the key, it gets messy. I instead plan to use the same schema as MySQL. + +This table will persist dynamic privileges. Similar to MySQL, the table is read into memory and cached (privilege/privileges/cache.go). Dynamic privileges will be cached in the same way as existing privileges. + +### Privilege Checking API + +Checking for the existence of a Dynamic privilege needs a different function from normal privilege checks. I.e. + +``` +// RequestVerification(activeRole []*auth.RoleIdentity, db, table, +// column string, priv mysql.PrivilegeType) bool +if pm.RequestVerification(activeRoles, "", "", "", mysql.ProcessPriv) { + // has processPrivilege +} +``` + +This is not suitable because: +* The privilege `mysql.ProcessPriv` must be predefined (i.e. it's not dynamic). +* The 3 empty string values (schema, table, column) are never applicable to dynamic privileges. +* Dynamic privileges are grantable individually. There may be scenarios where code wants to check if a user has both a dynamic privilege and the ability to grant it (such as in the output of `SHOW GRANTS`). + +I propose the following: + +``` +// RequestDynamicVerification(activeRole []*auth.RoleIdentity, priv string, grantable bool) bool +if pm.RequestDynamicVerification(activeRoles, "BACKUP_ADMIN", false) { + // has backup admin privilege +} +``` + +### Plugin API + +There will need to be a way for plugins to register new dynamic privileges via their OnInit method. I propose the following: + +``` +import ( + "github.com/pingcap/tidb/privilege/privileges" +) + +err = privileges.RegisterDynamicPrivilege("AUDIT_ADMIN") +if err != nil { + return err +} +``` + +### Metadata Commands + +#### SHOW GRANTS + +The output of `SHOW GRANTS` needs to be modified to show each of the dynamic privileges applicable to a user, following static privileges. I.e.
+``` +mysql [localhost:8023] {root} (test) > show grants for 'u1'; ++---------------------------------------------------------+ +| Grants for u1@%                                         | ++---------------------------------------------------------+ +| GRANT USAGE ON *.* TO `u1`@`%`                          | +| GRANT BINLOG_ADMIN ON *.* TO `u1`@`%`                   | +| GRANT BACKUP_ADMIN ON *.* TO `u1`@`%` WITH GRANT OPTION | ++---------------------------------------------------------+ +3 rows in set (0.00 sec) +``` +#### Information_schema + +The table `user_privileges` should show a hybrid of both static and dynamic privileges: + +``` +mysql [localhost:8023] {root} (information_schema) > select * from user_privileges where grantee = "'u1'@'%'"; ++----------+---------------+----------------+--------------+ +| GRANTEE  | TABLE_CATALOG | PRIVILEGE_TYPE | IS_GRANTABLE | ++----------+---------------+----------------+--------------+ +| 'u1'@'%' | def           | USAGE          | NO           | +| 'u1'@'%' | def           | BINLOG_ADMIN   | NO           | +| 'u1'@'%' | def           | BACKUP_ADMIN   | YES          | ++----------+---------------+----------------+--------------+ +3 rows in set (0.00 sec) +``` + +#### SHOW CREATE USER + +No change + +#### CREATE USER + +No change + +#### GRANT / REVOKE + +Needs to support the syntax of a privilege being either a static privilege, or a dynamic privilege. Dynamic privileges only support `*.*`. + +``` +mysql [localhost:8023] {root} (information_schema) > grant select on *.* to 'u1'; +Query OK, 0 rows affected (0.00 sec) + +mysql [localhost:8023] {root} (information_schema) > grant binlog_admin on test.* to 'u1'; +ERROR 3619 (HY000): Illegal privilege level specified for BINLOG_ADMIN +mysql [localhost:8023] {root} (information_schema) > grant binlog_admin on *.* to 'u1'; +Query OK, 0 rows affected (0.01 sec) +``` + +`GRANT ALL` will also GRANT each of the `DYNAMIC` privileges that are registered with the server at the time the command is executed: + +``` +mysql [localhost:8023] {root} (test) > show grants for u1; ++------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| Grants for u1@%                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                          | ++------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| GRANT SELECT, INSERT, UPDATE, DELETE, CREATE, DROP, RELOAD, SHUTDOWN, PROCESS, FILE, REFERENCES, INDEX, ALTER, SHOW DATABASES, SUPER, CREATE TEMPORARY TABLES, LOCK TABLES, EXECUTE, REPLICATION SLAVE, REPLICATION CLIENT, CREATE VIEW, SHOW VIEW, CREATE ROUTINE, ALTER ROUTINE, CREATE USER, EVENT, TRIGGER, CREATE TABLESPACE, CREATE ROLE, DROP ROLE ON *.* TO `u1`@`%`                                                                                                                                                                                                             | +| GRANT APPLICATION_PASSWORD_ADMIN,AUDIT_ADMIN,BINLOG_ADMIN,BINLOG_ENCRYPTION_ADMIN,CLONE_ADMIN,CONNECTION_ADMIN,ENCRYPTION_KEY_ADMIN,FLUSH_OPTIMIZER_COSTS,FLUSH_STATUS,FLUSH_TABLES,FLUSH_USER_RESOURCES,GROUP_REPLICATION_ADMIN,INNODB_REDO_LOG_ARCHIVE,INNODB_REDO_LOG_ENABLE,PERSIST_RO_VARIABLES_ADMIN,REPLICATION_APPLIER,REPLICATION_SLAVE_ADMIN,RESOURCE_GROUP_ADMIN,RESOURCE_GROUP_USER,ROLE_ADMIN,SERVICE_CONNECTION_ADMIN,SESSION_VARIABLES_ADMIN,SET_USER_ID,SHOW_ROUTINE,SYSTEM_USER,SYSTEM_VARIABLES_ADMIN,TABLE_ENCRYPTION_ADMIN,XA_RECOVER_ADMIN ON *.* TO `u1`@`%` | +| GRANT BACKUP_ADMIN ON *.* TO `u1`@`%` WITH GRANT OPTION                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                  | ++------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +3 rows in set (0.00 sec) +``` + +Currently TiDB does not expand `GRANT ALL` when the value is read back from `SHOW GRANTS`. It is possible to maintain this current behavior difference. + +#### ALTER USER + +No change + +### Initial Set of Dynamic Privileges + +#### Borrowed from MySQL + +| Privilege Name | Description | Notes | +| --------------- | --------------- | --------------- | +| `BACKUP_ADMIN` | Enables BR backups and restores, as well as lightning restores. | Currently this required `SUPER`. It will now require `BACKUP_ADMIN` or `SUPER`. | +| `SYSTEM_VARIABLES_ADMIN` | Allows changing any GLOBAL system variable. | Currently this required `SUPER`. It will now require `SYSTEM_VARIABLES_ADMIN` or `SUPER`. | +| `ROLE_ADMIN` | Allows granting and revoking roles. | Won’t allow revoking on restricted_users (see below). | +| `CONNECTION_ADMIN` | Allows killing connections. | Like `PROCESS` static privilege, but slightly more restrictive (no show processlist). | + +#### TiDB Extensions + +| Privilege Name | Description | Notes | +| --------------- | --------------- | --------------- | +| `RESTRICTED_SYSTEM_VARIABLES_ADMIN` | Allows changing a restricted `GLOBAL` system variable. | Currently in SEM all high risk variables are unloaded. TBD, it might be required in future that they are only visible/settable to those with this privilege and not SUPER. | +| `RESTRICTED_STATUS_VARIABLES_ADMIN` | Allows observing restricted status variables. | i.e. `SHOW GLOBAL STATUS` by default hides some statistics when `SEM` is enabled. | +| `RESTRICTED_CONNECTION_ADMIN` | A special privilege to say that their connections, etc. can’t be killed by SUPER users AND they can kill connections by all other users. Affects `KILL`, `KILL TIDB` commands. | It is intended for the CloudAdmin user in DBaaS. | +| `RESTRICTED_USER` | A special privilege to say that their access can’t be changed by `SUPER` users. Statements `DROP USER`, `SET PASSWORD`, `ALTER USER`, `REVOKE` are all limited. | It is intended for the CloudAdmin user in DBaaS. | +| `RESTRICTED_TABLES` | A special privilege which means that the SEM hidden table semantic doesn’t apply. | It is intended for the CloudAdmin user in DBaaS. | + +### Parser Changes + +The parser already supports `DYNAMIC` privileges.
This can be confirmed by the following patch to TiDB, where they are sent as the static type of `ExtendedPriv`: + +``` +diff --git a/planner/core/planbuilder.go b/planner/core/planbuilder.go +index 90d5b9e82..d1644ee83 100644 +--- a/planner/core/planbuilder.go ++++ b/planner/core/planbuilder.go +@@ -2298,6 +2298,11 @@ func collectVisitInfoFromGrantStmt(sctx sessionctx.Context, vi []visitInfo, stmt + + var allPrivs []mysql.PrivilegeType + for _, item := range stmt.Privs { ++ ++ if item.Priv == mysql.ExtendedPriv { ++ fmt.Printf("### Attempting to set DYNAMIC privilege: %s\n", item.Name) ++ } ++ + if item.Priv == mysql.AllPriv { + switch stmt.Level.Level { + case ast.GrantLevelGlobal: +``` + +This results in the following written to the log file: +``` +mysql> grant acdc on *.* to u1; +ERROR 8121 (HY000): privilege check fail + +### Attempting to set DYNAMIC privilege: acdc +``` + +It might be possible that changes are still required if `ExtendedPriv` is not supported in all contexts (REVOKE, etc.). + +Note that creating a role with the same name as a DYNAMIC privilege is supported. A `GRANT` statement can be attributed to ROLES when it omits the ON *.* syntax. Thus: + +``` +GRANT BINLOG_ADMIN TO u1; // grants the role binlog_admin +GRANT BINLOG_ADMIN ON *.* TO u1; // grants the dynamic privilege binlog_admin +``` + +This same nuance applies to MySQL. + +### Documentation Plan + +The statement reference pages for each of the affected metadata commands will need to be updated to describe dynamic privileges. + +There will also need to be documentation specific to `DYNAMIC` privileges to describe how they work, and the purpose of fine-grained access control. + +## Test Design + +Testing dynamic privileges is a little bit complex because of the various ways privileges can be inherited by users: + +* Direct `GRANT` to the user +* Granting to a role that the user inherits. + +### Functional Tests + +Unit tests will be added to cover the semantics around role/dynamic privilege precedence, including logical restoring in a different order. + +Unit tests will also cover each of the "initial set of Dynamic privileges". Tests will include both directly assigning the privileges and assigning via a ROLE (RBAC). + +Integration testing needs to test with global kill enabled/disabled. + +### Scenario Tests + +The use-cases required by the DBaaS team should be validated when combined with `security-enhanced-mode`. They are: + +| Account Name | root | cloudAdmin | +| --------------- | --------------- | --------------- | +| Backup & Restore to cloud | Y | Y | +| File privilege | N | N | +| Read or set variables | Y | Y | +| Set restricted variables (some of them cannot even be read) | N | Y | +| Read or set restricted system tables | N | Y | +| DROP USER cloudAdmin | N | Hard to N (unless some hardcoded) | +| REVOKE cloudAdmin | N | Hard to N (unless some hardcoded) | +| Show processlist, access to threads belonging to other users | Y | Y | +| Change password if the password of SUPER user is forgotten | N | Y | +| Kill connections belonging to cloudAdmin | N | Y | +| SHUTDOWN / RESTART | N | Y (graceful shutdown on k8s for tidb-server) | + +Scenario testing will be required for: +* all the dynamic privileges +* several user-defined dynamic privileges + +### Compatibility Tests + +The introduction of `DYNAMIC` privileges is not expected to introduce any compatibility issues, because backwards compatibility is ensured.
However, plugins should migrate to registering their own dynamic privileges and not rely on the use of `SUPER`. This is considered an enhancement, and is not in scope for the initial introduction of dynamic privileges (which introduces the framework for plugins to use). + +## Impacts & Risks + +In its initial release, dynamic privilege usage will be controlled by an experimental feature flag (`tidb_enable_dynamic_privileges`), which is modifiable on a GLOBAL or SESSION basis. The implementation will be via restricting `GRANT` and `REVOKE` statements from creating dynamic privileges (it is too invasive to conditionally modify the ast visitor functionality). + +For backwards compatibility, the MySQL-compatible dynamic privileges will also permit `SUPER`. This helps prevent upgrade issues, such as the fact that when TiDB was bootstrapped, `GRANT ALL ON *.*` would not have granted all the dynamic privileges. There might be some impact on the upgrade/downgrade story if eventually the `BACKUP_ADMIN` privilege is used instead of `SUPER`, but for the initial release I am planning to allow either. + +## Investigation & Alternatives + +An alternative could be to support fine-grained access in a TiDB-specific way. Because the MySQL functionality overlaps nicely, it doesn’t really make sense not to follow. +The initial implementation of dynamic privileges only implements a subset of MySQL’s [dynamic privileges](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html) (see table 6.3). Given that these are supposed to be “dynamic”, I don’t think this is a problem. + +## Unresolved Questions + +### Adding new dynamic privileges to a lower-privileged user + +In the case that `cloudAdmin` does not have `SUPER`, but requires additional fine-grained privileges granted at a later date, there are several potential solutions: + +1. Write a `session/bootstrap.go` task to "split" an existing `DYNAMIC` privilege into two. i.e. users with privilege `XYZ` now have `XYZ` and `ZYX`. +2. Allow the privilege `SELECT, INSERT, UPDATE ON mysql.*` to `cloudAdmin` + `RELOAD` on `*.*`. This will allow `cloudAdmin` to insert `ZYX` into the `global_grants` table, and then run `FLUSH PRIVILEGES` to reload the privilege cache. +3. Add an API for plugins that register new dynamic privileges, such that on first installation they can say `ZYX` is also satisfied by `XYZ` (triggering an internal copy of privileges). +4. Support a feature similar to MySQL's [`--init-file`](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_init_file) which executes with unrestricted privileges. +5. Make the privilege manager completely pluggable (it is currently an interface, and extending it to plugins is not a difficult stretch). Make cloudAdmin privileges embedded into the cloud-specific privilege manager, and not dependent on the internal system tables. + +The current recommended method is (1), sketched below, since method (2) does not effectively restrict the credentials of `cloudAdmin`. (3) is a workaround for the fact that `visitInfo` does not support OR conditions for privileges. (4) and (5) have merit, but require development outside the scope of this current proposal.
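+As a sketch, the "split" in option (1) could be expressed against the `mysql.global_grants` table defined in the Persistence section. Here `XYZ` and `ZYX` are the placeholder privilege names used above, and the bootstrap wiring around the statement is an assumption: + +```sql +-- Copy every grant of the existing privilege XYZ into the new privilege ZYX, +-- preserving grantability; intended to run once from a bootstrap task. +INSERT INTO mysql.global_grants (USER, HOST, PRIV, WITH_GRANT_OPTION) +SELECT USER, HOST, 'ZYX', WITH_GRANT_OPTION FROM mysql.global_grants WHERE PRIV = 'XYZ'; +```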
From 60460be42f65a91177430cc9ea2aa4a1e91130a1 Mon Sep 17 00:00:00 2001 From: tangenta Date: Fri, 19 Mar 2021 19:51:36 +0800 Subject: [PATCH 42/44] *: unify NeedRestoredData and CommonHandleNeedRestoredData (#23266) --- executor/mem_reader.go | 2 +- table/tables/index.go | 2 +- table/tables/tables.go | 6 +++--- tablecodec/tablecodec.go | 6 +++--- types/etc.go | 4 +++- types/field_type.go | 9 --------- util/rowcodec/decoder.go | 4 ++-- 7 files changed, 13 insertions(+), 20 deletions(-) diff --git a/executor/mem_reader.go b/executor/mem_reader.go index 79bcc7d96288f..f6023c93c5b1a 100644 --- a/executor/mem_reader.go +++ b/executor/mem_reader.go @@ -272,7 +272,7 @@ func (m *memTableReader) getRowData(handle kv.Handle, value []byte) ([][]byte, e offset := colIDs[id] if m.table.IsCommonHandle { for i, colID := range m.pkColIDs { - if colID == col.ID && !types.CommonHandleNeedRestoredData(&col.FieldType) { + if colID == col.ID && !types.NeedRestoredData(&col.FieldType) { // Only try to decode handle when there is no corresponding column in the value. // This is because the information in handle may be incomplete in some cases. // For example, prefixed clustered index like 'primary key(col1(1))' only store the leftmost 1 char in the handle. diff --git a/table/tables/index.go b/table/tables/index.go index 2fd978383b12a..744418d7df665 100644 --- a/table/tables/index.go +++ b/table/tables/index.go @@ -120,7 +120,7 @@ func NewIndex(physicalID int64, tblInfo *model.TableInfo, indexInfo *model.Index prefix: prefix, phyTblID: physicalID, } - index.needRestoredData = index.checkNeedRestoredData() + index.needRestoredData = NeedRestoredData(indexInfo.Columns, tblInfo.Columns) return index } diff --git a/table/tables/tables.go b/table/tables/tables.go index 7dd3894898a52..ee1a526a183ea 100644 --- a/table/tables/tables.go +++ b/table/tables/tables.go @@ -908,7 +908,7 @@ func DecodeRawRowData(ctx sessionctx.Context, meta *model.TableInfo, h kv.Handle } continue } - if col.IsCommonHandleColumn(meta) && !types.CommonHandleNeedRestoredData(&col.FieldType) { + if col.IsCommonHandleColumn(meta) && !types.NeedRestoredData(&col.FieldType) { if containFullCol, idxInHandle := containFullColInHandle(meta, col); containFullCol { dtBytes := h.EncodedCol(idxInHandle) _, dt, err := codec.DecodeOne(dtBytes) @@ -935,7 +935,7 @@ func DecodeRawRowData(ctx sessionctx.Context, meta *model.TableInfo, h kv.Handle if col == nil { continue } - if col.IsPKHandleColumn(meta) || (col.IsCommonHandleColumn(meta) && !types.CommonHandleNeedRestoredData(&col.FieldType)) { + if col.IsPKHandleColumn(meta) || (col.IsCommonHandleColumn(meta) && !types.NeedRestoredData(&col.FieldType)) { if _, isPrefix := prefixCols[col.ID]; !isPrefix { continue } @@ -1411,7 +1411,7 @@ func CanSkip(info *model.TableInfo, col *table.Column, value *types.Datum) bool continue } canSkip := idxCol.Length == types.UnspecifiedLength - canSkip = canSkip && !types.CommonHandleNeedRestoredData(&col.FieldType) + canSkip = canSkip && !types.NeedRestoredData(&col.FieldType) return canSkip } } diff --git a/tablecodec/tablecodec.go b/tablecodec/tablecodec.go index 9b7b933305eca..b6331240934c1 100644 --- a/tablecodec/tablecodec.go +++ b/tablecodec/tablecodec.go @@ -478,7 +478,7 @@ func DecodeHandleToDatumMap(handle kv.Handle, handleColIDs []int64, if id != hid { continue } - if types.CommonHandleNeedRestoredData(ft) { + if types.NeedRestoredData(ft) { continue } d, err := decodeHandleToDatum(handle, ft, idx) @@ -1165,7 +1165,7 @@ func GenIndexValueForClusteredIndexVersion1(sc 
*stmtctx.StatementContext, tblInf if idxInfo.Global { idxVal = encodePartitionID(idxVal, partitionID) } - if collate.NewCollationEnabled() && (IdxValNeedRestoredData || len(handleRestoredData) > 0) { + if IdxValNeedRestoredData || len(handleRestoredData) > 0 { colIds := make([]int64, 0, len(idxInfo.Columns)) allRestoredData := make([]types.Datum, 0, len(handleRestoredData)+len(idxInfo.Columns)) for i, idxCol := range idxInfo.Columns { @@ -1221,7 +1221,7 @@ func genIndexValueVersion0(sc *stmtctx.StatementContext, tblInfo *model.TableInf idxVal = encodePartitionID(idxVal, partitionID) newEncode = true } - if collate.NewCollationEnabled() && IdxValNeedRestoredData { + if IdxValNeedRestoredData { colIds := make([]int64, len(idxInfo.Columns)) for i, col := range idxInfo.Columns { colIds[i] = tblInfo.Columns[col.Offset].ID diff --git a/types/etc.go b/types/etc.go index 90d7da96287d3..db1bc53c8e012 100644 --- a/types/etc.go +++ b/types/etc.go @@ -108,7 +108,9 @@ func IsNonBinaryStr(ft *FieldType) bool { // NeedRestoredData returns if a type needs restored data. // If the type is char and the collation is _bin, NeedRestoredData() returns false. func NeedRestoredData(ft *FieldType) bool { - if IsNonBinaryStr(ft) && !(collate.IsBinCollation(ft.Collate) && !IsTypeVarchar(ft.Tp)) { + if collate.NewCollationEnabled() && + IsNonBinaryStr(ft) && + !(collate.IsBinCollation(ft.Collate) && !IsTypeVarchar(ft.Tp)) { return true } return false diff --git a/types/field_type.go b/types/field_type.go index d5272fa699002..5ce6c7712d7f6 100644 --- a/types/field_type.go +++ b/types/field_type.go @@ -20,7 +20,6 @@ import ( "github.com/pingcap/parser/mysql" ast "github.com/pingcap/parser/types" "github.com/pingcap/tidb/types/json" - "github.com/pingcap/tidb/util/collate" utilMath "github.com/pingcap/tidb/util/math" ) @@ -1295,11 +1294,3 @@ func SetBinChsClnFlag(ft *FieldType) { // VarStorageLen indicates this column is a variable length column. const VarStorageLen = ast.VarStorageLen - -// CommonHandleNeedRestoredData indicates whether the column can be decoded directly from the common handle. -// If can, then returns false. Otherwise returns true. 
-func CommonHandleNeedRestoredData(ft *FieldType) bool { - return collate.NewCollationEnabled() && - ft.EvalType() == ETString && - !mysql.HasBinaryFlag(ft.Flag) -} diff --git a/util/rowcodec/decoder.go b/util/rowcodec/decoder.go index a9ab7ab9abb20..69a78d1de7d43 100644 --- a/util/rowcodec/decoder.go +++ b/util/rowcodec/decoder.go @@ -255,7 +255,7 @@ func (decoder *ChunkDecoder) tryAppendHandleColumn(colIdx int, col *ColInfo, han } for i, id := range decoder.handleColIDs { if col.ID == id { - if types.CommonHandleNeedRestoredData(col.Ft) { + if types.NeedRestoredData(col.Ft) { return false } coder := codec.NewDecoder(chk, decoder.loc) @@ -426,7 +426,7 @@ func (decoder *BytesDecoder) tryDecodeHandle(values [][]byte, offset int, col *C if handle == nil { return false } - if types.CommonHandleNeedRestoredData(col.Ft) { + if types.NeedRestoredData(col.Ft) { return false } if col.IsPKHandle || col.ID == model.ExtraHandleID { From b983856ddf497cd1885cca0bf375244235de47b5 Mon Sep 17 00:00:00 2001 From: Shirly Date: Fri, 19 Mar 2021 21:19:36 +0800 Subject: [PATCH 43/44] store/tikv: move kv.ReplicaReadType to tikv (#23376) --- distsql/request_builder.go | 9 +-- distsql/request_builder_test.go | 19 +++--- executor/analyze.go | 9 +-- executor/analyze_test.go | 4 +- executor/batch_point_get.go | 3 +- executor/point_get.go | 3 +- kv/kv.go | 33 +---------- planner/optimize.go | 3 +- session/session.go | 5 +- session/session_test.go | 17 +++--- sessionctx/variable/session.go | 20 +++---- sessionctx/variable/varsutil_test.go | 8 +-- store/tikv/kv/kv.go | 11 ++++ store/tikv/{storeutil => kv}/store_vars.go | 20 ++++++- store/tikv/region_cache.go | 7 ++- store/tikv/region_cache_test.go | 2 +- store/tikv/region_request.go | 22 +++--- store/tikv/region_request_test.go | 30 +++++----- store/tikv/snapshot.go | 67 +++++++++++----------- store/tikv/tikvrpc/tikvrpc.go | 5 +- 20 files changed, 155 insertions(+), 142 deletions(-) create mode 100644 store/tikv/kv/kv.go rename store/tikv/{storeutil => kv}/store_vars.go (50%) diff --git a/distsql/request_builder.go b/distsql/request_builder.go index 310de50149eeb..21d523ffb3164 100644 --- a/distsql/request_builder.go +++ b/distsql/request_builder.go @@ -27,6 +27,7 @@ import ( "github.com/pingcap/tidb/sessionctx/stmtctx" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/statistics" + tikvstore "github.com/pingcap/tidb/store/tikv/kv" "github.com/pingcap/tidb/store/tikv/oracle" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/types" @@ -144,7 +145,7 @@ func (builder *RequestBuilder) SetAnalyzeRequest(ana *tipb.AnalyzeReq) *RequestB builder.Request.Tp = kv.ReqTypeAnalyze builder.Request.Data, builder.err = ana.Marshal() builder.Request.NotFillCache = true - builder.Request.IsolationLevel = kv.RC + builder.Request.IsolationLevel = tikvstore.RC builder.Request.Priority = kv.PriorityLow } @@ -198,12 +199,12 @@ func (builder *RequestBuilder) SetAllowBatchCop(batchCop bool) *RequestBuilder { return builder } -func (builder *RequestBuilder) getIsolationLevel() kv.IsoLevel { +func (builder *RequestBuilder) getIsolationLevel() tikvstore.IsoLevel { switch builder.Tp { case kv.ReqTypeAnalyze: - return kv.RC + return tikvstore.RC } - return kv.SI + return tikvstore.SI } func (builder *RequestBuilder) getKVPriority(sv *variable.SessionVars) int { diff --git a/distsql/request_builder_test.go b/distsql/request_builder_test.go index 2445de54cb82c..5b945a9ea7986 100644 --- a/distsql/request_builder_test.go +++ b/distsql/request_builder_test.go @@ -25,6
+25,7 @@ import ( "github.com/pingcap/tidb/sessionctx/stmtctx" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/statistics" + tikvstore "github.com/pingcap/tidb/store/tikv/kv" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/chunk" @@ -323,7 +324,7 @@ func (s *testSuite) TestRequestBuilder1(c *C) { NotFillCache: false, SyncLog: false, Streaming: false, - ReplicaRead: kv.ReplicaReadLeader, + ReplicaRead: tikvstore.ReplicaReadLeader, } c.Assert(actual, DeepEquals, expect) } @@ -399,7 +400,7 @@ func (s *testSuite) TestRequestBuilder2(c *C) { NotFillCache: false, SyncLog: false, Streaming: false, - ReplicaRead: kv.ReplicaReadLeader, + ReplicaRead: tikvstore.ReplicaReadLeader, } c.Assert(actual, DeepEquals, expect) } @@ -446,7 +447,7 @@ func (s *testSuite) TestRequestBuilder3(c *C) { NotFillCache: false, SyncLog: false, Streaming: false, - ReplicaRead: kv.ReplicaReadLeader, + ReplicaRead: tikvstore.ReplicaReadLeader, } c.Assert(actual, DeepEquals, expect) } @@ -493,7 +494,7 @@ func (s *testSuite) TestRequestBuilder4(c *C) { Streaming: true, NotFillCache: false, SyncLog: false, - ReplicaRead: kv.ReplicaReadLeader, + ReplicaRead: tikvstore.ReplicaReadLeader, } c.Assert(actual, DeepEquals, expect) } @@ -532,7 +533,7 @@ func (s *testSuite) TestRequestBuilder5(c *C) { KeepOrder: true, Desc: false, Concurrency: 15, - IsolationLevel: kv.RC, + IsolationLevel: tikvstore.RC, Priority: 1, NotFillCache: true, SyncLog: false, @@ -576,10 +577,10 @@ func (s *testSuite) TestRequestBuilder6(c *C) { } func (s *testSuite) TestRequestBuilder7(c *C) { - for _, replicaRead := range []kv.ReplicaReadType{ - kv.ReplicaReadLeader, - kv.ReplicaReadFollower, - kv.ReplicaReadMixed, + for _, replicaRead := range []tikvstore.ReplicaReadType{ + tikvstore.ReplicaReadLeader, + tikvstore.ReplicaReadFollower, + tikvstore.ReplicaReadMixed, } { vars := variable.NewSessionVars() vars.SetReplicaRead(replicaRead) diff --git a/executor/analyze.go b/executor/analyze.go index 923e8e12b27a6..2338796ec82be 100644 --- a/executor/analyze.go +++ b/executor/analyze.go @@ -44,6 +44,7 @@ import ( "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/statistics" "github.com/pingcap/tidb/store/tikv" + tikvstore "github.com/pingcap/tidb/store/tikv/kv" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/types" @@ -948,7 +949,7 @@ func (e *AnalyzeFastExec) activateTxnForRowCount() (rollbackFn func() error, err } } txn.SetOption(kv.Priority, kv.PriorityLow) - txn.SetOption(kv.IsolationLevel, kv.RC) + txn.SetOption(kv.IsolationLevel, tikvstore.RC) txn.SetOption(kv.NotFillCache, true) return rollbackFn, nil } @@ -1148,7 +1149,7 @@ func (e *AnalyzeFastExec) handleScanIter(iter kv.Iterator) (scanKeysSize int, er func (e *AnalyzeFastExec) handleScanTasks(bo *tikv.Backoffer) (keysSize int, err error) { snapshot := e.ctx.GetStore().GetSnapshot(kv.MaxVersion) if e.ctx.GetSessionVars().GetReplicaRead().IsFollowerRead() { - snapshot.SetOption(kv.ReplicaRead, kv.ReplicaReadFollower) + snapshot.SetOption(kv.ReplicaRead, tikvstore.ReplicaReadFollower) } for _, t := range e.scanTasks { iter, err := snapshot.Iter(t.StartKey, t.EndKey) @@ -1168,10 +1169,10 @@ func (e *AnalyzeFastExec) handleSampTasks(workID int, step uint32, err *error) { defer e.wg.Done() snapshot := e.ctx.GetStore().GetSnapshot(kv.MaxVersion) snapshot.SetOption(kv.NotFillCache, true) - snapshot.SetOption(kv.IsolationLevel, kv.RC) + snapshot.SetOption(kv.IsolationLevel, 
tikvstore.RC) snapshot.SetOption(kv.Priority, kv.PriorityLow) if e.ctx.GetSessionVars().GetReplicaRead().IsFollowerRead() { - snapshot.SetOption(kv.ReplicaRead, kv.ReplicaReadFollower) + snapshot.SetOption(kv.ReplicaRead, tikvstore.ReplicaReadFollower) } rander := rand.New(rand.NewSource(e.randSeed)) diff --git a/executor/analyze_test.go b/executor/analyze_test.go index 66b7e03f52549..623bd09277948 100644 --- a/executor/analyze_test.go +++ b/executor/analyze_test.go @@ -29,7 +29,6 @@ import ( "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/executor" "github.com/pingcap/tidb/infoschema" - "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/session" "github.com/pingcap/tidb/sessionctx" @@ -39,6 +38,7 @@ import ( "github.com/pingcap/tidb/statistics/handle" "github.com/pingcap/tidb/store/mockstore" "github.com/pingcap/tidb/store/tikv" + tikvstore "github.com/pingcap/tidb/store/tikv/kv" "github.com/pingcap/tidb/store/tikv/mockstore/cluster" "github.com/pingcap/tidb/store/tikv/tikvrpc" "github.com/pingcap/tidb/table" @@ -120,7 +120,7 @@ func (s *testSuite1) TestAnalyzeReplicaReadFollower(c *C) { tk.MustExec("drop table if exists t") tk.MustExec("create table t(a int)") ctx := tk.Se.(sessionctx.Context) - ctx.GetSessionVars().SetReplicaRead(kv.ReplicaReadFollower) + ctx.GetSessionVars().SetReplicaRead(tikvstore.ReplicaReadFollower) tk.MustExec("analyze table t") } diff --git a/executor/batch_point_get.go b/executor/batch_point_get.go index ca4416658306b..aceaec0e89afd 100644 --- a/executor/batch_point_get.go +++ b/executor/batch_point_get.go @@ -28,6 +28,7 @@ import ( "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/store/tikv" + tikvstore "github.com/pingcap/tidb/store/tikv/kv" "github.com/pingcap/tidb/store/tikv/oracle" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/types" @@ -113,7 +114,7 @@ func (e *BatchPointGetExec) Open(context.Context) error { e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, e.stats) } if e.ctx.GetSessionVars().GetReplicaRead().IsFollowerRead() { - snapshot.SetOption(kv.ReplicaRead, kv.ReplicaReadFollower) + snapshot.SetOption(kv.ReplicaRead, tikvstore.ReplicaReadFollower) } snapshot.SetOption(kv.TaskID, e.ctx.GetSessionVars().StmtCtx.TaskID) isStaleness := e.ctx.GetSessionVars().TxnCtx.IsStaleness diff --git a/executor/point_get.go b/executor/point_get.go index 6f2449fcfecfa..6d949051d2903 100644 --- a/executor/point_get.go +++ b/executor/point_get.go @@ -31,6 +31,7 @@ import ( plannercore "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/store/tikv" + tikvstore "github.com/pingcap/tidb/store/tikv/kv" "github.com/pingcap/tidb/store/tikv/oracle" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/table/tables" @@ -147,7 +148,7 @@ func (e *PointGetExecutor) Open(context.Context) error { e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, e.stats) } if e.ctx.GetSessionVars().GetReplicaRead().IsFollowerRead() { - e.snapshot.SetOption(kv.ReplicaRead, kv.ReplicaReadFollower) + e.snapshot.SetOption(kv.ReplicaRead, tikvstore.ReplicaReadFollower) } e.snapshot.SetOption(kv.TaskID, e.ctx.GetSessionVars().StmtCtx.TaskID) isStaleness := e.ctx.GetSessionVars().TxnCtx.IsStaleness diff --git a/kv/kv.go b/kv/kv.go index bcebff808ca1f..c32db7da6f37d 100644 --- a/kv/kv.go +++ b/kv/kv.go @@ -22,6 +22,7 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" 
"github.com/pingcap/parser/model" "github.com/pingcap/tidb/config" + tikvstore "github.com/pingcap/tidb/store/tikv/kv" "github.com/pingcap/tidb/store/tikv/oracle" "github.com/pingcap/tidb/util/execdetails" "github.com/pingcap/tidb/util/memory" @@ -95,34 +96,6 @@ const UnCommitIndexKVFlag byte = '1' // We use it to abort the transaction to guarantee GC worker will not influence it. const MaxTxnTimeUse = 24 * 60 * 60 * 1000 -// IsoLevel is the transaction's isolation level. -type IsoLevel int - -const ( - // SI stands for 'snapshot isolation'. - SI IsoLevel = iota - // RC stands for 'read committed'. - RC -) - -// ReplicaReadType is the type of replica to read data from -type ReplicaReadType byte - -const ( - // ReplicaReadLeader stands for 'read from leader'. - ReplicaReadLeader ReplicaReadType = 1 << iota - // ReplicaReadFollower stands for 'read from follower'. - ReplicaReadFollower - // ReplicaReadMixed stands for 'read from leader and follower and learner'. - ReplicaReadMixed -) - -// IsFollowerRead checks if leader is going to be used to read data. -func (r ReplicaReadType) IsFollowerRead() bool { - // In some cases the default value is 0, which should be treated as `ReplicaReadLeader`. - return r != ReplicaReadLeader && r != 0 -} - // Those limits is enforced to make sure the transaction can be well handled by TiKV. var ( // TxnEntrySizeLimit is limit of single entry size (len(key) + len(value)). @@ -391,7 +364,7 @@ type Request struct { // sent to multiple storage units concurrently. Concurrency int // IsolationLevel is the isolation level, default is SI. - IsolationLevel IsoLevel + IsolationLevel tikvstore.IsoLevel // Priority is the priority of this KV request, its value may be PriorityNormal/PriorityLow/PriorityHigh. Priority int // memTracker is used to trace and control memory usage in co-processor layer. @@ -408,7 +381,7 @@ type Request struct { // call would not corresponds to a whole region result. Streaming bool // ReplicaRead is used for reading data from replicas, only follower is supported at this time. - ReplicaRead ReplicaReadType + ReplicaRead tikvstore.ReplicaReadType // StoreType represents this request is sent to the which type of store. StoreType StoreType // Cacheable is true if the request can be cached. Currently only deterministic DAG requests can be cached. 
diff --git a/planner/optimize.go b/planner/optimize.go index ec9bfef67d0a7..4e0f7334ae5ae 100644 --- a/planner/optimize.go +++ b/planner/optimize.go @@ -36,6 +36,7 @@ import ( "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/sessionctx/stmtctx" "github.com/pingcap/tidb/sessionctx/variable" + tikvstore "github.com/pingcap/tidb/store/tikv/kv" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/hint" "github.com/pingcap/tidb/util/logutil" @@ -532,7 +533,7 @@ func handleStmtHints(hints []*ast.TableOptimizerHint) (stmtHints stmtctx.StmtHin warns = append(warns, warn) } stmtHints.HasReplicaReadHint = true - stmtHints.ReplicaRead = byte(kv.ReplicaReadFollower) + stmtHints.ReplicaRead = byte(tikvstore.ReplicaReadFollower) } // Handle MAX_EXECUTION_TIME if maxExecutionTimeCnt != 0 { diff --git a/session/session.go b/session/session.go index 65ae81fbad608..d548eb29ff4ae 100644 --- a/session/session.go +++ b/session/session.go @@ -65,6 +65,7 @@ import ( "github.com/pingcap/tidb/statistics" "github.com/pingcap/tidb/statistics/handle" "github.com/pingcap/tidb/store/tikv" + tikvstore "github.com/pingcap/tidb/store/tikv/kv" "github.com/pingcap/tidb/store/tikv/oracle" tikvutil "github.com/pingcap/tidb/store/tikv/util" "github.com/pingcap/tidb/types" @@ -1788,7 +1789,7 @@ func (s *session) Txn(active bool) (kv.Transaction, error) { s.sessionVars.TxnCtx.CouldRetry = s.isTxnRetryable() s.txn.SetVars(s.sessionVars.KVVars) if s.sessionVars.GetReplicaRead().IsFollowerRead() { - s.txn.SetOption(kv.ReplicaRead, kv.ReplicaReadFollower) + s.txn.SetOption(kv.ReplicaRead, tikvstore.ReplicaReadFollower) } } return &s.txn, nil @@ -1852,7 +1853,7 @@ func (s *session) NewTxn(ctx context.Context) error { } txn.SetVars(s.sessionVars.KVVars) if s.GetSessionVars().GetReplicaRead().IsFollowerRead() { - txn.SetOption(kv.ReplicaRead, kv.ReplicaReadFollower) + txn.SetOption(kv.ReplicaRead, tikvstore.ReplicaReadFollower) } s.txn.changeInvalidToValid(txn) is := domain.GetDomain(s).InfoSchema() diff --git a/session/session_test.go b/session/session_test.go index ee0a68304499f..b147b536c3974 100644 --- a/session/session_test.go +++ b/session/session_test.go @@ -50,6 +50,7 @@ import ( "github.com/pingcap/tidb/store/mockstore" "github.com/pingcap/tidb/store/mockstore/mocktikv" "github.com/pingcap/tidb/store/tikv" + tikvstore "github.com/pingcap/tidb/store/tikv/kv" "github.com/pingcap/tidb/store/tikv/mockstore/cluster" "github.com/pingcap/tidb/store/tikv/oracle" "github.com/pingcap/tidb/table/tables" @@ -2619,13 +2620,13 @@ func (s *testSessionSuite3) TestSetTransactionIsolationOneShot(c *C) { // Check isolation level is set to read committed. ctx := context.WithValue(context.Background(), "CheckSelectRequestHook", func(req *kv.Request) { - c.Assert(req.IsolationLevel, Equals, kv.SI) + c.Assert(req.IsolationLevel, Equals, tikvstore.SI) }) tk.Se.Execute(ctx, "select * from t where k = 1") // Check it just take effect for one time. 
ctx = context.WithValue(context.Background(), "CheckSelectRequestHook", func(req *kv.Request) { - c.Assert(req.IsolationLevel, Equals, kv.SI) + c.Assert(req.IsolationLevel, Equals, tikvstore.SI) }) tk.Se.Execute(ctx, "select * from t where k = 1") @@ -3057,11 +3058,11 @@ func (s *testSessionSuite2) TestReplicaRead(c *C) { tk := testkit.NewTestKit(c, s.store) tk.Se, err = session.CreateSession4Test(s.store) c.Assert(err, IsNil) - c.Assert(tk.Se.GetSessionVars().GetReplicaRead(), Equals, kv.ReplicaReadLeader) + c.Assert(tk.Se.GetSessionVars().GetReplicaRead(), Equals, tikvstore.ReplicaReadLeader) tk.MustExec("set @@tidb_replica_read = 'follower';") - c.Assert(tk.Se.GetSessionVars().GetReplicaRead(), Equals, kv.ReplicaReadFollower) + c.Assert(tk.Se.GetSessionVars().GetReplicaRead(), Equals, tikvstore.ReplicaReadFollower) tk.MustExec("set @@tidb_replica_read = 'leader';") - c.Assert(tk.Se.GetSessionVars().GetReplicaRead(), Equals, kv.ReplicaReadLeader) + c.Assert(tk.Se.GetSessionVars().GetReplicaRead(), Equals, tikvstore.ReplicaReadLeader) } func (s *testSessionSuite3) TestIsolationRead(c *C) { @@ -3146,12 +3147,12 @@ func (s *testSessionSuite2) TestStmtHints(c *C) { c.Assert(tk.Se.GetSessionVars().GetEnableCascadesPlanner(), IsTrue) // Test READ_CONSISTENT_REPLICA hint - tk.Se.GetSessionVars().SetReplicaRead(kv.ReplicaReadLeader) + tk.Se.GetSessionVars().SetReplicaRead(tikvstore.ReplicaReadLeader) tk.MustExec("select /*+ READ_CONSISTENT_REPLICA() */ 1;") - c.Assert(tk.Se.GetSessionVars().GetReplicaRead(), Equals, kv.ReplicaReadFollower) + c.Assert(tk.Se.GetSessionVars().GetReplicaRead(), Equals, tikvstore.ReplicaReadFollower) tk.MustExec("select /*+ READ_CONSISTENT_REPLICA(), READ_CONSISTENT_REPLICA() */ 1;") c.Assert(tk.Se.GetSessionVars().StmtCtx.GetWarnings(), HasLen, 1) - c.Assert(tk.Se.GetSessionVars().GetReplicaRead(), Equals, kv.ReplicaReadFollower) + c.Assert(tk.Se.GetSessionVars().GetReplicaRead(), Equals, tikvstore.ReplicaReadFollower) } func (s *testSessionSuite3) TestPessimisticLockOnPartition(c *C) { diff --git a/sessionctx/variable/session.go b/sessionctx/variable/session.go index 02dc35868d42e..fd0bcd3f9af82 100644 --- a/sessionctx/variable/session.go +++ b/sessionctx/variable/session.go @@ -42,8 +42,8 @@ import ( "github.com/pingcap/tidb/meta/autoid" "github.com/pingcap/tidb/metrics" "github.com/pingcap/tidb/sessionctx/stmtctx" + tikvstore "github.com/pingcap/tidb/store/tikv/kv" "github.com/pingcap/tidb/store/tikv/oracle" - "github.com/pingcap/tidb/store/tikv/storeutil" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/chunk" "github.com/pingcap/tidb/util/collate" @@ -712,7 +712,7 @@ type SessionVars struct { enableIndexMerge bool // replicaRead is used for reading data from replicas, only follower is supported at this time. - replicaRead kv.ReplicaReadType + replicaRead tikvstore.ReplicaReadType // IsolationReadEngines is used to isolation read, tidb only read from the stores whose engine type is in the engines. 
IsolationReadEngines map[kv.StoreType]struct{} @@ -969,7 +969,7 @@ func NewSessionVars() *SessionVars { WaitSplitRegionTimeout: DefWaitSplitRegionTimeout, enableIndexMerge: false, EnableNoopFuncs: DefTiDBEnableNoopFuncs, - replicaRead: kv.ReplicaReadLeader, + replicaRead: tikvstore.ReplicaReadLeader, AllowRemoveAutoInc: DefTiDBAllowRemoveAutoInc, UsePlanBaselines: DefTiDBUsePlanBaselines, EvolvePlanBaselines: DefTiDBEvolvePlanBaselines, @@ -1118,15 +1118,15 @@ func (s *SessionVars) SetEnableIndexMerge(val bool) { } // GetReplicaRead get ReplicaRead from sql hints and SessionVars.replicaRead. -func (s *SessionVars) GetReplicaRead() kv.ReplicaReadType { +func (s *SessionVars) GetReplicaRead() tikvstore.ReplicaReadType { if s.StmtCtx.HasReplicaReadHint { - return kv.ReplicaReadType(s.StmtCtx.ReplicaRead) + return tikvstore.ReplicaReadType(s.StmtCtx.ReplicaRead) } return s.replicaRead } // SetReplicaRead set SessionVars.replicaRead. -func (s *SessionVars) SetReplicaRead(val kv.ReplicaReadType) { +func (s *SessionVars) SetReplicaRead(val tikvstore.ReplicaReadType) { s.replicaRead = val } @@ -1589,11 +1589,11 @@ func (s *SessionVars) SetSystemVar(name string, val string) error { s.EnableNoopFuncs = TiDBOptOn(val) case TiDBReplicaRead: if strings.EqualFold(val, "follower") { - s.SetReplicaRead(kv.ReplicaReadFollower) + s.SetReplicaRead(tikvstore.ReplicaReadFollower) } else if strings.EqualFold(val, "leader-and-follower") { - s.SetReplicaRead(kv.ReplicaReadMixed) + s.SetReplicaRead(tikvstore.ReplicaReadMixed) } else if strings.EqualFold(val, "leader") || len(val) == 0 { - s.SetReplicaRead(kv.ReplicaReadLeader) + s.SetReplicaRead(tikvstore.ReplicaReadLeader) } case TiDBAllowRemoveAutoInc: s.AllowRemoveAutoInc = TiDBOptOn(val) @@ -1619,7 +1619,7 @@ func (s *SessionVars) SetSystemVar(name string, val string) error { } } case TiDBStoreLimit: - storeutil.StoreLimit.Store(tidbOptInt64(val, DefTiDBStoreLimit)) + tikvstore.StoreLimit.Store(tidbOptInt64(val, DefTiDBStoreLimit)) case TiDBMetricSchemaStep: s.MetricSchemaStep = tidbOptInt64(val, DefTiDBMetricSchemaStep) case TiDBMetricSchemaRangeDuration: diff --git a/sessionctx/variable/varsutil_test.go b/sessionctx/variable/varsutil_test.go index 3668e16db1c72..1c5e3bf8c9098 100644 --- a/sessionctx/variable/varsutil_test.go +++ b/sessionctx/variable/varsutil_test.go @@ -22,7 +22,7 @@ import ( "github.com/pingcap/parser/mysql" "github.com/pingcap/parser/terror" "github.com/pingcap/tidb/config" - "github.com/pingcap/tidb/kv" + tikvstore "github.com/pingcap/tidb/store/tikv/kv" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/testleak" ) @@ -434,19 +434,19 @@ func (s *testVarsutilSuite) TestVarsutil(c *C) { val, err = GetSessionSystemVar(v, TiDBReplicaRead) c.Assert(err, IsNil) c.Assert(val, Equals, "follower") - c.Assert(v.GetReplicaRead(), Equals, kv.ReplicaReadFollower) + c.Assert(v.GetReplicaRead(), Equals, tikvstore.ReplicaReadFollower) err = SetSessionSystemVar(v, TiDBReplicaRead, types.NewStringDatum("leader")) c.Assert(err, IsNil) val, err = GetSessionSystemVar(v, TiDBReplicaRead) c.Assert(err, IsNil) c.Assert(val, Equals, "leader") - c.Assert(v.GetReplicaRead(), Equals, kv.ReplicaReadLeader) + c.Assert(v.GetReplicaRead(), Equals, tikvstore.ReplicaReadLeader) err = SetSessionSystemVar(v, TiDBReplicaRead, types.NewStringDatum("leader-and-follower")) c.Assert(err, IsNil) val, err = GetSessionSystemVar(v, TiDBReplicaRead) c.Assert(err, IsNil) c.Assert(val, Equals, "leader-and-follower") - c.Assert(v.GetReplicaRead(), Equals, 
kv.ReplicaReadMixed) + c.Assert(v.GetReplicaRead(), Equals, tikvstore.ReplicaReadMixed) err = SetSessionSystemVar(v, TiDBEnableStmtSummary, types.NewStringDatum("ON")) c.Assert(err, IsNil) diff --git a/store/tikv/kv/kv.go b/store/tikv/kv/kv.go new file mode 100644 index 0000000000000..464c35c8c7413 --- /dev/null +++ b/store/tikv/kv/kv.go @@ -0,0 +1,11 @@ +package kv + +// IsoLevel is the transaction's isolation level. +type IsoLevel int + +const ( + // SI stands for 'snapshot isolation'. + SI IsoLevel = iota + // RC stands for 'read committed'. + RC +) diff --git a/store/tikv/storeutil/store_vars.go b/store/tikv/kv/store_vars.go similarity index 50% rename from store/tikv/storeutil/store_vars.go rename to store/tikv/kv/store_vars.go index dad3f1f6fa6f0..5f65f927bffb9 100644 --- a/store/tikv/storeutil/store_vars.go +++ b/store/tikv/kv/store_vars.go @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package storeutil +package kv import ( "go.uber.org/atomic" @@ -19,3 +19,21 @@ import ( // StoreLimit will update from config reload and global variable set. var StoreLimit atomic.Int64 + +// ReplicaReadType is the type of replica to read data from +type ReplicaReadType byte + +const ( + // ReplicaReadLeader stands for 'read from leader'. + ReplicaReadLeader ReplicaReadType = 1 << iota + // ReplicaReadFollower stands for 'read from follower'. + ReplicaReadFollower + // ReplicaReadMixed stands for 'read from leader and follower and learner'. + ReplicaReadMixed +) + +// IsFollowerRead checks if follower is going to be used to read data. +func (r ReplicaReadType) IsFollowerRead() bool { + // In some cases the default value is 0, which should be treated as `ReplicaReadLeader`. + return r != ReplicaReadLeader && r != 0 +} diff --git a/store/tikv/region_cache.go b/store/tikv/region_cache.go index b6e39c8c7b3bc..57c1ee57f2d77 100644 --- a/store/tikv/region_cache.go +++ b/store/tikv/region_cache.go @@ -30,8 +30,9 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/parser/terror" "github.com/pingcap/tidb/ddl/placement" - "github.com/pingcap/tidb/kv" + tidbkv "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/store/tikv/config" + "github.com/pingcap/tidb/store/tikv/kv" "github.com/pingcap/tidb/store/tikv/logutil" "github.com/pingcap/tidb/store/tikv/metrics" pd "github.com/tikv/pd/client" @@ -567,8 +568,8 @@ func (c *RegionCache) GetTiFlashRPCContext(bo *Backoffer, id RegionVerID) (*RPCC // KeyLocation is the region and range that a key is located. type KeyLocation struct { Region RegionVerID - StartKey kv.Key - EndKey kv.Key + StartKey tidbkv.Key + EndKey tidbkv.Key } // Contains checks if key is in [StartKey, EndKey). diff --git a/store/tikv/region_cache_test.go b/store/tikv/region_cache_test.go index 80b89afc3cc83..c47b154ba0bac 100644 --- a/store/tikv/region_cache_test.go +++ b/store/tikv/region_cache_test.go @@ -27,8 +27,8 @@ import ( .
"github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/errorpb" "github.com/pingcap/kvproto/pkg/metapb" - "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/store/mockstore/mocktikv" + "github.com/pingcap/tidb/store/tikv/kv" pd "github.com/tikv/pd/client" ) diff --git a/store/tikv/region_request.go b/store/tikv/region_request.go index 030b4259789df..59db371394170 100644 --- a/store/tikv/region_request.go +++ b/store/tikv/region_request.go @@ -32,10 +32,10 @@ import ( "github.com/pingcap/kvproto/pkg/coprocessor" "github.com/pingcap/kvproto/pkg/errorpb" "github.com/pingcap/kvproto/pkg/kvrpcpb" - "github.com/pingcap/tidb/kv" + tidbkv "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/store/tikv/kv" "github.com/pingcap/tidb/store/tikv/logutil" "github.com/pingcap/tidb/store/tikv/metrics" - "github.com/pingcap/tidb/store/tikv/storeutil" "github.com/pingcap/tidb/store/tikv/tikvrpc" "github.com/pingcap/tidb/store/tikv/util" "github.com/pingcap/tidb/util/execdetails" @@ -184,7 +184,7 @@ func (s *RegionRequestSender) SetRPCError(err error) { // SendReq sends a request to tikv server. func (s *RegionRequestSender) SendReq(bo *Backoffer, req *tikvrpc.Request, regionID RegionVerID, timeout time.Duration) (*tikvrpc.Response, error) { - resp, _, err := s.SendReqCtx(bo, req, regionID, timeout, kv.TiKV) + resp, _, err := s.SendReqCtx(bo, req, regionID, timeout, tidbkv.TiKV) return resp, err } @@ -192,19 +192,19 @@ func (s *RegionRequestSender) getRPCContext( bo *Backoffer, req *tikvrpc.Request, regionID RegionVerID, - sType kv.StoreType, + sType tidbkv.StoreType, opts ...StoreSelectorOption, ) (*RPCContext, error) { switch sType { - case kv.TiKV: + case tidbkv.TiKV: var seed uint32 if req.ReplicaReadSeed != nil { seed = *req.ReplicaReadSeed } return s.regionCache.GetTiKVRPCContext(bo, regionID, req.ReplicaReadType, seed, opts...) - case kv.TiFlash: + case tidbkv.TiFlash: return s.regionCache.GetTiFlashRPCContext(bo, regionID) - case kv.TiDB: + case tidbkv.TiDB: return &RPCContext{Addr: s.storeAddr}, nil default: return nil, errors.Errorf("unsupported storage type: %v", sType) @@ -217,7 +217,7 @@ func (s *RegionRequestSender) SendReqCtx( req *tikvrpc.Request, regionID RegionVerID, timeout time.Duration, - sType kv.StoreType, + sType tidbkv.StoreType, opts ...StoreSelectorOption, ) ( resp *tikvrpc.Response, @@ -253,11 +253,11 @@ func (s *RegionRequestSender) SendReqCtx( bo.vars.Hook("callBackofferHook", bo.vars) } case "requestTiDBStoreError": - if sType == kv.TiDB { + if sType == tidbkv.TiDB { failpoint.Return(nil, nil, ErrTiKVServerTimeout) } case "requestTiFlashError": - if sType == kv.TiFlash { + if sType == tidbkv.TiFlash { failpoint.Return(nil, nil, ErrTiFlashServerTimeout) } } @@ -386,7 +386,7 @@ func (s *RegionRequestSender) sendReqToRegion(bo *Backoffer, rpcCtx *RPCContext, return nil, false, errors.Trace(e) } // judge the store limit switch. 
- if limit := storeutil.StoreLimit.Load(); limit > 0 { + if limit := kv.StoreLimit.Load(); limit > 0 { if err := s.getStoreToken(rpcCtx.Store, limit); err != nil { return nil, false, err } diff --git a/store/tikv/region_request_test.go b/store/tikv/region_request_test.go index a066c71518a37..7fcfa4d21a855 100644 --- a/store/tikv/region_request_test.go +++ b/store/tikv/region_request_test.go @@ -28,11 +28,11 @@ import ( "github.com/pingcap/kvproto/pkg/kvrpcpb" "github.com/pingcap/kvproto/pkg/mpp" "github.com/pingcap/kvproto/pkg/tikvpb" + "github.com/pingcap/tidb/store/tikv/kv" - "github.com/pingcap/tidb/kv" + tidbkv "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/store/mockstore/mocktikv" "github.com/pingcap/tidb/store/tikv/config" - "github.com/pingcap/tidb/store/tikv/storeutil" "github.com/pingcap/tidb/store/tikv/tikvrpc" "google.golang.org/grpc" ) @@ -115,22 +115,22 @@ func (s *testRegionRequestToThreeStoresSuite) TestGetRPCContext(c *C) { var regionID = RegionVerID{s.regionID, 0, 0} req := tikvrpc.NewReplicaReadRequest(tikvrpc.CmdGet, &kvrpcpb.GetRequest{}, kv.ReplicaReadLeader, &seed) - rpcCtx, err := s.regionRequestSender.getRPCContext(s.bo, req, regionID, kv.TiKV) + rpcCtx, err := s.regionRequestSender.getRPCContext(s.bo, req, regionID, tidbkv.TiKV) c.Assert(err, IsNil) c.Assert(rpcCtx.Peer.Id, Equals, s.leaderPeer) req.ReplicaReadType = kv.ReplicaReadFollower - rpcCtx, err = s.regionRequestSender.getRPCContext(s.bo, req, regionID, kv.TiKV) + rpcCtx, err = s.regionRequestSender.getRPCContext(s.bo, req, regionID, tidbkv.TiKV) c.Assert(err, IsNil) c.Assert(rpcCtx.Peer.Id, Not(Equals), s.leaderPeer) req.ReplicaReadType = kv.ReplicaReadMixed - rpcCtx, err = s.regionRequestSender.getRPCContext(s.bo, req, regionID, kv.TiKV) + rpcCtx, err = s.regionRequestSender.getRPCContext(s.bo, req, regionID, tidbkv.TiKV) c.Assert(err, IsNil) c.Assert(rpcCtx.Peer.Id, Equals, s.leaderPeer) seed = 1 - rpcCtx, err = s.regionRequestSender.getRPCContext(s.bo, req, regionID, kv.TiKV) + rpcCtx, err = s.regionRequestSender.getRPCContext(s.bo, req, regionID, tidbkv.TiKV) c.Assert(err, IsNil) c.Assert(rpcCtx.Peer.Id, Not(Equals), s.leaderPeer) } @@ -169,15 +169,15 @@ func (s *testRegionRequestToThreeStoresSuite) TestStoreTokenLimit(c *C) { region, err := s.cache.LocateRegionByID(s.bo, s.regionID) c.Assert(err, IsNil) c.Assert(region, NotNil) - oldStoreLimit := storeutil.StoreLimit.Load() - storeutil.StoreLimit.Store(500) + oldStoreLimit := kv.StoreLimit.Load() + kv.StoreLimit.Store(500) s.cache.getStoreByStoreID(s.storeIDs[0]).tokenCount.Store(500) // cause there is only one region in this cluster, regionID maps this leader. 
resp, err := s.regionRequestSender.SendReq(s.bo, req, region.Region, time.Second) c.Assert(err, NotNil) c.Assert(resp, IsNil) c.Assert(err.Error(), Equals, "[tikv:9008]Store token is up to the limit, store id = 1") - storeutil.StoreLimit.Store(oldStoreLimit) + kv.StoreLimit.Store(oldStoreLimit) } func (s *testRegionRequestToSingleStoreSuite) TestOnSendFailedWithStoreRestart(c *C) { @@ -252,12 +252,12 @@ func (s *testRegionRequestToSingleStoreSuite) TestSendReqCtx(c *C) { region, err := s.cache.LocateRegionByID(s.bo, s.region) c.Assert(err, IsNil) c.Assert(region, NotNil) - resp, ctx, err := s.regionRequestSender.SendReqCtx(s.bo, req, region.Region, time.Second, kv.TiKV) + resp, ctx, err := s.regionRequestSender.SendReqCtx(s.bo, req, region.Region, time.Second, tidbkv.TiKV) c.Assert(err, IsNil) c.Assert(resp.Resp, NotNil) c.Assert(ctx, NotNil) req.ReplicaRead = true - resp, ctx, err = s.regionRequestSender.SendReqCtx(s.bo, req, region.Region, time.Second, kv.TiKV) + resp, ctx, err = s.regionRequestSender.SendReqCtx(s.bo, req, region.Region, time.Second, tidbkv.TiKV) c.Assert(err, IsNil) c.Assert(resp.Resp, NotNil) c.Assert(ctx, NotNil) @@ -614,7 +614,7 @@ func (s *testRegionRequestToThreeStoresSuite) TestForwarding(c *C) { Key: []byte("k"), Value: []byte("v1"), }) - resp, ctx, err := s.regionRequestSender.SendReqCtx(bo, req, loc.Region, time.Second, kv.TiKV) + resp, ctx, err := s.regionRequestSender.SendReqCtx(bo, req, loc.Region, time.Second, tidbkv.TiKV) c.Assert(err, IsNil) regionErr, err := resp.GetRegionError() c.Assert(err, IsNil) @@ -642,7 +642,7 @@ func (s *testRegionRequestToThreeStoresSuite) TestForwarding(c *C) { atomic.StoreUint32(&storeState, uint32(unreachable)) req = tikvrpc.NewRequest(tikvrpc.CmdRawGet, &kvrpcpb.RawGetRequest{Key: []byte("k")}) - resp, ctx, err = s.regionRequestSender.SendReqCtx(bo, req, loc.Region, time.Second, kv.TiKV) + resp, ctx, err = s.regionRequestSender.SendReqCtx(bo, req, loc.Region, time.Second, tidbkv.TiKV) c.Assert(err, IsNil) regionErr, err = resp.GetRegionError() c.Assert(err, IsNil) @@ -675,7 +675,7 @@ func (s *testRegionRequestToThreeStoresSuite) TestForwarding(c *C) { Key: []byte("k"), Value: []byte("v2"), }) - resp, ctx, err = s.regionRequestSender.SendReqCtx(bo, req, loc.Region, time.Second, kv.TiKV) + resp, ctx, err = s.regionRequestSender.SendReqCtx(bo, req, loc.Region, time.Second, tidbkv.TiKV) c.Assert(err, IsNil) regionErr, err = resp.GetRegionError() c.Assert(err, IsNil) @@ -695,7 +695,7 @@ func (s *testRegionRequestToThreeStoresSuite) TestForwarding(c *C) { Key: []byte("k"), Value: []byte("v2"), }) - resp, ctx, err = s.regionRequestSender.SendReqCtx(bo, req, loc.Region, time.Second, kv.TiKV) + resp, ctx, err = s.regionRequestSender.SendReqCtx(bo, req, loc.Region, time.Second, tidbkv.TiKV) c.Assert(err, IsNil) regionErr, err = resp.GetRegionError() c.Assert(err, IsNil) diff --git a/store/tikv/snapshot.go b/store/tikv/snapshot.go index eba840db15190..c1ecc3ccbac74 100644 --- a/store/tikv/snapshot.go +++ b/store/tikv/snapshot.go @@ -30,7 +30,8 @@ import ( "github.com/pingcap/failpoint" pb "github.com/pingcap/kvproto/pkg/kvrpcpb" "github.com/pingcap/kvproto/pkg/metapb" - "github.com/pingcap/tidb/kv" + tidbkv "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/store/tikv/kv" "github.com/pingcap/tidb/store/tikv/logutil" "github.com/pingcap/tidb/store/tikv/metrics" "github.com/pingcap/tidb/store/tikv/oracle" @@ -47,7 +48,7 @@ const ( maxTimestamp = math.MaxUint64 ) -// KVSnapshot implements the kv.Snapshot interface. 
+// KVSnapshot implements the tidbkv.Snapshot interface. type KVSnapshot struct { store *KVStore version uint64 @@ -56,7 +57,7 @@ type KVSnapshot struct { notFillCache bool syncLog bool keyOnly bool - vars *kv.Variables + vars *tidbkv.Variables replicaReadSeed uint32 resolvedLocks *util.TSSet @@ -94,7 +95,7 @@ func newTiKVSnapshot(store *KVStore, ts uint64, replicaReadSeed uint32) *KVSnaps store: store, version: ts, priority: pb.CommandPri_Normal, - vars: kv.DefaultVars, + vars: tidbkv.DefaultVars, replicaReadSeed: replicaReadSeed, resolvedLocks: util.NewTSSet(5), } @@ -117,12 +118,12 @@ func (s *KVSnapshot) setSnapshotTS(ts uint64) { // BatchGet gets all the keys' value from kv-server and returns a map contains key/value pairs. // The map will not contain nonexistent keys. -func (s *KVSnapshot) BatchGet(ctx context.Context, keys []kv.Key) (map[string][]byte, error) { +func (s *KVSnapshot) BatchGet(ctx context.Context, keys []tidbkv.Key) (map[string][]byte, error) { // Check the cached value first. m := make(map[string][]byte) s.mu.RLock() if s.mu.cached != nil { - tmp := make([]kv.Key, 0, len(keys)) + tmp := make([]tidbkv.Key, 0, len(keys)) for _, key := range keys { if val, ok := s.mu.cached[string(key)]; ok { atomic.AddInt64(&s.mu.hitCnt, 1) @@ -141,7 +142,7 @@ func (s *KVSnapshot) BatchGet(ctx context.Context, keys []kv.Key) (map[string][] return m, nil } - // We want [][]byte instead of []kv.Key, use some magic to save memory. + // We want [][]byte instead of []tidbkv.Key, use some magic to save memory. bytesKeys := *(*[][]byte)(unsafe.Pointer(&keys)) ctx = context.WithValue(ctx, TxnStartKey, s.version) bo := NewBackofferWithVars(ctx, batchGetMaxBackoff, s.vars) @@ -294,7 +295,7 @@ func (s *KVSnapshot) batchGetSingleRegion(bo *Backoffer, batch batchKeys, collec if len(matchStoreLabels) > 0 { ops = append(ops, WithMatchLabels(matchStoreLabels)) } - resp, _, _, err := cli.SendReqCtx(bo, req, batch.region, ReadTimeoutMedium, kv.TiKV, "", ops...) + resp, _, _, err := cli.SendReqCtx(bo, req, batch.region, ReadTimeoutMedium, tidbkv.TiKV, "", ops...) if err != nil { return errors.Trace(err) @@ -368,7 +369,7 @@ func (s *KVSnapshot) batchGetSingleRegion(bo *Backoffer, batch batchKeys, collec } // Get gets the value for key k from snapshot. -func (s *KVSnapshot) Get(ctx context.Context, k kv.Key) ([]byte, error) { +func (s *KVSnapshot) Get(ctx context.Context, k tidbkv.Key) ([]byte, error) { defer func(start time.Time) { metrics.TxnCmdHistogramWithGet.Observe(time.Since(start).Seconds()) @@ -387,12 +388,12 @@ func (s *KVSnapshot) Get(ctx context.Context, k kv.Key) ([]byte, error) { } if len(val) == 0 { - return nil, kv.ErrNotExist + return nil, tidbkv.ErrNotExist } return val, nil } -func (s *KVSnapshot) get(ctx context.Context, bo *Backoffer, k kv.Key) ([]byte, error) { +func (s *KVSnapshot) get(ctx context.Context, bo *Backoffer, k tidbkv.Key) ([]byte, error) { // Check the cached values first. s.mu.RLock() if s.mu.cached != nil { @@ -457,7 +458,7 @@ func (s *KVSnapshot) get(ctx context.Context, bo *Backoffer, k kv.Key) ([]byte, if err != nil { return nil, errors.Trace(err) } - resp, _, _, err := cli.SendReqCtx(bo, req, loc.Region, readTimeoutShort, kv.TiKV, "", ops...) + resp, _, _, err := cli.SendReqCtx(bo, req, loc.Region, readTimeoutShort, tidbkv.TiKV, "", ops...) if err != nil { return nil, errors.Trace(err) } @@ -536,68 +537,68 @@ func (s *KVSnapshot) mergeExecDetail(detail *pb.ExecDetailsV2) { } // Iter return a list of key-value pair after `k`. 
-func (s *KVSnapshot) Iter(k kv.Key, upperBound kv.Key) (kv.Iterator, error) { +func (s *KVSnapshot) Iter(k tidbkv.Key, upperBound tidbkv.Key) (tidbkv.Iterator, error) { scanner, err := newScanner(s, k, upperBound, scanBatchSize, false) return scanner, errors.Trace(err) } // IterReverse creates a reversed Iterator positioned on the first entry which key is less than k. -func (s *KVSnapshot) IterReverse(k kv.Key) (kv.Iterator, error) { +func (s *KVSnapshot) IterReverse(k tidbkv.Key) (tidbkv.Iterator, error) { scanner, err := newScanner(s, nil, k, scanBatchSize, true) return scanner, errors.Trace(err) } // SetOption sets an option with a value, when val is nil, uses the default // value of this option. Only ReplicaRead is supported for snapshot -func (s *KVSnapshot) SetOption(opt kv.Option, val interface{}) { +func (s *KVSnapshot) SetOption(opt tidbkv.Option, val interface{}) { switch opt { - case kv.IsolationLevel: + case tidbkv.IsolationLevel: s.isolationLevel = val.(kv.IsoLevel) - case kv.Priority: + case tidbkv.Priority: s.priority = PriorityToPB(val.(int)) - case kv.NotFillCache: + case tidbkv.NotFillCache: s.notFillCache = val.(bool) - case kv.SyncLog: + case tidbkv.SyncLog: s.syncLog = val.(bool) - case kv.KeyOnly: + case tidbkv.KeyOnly: s.keyOnly = val.(bool) - case kv.SnapshotTS: + case tidbkv.SnapshotTS: s.setSnapshotTS(val.(uint64)) - case kv.ReplicaRead: + case tidbkv.ReplicaRead: s.mu.Lock() s.mu.replicaRead = val.(kv.ReplicaReadType) s.mu.Unlock() - case kv.TaskID: + case tidbkv.TaskID: s.mu.Lock() s.mu.taskID = val.(uint64) s.mu.Unlock() - case kv.CollectRuntimeStats: + case tidbkv.CollectRuntimeStats: s.mu.Lock() s.mu.stats = val.(*SnapshotRuntimeStats) s.mu.Unlock() - case kv.SampleStep: + case tidbkv.SampleStep: s.sampleStep = val.(uint32) - case kv.IsStalenessReadOnly: + case tidbkv.IsStalenessReadOnly: s.mu.Lock() s.mu.isStaleness = val.(bool) s.mu.Unlock() - case kv.MatchStoreLabels: + case tidbkv.MatchStoreLabels: s.mu.Lock() s.mu.matchStoreLabels = val.([]*metapb.StoreLabel) s.mu.Unlock() - case kv.TxnScope: + case tidbkv.TxnScope: s.txnScope = val.(string) } } // DelOption deletes an option. 
-func (s *KVSnapshot) DelOption(opt kv.Option) { +func (s *KVSnapshot) DelOption(opt tidbkv.Option) { switch opt { - case kv.ReplicaRead: + case tidbkv.ReplicaRead: s.mu.Lock() s.mu.replicaRead = kv.ReplicaReadLeader s.mu.Unlock() - case kv.CollectRuntimeStats: + case tidbkv.CollectRuntimeStats: s.mu.Lock() s.mu.stats = nil s.mu.Unlock() @@ -636,7 +637,7 @@ func extractKeyErr(keyErr *pb.KeyError) error { } if keyErr.Retryable != "" { notFoundDetail := prettyLockNotFoundKey(keyErr.GetRetryable()) - return kv.ErrTxnRetryable.GenWithStackByArgs(keyErr.GetRetryable() + " " + notFoundDetail) + return tidbkv.ErrTxnRetryable.GenWithStackByArgs(keyErr.GetRetryable() + " " + notFoundDetail) } if keyErr.Abort != "" { err := errors.Errorf("tikv aborts txn: %s", keyErr.GetAbort()) @@ -684,7 +685,7 @@ func newWriteConflictError(conflict *pb.WriteConflict) error { prettyWriteKey(&buf, conflict.Key) buf.WriteString(" primary=") prettyWriteKey(&buf, conflict.Primary) - return kv.ErrWriteConflict.FastGenByArgs(conflict.StartTs, conflict.ConflictTs, conflict.ConflictCommitTs, buf.String()) + return tidbkv.ErrWriteConflict.FastGenByArgs(conflict.StartTs, conflict.ConflictTs, conflict.ConflictCommitTs, buf.String()) } func prettyWriteKey(buf *bytes.Buffer, key []byte) { diff --git a/store/tikv/tikvrpc/tikvrpc.go b/store/tikv/tikvrpc/tikvrpc.go index bba680274f2c0..698c4341bda60 100644 --- a/store/tikv/tikvrpc/tikvrpc.go +++ b/store/tikv/tikvrpc/tikvrpc.go @@ -27,7 +27,8 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/mpp" "github.com/pingcap/kvproto/pkg/tikvpb" - "github.com/pingcap/tidb/kv" + tidbkv "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/store/tikv/kv" ) // CmdType represents the concrete request type in Request or response type in Response. @@ -175,7 +176,7 @@ type Request struct { kvrpcpb.Context ReplicaReadType kv.ReplicaReadType // different from `kvrpcpb.Context.ReplicaRead` ReplicaReadSeed *uint32 // pointer to follower read seed in snapshot/coprocessor - StoreTp kv.StoreType + StoreTp tidbkv.StoreType // ForwardedHost is the address of a store which will handle the request. It's different from // the address the request sent to. 
// If it's not empty, the store which receive the request will forward it to From 7d113f0dbe7944d6c95c232cb489de7ea54cc222 Mon Sep 17 00:00:00 2001 From: Andrew Date: Fri, 19 Mar 2021 23:11:36 +0800 Subject: [PATCH 44/44] ddl: add truncate partition all support (#23040) --- ddl/db_test.go | 13 +++++++++++++ ddl/ddl_api.go | 17 ++++++++++++----- go.mod | 2 +- go.sum | 4 ++-- 4 files changed, 28 insertions(+), 8 deletions(-) diff --git a/ddl/db_test.go b/ddl/db_test.go index bf9686e14423e..d45a29fb84db5 100644 --- a/ddl/db_test.go +++ b/ddl/db_test.go @@ -6592,3 +6592,16 @@ func (s *testSerialDBSuite) TestIssue22819(c *C) { _, err := tk1.Exec("commit") c.Assert(err, ErrorMatches, ".*8028.*Information schema is changed during the execution of the statement.*") } + +func (s *testSerialSuite) TestTruncateAllPartitions(c *C) { + tk1 := testkit.NewTestKit(c, s.store) + tk1.MustExec("use test;") + tk1.MustExec("drop table if exists partition_table;") + defer func() { + tk1.MustExec("drop table if exists partition_table;") + }() + tk1.MustExec("create table partition_table (v int) partition by hash (v) partitions 10;") + tk1.MustExec("insert into partition_table values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9),(10);") + tk1.MustExec("alter table partition_table truncate partition all;") + tk1.MustQuery("select count(*) from partition_table;").Check(testkit.Rows("0")) +} diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go index 57a8f203b6575..7f34f0bddf4df 100644 --- a/ddl/ddl_api.go +++ b/ddl/ddl_api.go @@ -2971,12 +2971,19 @@ func (d *ddl) TruncateTablePartition(ctx sessionctx.Context, ident ast.Ident, sp } pids := make([]int64, len(spec.PartitionNames)) - for i, name := range spec.PartitionNames { - pid, err := tables.FindPartitionByName(meta, name.L) - if err != nil { - return errors.Trace(err) + if spec.OnAllPartitions { + pids = make([]int64, len(meta.GetPartitionInfo().Definitions)) + for i, def := range meta.GetPartitionInfo().Definitions { + pids[i] = def.ID + } + } else { + for i, name := range spec.PartitionNames { + pid, err := tables.FindPartitionByName(meta, name.L) + if err != nil { + return errors.Trace(err) + } + pids[i] = pid } - pids[i] = pid } job := &model.Job{ diff --git a/go.mod b/go.mod index b4c75f5b838d5..2ccd193f0283a 100644 --- a/go.mod +++ b/go.mod @@ -50,7 +50,7 @@ require ( github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989 github.com/pingcap/kvproto v0.0.0-20210308063835-39b884695fb8 github.com/pingcap/log v0.0.0-20201112100606-8f1e84a3abc8 - github.com/pingcap/parser v0.0.0-20210311132237-9841cb715606 + github.com/pingcap/parser v0.0.0-20210314080929-ed8900c94180 github.com/pingcap/sysutil v0.0.0-20210221112134-a07bda3bde99 github.com/pingcap/tidb-tools v4.0.9-0.20201127090955-2707c97b3853+incompatible github.com/pingcap/tipb v0.0.0-20210309080453-72c4feaa6da7 diff --git a/go.sum b/go.sum index 95676245bcffd..099ab001ce9a5 100644 --- a/go.sum +++ b/go.sum @@ -449,8 +449,8 @@ github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9/go.mod h1:4rbK1p9ILyIf github.com/pingcap/log v0.0.0-20200511115504-543df19646ad/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= github.com/pingcap/log v0.0.0-20201112100606-8f1e84a3abc8 h1:M+DNpOu/I3uDmwee6vcnoPd6GgSMqND4gxvDQ/W584U= github.com/pingcap/log v0.0.0-20201112100606-8f1e84a3abc8/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= -github.com/pingcap/parser v0.0.0-20210311132237-9841cb715606 h1:/d3CdGzpfCRbdKn38gYH4FGEXgTJCzfI8yroEfKcwbA= -github.com/pingcap/parser v0.0.0-20210311132237-9841cb715606/go.mod 
h1:GbEr2PgY72/4XqPZzmzstlOU/+il/wrjeTNFs6ihsSE= +github.com/pingcap/parser v0.0.0-20210314080929-ed8900c94180 h1:pnTbjUpOib2uhBfm9R9P6n1huAvl5a9CtMysvI493hQ= +github.com/pingcap/parser v0.0.0-20210314080929-ed8900c94180/go.mod h1:GbEr2PgY72/4XqPZzmzstlOU/+il/wrjeTNFs6ihsSE= github.com/pingcap/sysutil v0.0.0-20200206130906-2bfa6dc40bcd/go.mod h1:EB/852NMQ+aRKioCpToQ94Wl7fktV+FNnxf3CX/TTXI= github.com/pingcap/sysutil v0.0.0-20210221112134-a07bda3bde99 h1:/ogXgm4guJzow4UafiyXZ6ciAIPzxImaXYiFvTpKzKY= github.com/pingcap/sysutil v0.0.0-20210221112134-a07bda3bde99/go.mod h1:EB/852NMQ+aRKioCpToQ94Wl7fktV+FNnxf3CX/TTXI=
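The interesting part of this last patch is the new OnAllPartitions branch in TruncateTablePartition: instead of resolving each name via tables.FindPartitionByName, it collects the ID of every partition definition. A simplified, self-contained sketch of that selection logic (partitionDef and collectPartitionIDs are illustrative stand-ins for model.PartitionDefinition and the inlined loops, not real TiDB APIs):

package main

import "fmt"

// partitionDef is a trimmed stand-in for model.PartitionDefinition.
type partitionDef struct {
	ID   int64
	Name string
}

// collectPartitionIDs mirrors the branch added in ddl_api.go: with
// TRUNCATE PARTITION ALL every definition's ID is taken; otherwise only
// the explicitly named partitions are resolved.
func collectPartitionIDs(defs []partitionDef, names []string, all bool) ([]int64, error) {
	if all {
		pids := make([]int64, len(defs))
		for i, def := range defs {
			pids[i] = def.ID
		}
		return pids, nil
	}
	pids := make([]int64, len(names))
	for i, name := range names {
		found := false
		for _, def := range defs {
			if def.Name == name {
				pids[i] = def.ID
				found = true
				break
			}
		}
		if !found {
			return nil, fmt.Errorf("unknown partition %q", name)
		}
	}
	return pids, nil
}

func main() {
	defs := []partitionDef{{1, "p0"}, {2, "p1"}, {3, "p2"}}
	all, _ := collectPartitionIDs(defs, nil, true)
	fmt.Println(all) // [1 2 3]
	one, _ := collectPartitionIDs(defs, []string{"p1"}, false)
	fmt.Println(one) // [2]
}

Either way the collected IDs end up in a single model.Job, so `alter table partition_table truncate partition all` empties every partition in one DDL job, as TestTruncateAllPartitions above verifies.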