From 5bcc12906af32fe8b876e4644b2928d9e04423b3 Mon Sep 17 00:00:00 2001 From: fzzf678 <108643977+fzzf678@users.noreply.github.com> Date: Wed, 22 Feb 2023 13:27:06 +0800 Subject: [PATCH] This is an automated cherry-pick of #41618 Signed-off-by: ti-chi-bot --- planner/core/BUILD.bazel | 2 + planner/core/plan_cache.go | 51 ++++++++ planner/core/plan_cache_lru.go | 40 ++++++- planner/core/plan_cache_lru_test.go | 150 +++++++++++++++++++++++ planner/core/plan_cache_utils.go | 178 +++++++++++++++++++++++----- sessionctx/BUILD.bazel | 2 +- sessionctx/context.go | 7 +- util/plancache/BUILD.bazel | 9 ++ util/plancache/util.go | 29 +++++ 9 files changed, 433 insertions(+), 35 deletions(-) create mode 100644 util/plancache/BUILD.bazel create mode 100644 util/plancache/util.go diff --git a/planner/core/BUILD.bazel b/planner/core/BUILD.bazel index dcbb25512f458..065380c3e7c4f 100644 --- a/planner/core/BUILD.bazel +++ b/planner/core/BUILD.bazel @@ -130,6 +130,7 @@ go_library( "//util/mock", "//util/paging", "//util/parser", + "//util/plancache", "//util/plancodec", "//util/ranger", "//util/rowcodec", @@ -261,6 +262,7 @@ go_test( "//util/kvcache", "//util/logutil", "//util/mock", + "//util/plancache", "//util/plancodec", "//util/ranger", "//util/set", diff --git a/planner/core/plan_cache.go b/planner/core/plan_cache.go index a2002a6f18010..9d7b3bc7eea5a 100644 --- a/planner/core/plan_cache.go +++ b/planner/core/plan_cache.go @@ -38,6 +38,7 @@ import ( "github.com/pingcap/tidb/util/collate" "github.com/pingcap/tidb/util/kvcache" "github.com/pingcap/tidb/util/logutil" + utilpc "github.com/pingcap/tidb/util/plancache" "github.com/pingcap/tidb/util/ranger" "go.uber.org/zap" ) @@ -151,14 +152,30 @@ func GetPlanFromSessionPlanCache(ctx context.Context, sctx sessionctx.Context, } } +<<<<<<< HEAD paramTypes := parseParamTypes(sctx, params) if stmtCtx.UseCache && stmtAst.CachedPlan != nil { // for point query plan if plan, names, ok, err := getPointQueryPlan(stmtAst, sessVars, stmtCtx); ok { +======= + if stmtCtx.UseCache && stmtAst.CachedPlan != nil { // special code path for fast point plan + if plan, names, ok, err := getCachedPointPlan(stmtAst, sessVars, stmtCtx); ok { return plan, names, err } } + matchOpts, err := GetMatchOpts(sctx, stmt.PreparedAst.Stmt, params) + if err != nil { + return nil, nil, err + } + if stmtCtx.UseCache { // for non-point plans + if plan, names, ok, err := getCachedPlan(sctx, isNonPrepared, cacheKey, bindSQL, is, stmt, matchOpts); err != nil || ok { +>>>>>>> 12107e33d3 (planner: refactor plan cache LRU code (#41618)) + return plan, names, err + } + } + +<<<<<<< HEAD if stmtCtx.UseCache { // for non-point plans if plan, names, ok, err := getGeneralPlan(sctx, isGeneralPlanCache, cacheKey, bindSQL, is, stmt, paramTypes); err != nil || ok { @@ -167,6 +184,9 @@ func GetPlanFromSessionPlanCache(ctx context.Context, sctx sessionctx.Context, } return generateNewPlan(ctx, sctx, isGeneralPlanCache, is, stmt, cacheKey, latestSchemaVersion, paramTypes, bindSQL) +======= + return generateNewPlan(ctx, sctx, isNonPrepared, is, stmt, cacheKey, latestSchemaVersion, bindSQL, matchOpts) +>>>>>>> 12107e33d3 (planner: refactor plan cache LRU code (#41618)) } // parseParamTypes get parameters' types in PREPARE statement @@ -212,13 +232,22 @@ func getPointQueryPlan(stmt *ast.Prepared, sessVars *variable.SessionVars, stmtC return plan, names, true, nil } +<<<<<<< HEAD func getGeneralPlan(sctx sessionctx.Context, isGeneralPlanCache bool, cacheKey kvcache.Key, bindSQL string, is infoschema.InfoSchema, stmt 
*PlanCacheStmt, paramTypes []*types.FieldType) (Plan, +======= +func getCachedPlan(sctx sessionctx.Context, isNonPrepared bool, cacheKey kvcache.Key, bindSQL string, + is infoschema.InfoSchema, stmt *PlanCacheStmt, matchOpts *utilpc.PlanCacheMatchOpts) (Plan, +>>>>>>> 12107e33d3 (planner: refactor plan cache LRU code (#41618)) []*types.FieldName, bool, error) { sessVars := sctx.GetSessionVars() stmtCtx := sessVars.StmtCtx +<<<<<<< HEAD candidate, exist := sctx.GetPlanCache(isGeneralPlanCache).Get(cacheKey, paramTypes) +======= + candidate, exist := sctx.GetPlanCache(isNonPrepared).Get(cacheKey, matchOpts) +>>>>>>> 12107e33d3 (planner: refactor plan cache LRU code (#41618)) if !exist { return nil, nil, false, nil } @@ -256,9 +285,15 @@ func getGeneralPlan(sctx sessionctx.Context, isGeneralPlanCache bool, cacheKey k // generateNewPlan call the optimizer to generate a new plan for current statement // and try to add it to cache +<<<<<<< HEAD func generateNewPlan(ctx context.Context, sctx sessionctx.Context, isGeneralPlanCache bool, is infoschema.InfoSchema, stmt *PlanCacheStmt, cacheKey kvcache.Key, latestSchemaVersion int64, paramTypes []*types.FieldType, bindSQL string) (Plan, []*types.FieldName, error) { +======= +func generateNewPlan(ctx context.Context, sctx sessionctx.Context, isNonPrepared bool, is infoschema.InfoSchema, + stmt *PlanCacheStmt, cacheKey kvcache.Key, latestSchemaVersion int64, bindSQL string, + matchOpts *utilpc.PlanCacheMatchOpts) (Plan, []*types.FieldName, error) { +>>>>>>> 12107e33d3 (planner: refactor plan cache LRU code (#41618)) stmtAst := stmt.PreparedAst sessVars := sctx.GetSessionVars() stmtCtx := sessVars.StmtCtx @@ -276,7 +311,15 @@ func generateNewPlan(ctx context.Context, sctx sessionctx.Context, isGeneralPlan } // check whether this plan is cacheable. +<<<<<<< HEAD checkPlanCacheability(sctx, p, len(paramTypes)) +======= + if stmtCtx.UseCache { + if cacheable, reason := isPlanCacheable(sctx, p, len(matchOpts.ParamTypes), len(matchOpts.LimitOffsetAndCount)); !cacheable { + stmtCtx.SetSkipPlanCache(errors.Errorf(reason)) + } + } +>>>>>>> 12107e33d3 (planner: refactor plan cache LRU code (#41618)) // put this plan into the plan cache. 
if stmtCtx.UseCache { @@ -289,11 +332,19 @@ func generateNewPlan(ctx context.Context, sctx sessionctx.Context, isGeneralPlan } sessVars.IsolationReadEngines[kv.TiFlash] = struct{}{} } +<<<<<<< HEAD cached := NewPlanCacheValue(p, names, stmtCtx.TblInfo2UnionScan, paramTypes) stmt.NormalizedPlan, stmt.PlanDigest = NormalizePlan(p) stmtCtx.SetPlan(p) stmtCtx.SetPlanDigest(stmt.NormalizedPlan, stmt.PlanDigest) sctx.GetPlanCache(isGeneralPlanCache).Put(cacheKey, cached, paramTypes) +======= + cached := NewPlanCacheValue(p, names, stmtCtx.TblInfo2UnionScan, matchOpts) + stmt.NormalizedPlan, stmt.PlanDigest = NormalizePlan(p) + stmtCtx.SetPlan(p) + stmtCtx.SetPlanDigest(stmt.NormalizedPlan, stmt.PlanDigest) + sctx.GetPlanCache(isNonPrepared).Put(cacheKey, cached, matchOpts) +>>>>>>> 12107e33d3 (planner: refactor plan cache LRU code (#41618)) } sessVars.FoundInPlanCache = false return p, names, err diff --git a/planner/core/plan_cache_lru.go b/planner/core/plan_cache_lru.go index 413dd37e8f5a2..5d28bee3fa415 100644 --- a/planner/core/plan_cache_lru.go +++ b/planner/core/plan_cache_lru.go @@ -20,11 +20,11 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/metrics" "github.com/pingcap/tidb/sessionctx" - "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/hack" "github.com/pingcap/tidb/util/kvcache" "github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tidb/util/memory" + utilpc "github.com/pingcap/tidb/util/plancache" ) // planCacheEntry wraps Key and Value. It's the value of list.Element. @@ -94,13 +94,21 @@ func strHashKey(key kvcache.Key, deepCopy bool) string { } // Get tries to find the corresponding value according to the given key. +<<<<<<< HEAD func (l *LRUPlanCache) Get(key kvcache.Key, paramTypes []*types.FieldType) (value kvcache.Value, ok bool) { +======= +func (l *LRUPlanCache) Get(key kvcache.Key, opts *utilpc.PlanCacheMatchOpts) (value kvcache.Value, ok bool) { +>>>>>>> 12107e33d3 (planner: refactor plan cache LRU code (#41618)) l.lock.Lock() defer l.lock.Unlock() bucket, bucketExist := l.buckets[strHashKey(key, false)] if bucketExist { +<<<<<<< HEAD if element, exist := l.pickFromBucket(bucket, paramTypes); exist { +======= + if element, exist := l.pickFromBucket(bucket, opts); exist { +>>>>>>> 12107e33d3 (planner: refactor plan cache LRU code (#41618)) l.lruList.MoveToFront(element) return element.Value.(*planCacheEntry).PlanValue, true } @@ -109,14 +117,22 @@ func (l *LRUPlanCache) Get(key kvcache.Key, paramTypes []*types.FieldType) (valu } // Put puts the (key, value) pair into the LRU Cache. 
+<<<<<<< HEAD func (l *LRUPlanCache) Put(key kvcache.Key, value kvcache.Value, paramTypes []*types.FieldType) { +======= +func (l *LRUPlanCache) Put(key kvcache.Key, value kvcache.Value, opts *utilpc.PlanCacheMatchOpts) { +>>>>>>> 12107e33d3 (planner: refactor plan cache LRU code (#41618)) l.lock.Lock() defer l.lock.Unlock() hash := strHashKey(key, true) bucket, bucketExist := l.buckets[hash] if bucketExist { +<<<<<<< HEAD if element, exist := l.pickFromBucket(bucket, paramTypes); exist { +======= + if element, exist := l.pickFromBucket(bucket, opts); exist { +>>>>>>> 12107e33d3 (planner: refactor plan cache LRU code (#41618)) l.updateInstanceMetric(&planCacheEntry{PlanKey: key, PlanValue: value}, element.Value.(*planCacheEntry)) element.Value.(*planCacheEntry).PlanValue = value l.lruList.MoveToFront(element) @@ -252,12 +268,34 @@ func (l *LRUPlanCache) memoryControl() { } // PickPlanFromBucket pick one plan from bucket +<<<<<<< HEAD func PickPlanFromBucket(bucket map[*list.Element]struct{}, paramTypes []*types.FieldType) (*list.Element, bool) { for k := range bucket { plan := k.Value.(*planCacheEntry).PlanValue.(*PlanCacheValue) if plan.ParamTypes.CheckTypesCompatibility4PC(paramTypes) { return k, true } +======= +func (l *LRUPlanCache) pickFromBucket(bucket map[*list.Element]struct{}, matchOpts *utilpc.PlanCacheMatchOpts) (*list.Element, bool) { + for k := range bucket { + plan := k.Value.(*planCacheEntry).PlanValue.(*PlanCacheValue) + // check param types' compatibility + ok1 := checkTypesCompatibility4PC(plan.matchOpts.ParamTypes, matchOpts.ParamTypes) + if !ok1 { + continue + } + + // check limit offset and key if equal and check switch if enabled + ok2 := checkUint64SliceIfEqual(plan.matchOpts.LimitOffsetAndCount, matchOpts.LimitOffsetAndCount) + if !ok2 { + continue + } + if len(plan.matchOpts.LimitOffsetAndCount) > 0 && !l.sctx.GetSessionVars().EnablePlanCacheForParamLimit { + // offset and key slice matched, but it is a plan with param limit and the switch is disabled + continue + } + return k, true +>>>>>>> 12107e33d3 (planner: refactor plan cache LRU code (#41618)) } return nil, false } diff --git a/planner/core/plan_cache_lru_test.go b/planner/core/plan_cache_lru_test.go index 74b6b2c92c3bb..5052596d29db0 100644 --- a/planner/core/plan_cache_lru_test.go +++ b/planner/core/plan_cache_lru_test.go @@ -23,6 +23,7 @@ import ( "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/hack" "github.com/pingcap/tidb/util/kvcache" + utilpc "github.com/pingcap/tidb/util/plancache" "github.com/stretchr/testify/require" ) @@ -40,8 +41,13 @@ func randomPlanCacheValue(types []*types.FieldType) *PlanCacheValue { &PhysicalIndexLookUpReader{}, &PhysicalApply{}, &PhysicalApply{}, &PhysicalLimit{}} random := rand.New(rand.NewSource(time.Now().UnixNano())) return &PlanCacheValue{ +<<<<<<< HEAD Plan: plans[random.Int()%len(plans)], ParamTypes: types, +======= + Plan: plans[random.Int()%len(plans)], + matchOpts: &utilpc.PlanCacheMatchOpts{ParamTypes: types}, +>>>>>>> 12107e33d3 (planner: refactor plan cache LRU code (#41618)) } } @@ -69,10 +75,21 @@ func TestLRUPCPut(t *testing.T) { // one key corresponding to multi values for i := 0; i < 5; i++ { keys[i] = &planCacheKey{database: strconv.FormatInt(int64(1), 10)} +<<<<<<< HEAD vals[i] = &PlanCacheValue{ ParamTypes: pTypes[i], } lru.Put(keys[i], vals[i], pTypes[i]) +======= + opts := &utilpc.PlanCacheMatchOpts{ + ParamTypes: pTypes[i], + LimitOffsetAndCount: limitParams[i], + } + vals[i] = &PlanCacheValue{ + matchOpts: opts, + } + 
lru.Put(keys[i], vals[i], opts) +>>>>>>> 12107e33d3 (planner: refactor plan cache LRU code (#41618)) } require.Equal(t, lru.size, lru.capacity) require.Equal(t, uint(3), lru.size) @@ -103,7 +120,15 @@ func TestLRUPCPut(t *testing.T) { bucket, exist := lru.buckets[string(hack.String(keys[i].Hash()))] require.True(t, exist) +<<<<<<< HEAD element, exist := lru.pickFromBucket(bucket, pTypes[i]) +======= + matchOpts := &utilpc.PlanCacheMatchOpts{ + ParamTypes: pTypes[i], + LimitOffsetAndCount: limitParams[i], + } + element, exist := lru.pickFromBucket(bucket, matchOpts) +>>>>>>> 12107e33d3 (planner: refactor plan cache LRU code (#41618)) require.NotNil(t, element) require.True(t, exist) require.Equal(t, root, element) @@ -134,19 +159,46 @@ func TestLRUPCGet(t *testing.T) { // 5 bucket for i := 0; i < 5; i++ { keys[i] = &planCacheKey{database: strconv.FormatInt(int64(i%4), 10)} +<<<<<<< HEAD vals[i] = &PlanCacheValue{ParamTypes: pTypes[i]} lru.Put(keys[i], vals[i], pTypes[i]) +======= + opts := &utilpc.PlanCacheMatchOpts{ + ParamTypes: pTypes[i], + LimitOffsetAndCount: limitParams[i], + } + vals[i] = &PlanCacheValue{ + matchOpts: opts, + } + lru.Put(keys[i], vals[i], opts) +>>>>>>> 12107e33d3 (planner: refactor plan cache LRU code (#41618)) } // test for non-existent elements for i := 0; i < 2; i++ { +<<<<<<< HEAD value, exists := lru.Get(keys[i], pTypes[i]) +======= + opts := &utilpc.PlanCacheMatchOpts{ + ParamTypes: pTypes[i], + LimitOffsetAndCount: limitParams[i], + } + value, exists := lru.Get(keys[i], opts) +>>>>>>> 12107e33d3 (planner: refactor plan cache LRU code (#41618)) require.False(t, exists) require.Nil(t, value) } for i := 2; i < 5; i++ { +<<<<<<< HEAD value, exists := lru.Get(keys[i], pTypes[i]) +======= + opts := &utilpc.PlanCacheMatchOpts{ + ParamTypes: pTypes[i], + LimitOffsetAndCount: limitParams[i], + } + value, exists := lru.Get(keys[i], opts) +>>>>>>> 12107e33d3 (planner: refactor plan cache LRU code (#41618)) require.True(t, exists) require.NotNil(t, value) require.Equal(t, vals[i], value) @@ -177,21 +229,53 @@ func TestLRUPCDelete(t *testing.T) { } for i := 0; i < 3; i++ { keys[i] = &planCacheKey{database: strconv.FormatInt(int64(i), 10)} +<<<<<<< HEAD vals[i] = &PlanCacheValue{ParamTypes: pTypes[i]} lru.Put(keys[i], vals[i], pTypes[i]) +======= + opts := &utilpc.PlanCacheMatchOpts{ + ParamTypes: pTypes[i], + LimitOffsetAndCount: limitParams[i], + } + vals[i] = &PlanCacheValue{ + matchOpts: opts, + } + lru.Put(keys[i], vals[i], opts) +>>>>>>> 12107e33d3 (planner: refactor plan cache LRU code (#41618)) } require.Equal(t, 3, int(lru.size)) lru.Delete(keys[1]) +<<<<<<< HEAD value, exists := lru.Get(keys[1], pTypes[1]) +======= + + value, exists := lru.Get(keys[1], &utilpc.PlanCacheMatchOpts{ + ParamTypes: pTypes[1], + LimitOffsetAndCount: limitParams[1], + }) +>>>>>>> 12107e33d3 (planner: refactor plan cache LRU code (#41618)) require.False(t, exists) require.Nil(t, value) require.Equal(t, 2, int(lru.size)) +<<<<<<< HEAD _, exists = lru.Get(keys[0], pTypes[0]) require.True(t, exists) _, exists = lru.Get(keys[2], pTypes[2]) +======= + _, exists = lru.Get(keys[0], &utilpc.PlanCacheMatchOpts{ + ParamTypes: pTypes[0], + LimitOffsetAndCount: limitParams[0], + }) + require.True(t, exists) + + _, exists = lru.Get(keys[2], &utilpc.PlanCacheMatchOpts{ + ParamTypes: pTypes[2], + LimitOffsetAndCount: limitParams[2], + }) +>>>>>>> 12107e33d3 (planner: refactor plan cache LRU code (#41618)) require.True(t, exists) } @@ -206,15 +290,34 @@ func TestLRUPCDeleteAll(t *testing.T) { } for i := 
0; i < 3; i++ { keys[i] = &planCacheKey{database: strconv.FormatInt(int64(i), 10)} +<<<<<<< HEAD vals[i] = &PlanCacheValue{ParamTypes: pTypes[i]} lru.Put(keys[i], vals[i], pTypes[i]) +======= + opts := &utilpc.PlanCacheMatchOpts{ + ParamTypes: pTypes[i], + LimitOffsetAndCount: []uint64{}, + } + vals[i] = &PlanCacheValue{ + matchOpts: opts, + } + lru.Put(keys[i], vals[i], opts) +>>>>>>> 12107e33d3 (planner: refactor plan cache LRU code (#41618)) } require.Equal(t, 3, int(lru.size)) lru.DeleteAll() for i := 0; i < 3; i++ { +<<<<<<< HEAD value, exists := lru.Get(keys[i], pTypes[i]) +======= + opts := &utilpc.PlanCacheMatchOpts{ + ParamTypes: pTypes[i], + LimitOffsetAndCount: []uint64{}, + } + value, exists := lru.Get(keys[i], opts) +>>>>>>> 12107e33d3 (planner: refactor plan cache LRU code (#41618)) require.False(t, exists) require.Nil(t, value) require.Equal(t, 0, int(lru.size)) @@ -241,8 +344,19 @@ func TestLRUPCSetCapacity(t *testing.T) { // one key corresponding to multi values for i := 0; i < 5; i++ { keys[i] = &planCacheKey{database: strconv.FormatInt(int64(1), 10)} +<<<<<<< HEAD vals[i] = &PlanCacheValue{ParamTypes: pTypes[i]} lru.Put(keys[i], vals[i], pTypes[i]) +======= + opts := &utilpc.PlanCacheMatchOpts{ + ParamTypes: pTypes[i], + LimitOffsetAndCount: []uint64{}, + } + vals[i] = &PlanCacheValue{ + matchOpts: opts, + } + lru.Put(keys[i], vals[i], opts) +>>>>>>> 12107e33d3 (planner: refactor plan cache LRU code (#41618)) } require.Equal(t, lru.size, lru.capacity) require.Equal(t, uint(5), lru.size) @@ -289,10 +403,21 @@ func TestIssue37914(t *testing.T) { pTypes := []*types.FieldType{types.NewFieldType(mysql.TypeFloat), types.NewFieldType(mysql.TypeDouble)} key := &planCacheKey{database: strconv.FormatInt(int64(1), 10)} +<<<<<<< HEAD val := &PlanCacheValue{ParamTypes: pTypes} require.NotPanics(t, func() { lru.Put(key, val, pTypes) +======= + opts := &utilpc.PlanCacheMatchOpts{ + ParamTypes: pTypes, + LimitOffsetAndCount: []uint64{}, + } + val := &PlanCacheValue{matchOpts: opts} + + require.NotPanics(t, func() { + lru.Put(key, val, opts) +>>>>>>> 12107e33d3 (planner: refactor plan cache LRU code (#41618)) }) } @@ -312,8 +437,17 @@ func TestIssue38244(t *testing.T) { // one key corresponding to multi values for i := 0; i < 5; i++ { keys[i] = &planCacheKey{database: strconv.FormatInt(int64(i), 10)} +<<<<<<< HEAD vals[i] = &PlanCacheValue{ParamTypes: pTypes[i]} lru.Put(keys[i], vals[i], pTypes[i]) +======= + opts := &utilpc.PlanCacheMatchOpts{ + ParamTypes: pTypes[i], + LimitOffsetAndCount: []uint64{}, + } + vals[i] = &PlanCacheValue{matchOpts: opts} + lru.Put(keys[i], vals[i], opts) +>>>>>>> 12107e33d3 (planner: refactor plan cache LRU code (#41618)) } require.Equal(t, lru.size, lru.capacity) require.Equal(t, uint(3), lru.size) @@ -334,7 +468,15 @@ func TestLRUPlanCacheMemoryUsage(t *testing.T) { for i := 0; i < 3; i++ { k := randomPlanCacheKey() v := randomPlanCacheValue(pTypes) +<<<<<<< HEAD lru.Put(k, v, pTypes) +======= + opts := &utilpc.PlanCacheMatchOpts{ + ParamTypes: pTypes, + LimitOffsetAndCount: []uint64{}, + } + lru.Put(k, v, opts) +>>>>>>> 12107e33d3 (planner: refactor plan cache LRU code (#41618)) res += k.MemoryUsage() + v.MemoryUsage() require.Equal(t, lru.MemoryUsage(), res) } @@ -342,7 +484,15 @@ func TestLRUPlanCacheMemoryUsage(t *testing.T) { p := &PhysicalTableScan{} k := &planCacheKey{database: "3"} v := &PlanCacheValue{Plan: p} +<<<<<<< HEAD lru.Put(k, v, pTypes) +======= + opts := &utilpc.PlanCacheMatchOpts{ + ParamTypes: pTypes, + LimitOffsetAndCount: 
[]uint64{}, + } + lru.Put(k, v, opts) +>>>>>>> 12107e33d3 (planner: refactor plan cache LRU code (#41618)) res += k.MemoryUsage() + v.MemoryUsage() for kk, vv := range evict { res -= kk.(*planCacheKey).MemoryUsage() + vv.(*PlanCacheValue).MemoryUsage() diff --git a/planner/core/plan_cache_utils.go b/planner/core/plan_cache_utils.go index 4229e2b134f06..3e9325a5fc63f 100644 --- a/planner/core/plan_cache_utils.go +++ b/planner/core/plan_cache_utils.go @@ -22,6 +22,7 @@ import ( "unsafe" "github.com/pingcap/errors" + "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser" "github.com/pingcap/tidb/parser/ast" @@ -35,6 +36,7 @@ import ( "github.com/pingcap/tidb/util/hack" "github.com/pingcap/tidb/util/hint" "github.com/pingcap/tidb/util/kvcache" + utilpc "github.com/pingcap/tidb/util/plancache" "github.com/pingcap/tidb/util/size" atomic2 "go.uber.org/atomic" "golang.org/x/exp/slices" @@ -311,36 +313,6 @@ func NewPlanCacheKey(sessionVars *variable.SessionVars, stmtText, stmtDB string, return key, nil } -// FieldSlice is the slice of the types.FieldType -type FieldSlice []*types.FieldType - -// CheckTypesCompatibility4PC compares FieldSlice with []*types.FieldType -// Currently this is only used in plan cache to check whether the types of parameters are compatible. -// If the types of parameters are compatible, we can use the cached plan. -func (s FieldSlice) CheckTypesCompatibility4PC(tps []*types.FieldType) bool { - if len(s) != len(tps) { - return false - } - for i := range tps { - // We only use part of logic of `func (ft *FieldType) Equal(other *FieldType)` here because (1) only numeric and - // string types will show up here, and (2) we don't need flen and decimal to be matched exactly to use plan cache - tpEqual := (s[i].GetType() == tps[i].GetType()) || - (s[i].GetType() == mysql.TypeVarchar && tps[i].GetType() == mysql.TypeVarString) || - (s[i].GetType() == mysql.TypeVarString && tps[i].GetType() == mysql.TypeVarchar) - if !tpEqual || s[i].GetCharset() != tps[i].GetCharset() || s[i].GetCollate() != tps[i].GetCollate() || - (s[i].EvalType() == types.ETInt && mysql.HasUnsignedFlag(s[i].GetFlag()) != mysql.HasUnsignedFlag(tps[i].GetFlag())) { - return false - } - // When the type is decimal, we should compare the Flen and Decimal. - // We can only use the plan when both Flen and Decimal should less equal than the cached one. - // We assume here that there is no correctness problem when the precision of the parameters is less than the precision of the parameters in the cache. - if tpEqual && s[i].GetType() == mysql.TypeNewDecimal && !(s[i].GetFlen() >= tps[i].GetFlen() && s[i].GetDecimal() >= tps[i].GetDecimal()) { - return false - } - } - return true -} - // PlanCacheValue stores the cached Statement and StmtNode. 
type PlanCacheValue struct { Plan Plan @@ -348,10 +320,20 @@ type PlanCacheValue struct { TblInfo2UnionScan map[*model.TableInfo]bool ParamTypes FieldSlice memoryUsage int64 +<<<<<<< HEAD } func (v *PlanCacheValue) varTypesUnchanged(txtVarTps []*types.FieldType) bool { return v.ParamTypes.CheckTypesCompatibility4PC(txtVarTps) +======= + + // matchOpts stores some fields help to choose a suitable plan + matchOpts *utilpc.PlanCacheMatchOpts +} + +func (v *PlanCacheValue) varTypesUnchanged(txtVarTps []*types.FieldType) bool { + return checkTypesCompatibility4PC(v.matchOpts.ParamTypes, txtVarTps) +>>>>>>> 12107e33d3 (planner: refactor plan cache LRU code (#41618)) } // unKnownMemoryUsage represent the memory usage of uncounted structure, maybe need implement later @@ -380,35 +362,56 @@ func (v *PlanCacheValue) MemoryUsage() (sum int64) { sum = unKnownMemoryUsage } +<<<<<<< HEAD sum += size.SizeOfInterface + size.SizeOfSlice*2 + int64(cap(v.OutPutNames)+cap(v.ParamTypes))*size.SizeOfPointer + +======= + sum += size.SizeOfInterface + size.SizeOfSlice*2 + int64(cap(v.OutPutNames))*size.SizeOfPointer + +>>>>>>> 12107e33d3 (planner: refactor plan cache LRU code (#41618)) size.SizeOfMap + int64(len(v.TblInfo2UnionScan))*(size.SizeOfPointer+size.SizeOfBool) + size.SizeOfInt64*2 + if v.matchOpts != nil { + sum += int64(cap(v.matchOpts.ParamTypes)) * size.SizeOfPointer + for _, ft := range v.matchOpts.ParamTypes { + sum += ft.MemoryUsage() + } + } for _, name := range v.OutPutNames { sum += name.MemoryUsage() } +<<<<<<< HEAD for _, ft := range v.ParamTypes { sum += ft.MemoryUsage() } +======= +>>>>>>> 12107e33d3 (planner: refactor plan cache LRU code (#41618)) v.memoryUsage = sum return } // NewPlanCacheValue creates a SQLCacheValue. func NewPlanCacheValue(plan Plan, names []*types.FieldName, srcMap map[*model.TableInfo]bool, +<<<<<<< HEAD paramTypes []*types.FieldType) *PlanCacheValue { +======= + matchOpts *utilpc.PlanCacheMatchOpts) *PlanCacheValue { +>>>>>>> 12107e33d3 (planner: refactor plan cache LRU code (#41618)) dstMap := make(map[*model.TableInfo]bool) for k, v := range srcMap { dstMap[k] = v } - userParamTypes := make([]*types.FieldType, len(paramTypes)) - for i, tp := range paramTypes { + userParamTypes := make([]*types.FieldType, len(matchOpts.ParamTypes)) + for i, tp := range matchOpts.ParamTypes { userParamTypes[i] = tp.Clone() } return &PlanCacheValue{ Plan: plan, OutPutNames: names, TblInfo2UnionScan: dstMap, +<<<<<<< HEAD ParamTypes: userParamTypes, +======= + matchOpts: matchOpts, +>>>>>>> 12107e33d3 (planner: refactor plan cache LRU code (#41618)) } } @@ -451,3 +454,114 @@ func GetPreparedStmt(stmt *ast.ExecuteStmt, vars *variable.SessionVars) (*PlanCa } return nil, ErrStmtNotFound } +<<<<<<< HEAD +======= + +type limitExtractor struct { + cacheable bool // For safety considerations, check if limit count less than 10000 + offsetAndCount []uint64 + unCacheableReason string + paramTypeErr error + hasSubQuery bool +} + +// Enter implements Visitor interface. 
+func (checker *limitExtractor) Enter(in ast.Node) (out ast.Node, skipChildren bool) { + switch node := in.(type) { + case *ast.Limit: + if node.Count != nil { + if count, isParamMarker := node.Count.(*driver.ParamMarkerExpr); isParamMarker { + typeExpected, val := CheckParamTypeInt64orUint64(count) + if typeExpected { + if val > 10000 { + checker.cacheable = false + checker.unCacheableReason = "limit count more than 10000" + return in, true + } + checker.offsetAndCount = append(checker.offsetAndCount, val) + } else { + checker.paramTypeErr = ErrWrongArguments.GenWithStackByArgs("LIMIT") + return in, true + } + } + } + if node.Offset != nil { + if offset, isParamMarker := node.Offset.(*driver.ParamMarkerExpr); isParamMarker { + typeExpected, val := CheckParamTypeInt64orUint64(offset) + if typeExpected { + checker.offsetAndCount = append(checker.offsetAndCount, val) + } else { + checker.paramTypeErr = ErrWrongArguments.GenWithStackByArgs("LIMIT") + return in, true + } + } + } + } + return in, false +} + +// Leave implements Visitor interface. +func (checker *limitExtractor) Leave(in ast.Node) (out ast.Node, ok bool) { + return in, checker.cacheable +} + +// ExtractLimitFromAst extract limit offset and count from ast for plan cache key encode +func ExtractLimitFromAst(node ast.Node, sctx sessionctx.Context) ([]uint64, error) { + if node == nil { + return nil, nil + } + checker := limitExtractor{ + cacheable: true, + offsetAndCount: []uint64{}, + } + node.Accept(&checker) + if checker.paramTypeErr != nil { + return nil, checker.paramTypeErr + } + if sctx != nil && !checker.cacheable { + sctx.GetSessionVars().StmtCtx.SetSkipPlanCache(errors.New("skip plan-cache: " + checker.unCacheableReason)) + } + return checker.offsetAndCount, nil +} + +// GetMatchOpts get options to fetch plan or generate new plan +func GetMatchOpts(sctx sessionctx.Context, node ast.Node, params []expression.Expression) (*utilpc.PlanCacheMatchOpts, error) { + limitParams, err := ExtractLimitFromAst(node, sctx) + if err != nil { + return nil, err + } + paramTypes := parseParamTypes(sctx, params) + return &utilpc.PlanCacheMatchOpts{ + ParamTypes: paramTypes, + LimitOffsetAndCount: limitParams, + }, nil +} + +// CheckTypesCompatibility4PC compares FieldSlice with []*types.FieldType +// Currently this is only used in plan cache to check whether the types of parameters are compatible. +// If the types of parameters are compatible, we can use the cached plan. +// tpsExpected is types from cached plan +func checkTypesCompatibility4PC(tpsExpected, tpsActual []*types.FieldType) bool { + if len(tpsExpected) != len(tpsActual) { + return false + } + for i := range tpsActual { + // We only use part of logic of `func (ft *FieldType) Equal(other *FieldType)` here because (1) only numeric and + // string types will show up here, and (2) we don't need flen and decimal to be matched exactly to use plan cache + tpEqual := (tpsExpected[i].GetType() == tpsActual[i].GetType()) || + (tpsExpected[i].GetType() == mysql.TypeVarchar && tpsActual[i].GetType() == mysql.TypeVarString) || + (tpsExpected[i].GetType() == mysql.TypeVarString && tpsActual[i].GetType() == mysql.TypeVarchar) + if !tpEqual || tpsExpected[i].GetCharset() != tpsActual[i].GetCharset() || tpsExpected[i].GetCollate() != tpsActual[i].GetCollate() || + (tpsExpected[i].EvalType() == types.ETInt && mysql.HasUnsignedFlag(tpsExpected[i].GetFlag()) != mysql.HasUnsignedFlag(tpsActual[i].GetFlag())) { + return false + } + // When the type is decimal, we should compare the Flen and Decimal. 
+ // We can only use the plan when both Flen and Decimal should less equal than the cached one. + // We assume here that there is no correctness problem when the precision of the parameters is less than the precision of the parameters in the cache. + if tpEqual && tpsExpected[i].GetType() == mysql.TypeNewDecimal && !(tpsExpected[i].GetFlen() >= tpsActual[i].GetFlen() && tpsExpected[i].GetDecimal() >= tpsActual[i].GetDecimal()) { + return false + } + } + return true +} +>>>>>>> 12107e33d3 (planner: refactor plan cache LRU code (#41618)) diff --git a/sessionctx/BUILD.bazel b/sessionctx/BUILD.bazel index 800001fd426b3..c5c04c32b0e77 100644 --- a/sessionctx/BUILD.bazel +++ b/sessionctx/BUILD.bazel @@ -12,9 +12,9 @@ go_library( "//parser/model", "//sessionctx/sessionstates", "//sessionctx/variable", - "//types", "//util", "//util/kvcache", + "//util/plancache", "//util/sli", "//util/topsql/stmtstats", "@com_github_pingcap_errors//:errors", diff --git a/sessionctx/context.go b/sessionctx/context.go index f39d3a82a8f38..4ce18d7154137 100644 --- a/sessionctx/context.go +++ b/sessionctx/context.go @@ -27,9 +27,9 @@ import ( "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/sessionctx/sessionstates" "github.com/pingcap/tidb/sessionctx/variable" - "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util" "github.com/pingcap/tidb/util/kvcache" + utilpc "github.com/pingcap/tidb/util/plancache" "github.com/pingcap/tidb/util/sli" "github.com/pingcap/tidb/util/topsql/stmtstats" "github.com/pingcap/tipb/go-binlog" @@ -54,8 +54,13 @@ type SessionStatesHandler interface { // PlanCache is an interface for prepare and general plan cache type PlanCache interface { +<<<<<<< HEAD Get(key kvcache.Key, paramTypes []*types.FieldType) (value kvcache.Value, ok bool) Put(key kvcache.Key, value kvcache.Value, paramTypes []*types.FieldType) +======= + Get(key kvcache.Key, opts *utilpc.PlanCacheMatchOpts) (value kvcache.Value, ok bool) + Put(key kvcache.Key, value kvcache.Value, opts *utilpc.PlanCacheMatchOpts) +>>>>>>> 12107e33d3 (planner: refactor plan cache LRU code (#41618)) Delete(key kvcache.Key) DeleteAll() Size() int diff --git a/util/plancache/BUILD.bazel b/util/plancache/BUILD.bazel new file mode 100644 index 0000000000000..0c9f0db3866a0 --- /dev/null +++ b/util/plancache/BUILD.bazel @@ -0,0 +1,9 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "plancache", + srcs = ["util.go"], + importpath = "github.com/pingcap/tidb/util/plancache", + visibility = ["//visibility:public"], + deps = ["//types"], +) diff --git a/util/plancache/util.go b/util/plancache/util.go new file mode 100644 index 0000000000000..8d5be2d7b7fcd --- /dev/null +++ b/util/plancache/util.go @@ -0,0 +1,29 @@ +// Copyright 2023 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package util + +import ( + "github.com/pingcap/tidb/types" +) + +// PlanCacheMatchOpts store some property used to fetch plan from plan cache +// The structure set here is to avoid import cycle +type PlanCacheMatchOpts struct { + // paramTypes stores all parameters' FieldType, some different parameters may share same plan + ParamTypes []*types.FieldType + // limitOffsetAndCount stores all the offset and key parameters extract from limit statement + // only used for cache and pick plan with parameters in limit + LimitOffsetAndCount []uint64 +}
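
For readers following the refactor: the new GetMatchOpts path builds a PlanCacheMatchOpts by walking the statement with the limitExtractor visitor, recording every `?` bound in LIMIT/OFFSET, and marking the statement skip-plan-cache when a LIMIT count parameter exceeds 10000. The sketch below captures just that rule under simplified assumptions; the limitClause input is a hypothetical stand-in for the real AST walk and ParamMarkerExpr handling, not TiDB code.

package main

import "fmt"

// limitClause is a stand-in for *ast.Limit with parameter markers already
// evaluated to uint64 values (nil means "no parameter marker here").
type limitClause struct {
	count  *uint64
	offset *uint64
}

// extractLimitParams mimics ExtractLimitFromAst: gather every LIMIT/OFFSET
// parameter value for plan matching, and refuse to cache when a count is
// larger than 10000.
func extractLimitParams(limits []limitClause) (params []uint64, cacheable bool, reason string) {
	cacheable = true
	for _, l := range limits {
		if l.count != nil {
			if *l.count > 10000 {
				return nil, false, "limit count more than 10000"
			}
			params = append(params, *l.count)
		}
		if l.offset != nil {
			params = append(params, *l.offset)
		}
	}
	return params, cacheable, ""
}

func main() {
	c, o := uint64(20), uint64(5)
	params, ok, _ := extractLimitParams([]limitClause{{count: &c, offset: &o}})
	fmt.Println(params, ok) // [20 5] true

	big := uint64(20000)
	_, ok, reason := extractLimitParams([]limitClause{{count: &big}})
	fmt.Println(ok, reason) // false limit count more than 10000
}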
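
The core of the refactor is that Get/Put and pickFromBucket now match on a *utilpc.PlanCacheMatchOpts instead of a bare []*types.FieldType: a cached plan is reused only when the parameter types are compatible, the LIMIT offset/count parameters are identical, and, for plans parameterized by LIMIT, the EnablePlanCacheForParamLimit session switch is on. A minimal, self-contained sketch of that matching rule follows; the matchOpts/cachedPlan types, string type names, and the paramLimitEnabled flag are simplified placeholders, not the real TiDB types.

package main

import "fmt"

// matchOpts mirrors utilpc.PlanCacheMatchOpts in spirit.
type matchOpts struct {
	paramTypes          []string // placeholder for []*types.FieldType
	limitOffsetAndCount []uint64
}

type cachedPlan struct {
	name string
	opts matchOpts
}

func uint64SlicesEqual(a, b []uint64) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if a[i] != b[i] {
			return false
		}
	}
	return true
}

// pick returns the first cached plan whose match options are compatible
// with the current execution's options.
func pick(bucket []cachedPlan, cur matchOpts, paramLimitEnabled bool) (cachedPlan, bool) {
	for _, p := range bucket {
		// 1. parameter types must be compatible (plain equality in this sketch).
		if len(p.opts.paramTypes) != len(cur.paramTypes) {
			continue
		}
		typesOK := true
		for i := range cur.paramTypes {
			if p.opts.paramTypes[i] != cur.paramTypes[i] {
				typesOK = false
				break
			}
		}
		if !typesOK {
			continue
		}
		// 2. LIMIT offset/count parameters must be identical, since they are
		//    baked into the cached physical plan.
		if !uint64SlicesEqual(p.opts.limitOffsetAndCount, cur.limitOffsetAndCount) {
			continue
		}
		// 3. plans parameterized by LIMIT are only reusable when the switch is on.
		if len(p.opts.limitOffsetAndCount) > 0 && !paramLimitEnabled {
			continue
		}
		return p, true
	}
	return cachedPlan{}, false
}

func main() {
	bucket := []cachedPlan{
		{name: "planA", opts: matchOpts{paramTypes: []string{"bigint"}, limitOffsetAndCount: []uint64{10}}},
	}
	_, ok := pick(bucket, matchOpts{paramTypes: []string{"bigint"}, limitOffsetAndCount: []uint64{10}}, true)
	fmt.Println(ok) // true: types and limit params match, switch enabled
}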
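
checkTypesCompatibility4PC keeps the rules of the removed FieldSlice.CheckTypesCompatibility4PC: types must match (VARCHAR and VAR_STRING are interchangeable), charset and collation must match, integer parameters must agree on the unsigned flag, and a decimal parameter can only reuse a plan whose cached flen/decimal are at least as large. A rough illustration under simplified assumptions (a plain fieldType struct with string type names instead of *types.FieldType):

package main

import "fmt"

type fieldType struct {
	tp       string // "varchar", "varstring", "long", "newdecimal", ...
	charset  string
	collate  string
	unsigned bool
	flen     int
	decimal  int
}

// compatible reports whether a plan cached for `expected` parameter types can
// be reused for parameters of `actual` types.
func compatible(expected, actual []fieldType) bool {
	if len(expected) != len(actual) {
		return false
	}
	for i := range actual {
		e, a := expected[i], actual[i]
		// varchar and var_string are treated as the same type.
		tpEqual := e.tp == a.tp ||
			(e.tp == "varchar" && a.tp == "varstring") ||
			(e.tp == "varstring" && a.tp == "varchar")
		if !tpEqual || e.charset != a.charset || e.collate != a.collate {
			return false
		}
		// integer parameters must agree on signedness.
		if e.tp == "long" && e.unsigned != a.unsigned {
			return false
		}
		// decimals: reuse only when the cached plan's precision covers the
		// new parameter's precision.
		if e.tp == "newdecimal" && !(e.flen >= a.flen && e.decimal >= a.decimal) {
			return false
		}
	}
	return true
}

func main() {
	cached := []fieldType{{tp: "newdecimal", flen: 10, decimal: 2}}
	narrower := []fieldType{{tp: "newdecimal", flen: 8, decimal: 2}}
	wider := []fieldType{{tp: "newdecimal", flen: 12, decimal: 4}}
	fmt.Println(compatible(cached, narrower)) // true: cached precision covers it
	fmt.Println(compatible(cached, wider))    // false: would lose precision
}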