From b4b22e3ac3983d48167a34c845efc29cddc78dfe Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Fri, 29 Mar 2024 10:51:34 +0800 Subject: [PATCH] fmt Signed-off-by: AilinKid <314806019@qq.com> --- pkg/planner/core/common_plans.go | 59 +- pkg/planner/core/encode.go | 58 +- pkg/planner/core/exhaust_physical_plans.go | 30 +- pkg/planner/core/find_best_task.go | 254 ++++---- pkg/planner/core/hint_utils.go | 14 +- pkg/planner/core/optimizer.go | 19 +- pkg/planner/core/physical_plans.go | 127 ++-- pkg/planner/core/plan.go | 235 ++----- pkg/planner/core/plan_base.go | 133 ++++ pkg/planner/core/plan_cache.go | 1 - pkg/planner/core/plan_cost_detail.go | 25 +- pkg/planner/core/plan_cost_ver1.go | 254 ++++---- pkg/planner/core/plan_cost_ver2.go | 577 ++++++++---------- pkg/planner/core/plan_cost_ver2_test.go | 5 +- pkg/planner/core/planbuilder.go | 6 +- pkg/planner/core/point_get_plan.go | 29 +- .../core/rule_aggregation_elimination.go | 12 +- .../core/rule_aggregation_push_down.go | 15 +- .../core/rule_aggregation_skew_rewrite.go | 8 +- pkg/planner/core/rule_build_key_info.go | 4 +- pkg/planner/core/rule_collect_plan_stats.go | 6 +- pkg/planner/core/rule_column_pruning.go | 53 +- pkg/planner/core/rule_constant_propagation.go | 12 +- pkg/planner/core/rule_decorrelate.go | 24 +- .../core/rule_derive_topn_from_window.go | 9 +- pkg/planner/core/rule_eliminate_projection.go | 10 +- .../core/rule_generate_column_substitute.go | 14 +- pkg/planner/core/rule_join_elimination.go | 12 +- pkg/planner/core/rule_join_reorder.go | 8 +- pkg/planner/core/rule_max_min_eliminate.go | 15 +- pkg/planner/core/rule_partition_processor.go | 20 +- pkg/planner/core/rule_predicate_push_down.go | 45 +- .../core/rule_predicate_simplification.go | 8 +- pkg/planner/core/rule_push_down_sequence.go | 4 +- .../core/rule_resolve_grouping_expand.go | 7 +- pkg/planner/core/rule_result_reorder.go | 3 +- pkg/planner/core/rule_semi_join_rewrite.go | 4 +- pkg/planner/core/rule_topn_push_down.go | 35 
+- pkg/planner/core/task.go | 422 ++++++------- pkg/planner/core/task_base.go | 77 +++ pkg/planner/core/util.go | 4 +- pkg/planner/util/coreusage/costMisc.go | 128 ++++ pkg/planner/util/{ => coreusage}/optTracer.go | 64 +- 43 files changed, 1473 insertions(+), 1376 deletions(-) create mode 100644 pkg/planner/core/plan_base.go create mode 100644 pkg/planner/core/task_base.go create mode 100644 pkg/planner/util/coreusage/costMisc.go rename pkg/planner/util/{ => coreusage}/optTracer.go (58%) diff --git a/pkg/planner/core/common_plans.go b/pkg/planner/core/common_plans.go index 10be131af0d29..38e4c42d43dd8 100644 --- a/pkg/planner/core/common_plans.go +++ b/pkg/planner/core/common_plans.go @@ -28,6 +28,7 @@ import ( "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/planner/property" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/statistics" "github.com/pingcap/tidb/pkg/table" @@ -858,16 +859,16 @@ func (e *Explain) RenderResult() error { pp, ok := e.TargetPlan.(PhysicalPlan) if ok { if _, err := getPlanCost(pp, property.RootTaskType, - NewDefaultPlanCostOption().WithCostFlag(CostFlagRecalculate|CostFlagUseTrueCardinality|CostFlagTrace)); err != nil { + coreusage.NewDefaultPlanCostOption().WithCostFlag(coreusage.CostFlagRecalculate|coreusage.CostFlagUseTrueCardinality|coreusage.CostFlagTrace)); err != nil { return err } if pp.SCtx().GetSessionVars().CostModelVersion == modelVer2 { // output cost formula and factor costs through warning under model ver2 and true_card_cost mode for cost calibration. 
- cost, _ := pp.getPlanCostVer2(property.RootTaskType, NewDefaultPlanCostOption()) - if cost.trace != nil { - trace := cost.trace - pp.SCtx().GetSessionVars().StmtCtx.AppendWarning(errors.NewNoStackErrorf("cost formula: %v", trace.formula)) - data, err := json.Marshal(trace.factorCosts) + cost, _ := pp.GetPlanCostVer2(property.RootTaskType, coreusage.NewDefaultPlanCostOption()) + if cost.GetTrace() != nil { + trace := cost.GetTrace() + pp.SCtx().GetSessionVars().StmtCtx.AppendWarning(errors.NewNoStackErrorf("cost formula: %v", trace.GetFormula())) + data, err := json.Marshal(trace.GetFactorCosts()) if err != nil { pp.SCtx().GetSessionVars().StmtCtx.AppendWarning(errors.NewNoStackErrorf("marshal factor costs error %v", err)) } @@ -877,7 +878,7 @@ func (e *Explain) RenderResult() error { factors := defaultVer2Factors.tolist() weights := make(map[string]float64) for _, factor := range factors { - if factorCost, ok := trace.factorCosts[factor.Name]; ok && factor.Value > 0 { + if factorCost, ok := trace.GetFactorCosts()[factor.Name]; ok && factor.Value > 0 { weights[factor.Name] = factorCost / factor.Value // cost = [factors] * [weights] } } @@ -897,7 +898,7 @@ func (e *Explain) RenderResult() error { if pp, ok := e.TargetPlan.(PhysicalPlan); ok { // trigger getPlanCost again with CostFlagTrace to record all cost formulas if _, err := getPlanCost(pp, property.RootTaskType, - NewDefaultPlanCostOption().WithCostFlag(CostFlagRecalculate|CostFlagTrace)); err != nil { + coreusage.NewDefaultPlanCostOption().WithCostFlag(coreusage.CostFlagRecalculate|coreusage.CostFlagTrace)); err != nil { return err } } @@ -1142,15 +1143,15 @@ func (e *Explain) getOperatorInfo(p Plan, id string) (estRows, estCost, costForm estCost = "N/A" costFormula = "N/A" if isPhysicalPlan { - estRows = strconv.FormatFloat(pp.getEstRowCountForDisplay(), 'f', 2, 64) + estRows = strconv.FormatFloat(pp.GetEstRowCountForDisplay(), 'f', 2, 64) if e.SCtx() != nil && e.SCtx().GetSessionVars().CostModelVersion == 
modelVer2 { - costVer2, _ := pp.getPlanCostVer2(property.RootTaskType, NewDefaultPlanCostOption()) - estCost = strconv.FormatFloat(costVer2.cost, 'f', 2, 64) - if costVer2.trace != nil { - costFormula = costVer2.trace.formula + costVer2, _ := pp.GetPlanCostVer2(property.RootTaskType, coreusage.NewDefaultPlanCostOption()) + estCost = strconv.FormatFloat(costVer2.GetCost(), 'f', 2, 64) + if costVer2.GetTrace() != nil { + costFormula = costVer2.GetTrace().GetFormula() } } else { - planCost, _ := getPlanCost(pp, property.RootTaskType, NewDefaultPlanCostOption()) + planCost, _ := getPlanCost(pp, property.RootTaskType, coreusage.NewDefaultPlanCostOption()) estCost = strconv.FormatFloat(planCost, 'f', 2, 64) } } else if si := p.StatsInfo(); si != nil { @@ -1219,9 +1220,9 @@ func binaryOpTreeFromFlatOps(explainCtx PlanContext, ops FlatPlanTree) *tipb.Exp return &s[0] } -func binaryOpFromFlatOp(explainCtx PlanContext, op *FlatOperator, out *tipb.ExplainOperator) { - out.Name = op.Origin.ExplainID().String() - switch op.Label { +func binaryOpFromFlatOp(explainCtx PlanContext, fop *FlatOperator, out *tipb.ExplainOperator) { + out.Name = fop.Origin.ExplainID().String() + switch fop.Label { case BuildSide: out.Labels = []tipb.OperatorLabel{tipb.OperatorLabel_buildSide} case ProbeSide: @@ -1231,7 +1232,7 @@ func binaryOpFromFlatOp(explainCtx PlanContext, op *FlatOperator, out *tipb.Expl case RecursivePart: out.Labels = []tipb.OperatorLabel{tipb.OperatorLabel_recursivePart} } - switch op.StoreType { + switch fop.StoreType { case kv.TiDB: out.StoreType = tipb.StoreType_tidb case kv.TiKV: @@ -1239,10 +1240,10 @@ func binaryOpFromFlatOp(explainCtx PlanContext, op *FlatOperator, out *tipb.Expl case kv.TiFlash: out.StoreType = tipb.StoreType_tiflash } - if op.IsRoot { + if fop.IsRoot { out.TaskType = tipb.TaskType_root } else { - switch op.ReqType { + switch fop.ReqType { case Cop: out.TaskType = tipb.TaskType_cop case BatchCop: @@ -1252,16 +1253,16 @@ func 
binaryOpFromFlatOp(explainCtx PlanContext, op *FlatOperator, out *tipb.Expl } } - if op.IsPhysicalPlan { - p := op.Origin.(PhysicalPlan) - out.Cost, _ = getPlanCost(p, property.RootTaskType, NewDefaultPlanCostOption()) - out.EstRows = p.getEstRowCountForDisplay() - } else if statsInfo := op.Origin.StatsInfo(); statsInfo != nil { + if fop.IsPhysicalPlan { + p := fop.Origin.(PhysicalPlan) + out.Cost, _ = getPlanCost(p, property.RootTaskType, coreusage.NewDefaultPlanCostOption()) + out.EstRows = p.GetEstRowCountForDisplay() + } else if statsInfo := fop.Origin.StatsInfo(); statsInfo != nil { out.EstRows = statsInfo.RowCount } // Runtime info - rootStats, copStats, memTracker, diskTracker := getRuntimeInfo(explainCtx, op.Origin, nil) + rootStats, copStats, memTracker, diskTracker := getRuntimeInfo(explainCtx, fop.Origin, nil) if rootStats != nil { basic, groups := rootStats.MergeStats() if basic != nil { @@ -1291,14 +1292,14 @@ func binaryOpFromFlatOp(explainCtx PlanContext, op *FlatOperator, out *tipb.Expl } // Operator info - if plan, ok := op.Origin.(dataAccesser); ok { + if plan, ok := fop.Origin.(dataAccesser); ok { out.OperatorInfo = plan.OperatorInfo(false) } else { - out.OperatorInfo = op.Origin.ExplainInfo() + out.OperatorInfo = fop.Origin.ExplainInfo() } // Access object - switch p := op.Origin.(type) { + switch p := fop.Origin.(type) { case dataAccesser: ao := p.AccessObject() if ao != nil { diff --git a/pkg/planner/core/encode.go b/pkg/planner/core/encode.go index 46caf11d9188c..b11d8fff4da11 100644 --- a/pkg/planner/core/encode.go +++ b/pkg/planner/core/encode.go @@ -61,26 +61,26 @@ func EncodeFlatPlan(flat *FlatPhysicalPlan) string { buf.Grow(80 * opCount) encodeFlatPlanTree(flat.Main, 0, &buf) for _, cte := range flat.CTEs { - op := cte[0] + fop := cte[0] cteDef := cte[0].Origin.(*CTEDefinition) id := cteDef.CTE.IDForStorage tp := plancodec.TypeCTEDefinition - taskTypeInfo := plancodec.EncodeTaskType(op.IsRoot, op.StoreType) - p := op.Origin + 
taskTypeInfo := plancodec.EncodeTaskType(fop.IsRoot, fop.StoreType) + p := fop.Origin actRows, analyzeInfo, memoryInfo, diskInfo := getRuntimeInfoStr(p.SCtx(), p, nil) var estRows float64 - if op.IsPhysicalPlan { - estRows = op.Origin.(PhysicalPlan).getEstRowCountForDisplay() + if fop.IsPhysicalPlan { + estRows = fop.Origin.(PhysicalPlan).GetEstRowCountForDisplay() } else if statsInfo := p.StatsInfo(); statsInfo != nil { estRows = statsInfo.RowCount } plancodec.EncodePlanNode( - int(op.Depth), - strconv.Itoa(id)+op.Label.String(), + int(fop.Depth), + strconv.Itoa(id)+fop.Label.String(), tp, estRows, taskTypeInfo, - op.Origin.ExplainInfo(), + fop.Origin.ExplainInfo(), actRows, analyzeInfo, memoryInfo, @@ -96,23 +96,23 @@ func EncodeFlatPlan(flat *FlatPhysicalPlan) string { func encodeFlatPlanTree(flatTree FlatPlanTree, offset int, buf *bytes.Buffer) { for i := 0; i < len(flatTree); { - op := flatTree[i] - taskTypeInfo := plancodec.EncodeTaskType(op.IsRoot, op.StoreType) - p := op.Origin + fop := flatTree[i] + taskTypeInfo := plancodec.EncodeTaskType(fop.IsRoot, fop.StoreType) + p := fop.Origin actRows, analyzeInfo, memoryInfo, diskInfo := getRuntimeInfoStr(p.SCtx(), p, nil) var estRows float64 - if op.IsPhysicalPlan { - estRows = op.Origin.(PhysicalPlan).getEstRowCountForDisplay() + if fop.IsPhysicalPlan { + estRows = fop.Origin.(PhysicalPlan).GetEstRowCountForDisplay() } else if statsInfo := p.StatsInfo(); statsInfo != nil { estRows = statsInfo.RowCount } plancodec.EncodePlanNode( - int(op.Depth), - strconv.Itoa(op.Origin.ID())+op.Label.String(), - op.Origin.TP(), + int(fop.Depth), + strconv.Itoa(fop.Origin.ID())+fop.Label.String(), + fop.Origin.TP(), estRows, taskTypeInfo, - op.Origin.ExplainInfo(), + fop.Origin.ExplainInfo(), actRows, analyzeInfo, memoryInfo, @@ -120,16 +120,16 @@ func encodeFlatPlanTree(flatTree FlatPlanTree, offset int, buf *bytes.Buffer) { buf, ) - if op.NeedReverseDriverSide { + if fop.NeedReverseDriverSide { // If NeedReverseDriverSide is 
true, we don't rely on the order of flatTree. // Instead, we manually slice the build and probe side children from flatTree and recursively call // encodeFlatPlanTree to keep build side before probe side. - buildSide := flatTree[op.ChildrenIdx[1]-offset : op.ChildrenEndIdx+1-offset] - probeSide := flatTree[op.ChildrenIdx[0]-offset : op.ChildrenIdx[1]-offset] - encodeFlatPlanTree(buildSide, op.ChildrenIdx[1], buf) - encodeFlatPlanTree(probeSide, op.ChildrenIdx[0], buf) + buildSide := flatTree[fop.ChildrenIdx[1]-offset : fop.ChildrenEndIdx+1-offset] + probeSide := flatTree[fop.ChildrenIdx[0]-offset : fop.ChildrenIdx[1]-offset] + encodeFlatPlanTree(buildSide, fop.ChildrenIdx[1], buf) + encodeFlatPlanTree(probeSide, fop.ChildrenIdx[0], buf) // Skip the children plan tree of the current operator. - i = op.ChildrenEndIdx + 1 - offset + i = fop.ChildrenEndIdx + 1 - offset } else { // Normally, we just go to the next element in the slice. i++ @@ -210,7 +210,7 @@ func (pn *planEncoder) encodePlan(p Plan, isRoot bool, store kv.StoreType, depth actRows, analyzeInfo, memoryInfo, diskInfo := getRuntimeInfoStr(p.SCtx(), p, nil) rowCount := 0.0 if pp, ok := p.(PhysicalPlan); ok { - rowCount = pp.getEstRowCountForDisplay() + rowCount = pp.GetEstRowCountForDisplay() } else if statsInfo := p.StatsInfo(); statsInfo != nil { rowCount = statsInfo.RowCount } @@ -283,12 +283,12 @@ func NormalizeFlatPlan(flat *FlatPhysicalPlan) (normalized string, digest *parse }() // assume an operator costs around 30 bytes, preallocate space for them d.buf.Grow(30 * len(selectPlan)) - for _, op := range selectPlan { - taskTypeInfo := plancodec.EncodeTaskTypeForNormalize(op.IsRoot, op.StoreType) - p := op.Origin.(PhysicalPlan) + for _, fop := range selectPlan { + taskTypeInfo := plancodec.EncodeTaskTypeForNormalize(fop.IsRoot, fop.StoreType) + p := fop.Origin.(PhysicalPlan) plancodec.NormalizePlanNode( - int(op.Depth-uint32(selectPlanOffset)), - op.Origin.TP(), + int(fop.Depth-uint32(selectPlanOffset)), 
+ fop.Origin.TP(), taskTypeInfo, p.ExplainNormalizedInfo(), &d.buf, diff --git a/pkg/planner/core/exhaust_physical_plans.go b/pkg/planner/core/exhaust_physical_plans.go index 0bd930d28a4b4..0b3e065c3351d 100644 --- a/pkg/planner/core/exhaust_physical_plans.go +++ b/pkg/planner/core/exhaust_physical_plans.go @@ -463,7 +463,7 @@ func (p *LogicalJoin) getHashJoin(prop *property.PhysicalProperty, innerIdx int, func (p *LogicalJoin) constructIndexJoin( prop *property.PhysicalProperty, outerIdx int, - innerTask task, + innerTask Task, ranges ranger.MutableRanges, keyOff2IdxOff []int, path *util.AccessPath, @@ -577,7 +577,7 @@ func (p *LogicalJoin) constructIndexJoin( func (p *LogicalJoin) constructIndexMergeJoin( prop *property.PhysicalProperty, outerIdx int, - innerTask task, + innerTask Task, ranges ranger.MutableRanges, keyOff2IdxOff []int, path *util.AccessPath, @@ -684,7 +684,7 @@ func (p *LogicalJoin) constructIndexMergeJoin( func (p *LogicalJoin) constructIndexHashJoin( prop *property.PhysicalProperty, outerIdx int, - innerTask task, + innerTask Task, ranges ranger.MutableRanges, keyOff2IdxOff []int, path *util.AccessPath, @@ -832,7 +832,7 @@ func (p *LogicalJoin) buildIndexJoinInner2TableScan( keyOff2IdxOff := make([]int, len(innerJoinKeys)) newOuterJoinKeys := make([]*expression.Column, 0) var ranges ranger.MutableRanges = ranger.Ranges{} - var innerTask, innerTask2 task + var innerTask, innerTask2 Task var helper *indexJoinBuildHelper if ds.tableInfo.IsCommonHandle { helper, keyOff2IdxOff = p.getIndexJoinBuildHelper(ds, innerJoinKeys, func(path *util.AccessPath) bool { return path.IsCommonHandlePath }, outerJoinKeys) @@ -1024,7 +1024,7 @@ func (p *LogicalJoin) constructInnerTableScanTask( keepOrder bool, desc bool, rowCount float64, -) task { +) Task { ds := wrapper.ds // If `ds.tableInfo.GetPartitionInfo() != nil`, // it means the data source is a partition table reader. 
@@ -1089,9 +1089,9 @@ func (p *LogicalJoin) constructInnerTableScanTask( ts.PlanPartInfo = copTask.physPlanPartInfo selStats := ts.StatsInfo().Scale(selectivity) ts.addPushedDownSelection(copTask, selStats) - t := copTask.convertToRootTask(ds.SCtx()) - reader := t.p - t.p = p.constructInnerByWrapper(wrapper, reader) + t := copTask.ConvertToRootTask(ds.SCtx()) + reader := t.GetPlan() + t.SetPlan(p.constructInnerByWrapper(wrapper, reader)) return t } @@ -1210,7 +1210,7 @@ func (p *LogicalJoin) constructInnerIndexScanTask( desc bool, rowCount float64, maxOneRow bool, -) task { +) Task { ds := wrapper.ds // If `ds.tableInfo.GetPartitionInfo() != nil`, // it means the data source is a partition table reader. @@ -1377,9 +1377,9 @@ func (p *LogicalJoin) constructInnerIndexScanTask( } finalStats := ds.tableStats.ScaleByExpectCnt(rowCount) is.addPushedDownSelection(cop, ds, tmpPath, finalStats) - t := cop.convertToRootTask(ds.SCtx()) - reader := t.p - t.p = p.constructInnerByWrapper(wrapper, reader) + t := cop.ConvertToRootTask(ds.SCtx()) + reader := t.GetPlan() + t.SetPlan(p.constructInnerByWrapper(wrapper, reader)) return t } @@ -2566,7 +2566,7 @@ func (p *LogicalProjection) TryToGetChildProp(prop *property.PhysicalProperty) ( return newProp, true } -// exhaustPhysicalPlans enumerate all the possible physical plan for expand operator (currently only mpp case is supported) +// exhaustPhysicalPlans enumerate all the possible physical plan for expand operator (currently only mpp case is supported) func (p *LogicalExpand) exhaustPhysicalPlans(prop *property.PhysicalProperty) ([]PhysicalPlan, bool, error) { // under the mpp task type, if the sort item is not empty, refuse it, cause expanded data doesn't support any sort items. if !prop.IsSortItemEmpty() { @@ -2943,9 +2943,9 @@ func (lw *LogicalWindow) exhaustPhysicalPlans(prop *property.PhysicalProperty) ( return windows, true, nil } -// exhaustPhysicalPlans is only for implementing interface.
DataSource and Dual generate task in `findBestTask` directly. +// exhaustPhysicalPlans is only for implementing interface. DataSource and Dual generate task in `findBestTask` directly. func (*baseLogicalPlan) exhaustPhysicalPlans(*property.PhysicalProperty) ([]PhysicalPlan, bool, error) { - panic("baseLogicalPlan.exhaustPhysicalPlans() should never be called.") + panic("baseLogicalPlan.exhaustPhysicalPlans() should never be called.") } // canPushToCop checks if it can be pushed to some stores. For TiKV, it only checks datasource. diff --git a/pkg/planner/core/find_best_task.go b/pkg/planner/core/find_best_task.go index 43c08246a2844..60eebcf91e2fa 100644 --- a/pkg/planner/core/find_best_task.go +++ b/pkg/planner/core/find_best_task.go @@ -32,6 +32,7 @@ import ( "github.com/pingcap/tidb/pkg/planner/cardinality" "github.com/pingcap/tidb/pkg/planner/property" "github.com/pingcap/tidb/pkg/planner/util" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "github.com/pingcap/tidb/pkg/planner/util/fixcontrol" "github.com/pingcap/tidb/pkg/statistics" "github.com/pingcap/tidb/pkg/types" @@ -102,7 +103,7 @@ func (c *PlanCounterTp) IsForce() bool { return *c != -1 } -var invalidTask = &rootTask{p: nil} // invalid if p is nil +var invalidTask = &RootTask{} // invalid if p is nil // GetPropByOrderByItems will check if this sort property can be pushed or not. In order to simplify the problem, we only // consider the case that all expression are columns.
@@ -141,7 +142,7 @@ func GetPropByOrderByItemsContainScalarFunc(items []*util.ByItems) (*property.Ph return &property.PhysicalProperty{SortItems: propItems}, true, onlyColumn } -func (p *LogicalTableDual) findBestTask(prop *property.PhysicalProperty, planCounter *PlanCounterTp, opt *physicalOptimizeOp) (task, int64, error) { +func (p *LogicalTableDual) findBestTask(prop *property.PhysicalProperty, planCounter *PlanCounterTp, opt *coreusage.PhysicalOptimizeOp) (Task, int64, error) { // If the required property is not empty and the row count > 1, // we cannot ensure this required property. // But if the row count is 0 or 1, we don't need to care about the property. @@ -153,32 +154,39 @@ func (p *LogicalTableDual) findBestTask(prop *property.PhysicalProperty, planCou }.Init(p.SCtx(), p.StatsInfo(), p.QueryBlockOffset()) dual.SetSchema(p.schema) planCounter.Dec(1) - opt.appendCandidate(p, dual, prop) - return &rootTask{p: dual, isEmpty: p.RowCount == 0}, 1, nil + appendCandidate4PhysicalOptimizeOp(opt, p, dual, prop) + rt := &RootTask{} + rt.SetPlan(dual) + rt.SetEmpty(p.RowCount == 0) + return rt, 1, nil } -func (p *LogicalShow) findBestTask(prop *property.PhysicalProperty, planCounter *PlanCounterTp, _ *physicalOptimizeOp) (task, int64, error) { +func (p *LogicalShow) findBestTask(prop *property.PhysicalProperty, planCounter *PlanCounterTp, _ *coreusage.PhysicalOptimizeOp) (Task, int64, error) { if !prop.IsSortItemEmpty() || planCounter.Empty() { return invalidTask, 0, nil } pShow := PhysicalShow{ShowContents: p.ShowContents, Extractor: p.Extractor}.Init(p.SCtx()) pShow.SetSchema(p.schema) planCounter.Dec(1) - return &rootTask{p: pShow}, 1, nil + rt := &RootTask{} + rt.SetPlan(pShow) + return rt, 1, nil } -func (p *LogicalShowDDLJobs) findBestTask(prop *property.PhysicalProperty, planCounter *PlanCounterTp, _ *physicalOptimizeOp) (task, int64, error) { +func (p *LogicalShowDDLJobs) findBestTask(prop *property.PhysicalProperty, planCounter *PlanCounterTp, _ 
*coreusage.PhysicalOptimizeOp) (Task, int64, error) { if !prop.IsSortItemEmpty() || planCounter.Empty() { return invalidTask, 0, nil } pShow := PhysicalShowDDLJobs{JobNumber: p.JobNumber}.Init(p.SCtx()) pShow.SetSchema(p.schema) planCounter.Dec(1) - return &rootTask{p: pShow}, 1, nil + rt := &RootTask{} + rt.SetPlan(pShow) + return rt, 1, nil } // rebuildChildTasks rebuilds the childTasks to make the clock_th combination. -func (p *baseLogicalPlan) rebuildChildTasks(childTasks *[]task, pp PhysicalPlan, childCnts []int64, planCounter int64, ts uint64, opt *physicalOptimizeOp) error { +func (p *baseLogicalPlan) rebuildChildTasks(childTasks *[]Task, pp PhysicalPlan, childCnts []int64, planCounter int64, ts uint64, opt *coreusage.PhysicalOptimizeOp) error { // The taskMap of children nodes should be rolled back first. for _, child := range p.children { child.rollBackTaskMap(ts) @@ -201,7 +209,7 @@ func (p *baseLogicalPlan) rebuildChildTasks(childTasks *[]task, pp PhysicalPlan, if curClock != 0 { return errors.Errorf("PlanCounterTp planCounter is not handled") } - if childTask != nil && childTask.invalid() { + if childTask != nil && childTask.Invalid() { return errors.Errorf("The current plan is invalid, please skip this plan") } *childTasks = append(*childTasks, childTask) @@ -214,12 +222,12 @@ func (p *baseLogicalPlan) enumeratePhysicalPlans4Task( prop *property.PhysicalProperty, addEnforcer bool, planCounter *PlanCounterTp, - opt *physicalOptimizeOp, -) (task, int64, error) { - var bestTask task = invalidTask + opt *coreusage.PhysicalOptimizeOp, +) (Task, int64, error) { + var bestTask Task = invalidTask var curCntPlan, cntPlan int64 var err error - childTasks := make([]task, 0, len(p.children)) + childTasks := make([]Task, 0, len(p.children)) childCnts := make([]int64, len(p.children)) cntPlan = 0 iteration := p.iteratePhysicalPlan @@ -252,14 +260,14 @@ func (p *baseLogicalPlan) enumeratePhysicalPlans4Task( } // Combine the best child tasks with parent physical 
plan. - curTask := pp.attach2Task(childTasks...) - if curTask.invalid() { + curTask := pp.Attach2Task(childTasks...) + if curTask.Invalid() { continue } // An optimal task could not satisfy the property, so it should be converted here. - if _, ok := curTask.(*rootTask); !ok && prop.TaskTp == property.RootTaskType { - curTask = curTask.convertToRootTask(p.SCtx()) + if _, ok := curTask.(*RootTask); !ok && prop.TaskTp == property.RootTaskType { + curTask = curTask.ConvertToRootTask(p.SCtx()) } // Enforce curTask property @@ -280,7 +288,7 @@ func (p *baseLogicalPlan) enumeratePhysicalPlans4Task( bestTask = curTask break } - opt.appendCandidate(p, curTask.plan(), prop) + appendCandidate4PhysicalOptimizeOp(opt, p, curTask.Plan(), prop) // Get the most efficient one. if curIsBetter, err := compareTaskCost(curTask, bestTask, opt); err != nil { return nil, 0, err @@ -294,11 +302,11 @@ func (p *baseLogicalPlan) enumeratePhysicalPlans4Task( // iteratePhysicalPlan is used to iterate the physical plan and get all child tasks. func (p *baseLogicalPlan) iteratePhysicalPlan( selfPhysicalPlan PhysicalPlan, - childTasks []task, + childTasks []Task, childCnts []int64, _ *property.PhysicalProperty, - opt *physicalOptimizeOp, -) ([]task, int64, []int64, error) { + opt *coreusage.PhysicalOptimizeOp, +) ([]Task, int64, []int64, error) { // Find best child tasks firstly. childTasks = childTasks[:0] // The curCntPlan records the number of possible plans for pp @@ -311,7 +319,7 @@ func (p *baseLogicalPlan) iteratePhysicalPlan( return nil, 0, childCnts, err } curCntPlan = curCntPlan * cnt - if childTask != nil && childTask.invalid() { + if childTask != nil && childTask.Invalid() { return nil, 0, childCnts, nil } childTasks = append(childTasks, childTask) @@ -327,11 +335,11 @@ func (p *baseLogicalPlan) iteratePhysicalPlan( // iterateChildPlan does the special part for sequence. 
We need to iterate its child one by one to check whether the former child is a valid plan and then go to the nex func (p *LogicalSequence) iterateChildPlan( selfPhysicalPlan PhysicalPlan, - childTasks []task, + childTasks []Task, childCnts []int64, prop *property.PhysicalProperty, - opt *physicalOptimizeOp, -) ([]task, int64, []int64, error) { + opt *coreusage.PhysicalOptimizeOp, +) ([]Task, int64, []int64, error) { // Find best child tasks firstly. childTasks = childTasks[:0] // The curCntPlan records the number of possible plans for pp @@ -346,7 +354,7 @@ func (p *LogicalSequence) iterateChildPlan( return nil, 0, nil, err } curCntPlan = curCntPlan * cnt - if childTask != nil && childTask.invalid() { + if childTask != nil && childTask.Invalid() { return nil, 0, nil, nil } _, isMpp := childTask.(*mppTask) @@ -370,7 +378,7 @@ func (p *LogicalSequence) iterateChildPlan( return nil, 0, nil, err } curCntPlan = curCntPlan * cnt - if lastChildTask != nil && lastChildTask.invalid() { + if lastChildTask != nil && lastChildTask.Invalid() { return nil, 0, nil, nil } @@ -383,7 +391,7 @@ func (p *LogicalSequence) iterateChildPlan( } // compareTaskCost compares cost of curTask and bestTask and returns whether curTask's cost is smaller than bestTask's. -func compareTaskCost(curTask, bestTask task, op *physicalOptimizeOp) (curIsBetter bool, err error) { +func compareTaskCost(curTask, bestTask Task, op *coreusage.PhysicalOptimizeOp) (curIsBetter bool, err error) { curCost, curInvalid, err := getTaskPlanCost(curTask, op) if err != nil { return false, err @@ -404,8 +412,8 @@ func compareTaskCost(curTask, bestTask task, op *physicalOptimizeOp) (curIsBette // getTaskPlanCost returns the cost of this task. // The new cost interface will be used if EnableNewCostInterface is true. // The second returned value indicates whether this task is valid. 
-func getTaskPlanCost(t task, op *physicalOptimizeOp) (float64, bool, error) { - if t.invalid() { +func getTaskPlanCost(t Task, pop *coreusage.PhysicalOptimizeOp) (float64, bool, error) { + if t.Invalid() { return math.MaxFloat64, true, nil } @@ -415,7 +423,7 @@ func getTaskPlanCost(t task, op *physicalOptimizeOp) (float64, bool, error) { indexPartialCost float64 ) switch t.(type) { - case *rootTask: + case *RootTask: taskType = property.RootTaskType case *copTask: // no need to know whether the task is single-read or double-read, so both CopSingleReadTaskType and CopDoubleReadTaskType are OK cop := t.(*copTask) @@ -423,15 +431,15 @@ func getTaskPlanCost(t task, op *physicalOptimizeOp) (float64, bool, error) { taskType = property.CopMultiReadTaskType // keep compatible with the old cost interface, for CopMultiReadTask, the cost is idxCost + tblCost. if !cop.indexPlanFinished { // only consider index cost in this case - idxCost, err := getPlanCost(cop.indexPlan, taskType, NewDefaultPlanCostOption().WithOptimizeTracer(op)) + idxCost, err := getPlanCost(cop.indexPlan, taskType, coreusage.NewDefaultPlanCostOption().WithOptimizeTracer(pop)) return idxCost, false, err } // consider both sides - idxCost, err := getPlanCost(cop.indexPlan, taskType, NewDefaultPlanCostOption().WithOptimizeTracer(op)) + idxCost, err := getPlanCost(cop.indexPlan, taskType, coreusage.NewDefaultPlanCostOption().WithOptimizeTracer(pop)) if err != nil { return 0, false, err } - tblCost, err := getPlanCost(cop.tablePlan, taskType, NewDefaultPlanCostOption().WithOptimizeTracer(op)) + tblCost, err := getPlanCost(cop.tablePlan, taskType, coreusage.NewDefaultPlanCostOption().WithOptimizeTracer(pop)) if err != nil { return 0, false, err } @@ -457,7 +465,7 @@ func getTaskPlanCost(t task, op *physicalOptimizeOp) (float64, bool, error) { // cost about table plan. 
if cop.indexPlanFinished && len(cop.idxMergePartPlans) != 0 { for _, partialScan := range cop.idxMergePartPlans { - partialCost, err := getPlanCost(partialScan, taskType, NewDefaultPlanCostOption().WithOptimizeTracer(op)) + partialCost, err := getPlanCost(partialScan, taskType, coreusage.NewDefaultPlanCostOption().WithOptimizeTracer(pop)) if err != nil { return 0, false, err } @@ -469,13 +477,13 @@ func getTaskPlanCost(t task, op *physicalOptimizeOp) (float64, bool, error) { default: return 0, false, errors.New("unknown task type") } - if t.plan() == nil { + if t.Plan() == nil { // It's a very special case for index merge case. // t.plan() == nil in index merge COP case, it means indexPlanFinished is false in other words. cost := 0.0 copTsk := t.(*copTask) for _, partialScan := range copTsk.idxMergePartPlans { - partialCost, err := getPlanCost(partialScan, taskType, NewDefaultPlanCostOption().WithOptimizeTracer(op)) + partialCost, err := getPlanCost(partialScan, taskType, coreusage.NewDefaultPlanCostOption().WithOptimizeTracer(pop)) if err != nil { return 0, false, err } @@ -483,33 +491,19 @@ func getTaskPlanCost(t task, op *physicalOptimizeOp) (float64, bool, error) { } return cost, false, nil } - cost, err := getPlanCost(t.plan(), taskType, NewDefaultPlanCostOption().WithOptimizeTracer(op)) + cost, err := getPlanCost(t.Plan(), taskType, coreusage.NewDefaultPlanCostOption().WithOptimizeTracer(pop)) return cost + indexPartialCost, false, err } -type physicalOptimizeOp struct { - // tracer is goring to track optimize steps during physical optimizing - tracer *tracing.PhysicalOptimizeTracer -} - -func defaultPhysicalOptimizeOption() *physicalOptimizeOp { - return &physicalOptimizeOp{} -} - -func (op *physicalOptimizeOp) withEnableOptimizeTracer(tracer *tracing.PhysicalOptimizeTracer) *physicalOptimizeOp { - op.tracer = tracer - return op -} - -func (op *physicalOptimizeOp) appendCandidate(lp LogicalPlan, pp PhysicalPlan, prop *property.PhysicalProperty) { - if op == 
nil || op.tracer == nil || pp == nil { +func appendCandidate4PhysicalOptimizeOp(pop *coreusage.PhysicalOptimizeOp, lp LogicalPlan, pp PhysicalPlan, prop *property.PhysicalProperty) { + if pop == nil || pop.GetTracer() == nil || pp == nil { return } candidate := &tracing.CandidatePlanTrace{ PlanTrace: &tracing.PlanTrace{TP: pp.TP(), ID: pp.ID(), ExplainInfo: pp.ExplainInfo(), ProperType: prop.String()}, MappingLogicalPlan: tracing.CodecPlanName(lp.TP(), lp.ID())} - op.tracer.AppendCandidate(candidate) + pop.GetTracer().AppendCandidate(candidate) // for PhysicalIndexMergeJoin/PhysicalIndexHashJoin/PhysicalIndexJoin, it will use innerTask as a child instead of calling findBestTask, // and innerTask.plan() will be appended to planTree in appendChildCandidate using empty MappingLogicalPlan field, so it won't mapping with the logic plan, @@ -520,13 +514,13 @@ func (op *physicalOptimizeOp) appendCandidate(lp LogicalPlan, pp PhysicalPlan, p switch join := pp.(type) { case *PhysicalIndexMergeJoin: index = join.InnerChildIdx - plan = join.innerTask.plan() + plan = join.innerTask.Plan() case *PhysicalIndexHashJoin: index = join.InnerChildIdx - plan = join.innerTask.plan() + plan = join.innerTask.Plan() case *PhysicalIndexJoin: index = join.InnerChildIdx - plan = join.innerTask.plan() + plan = join.innerTask.Plan() } if index != -1 { child := lp.(*baseLogicalPlan).children[index] @@ -534,20 +528,20 @@ func (op *physicalOptimizeOp) appendCandidate(lp LogicalPlan, pp PhysicalPlan, p PlanTrace: &tracing.PlanTrace{TP: plan.TP(), ID: plan.ID(), ExplainInfo: plan.ExplainInfo(), ProperType: prop.String()}, MappingLogicalPlan: tracing.CodecPlanName(child.TP(), child.ID())} - op.tracer.AppendCandidate(candidate) + pop.GetTracer().AppendCandidate(candidate) } - pp.appendChildCandidate(op) + pp.AppendChildCandidate(pop) } -func (op *physicalOptimizeOp) appendPlanCostDetail(detail *tracing.PhysicalPlanCostDetail) { - if op == nil || op.tracer == nil { +func 
appendPlanCostDetail4PhysicalOptimizeOp(pop *coreusage.PhysicalOptimizeOp, detail *tracing.PhysicalPlanCostDetail) { + if pop == nil || pop.GetTracer() == nil { return } - op.tracer.PhysicalPlanCostDetails[fmt.Sprintf("%v_%v", detail.GetPlanType(), detail.GetPlanID())] = detail + pop.GetTracer().PhysicalPlanCostDetails[fmt.Sprintf("%v_%v", detail.GetPlanType(), detail.GetPlanID())] = detail } // findBestTask implements LogicalPlan interface. -func (p *baseLogicalPlan) findBestTask(prop *property.PhysicalProperty, planCounter *PlanCounterTp, opt *physicalOptimizeOp) (bestTask task, cntPlan int64, err error) { +func (p *baseLogicalPlan) findBestTask(prop *property.PhysicalProperty, planCounter *PlanCounterTp, opt *coreusage.PhysicalOptimizeOp) (bestTask Task, cntPlan int64, err error) { // If p is an inner plan in an IndexJoin, the IndexJoin will generate an inner plan by itself, // and set inner child prop nil, so here we do nothing. if prop == nil { @@ -616,7 +610,7 @@ func (p *baseLogicalPlan) findBestTask(prop *property.PhysicalProperty, planCoun } var cnt int64 - var curTask task + var curTask Task if bestTask, cnt, err = p.enumeratePhysicalPlans4Task(plansFitsProp, newProp, false, planCounter, opt); err != nil { return nil, 0, err } @@ -634,7 +628,7 @@ func (p *baseLogicalPlan) findBestTask(prop *property.PhysicalProperty, planCoun bestTask = curTask goto END } - opt.appendCandidate(p, curTask.plan(), prop) + appendCandidate4PhysicalOptimizeOp(opt, p, curTask.Plan(), prop) if curIsBetter, err := compareTaskCost(curTask, bestTask, opt); err != nil { return nil, 0, err } else if curIsBetter { @@ -646,7 +640,7 @@ END: return bestTask, cntPlan, nil } -func (p *LogicalMemTable) findBestTask(prop *property.PhysicalProperty, planCounter *PlanCounterTp, opt *physicalOptimizeOp) (t task, cntPlan int64, err error) { +func (p *LogicalMemTable) findBestTask(prop *property.PhysicalProperty, planCounter *PlanCounterTp, opt *coreusage.PhysicalOptimizeOp) (t Task, cntPlan 
int64, err error) { if prop.MPPPartitionTp != property.AnyType { return invalidTask, 0, nil } @@ -693,12 +687,14 @@ func (p *LogicalMemTable) findBestTask(prop *property.PhysicalProperty, planCoun }.Init(p.SCtx(), p.StatsInfo(), p.QueryBlockOffset()) memTable.SetSchema(p.schema) planCounter.Dec(1) - opt.appendCandidate(p, memTable, prop) - return &rootTask{p: memTable}, 1, nil + appendCandidate4PhysicalOptimizeOp(opt, p, memTable, prop) + rt := &RootTask{} + rt.SetPlan(memTable) + return rt, 1, nil } // tryToGetDualTask will check if the push down predicate has false constant. If so, it will return table dual. -func (ds *DataSource) tryToGetDualTask() (task, error) { +func (ds *DataSource) tryToGetDualTask() (Task, error) { for _, cond := range ds.pushedDownConds { if con, ok := cond.(*expression.Constant); ok && con.DeferredExpr == nil && con.ParamMarker == nil { result, _, err := expression.EvalBool(ds.SCtx().GetExprCtx().GetEvalCtx(), []expression.Expression{cond}, chunk.Row{}) @@ -708,9 +704,9 @@ func (ds *DataSource) tryToGetDualTask() (task, error) { if !result { dual := PhysicalTableDual{}.Init(ds.SCtx(), ds.StatsInfo(), ds.QueryBlockOffset()) dual.SetSchema(ds.schema) - return &rootTask{ - p: dual, - }, nil + rt := &RootTask{} + rt.SetPlan(dual) + return rt, nil } } } @@ -1277,7 +1273,7 @@ func (ds *DataSource) exploreEnforcedPlan() bool { // findBestTask implements the PhysicalPlan interface. // It will enumerate all the available indices and choose a plan with least cost. -func (ds *DataSource) findBestTask(prop *property.PhysicalProperty, planCounter *PlanCounterTp, opt *physicalOptimizeOp) (t task, cntPlan int64, err error) { +func (ds *DataSource) findBestTask(prop *property.PhysicalProperty, planCounter *PlanCounterTp, opt *coreusage.PhysicalOptimizeOp) (t Task, cntPlan int64, err error) { // If ds is an inner plan in an IndexJoin, the IndexJoin will generate an inner plan by itself, // and set inner child prop nil, so here we do nothing. 
if prop == nil { @@ -1315,7 +1311,7 @@ func (ds *DataSource) findBestTask(prop *property.PhysicalProperty, planCounter return } var cnt int64 - var unenforcedTask task + var unenforcedTask Task // If prop.CanAddEnforcer is true, the prop.SortItems need to be set nil for ds.findBestTask. // Before function return, reset it for enforcing task prop and storing map. oldProp := prop.CloneEssentialFields() @@ -1326,7 +1322,7 @@ func (ds *DataSource) findBestTask(prop *property.PhysicalProperty, planCounter if err != nil { return nil, 0, err } - if !unenforcedTask.invalid() && !ds.exploreEnforcedPlan() { + if !unenforcedTask.Invalid() && !ds.exploreEnforcedPlan() { ds.storeTask(prop, unenforcedTask) return unenforcedTask, cnt, nil } @@ -1349,7 +1345,7 @@ func (ds *DataSource) findBestTask(prop *property.PhysicalProperty, planCounter prop.CanAddEnforcer = true } - if unenforcedTask != nil && !unenforcedTask.invalid() { + if unenforcedTask != nil && !unenforcedTask.Invalid() { curIsBest, cerr := compareTaskCost(unenforcedTask, t, opt) if cerr != nil { err = cerr @@ -1377,7 +1373,7 @@ func (ds *DataSource) findBestTask(prop *property.PhysicalProperty, planCounter candidates := ds.skylinePruning(prop) pruningInfo := ds.getPruningInfo(candidates, prop) defer func() { - if err == nil && t != nil && !t.invalid() && pruningInfo != "" { + if err == nil && t != nil && !t.Invalid() && pruningInfo != "" { warnErr := errors.NewNoStackError(pruningInfo) if ds.SCtx().GetSessionVars().StmtCtx.InVerboseExplain { ds.SCtx().GetSessionVars().StmtCtx.AppendNote(warnErr) @@ -1395,7 +1391,7 @@ func (ds *DataSource) findBestTask(prop *property.PhysicalProperty, planCounter if err != nil { return nil, 0, err } - if !idxMergeTask.invalid() { + if !idxMergeTask.Invalid() { cntPlan++ planCounter.Dec(1) } @@ -1423,9 +1419,8 @@ func (ds *DataSource) findBestTask(prop *property.PhysicalProperty, planCounter dual.SetSchema(ds.schema) cntPlan++ planCounter.Dec(1) - t := &rootTask{ - p: dual, - } + t := 
&RootTask{} + t.SetPlan(dual) appendCandidate(ds, t, prop, opt) return t, cntPlan, nil } @@ -1499,7 +1494,7 @@ func (ds *DataSource) findBestTask(prop *property.PhysicalProperty, planCounter } } if allRangeIsPoint { - var pointGetTask task + var pointGetTask Task if len(path.Ranges) == 1 { pointGetTask = ds.convertToPointGet(prop, candidate) } else { @@ -1508,12 +1503,12 @@ func (ds *DataSource) findBestTask(prop *property.PhysicalProperty, planCounter // Batch/PointGet plans may be over-optimized, like `a>=1(?) and a<=1(?)` --> `a=1` --> PointGet(a=1). // For safety, prevent these plans from the plan cache here. - if !pointGetTask.invalid() && expression.MaybeOverOptimized4PlanCache(ds.SCtx().GetExprCtx(), candidate.path.AccessConds) && !isSafePointGetPath4PlanCache(ds.SCtx(), candidate.path) { + if !pointGetTask.Invalid() && expression.MaybeOverOptimized4PlanCache(ds.SCtx().GetExprCtx(), candidate.path.AccessConds) && !isSafePointGetPath4PlanCache(ds.SCtx(), candidate.path) { ds.SCtx().GetSessionVars().StmtCtx.SetSkipPlanCache(errors.NewNoStackError("Batch/PointGet plans may be over-optimized")) } appendCandidate(ds, pointGetTask, prop, opt) - if !pointGetTask.invalid() { + if !pointGetTask.Invalid() { cntPlan++ planCounter.Dec(1) } @@ -1537,7 +1532,7 @@ func (ds *DataSource) findBestTask(prop *property.PhysicalProperty, planCounter if ds.preferStoreType&h.PreferTiKV != 0 && path.StoreType == kv.TiFlash { continue } - var tblTask task + var tblTask Task if ds.SampleInfo != nil { tblTask, err = ds.convertToSampleTable(prop, candidate, opt) } else { @@ -1546,7 +1541,7 @@ func (ds *DataSource) findBestTask(prop *property.PhysicalProperty, planCounter if err != nil { return nil, 0, err } - if !tblTask.invalid() { + if !tblTask.Invalid() { cntPlan++ planCounter.Dec(1) } @@ -1571,7 +1566,7 @@ func (ds *DataSource) findBestTask(prop *property.PhysicalProperty, planCounter if err != nil { return nil, 0, err } - if !idxTask.invalid() { + if !idxTask.Invalid() { cntPlan++ 
planCounter.Dec(1) } @@ -1592,7 +1587,7 @@ func (ds *DataSource) findBestTask(prop *property.PhysicalProperty, planCounter } // convertToIndexMergeScan builds the index merge scan for intersection or union cases. -func (ds *DataSource) convertToIndexMergeScan(prop *property.PhysicalProperty, candidate *candidatePath, _ *physicalOptimizeOp) (task task, err error) { +func (ds *DataSource) convertToIndexMergeScan(prop *property.PhysicalProperty, candidate *candidatePath, _ *coreusage.PhysicalOptimizeOp) (task Task, err error) { if prop.IsFlashProp() || prop.TaskTp == property.CopSingleReadTaskType { return invalidTask, nil } @@ -1672,7 +1667,7 @@ func (ds *DataSource) convertToIndexMergeScan(prop *property.PhysicalProperty, c // task plan in function `getTaskPlanCost`. if prop.TaskTp == property.RootTaskType { cop.indexPlanFinished = true - task = cop.convertToRootTask(ds.SCtx()) + task = cop.ConvertToRootTask(ds.SCtx()) } else { _, pureTableScan := ts.(*PhysicalTableScan) if !pureTableScan { @@ -1995,7 +1990,7 @@ func (ts *PhysicalTableScan) appendExtraHandleCol(ds *DataSource) (*expression.C // convertToIndexScan converts the DataSource to index scan with idx. func (ds *DataSource) convertToIndexScan(prop *property.PhysicalProperty, - candidate *candidatePath, _ *physicalOptimizeOp) (task task, err error) { + candidate *candidatePath, _ *coreusage.PhysicalOptimizeOp) (task Task, err error) { if candidate.path.Index.MVIndex { // MVIndex is special since different index rows may return the same _row_id and this can break some assumptions of IndexReader. // Currently only support using IndexMerge to access MVIndex instead of IndexReader. 
@@ -2110,8 +2105,8 @@ func (ds *DataSource) convertToIndexScan(prop *property.PhysicalProperty, finalStats := ds.StatsInfo().ScaleByExpectCnt(prop.ExpectedCnt) is.addPushedDownSelection(cop, ds, path, finalStats) if prop.TaskTp == property.RootTaskType { - task = task.convertToRootTask(ds.SCtx()) - } else if _, ok := task.(*rootTask); ok { + task = task.ConvertToRootTask(ds.SCtx()) + } else if _, ok := task.(*RootTask); ok { return invalidTask, nil } return task, nil @@ -2243,7 +2238,7 @@ func (is *PhysicalIndexScan) addPushedDownSelection(copTask *copTask, p *DataSou logutil.BgLogger().Debug("calculate selectivity failed, use selection factor", zap.Error(err)) selectivity = SelectionFactor } - tableSel.SetStats(copTask.plan().StatsInfo().Scale(selectivity)) + tableSel.SetStats(copTask.Plan().StatsInfo().Scale(selectivity)) } tableSel.SetChildren(copTask.tablePlan) copTask.tablePlan = tableSel @@ -2386,7 +2381,7 @@ func (ds *DataSource) isPointGetPath(path *util.AccessPath) bool { } // convertToTableScan converts the DataSource to table scan. -func (ds *DataSource) convertToTableScan(prop *property.PhysicalProperty, candidate *candidatePath, _ *physicalOptimizeOp) (task task, err error) { +func (ds *DataSource) convertToTableScan(prop *property.PhysicalProperty, candidate *candidatePath, _ *coreusage.PhysicalOptimizeOp) (Task, error) { // It will be handled in convertToIndexScan. 
if prop.TaskTp == property.CopMultiReadTaskType { return invalidTask, nil @@ -2461,8 +2456,8 @@ func (ds *DataSource) convertToTableScan(prop *property.PhysicalProperty, candid ColumnNames: ds.names, } mppTask = ts.addPushedDownSelectionToMppTask(mppTask, ds.StatsInfo().ScaleByExpectCnt(prop.ExpectedCnt)) - task = mppTask - if !mppTask.invalid() { + var task Task = mppTask + if !mppTask.Invalid() { if prop.TaskTp == property.MppTaskType && len(mppTask.rootTaskConds) > 0 { // If got filters cannot be pushed down to tiflash, we have to make sure it will be executed in TiDB, // So have to return a rootTask, but prop requires mppTask, cannot meet this requirement. @@ -2474,7 +2469,7 @@ func (ds *DataSource) convertToTableScan(prop *property.PhysicalProperty, candid // which cannot pushdown to tiflash(because TiFlash doesn't support some expr in Proj) // So HashJoin cannot pushdown to tiflash. But we still want TableScan to run on tiflash. task = mppTask - task = task.convertToRootTask(ds.SCtx()) + task = task.ConvertToRootTask(ds.SCtx()) } } return task, nil @@ -2495,7 +2490,7 @@ func (ds *DataSource) convertToTableScan(prop *property.PhysicalProperty, candid ColumnNames: ds.names, } ts.PlanPartInfo = copTask.physPlanPartInfo - task = copTask + var task Task = copTask if candidate.isMatchProp { copTask.keepOrder = true if ds.tableInfo.GetPartitionInfo() != nil { @@ -2519,15 +2514,15 @@ func (ds *DataSource) convertToTableScan(prop *property.PhysicalProperty, candid return invalidTask, nil } if prop.TaskTp == property.RootTaskType { - task = task.convertToRootTask(ds.SCtx()) - } else if _, ok := task.(*rootTask); ok { + task = task.ConvertToRootTask(ds.SCtx()) + } else if _, ok := task.(*RootTask); ok { return invalidTask, nil } return task, nil } func (ds *DataSource) convertToSampleTable(prop *property.PhysicalProperty, - candidate *candidatePath, _ *physicalOptimizeOp) (task task, err error) { + candidate *candidatePath, _ *coreusage.PhysicalOptimizeOp) (Task, error) 
{ if prop.TaskTp == property.CopMultiReadTaskType { return invalidTask, nil } @@ -2544,12 +2539,12 @@ func (ds *DataSource) convertToSampleTable(prop *property.PhysicalProperty, Desc: candidate.isMatchProp && prop.SortItems[0].Desc, }.Init(ds.SCtx(), ds.QueryBlockOffset()) p.schema = ds.schema - return &rootTask{ - p: p, - }, nil + rt := &RootTask{} + rt.SetPlan(p) + return rt, nil } -func (ds *DataSource) convertToPointGet(prop *property.PhysicalProperty, candidate *candidatePath) (task task) { +func (ds *DataSource) convertToPointGet(prop *property.PhysicalProperty, candidate *candidatePath) Task { if !prop.IsSortItemEmpty() && !candidate.isMatchProp { return invalidTask } @@ -2577,7 +2572,8 @@ func (ds *DataSource) convertToPointGet(prop *property.PhysicalProperty, candida pointGetPlan.PartitionIdx = ds.partitionDefIdx } pointGetPlan.PartitionNames = ds.partitionNames - rTsk := &rootTask{p: pointGetPlan} + rTsk := &RootTask{} + rTsk.SetPlan(pointGetPlan) if candidate.path.IsIntHandlePath { pointGetPlan.Handle = kv.IntHandle(candidate.path.Ranges[0].LowVal[0].GetInt64()) pointGetPlan.UnsignedHandle = mysql.HasUnsignedFlag(ds.handleCols.GetCol(0).RetType.GetFlag()) @@ -2599,7 +2595,7 @@ func (ds *DataSource) convertToPointGet(prop *property.PhysicalProperty, candida Conditions: candidate.path.TableFilters, }.Init(ds.SCtx(), ds.StatsInfo().ScaleByExpectCnt(prop.ExpectedCnt), ds.QueryBlockOffset()) sel.SetChildren(pointGetPlan) - rTsk.p = sel + rTsk.SetPlan(sel) } } else { pointGetPlan.IndexInfo = candidate.path.Index @@ -2617,14 +2613,14 @@ func (ds *DataSource) convertToPointGet(prop *property.PhysicalProperty, candida Conditions: append(candidate.path.IndexFilters, candidate.path.TableFilters...), }.Init(ds.SCtx(), ds.StatsInfo().ScaleByExpectCnt(prop.ExpectedCnt), ds.QueryBlockOffset()) sel.SetChildren(pointGetPlan) - rTsk.p = sel + rTsk.SetPlan(sel) } } return rTsk } -func (ds *DataSource) convertToBatchPointGet(prop *property.PhysicalProperty, candidate 
*candidatePath) (task task) { +func (ds *DataSource) convertToBatchPointGet(prop *property.PhysicalProperty, candidate *candidatePath) Task { if !prop.IsSortItemEmpty() && !candidate.isMatchProp { return invalidTask } @@ -2650,7 +2646,7 @@ func (ds *DataSource) convertToBatchPointGet(prop *property.PhysicalProperty, ca if batchPointGetPlan.KeepOrder { batchPointGetPlan.Desc = prop.SortItems[0].Desc } - rTsk := &rootTask{} + rTsk := &RootTask{} if candidate.path.IsIntHandlePath { for _, ran := range candidate.path.Ranges { batchPointGetPlan.Handles = append(batchPointGetPlan.Handles, kv.IntHandle(ran.LowVal[0].GetInt64())) @@ -2664,7 +2660,7 @@ func (ds *DataSource) convertToBatchPointGet(prop *property.PhysicalProperty, ca Conditions: candidate.path.TableFilters, }.Init(ds.SCtx(), ds.StatsInfo().ScaleByExpectCnt(prop.ExpectedCnt), ds.QueryBlockOffset()) sel.SetChildren(batchPointGetPlan) - rTsk.p = sel + rTsk.SetPlan(sel) } } else { batchPointGetPlan.IndexInfo = candidate.path.Index @@ -2689,11 +2685,12 @@ func (ds *DataSource) convertToBatchPointGet(prop *property.PhysicalProperty, ca Conditions: append(candidate.path.IndexFilters, candidate.path.TableFilters...), }.Init(ds.SCtx(), ds.StatsInfo().ScaleByExpectCnt(prop.ExpectedCnt), ds.QueryBlockOffset()) sel.SetChildren(batchPointGetPlan) - rTsk.p = sel + rTsk.SetPlan(sel) } } - if rTsk.p == nil { - rTsk.p = batchPointGetPlan.Init(ds.SCtx(), ds.tableStats.ScaleByExpectCnt(accessCnt), ds.schema.Clone(), ds.names, ds.QueryBlockOffset()) + if rTsk.GetPlan() == nil { + tmpP := batchPointGetPlan.Init(ds.SCtx(), ds.tableStats.ScaleByExpectCnt(accessCnt), ds.schema.Clone(), ds.names, ds.QueryBlockOffset()) + rTsk.SetPlan(tmpP) } return rTsk @@ -2841,7 +2838,7 @@ func (ds *DataSource) getOriginalPhysicalIndexScan(prop *property.PhysicalProper return is } -func (p *LogicalCTE) findBestTask(prop *property.PhysicalProperty, counter *PlanCounterTp, pop *physicalOptimizeOp) (t task, cntPlan int64, err error) { +func (p 
*LogicalCTE) findBestTask(prop *property.PhysicalProperty, counter *PlanCounterTp, pop *coreusage.PhysicalOptimizeOp) (t Task, cntPlan int64, err error) { if len(p.children) > 0 { return p.baseLogicalPlan.findBestTask(prop, counter, pop) } @@ -2863,7 +2860,10 @@ func (p *LogicalCTE) findBestTask(prop *property.PhysicalProperty, counter *Plan tblColHists: p.StatsInfo().HistColl, } } else { - t = &rootTask{p: pcte, isEmpty: false} + rt := &RootTask{} + rt.SetPlan(pcte) + rt.SetEmpty(false) + t = rt } if prop.CanAddEnforcer { t = enforceProperty(prop, t, p.Plan.SCtx()) @@ -2871,22 +2871,24 @@ func (p *LogicalCTE) findBestTask(prop *property.PhysicalProperty, counter *Plan return t, 1, nil } -func (p *LogicalCTETable) findBestTask(prop *property.PhysicalProperty, _ *PlanCounterTp, _ *physicalOptimizeOp) (t task, cntPlan int64, err error) { +func (p *LogicalCTETable) findBestTask(prop *property.PhysicalProperty, _ *PlanCounterTp, _ *coreusage.PhysicalOptimizeOp) (t Task, cntPlan int64, err error) { if !prop.IsSortItemEmpty() { return nil, 1, nil } pcteTable := PhysicalCTETable{IDForStorage: p.idForStorage}.Init(p.SCtx(), p.StatsInfo()) pcteTable.SetSchema(p.schema) - t = &rootTask{p: pcteTable} + rt := &RootTask{} + rt.SetPlan(pcteTable) + t = rt return t, 1, nil } -func appendCandidate(lp LogicalPlan, task task, prop *property.PhysicalProperty, opt *physicalOptimizeOp) { - if task == nil || task.invalid() { +func appendCandidate(lp LogicalPlan, task Task, prop *property.PhysicalProperty, opt *coreusage.PhysicalOptimizeOp) { + if task == nil || task.Invalid() { return } - opt.appendCandidate(lp, task.plan(), prop) + appendCandidate4PhysicalOptimizeOp(opt, lp, task.Plan(), prop) } // PushDownNot here can convert condition 'not (a != 1)' to 'a = 1'. 
When we build range from conds, the condition like @@ -2898,12 +2900,12 @@ func pushDownNot(ctx expression.BuildContext, conds []expression.Expression) []e return conds } -func validateTableSamplePlan(ds *DataSource, t task, err error) error { +func validateTableSamplePlan(ds *DataSource, t Task, err error) error { if err != nil { return err } - if ds.SampleInfo != nil && !t.invalid() { - if _, ok := t.plan().(*PhysicalTableSample); !ok { + if ds.SampleInfo != nil && !t.Invalid() { + if _, ok := t.Plan().(*PhysicalTableSample); !ok { return expression.ErrInvalidTableSample.GenWithStackByArgs("plan not supported") } } diff --git a/pkg/planner/core/hint_utils.go b/pkg/planner/core/hint_utils.go index 1399a9420c886..6d8a17d9bd54e 100644 --- a/pkg/planner/core/hint_utils.go +++ b/pkg/planner/core/hint_utils.go @@ -38,17 +38,17 @@ func GenHintsFromFlatPlan(flat *FlatPhysicalPlan) []*ast.TableOptimizerHint { if len(selectPlan) == 0 || !selectPlan[0].IsPhysicalPlan { return nil } - for _, op := range selectPlan { - p := op.Origin.(PhysicalPlan) - hints = genHintsFromSingle(p, nodeTp, op.StoreType, hints) + for _, fop := range selectPlan { + p := fop.Origin.(PhysicalPlan) + hints = genHintsFromSingle(p, nodeTp, fop.StoreType, hints) } for _, cte := range flat.CTEs { - for i, op := range cte { - if i == 0 || !op.IsRoot { + for i, fop := range cte { + if i == 0 || !fop.IsRoot { continue } - p := op.Origin.(PhysicalPlan) - hints = genHintsFromSingle(p, nodeTp, op.StoreType, hints) + p := fop.Origin.(PhysicalPlan) + hints = genHintsFromSingle(p, nodeTp, fop.StoreType, hints) } } return h.RemoveDuplicatedHints(hints) diff --git a/pkg/planner/core/optimizer.go b/pkg/planner/core/optimizer.go index 6e549a9cb5cb1..a7de886760594 100644 --- a/pkg/planner/core/optimizer.go +++ b/pkg/planner/core/optimizer.go @@ -18,6 +18,7 @@ import ( "cmp" "context" "fmt" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "math" "runtime" "slices" @@ -137,7 +138,7 @@ type logicalOptRule 
interface { The default value is false. It means that no interaction rule will be triggered. 3. error: If there is error during the rule optimizer, it will be thrown */ - optimize(context.Context, LogicalPlan, *plannerutil.LogicalOptimizeOp) (LogicalPlan, bool, error) + optimize(context.Context, LogicalPlan, *coreusage.LogicalOptimizeOp) (LogicalPlan, bool, error) name() string } @@ -1045,7 +1046,7 @@ func setupFineGrainedShuffleInternal(ctx context.Context, sctx PlanContext, plan // It's for handling the inconsistency between row count in the statsInfo and the recorded actual row count. Please // see comments in PhysicalPlan for details. func propagateProbeParents(plan PhysicalPlan, probeParents []PhysicalPlan) { - plan.setProbeParents(probeParents) + plan.SetProbeParents(probeParents) switch x := plan.(type) { case *PhysicalApply, *PhysicalIndexJoin, *PhysicalIndexHashJoin, *PhysicalIndexMergeJoin: if join, ok := plan.(interface{ getInnerChildIdx() int }); ok { @@ -1122,7 +1123,7 @@ func logicalOptimize(ctx context.Context, flag uint64, logic LogicalPlan) (Logic debugtrace.EnterContextCommon(logic.SCtx()) defer debugtrace.LeaveContextCommon(logic.SCtx()) } - opt := plannerutil.DefaultLogicalOptimizeOption() + opt := coreusage.DefaultLogicalOptimizeOption() vars := logic.SCtx().GetSessionVars() if vars.StmtCtx.EnableOptimizeTrace { vars.StmtCtx.OptimizeTracer = &tracing.OptimizeTracer{} @@ -1190,14 +1191,14 @@ func physicalOptimize(logic LogicalPlan, planCounter *PlanCounterTp) (plan Physi ExpectedCnt: math.MaxFloat64, } - opt := defaultPhysicalOptimizeOption() + opt := coreusage.DefaultPhysicalOptimizeOption() stmtCtx := logic.SCtx().GetSessionVars().StmtCtx if stmtCtx.EnableOptimizeTrace { tracer := &tracing.PhysicalOptimizeTracer{ PhysicalPlanCostDetails: make(map[string]*tracing.PhysicalPlanCostDetail), Candidates: make(map[int]*tracing.CandidatePlanTrace), } - opt = opt.withEnableOptimizeTracer(tracer) + opt = opt.WithEnableOptimizeTracer(tracer) defer func() 
{ r := recover() if r != nil { @@ -1218,7 +1219,7 @@ func physicalOptimize(logic LogicalPlan, planCounter *PlanCounterTp) (plan Physi if *planCounter > 0 { logic.SCtx().GetSessionVars().StmtCtx.AppendWarning(errors.NewNoStackErrorf("The parameter of nth_plan() is out of range")) } - if t.invalid() { + if t.Invalid() { errMsg := "Can't find a proper physical plan for this query" if config.GetGlobalConfig().DisaggregatedTiFlash && !logic.SCtx().GetSessionVars().IsMPPAllowed() { errMsg += ": cop and batchCop are not allowed in disaggregated tiflash mode, you should turn on tidb_allow_mpp switch" @@ -1226,11 +1227,11 @@ func physicalOptimize(logic LogicalPlan, planCounter *PlanCounterTp) (plan Physi return nil, 0, plannererrors.ErrInternal.GenWithStackByArgs(errMsg) } - if err = t.plan().ResolveIndices(); err != nil { + if err = t.Plan().ResolveIndices(); err != nil { return nil, 0, err } - cost, err = getPlanCost(t.plan(), property.RootTaskType, NewDefaultPlanCostOption()) - return t.plan(), cost, err + cost, err = getPlanCost(t.Plan(), property.RootTaskType, coreusage.NewDefaultPlanCostOption()) + return t.Plan(), cost, err } // eliminateUnionScanAndLock set lock property for PointGet and BatchPointGet and eliminates UnionScan and Lock. diff --git a/pkg/planner/core/physical_plans.go b/pkg/planner/core/physical_plans.go index 2b085bf2a9870..3500b0c4b220c 100644 --- a/pkg/planner/core/physical_plans.go +++ b/pkg/planner/core/physical_plans.go @@ -30,6 +30,7 @@ import ( "github.com/pingcap/tidb/pkg/planner/cardinality" "github.com/pingcap/tidb/pkg/planner/property" "github.com/pingcap/tidb/pkg/planner/util" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" "github.com/pingcap/tidb/pkg/statistics" @@ -256,7 +257,7 @@ func (sg *TiKVSingleGather) GetPhysicalIndexReader(schema *expression.Schema, st return reader } -// Clone implements PhysicalPlan interface. 
+// Clone implements op.PhysicalPlan interface. func (p *PhysicalTableReader) Clone() (PhysicalPlan, error) { cloned := new(PhysicalTableReader) base, err := p.physicalSchemaProducer.cloneWithSelf(cloned) @@ -275,13 +276,13 @@ func (p *PhysicalTableReader) Clone() (PhysicalPlan, error) { return cloned, nil } -// SetChildren overrides PhysicalPlan SetChildren interface. +// SetChildren overrides op.PhysicalPlan SetChildren interface. func (p *PhysicalTableReader) SetChildren(children ...PhysicalPlan) { p.tablePlan = children[0] p.TablePlans = flattenPushDownPlan(p.tablePlan) } -// ExtractCorrelatedCols implements PhysicalPlan interface. +// ExtractCorrelatedCols implements op.PhysicalPlan interface. func (p *PhysicalTableReader) ExtractCorrelatedCols() (corCols []*expression.CorrelatedColumn) { for _, child := range p.TablePlans { corCols = append(corCols, ExtractCorrelatedCols4PhysicalPlan(child)...) @@ -289,7 +290,7 @@ func (p *PhysicalTableReader) ExtractCorrelatedCols() (corCols []*expression.Cor return corCols } -// BuildPlanTrace implements PhysicalPlan interface. +// BuildPlanTrace implements op.PhysicalPlan interface. func (p *PhysicalTableReader) BuildPlanTrace() *tracing.PlanTrace { rp := p.basePhysicalPlan.BuildPlanTrace() if p.tablePlan != nil { @@ -298,8 +299,8 @@ func (p *PhysicalTableReader) BuildPlanTrace() *tracing.PlanTrace { return rp } -func (p *PhysicalTableReader) appendChildCandidate(op *physicalOptimizeOp) { - p.basePhysicalPlan.appendChildCandidate(op) +func (p *PhysicalTableReader) AppendChildCandidate(op *coreusage.PhysicalOptimizeOp) { + p.basePhysicalPlan.AppendChildCandidate(op) appendChildCandidate(p, p.tablePlan, op) } @@ -318,7 +319,7 @@ type PhysicalIndexReader struct { PlanPartInfo PhysPlanPartInfo } -// Clone implements PhysicalPlan interface. +// Clone implements op.PhysicalPlan interface. 
func (p *PhysicalIndexReader) Clone() (PhysicalPlan, error) { cloned := new(PhysicalIndexReader) base, err := p.physicalSchemaProducer.cloneWithSelf(cloned) @@ -336,7 +337,7 @@ func (p *PhysicalIndexReader) Clone() (PhysicalPlan, error) { return cloned, err } -// SetSchema overrides PhysicalPlan SetSchema interface. +// SetSchema overrides op.PhysicalPlan SetSchema interface. func (p *PhysicalIndexReader) SetSchema(_ *expression.Schema) { if p.indexPlan != nil { p.IndexPlans = flattenPushDownPlan(p.indexPlan) @@ -351,13 +352,13 @@ func (p *PhysicalIndexReader) SetSchema(_ *expression.Schema) { } } -// SetChildren overrides PhysicalPlan SetChildren interface. +// SetChildren overrides op.PhysicalPlan SetChildren interface. func (p *PhysicalIndexReader) SetChildren(children ...PhysicalPlan) { p.indexPlan = children[0] p.SetSchema(nil) } -// ExtractCorrelatedCols implements PhysicalPlan interface. +// ExtractCorrelatedCols implements op.PhysicalPlan interface. func (p *PhysicalIndexReader) ExtractCorrelatedCols() (corCols []*expression.CorrelatedColumn) { for _, child := range p.IndexPlans { corCols = append(corCols, ExtractCorrelatedCols4PhysicalPlan(child)...) @@ -365,7 +366,7 @@ func (p *PhysicalIndexReader) ExtractCorrelatedCols() (corCols []*expression.Cor return corCols } -// BuildPlanTrace implements PhysicalPlan interface. +// BuildPlanTrace implements op.PhysicalPlan interface. 
func (p *PhysicalIndexReader) BuildPlanTrace() *tracing.PlanTrace { rp := p.basePhysicalPlan.BuildPlanTrace() if p.indexPlan != nil { @@ -374,8 +375,8 @@ func (p *PhysicalIndexReader) BuildPlanTrace() *tracing.PlanTrace { return rp } -func (p *PhysicalIndexReader) appendChildCandidate(op *physicalOptimizeOp) { - p.basePhysicalPlan.appendChildCandidate(op) +func (p *PhysicalIndexReader) AppendChildCandidate(op *coreusage.PhysicalOptimizeOp) { + p.basePhysicalPlan.AppendChildCandidate(op) if p.indexPlan != nil { appendChildCandidate(p, p.indexPlan, op) } @@ -457,7 +458,7 @@ type PhysicalIndexLookUpReader struct { keepOrder bool } -// Clone implements PhysicalPlan interface. +// Clone implements op.PhysicalPlan interface. func (p *PhysicalIndexLookUpReader) Clone() (PhysicalPlan, error) { cloned := new(PhysicalIndexLookUpReader) base, err := p.physicalSchemaProducer.cloneWithSelf(cloned) @@ -492,7 +493,7 @@ func (p *PhysicalIndexLookUpReader) Clone() (PhysicalPlan, error) { return cloned, nil } -// ExtractCorrelatedCols implements PhysicalPlan interface. +// ExtractCorrelatedCols implements op.PhysicalPlan interface. func (p *PhysicalIndexLookUpReader) ExtractCorrelatedCols() (corCols []*expression.CorrelatedColumn) { for _, child := range p.TablePlans { corCols = append(corCols, ExtractCorrelatedCols4PhysicalPlan(child)...) @@ -513,7 +514,7 @@ func (p *PhysicalIndexLookUpReader) GetAvgTableRowSize() float64 { return cardinality.GetAvgRowSize(p.SCtx(), getTblStats(p.tablePlan), p.tablePlan.Schema().Columns, false, false) } -// BuildPlanTrace implements PhysicalPlan interface. +// BuildPlanTrace implements op.PhysicalPlan interface. 
 func (p *PhysicalIndexLookUpReader) BuildPlanTrace() *tracing.PlanTrace {
 	rp := p.basePhysicalPlan.BuildPlanTrace()
 	if p.indexPlan != nil {
@@ -525,8 +526,8 @@ func (p *PhysicalIndexLookUpReader) BuildPlanTrace() *tracing.PlanTrace {
 	return rp
 }
 
-func (p *PhysicalIndexLookUpReader) appendChildCandidate(op *physicalOptimizeOp) {
-	p.basePhysicalPlan.appendChildCandidate(op)
+func (p *PhysicalIndexLookUpReader) AppendChildCandidate(op *coreusage.PhysicalOptimizeOp) {
+	p.basePhysicalPlan.AppendChildCandidate(op)
 	if p.indexPlan != nil {
 		appendChildCandidate(p, p.indexPlan, op)
 	}
@@ -606,7 +607,7 @@ func (p *PhysicalIndexMergeReader) GetAvgTableRowSize() float64 {
 	return cardinality.GetAvgRowSize(p.SCtx(), getTblStats(p.TablePlans[len(p.TablePlans)-1]), p.Schema().Columns, false, false)
 }
 
-// ExtractCorrelatedCols implements PhysicalPlan interface.
+// ExtractCorrelatedCols implements op.PhysicalPlan interface.
 func (p *PhysicalIndexMergeReader) ExtractCorrelatedCols() (corCols []*expression.CorrelatedColumn) {
 	for _, child := range p.TablePlans {
 		corCols = append(corCols, ExtractCorrelatedCols4PhysicalPlan(child)...)
@@ -622,7 +623,7 @@ func (p *PhysicalIndexMergeReader) ExtractCorrelatedCols() (corCols []*expressio
 	return corCols
 }
 
-// BuildPlanTrace implements PhysicalPlan interface.
+// BuildPlanTrace implements op.PhysicalPlan interface.
 func (p *PhysicalIndexMergeReader) BuildPlanTrace() *tracing.PlanTrace {
 	rp := p.basePhysicalPlan.BuildPlanTrace()
 	if p.tablePlan != nil {
@@ -634,8 +635,8 @@ func (p *PhysicalIndexMergeReader) BuildPlanTrace() *tracing.PlanTrace {
 	return rp
 }
 
-func (p *PhysicalIndexMergeReader) appendChildCandidate(op *physicalOptimizeOp) {
-	p.basePhysicalPlan.appendChildCandidate(op)
+func (p *PhysicalIndexMergeReader) AppendChildCandidate(op *coreusage.PhysicalOptimizeOp) {
+	p.basePhysicalPlan.AppendChildCandidate(op)
 	if p.tablePlan != nil {
 		appendChildCandidate(p, p.tablePlan, op)
 	}
@@ -731,7 +732,7 @@ type PhysicalIndexScan struct {
 	usedStatsInfo *stmtctx.UsedStatsInfoForTable
}
 
-// Clone implements PhysicalPlan interface.
+// Clone implements op.PhysicalPlan interface.
 func (p *PhysicalIndexScan) Clone() (PhysicalPlan, error) {
 	cloned := new(PhysicalIndexScan)
 	*cloned = *p
@@ -759,7 +760,7 @@ func (p *PhysicalIndexScan) Clone() (PhysicalPlan, error) {
 	return cloned, nil
 }
 
-// ExtractCorrelatedCols implements PhysicalPlan interface.
+// ExtractCorrelatedCols implements op.PhysicalPlan interface.
 func (p *PhysicalIndexScan) ExtractCorrelatedCols() []*expression.CorrelatedColumn {
 	corCols := make([]*expression.CorrelatedColumn, 0, len(p.AccessCondition))
 	for _, expr := range p.AccessCondition {
@@ -913,7 +914,7 @@ type PhysicalTableScan struct {
 	maxWaitTimeMs int
 }
 
-// Clone implements PhysicalPlan interface.
+// Clone implements op.PhysicalPlan interface.
 func (ts *PhysicalTableScan) Clone() (PhysicalPlan, error) {
 	clonedScan := new(PhysicalTableScan)
 	*clonedScan = *ts
@@ -940,7 +941,7 @@ func (ts *PhysicalTableScan) Clone() (PhysicalPlan, error) {
 	return clonedScan, nil
 }
 
-// ExtractCorrelatedCols implements PhysicalPlan interface.
+// ExtractCorrelatedCols implements op.PhysicalPlan interface.
func (ts *PhysicalTableScan) ExtractCorrelatedCols() []*expression.CorrelatedColumn { corCols := make([]*expression.CorrelatedColumn, 0, len(ts.AccessCondition)+len(ts.LateMaterializationFilterCondition)) for _, expr := range ts.AccessCondition { @@ -1077,7 +1078,7 @@ type PhysicalProjection struct { AvoidColumnEvaluator bool } -// Clone implements PhysicalPlan interface. +// Clone implements op.PhysicalPlan interface. func (p *PhysicalProjection) Clone() (PhysicalPlan, error) { cloned := new(PhysicalProjection) *cloned = *p @@ -1090,7 +1091,7 @@ func (p *PhysicalProjection) Clone() (PhysicalPlan, error) { return cloned, err } -// ExtractCorrelatedCols implements PhysicalPlan interface. +// ExtractCorrelatedCols implements op.PhysicalPlan interface. func (p *PhysicalProjection) ExtractCorrelatedCols() []*expression.CorrelatedColumn { corCols := make([]*expression.CorrelatedColumn, 0, len(p.Exprs)) for _, expr := range p.Exprs { @@ -1127,7 +1128,7 @@ func (lt *PhysicalTopN) GetPartitionBy() []property.SortItem { return lt.PartitionBy } -// Clone implements PhysicalPlan interface. +// Clone implements op.PhysicalPlan interface. func (lt *PhysicalTopN) Clone() (PhysicalPlan, error) { cloned := new(PhysicalTopN) *cloned = *lt @@ -1147,7 +1148,7 @@ func (lt *PhysicalTopN) Clone() (PhysicalPlan, error) { return cloned, nil } -// ExtractCorrelatedCols implements PhysicalPlan interface. +// ExtractCorrelatedCols implements op.PhysicalPlan interface. func (lt *PhysicalTopN) ExtractCorrelatedCols() []*expression.CorrelatedColumn { corCols := make([]*expression.CorrelatedColumn, 0, len(lt.ByItems)) for _, item := range lt.ByItems { @@ -1181,7 +1182,7 @@ type PhysicalApply struct { OuterSchema []*expression.CorrelatedColumn } -// Clone implements PhysicalPlan interface. +// Clone implements op.PhysicalPlan interface. 
func (la *PhysicalApply) Clone() (PhysicalPlan, error) { cloned := new(PhysicalApply) base, err := la.PhysicalHashJoin.Clone() @@ -1198,7 +1199,7 @@ func (la *PhysicalApply) Clone() (PhysicalPlan, error) { return cloned, nil } -// ExtractCorrelatedCols implements PhysicalPlan interface. +// ExtractCorrelatedCols implements op.PhysicalPlan interface. func (la *PhysicalApply) ExtractCorrelatedCols() []*expression.CorrelatedColumn { corCols := la.PhysicalHashJoin.ExtractCorrelatedCols() for i := len(corCols) - 1; i >= 0; i-- { @@ -1276,7 +1277,7 @@ func (p *basePhysicalJoin) cloneWithSelf(newSelf PhysicalPlan) (*basePhysicalJoi return cloned, nil } -// ExtractCorrelatedCols implements PhysicalPlan interface. +// ExtractCorrelatedCols implements op.PhysicalPlan interface. func (p *basePhysicalJoin) ExtractCorrelatedCols() []*expression.CorrelatedColumn { corCols := make([]*expression.CorrelatedColumn, 0, len(p.LeftConditions)+len(p.RightConditions)+len(p.OtherConditions)) for _, fun := range p.LeftConditions { @@ -1358,7 +1359,7 @@ type PhysicalHashJoin struct { runtimeFilterList []*RuntimeFilter } -// Clone implements PhysicalPlan interface. +// Clone implements op.PhysicalPlan interface. func (p *PhysicalHashJoin) Clone() (PhysicalPlan, error) { cloned := new(PhysicalHashJoin) base, err := p.basePhysicalJoin.cloneWithSelf(cloned) @@ -1381,7 +1382,7 @@ func (p *PhysicalHashJoin) Clone() (PhysicalPlan, error) { return cloned, nil } -// ExtractCorrelatedCols implements PhysicalPlan interface. +// ExtractCorrelatedCols implements op.PhysicalPlan interface. 
func (p *PhysicalHashJoin) ExtractCorrelatedCols() []*expression.CorrelatedColumn { corCols := make([]*expression.CorrelatedColumn, 0, len(p.EqualConditions)+len(p.NAEqualConditions)+len(p.LeftConditions)+len(p.RightConditions)+len(p.OtherConditions)) for _, fun := range p.EqualConditions { @@ -1459,7 +1460,7 @@ func NewPhysicalHashJoin(p *LogicalJoin, innerIdx int, useOuterToBuild bool, new type PhysicalIndexJoin struct { basePhysicalJoin - innerTask task + innerTask Task // Ranges stores the IndexRanges when the inner plan is index scan. Ranges ranger.MutableRanges @@ -1579,7 +1580,7 @@ type PhysicalExchangeReceiver struct { IsCTEReader bool } -// Clone implment PhysicalPlan interface. +// Clone implements op.PhysicalPlan interface. func (p *PhysicalExchangeReceiver) Clone() (PhysicalPlan, error) { np := new(PhysicalExchangeReceiver) base, err := p.basePhysicalPlan.cloneWithSelf(np) @@ -1637,7 +1638,7 @@ func (p PhysicalExpand) Init(ctx PlanContext, stats *property.StatsInfo, offset return &p } -// Clone implements PhysicalPlan interface. +// Clone implements op.PhysicalPlan interface. func (p *PhysicalExpand) Clone() (PhysicalPlan, error) { if len(p.LevelExprs) > 0 { return p.cloneV2() } @@ -1706,7 +1707,7 @@ type PhysicalExchangeSender struct { CompressionMode kv.ExchangeCompressionMode } -// Clone implements PhysicalPlan interface. +// Clone implements op.PhysicalPlan interface. func (p *PhysicalExchangeSender) Clone() (PhysicalPlan, error) { np := new(PhysicalExchangeSender) base, err := p.basePhysicalPlan.cloneWithSelf(np) @@ -1734,7 +1735,7 @@ func (p *PhysicalExchangeSender) MemoryUsage() (sum int64) { return } -// Clone implements PhysicalPlan interface. +// Clone implements op.PhysicalPlan interface. 
func (p *PhysicalMergeJoin) Clone() (PhysicalPlan, error) { cloned := new(PhysicalMergeJoin) base, err := p.basePhysicalJoin.cloneWithSelf(cloned) @@ -1794,7 +1795,7 @@ func (p *PhysicalLimit) GetPartitionBy() []property.SortItem { return p.PartitionBy } -// Clone implements PhysicalPlan interface. +// Clone implements op.PhysicalPlan interface. func (p *PhysicalLimit) Clone() (PhysicalPlan, error) { cloned := new(PhysicalLimit) *cloned = *p @@ -1827,7 +1828,7 @@ type PhysicalUnionAll struct { mpp bool } -// Clone implements PhysicalPlan interface. +// Clone implements op.PhysicalPlan interface. func (p *PhysicalUnionAll) Clone() (PhysicalPlan, error) { cloned := new(PhysicalUnionAll) base, err := p.physicalSchemaProducer.cloneWithSelf(cloned) @@ -1927,7 +1928,7 @@ func (p *basePhysicalAgg) getAggFuncCostFactor(isMPP bool) (factor float64) { return } -// ExtractCorrelatedCols implements PhysicalPlan interface. +// ExtractCorrelatedCols implements op.PhysicalPlan interface. func (p *basePhysicalAgg) ExtractCorrelatedCols() []*expression.CorrelatedColumn { corCols := make([]*expression.CorrelatedColumn, 0, len(p.GroupByItems)+len(p.AggFuncs)) for _, expr := range p.GroupByItems { @@ -1970,7 +1971,7 @@ func (p *PhysicalHashAgg) getPointer() *basePhysicalAgg { return &p.basePhysicalAgg } -// Clone implements PhysicalPlan interface. +// Clone implements op.PhysicalPlan interface. func (p *PhysicalHashAgg) Clone() (PhysicalPlan, error) { cloned := new(PhysicalHashAgg) base, err := p.basePhysicalAgg.cloneWithSelf(cloned) @@ -2018,7 +2019,7 @@ func (p *PhysicalStreamAgg) getPointer() *basePhysicalAgg { return &p.basePhysicalAgg } -// Clone implements PhysicalPlan interface. +// Clone implements op.PhysicalPlan interface. 
func (p *PhysicalStreamAgg) Clone() (PhysicalPlan, error) { cloned := new(PhysicalStreamAgg) base, err := p.basePhysicalAgg.cloneWithSelf(cloned) @@ -2048,7 +2049,7 @@ type PhysicalSort struct { IsPartialSort bool } -// Clone implements PhysicalPlan interface. +// Clone implements op.PhysicalPlan interface. func (ls *PhysicalSort) Clone() (PhysicalPlan, error) { cloned := new(PhysicalSort) cloned.IsPartialSort = ls.IsPartialSort @@ -2063,7 +2064,7 @@ func (ls *PhysicalSort) Clone() (PhysicalPlan, error) { return cloned, nil } -// ExtractCorrelatedCols implements PhysicalPlan interface. +// ExtractCorrelatedCols implements op.PhysicalPlan interface. func (ls *PhysicalSort) ExtractCorrelatedCols() []*expression.CorrelatedColumn { corCols := make([]*expression.CorrelatedColumn, 0, len(ls.ByItems)) for _, item := range ls.ByItems { @@ -2121,7 +2122,7 @@ type PhysicalUnionScan struct { HandleCols HandleCols } -// ExtractCorrelatedCols implements PhysicalPlan interface. +// ExtractCorrelatedCols implements op.PhysicalPlan interface. func (p *PhysicalUnionScan) ExtractCorrelatedCols() []*expression.CorrelatedColumn { corCols := make([]*expression.CorrelatedColumn, 0) for _, cond := range p.Conditions { @@ -2178,7 +2179,7 @@ type PhysicalSelection struct { // hasRFConditions bool } -// Clone implements PhysicalPlan interface. +// Clone implements op.PhysicalPlan interface. func (p *PhysicalSelection) Clone() (PhysicalPlan, error) { cloned := new(PhysicalSelection) base, err := p.basePhysicalPlan.cloneWithSelf(cloned) @@ -2190,7 +2191,7 @@ func (p *PhysicalSelection) Clone() (PhysicalPlan, error) { return cloned, nil } -// ExtractCorrelatedCols implements PhysicalPlan interface. +// ExtractCorrelatedCols implements op.PhysicalPlan interface. 
func (p *PhysicalSelection) ExtractCorrelatedCols() []*expression.CorrelatedColumn { corCols := make([]*expression.CorrelatedColumn, 0, len(p.Conditions)) for _, cond := range p.Conditions { @@ -2217,7 +2218,7 @@ type PhysicalMaxOneRow struct { basePhysicalPlan } -// Clone implements PhysicalPlan interface. +// Clone implements op.PhysicalPlan interface. func (p *PhysicalMaxOneRow) Clone() (PhysicalPlan, error) { cloned := new(PhysicalMaxOneRow) base, err := p.basePhysicalPlan.cloneWithSelf(cloned) @@ -2284,7 +2285,7 @@ type PhysicalWindow struct { storeTp kv.StoreType } -// ExtractCorrelatedCols implements PhysicalPlan interface. +// ExtractCorrelatedCols implements op.PhysicalPlan interface. func (p *PhysicalWindow) ExtractCorrelatedCols() []*expression.CorrelatedColumn { corCols := make([]*expression.CorrelatedColumn, 0, len(p.WindowFuncDescs)) for _, windowFunc := range p.WindowFuncDescs { @@ -2307,7 +2308,7 @@ func (p *PhysicalWindow) ExtractCorrelatedCols() []*expression.CorrelatedColumn return corCols } -// Clone implements PhysicalPlan interface. +// Clone implements op.PhysicalPlan interface. func (p *PhysicalWindow) Clone() (PhysicalPlan, error) { cloned := new(PhysicalWindow) *cloned = *p @@ -2419,7 +2420,7 @@ type PhysicalShuffleReceiverStub struct { // Receiver points to `executor.shuffleReceiver`. Receiver unsafe.Pointer - // DataSource is the PhysicalPlan of the Receiver. + // DataSource is the op.PhysicalPlan of the Receiver. DataSource PhysicalPlan } @@ -2504,7 +2505,7 @@ func BuildMergeJoinPlan(ctx PlanContext, joinType JoinType, leftKeys, rightKeys return PhysicalMergeJoin{basePhysicalJoin: baseJoin}.Init(ctx, nil, 0) } -// SafeClone clones this PhysicalPlan and handles its panic. +// SafeClone clones this op.PhysicalPlan and handles its panic. 
func SafeClone(v PhysicalPlan) (_ PhysicalPlan, err error) { defer func() { if r := recover(); r != nil { @@ -2579,7 +2580,7 @@ type PhysicalCTETable struct { IDForStorage int } -// ExtractCorrelatedCols implements PhysicalPlan interface. +// ExtractCorrelatedCols implements op.PhysicalPlan interface. func (p *PhysicalCTE) ExtractCorrelatedCols() []*expression.CorrelatedColumn { corCols := ExtractCorrelatedCols4PhysicalPlan(p.SeedPlan) if p.RecurPlan != nil { @@ -2608,7 +2609,7 @@ func (p *PhysicalCTE) ExplainID() fmt.Stringer { }) } -// Clone implements PhysicalPlan interface. +// Clone implements op.PhysicalPlan interface. func (p *PhysicalCTE) Clone() (PhysicalPlan, error) { cloned := new(PhysicalCTE) base, err := p.physicalSchemaProducer.cloneWithSelf(cloned) @@ -2751,7 +2752,7 @@ func (p *PhysicalCTEStorage) MemoryUsage() (sum int64) { return } -// Clone implements PhysicalPlan interface. +// Clone implements op.PhysicalPlan interface. func (p *PhysicalCTEStorage) Clone() (PhysicalPlan, error) { cloned, err := (*PhysicalCTE)(p).Clone() if err != nil { @@ -2760,7 +2761,7 @@ func (p *PhysicalCTEStorage) Clone() (PhysicalPlan, error) { return (*PhysicalCTEStorage)(cloned.(*PhysicalCTE)), nil } -func appendChildCandidate(origin PhysicalPlan, pp PhysicalPlan, op *physicalOptimizeOp) { +func appendChildCandidate(origin PhysicalPlan, pp PhysicalPlan, op *coreusage.PhysicalOptimizeOp) { candidate := &tracing.CandidatePlanTrace{ PlanTrace: &tracing.PlanTrace{ ID: pp.ID(), @@ -2769,9 +2770,9 @@ func appendChildCandidate(origin PhysicalPlan, pp PhysicalPlan, op *physicalOpti // TODO: trace the cost }, } - op.tracer.AppendCandidate(candidate) - pp.appendChildCandidate(op) - op.tracer.Candidates[origin.ID()].AppendChildrenID(pp.ID()) + op.AppendCandidate(candidate) + pp.AppendChildCandidate(op) + op.GetTracer().Candidates[origin.ID()].AppendChildrenID(pp.ID()) } // PhysicalSequence is the physical representation of LogicalSequence. 
Used to mark the CTE producers in the plan tree. @@ -2806,7 +2807,7 @@ func (*PhysicalSequence) ExplainInfo() string { return res } -// Clone implements PhysicalPlan interface. +// Clone implements op.PhysicalPlan interface. func (p *PhysicalSequence) Clone() (PhysicalPlan, error) { cloned := new(PhysicalSequence) base, err := p.physicalSchemaProducer.cloneWithSelf(cloned) diff --git a/pkg/planner/core/plan.go b/pkg/planner/core/plan.go index 8b1cff1bd7c75..b4587a2367423 100644 --- a/pkg/planner/core/plan.go +++ b/pkg/planner/core/plan.go @@ -15,7 +15,6 @@ package core import ( - "fmt" "math" "github.com/pingcap/errors" @@ -27,12 +26,12 @@ import ( fd "github.com/pingcap/tidb/pkg/planner/funcdep" "github.com/pingcap/tidb/pkg/planner/property" "github.com/pingcap/tidb/pkg/planner/util" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/execdetails" "github.com/pingcap/tidb/pkg/util/size" "github.com/pingcap/tidb/pkg/util/tracing" - "github.com/pingcap/tipb/go-tipb" ) // PlanContext is the context for building plan. @@ -47,52 +46,10 @@ func AsSctx(pctx PlanContext) (sessionctx.Context, error) { return sctx, nil } -// Plan is the description of an execution flow. -// It is created from ast.Node first, then optimized by the optimizer, -// finally used by the executor to create a Cursor which executes the statement. -type Plan interface { - // Get the schema. - Schema() *expression.Schema - - // Get the ID. - ID() int - - // TP get the plan type. - TP() string - - // Get the ID in explain statement - ExplainID() fmt.Stringer - - // ExplainInfo returns operator information to be explained. - ExplainInfo() string - - // ReplaceExprColumns replace all the column reference in the plan's expression node. - ReplaceExprColumns(replace map[string]*expression.Column) - - SCtx() PlanContext - - // StatsInfo will return the property.StatsInfo for this plan. 
- StatsInfo() *property.StatsInfo - - // OutputNames returns the outputting names of each column. - OutputNames() types.NameSlice - - // SetOutputNames sets the outputting name by the given slice. - SetOutputNames(names types.NameSlice) - - // QueryBlockOffset is query block offset. - // For example, in query - // `select /*+ use_index(@sel_2 t2, a) */ * from t1, (select a*2 as b from t2) tx where a>b` - // the hint should be applied on the sub-query, whose query block is 2. - QueryBlockOffset() int - - BuildPlanTrace() *tracing.PlanTrace -} - -func enforceProperty(p *property.PhysicalProperty, tsk task, ctx PlanContext) task { +func enforceProperty(p *property.PhysicalProperty, tsk Task, ctx PlanContext) Task { if p.TaskTp == property.MppTaskType { mpp, ok := tsk.(*mppTask) - if !ok || mpp.invalid() { + if !ok || mpp.Invalid() { return invalidTask } if !p.IsSortItemAllForPartition() { @@ -103,17 +60,17 @@ func enforceProperty(p *property.PhysicalProperty, tsk task, ctx PlanContext) ta } // when task is double cop task warping a index merge reader, tsk.plan() may be nil when indexPlanFinished is marked // as false, while the real plan is in idxMergePartPlans. tsk.plan()==nil is not right here. 
- if p.IsSortItemEmpty() || tsk.invalid() { + if p.IsSortItemEmpty() || tsk.Invalid() { return tsk } if p.TaskTp != property.MppTaskType { - tsk = tsk.convertToRootTask(ctx) + tsk = tsk.ConvertToRootTask(ctx) } sortReqProp := &property.PhysicalProperty{TaskTp: property.RootTaskType, SortItems: p.SortItems, ExpectedCnt: math.MaxFloat64} sort := PhysicalSort{ ByItems: make([]*util.ByItems, 0, len(p.SortItems)), IsPartialSort: p.IsSortItemAllForPartition(), - }.Init(ctx, tsk.plan().StatsInfo(), tsk.plan().QueryBlockOffset(), sortReqProp) + }.Init(ctx, tsk.Plan().StatsInfo(), tsk.Plan().QueryBlockOffset(), sortReqProp) for _, col := range p.SortItems { sort.ByItems = append(sort.ByItems, &util.ByItems{Expr: col.Col, Desc: col.Desc}) } @@ -121,23 +78,23 @@ func enforceProperty(p *property.PhysicalProperty, tsk task, ctx PlanContext) ta } // optimizeByShuffle insert `PhysicalShuffle` to optimize performance by running in a parallel manner. -func optimizeByShuffle(tsk task, ctx PlanContext) task { - if tsk.plan() == nil { +func optimizeByShuffle(tsk Task, ctx PlanContext) Task { + if tsk.Plan() == nil { return tsk } - switch p := tsk.plan().(type) { + switch p := tsk.Plan().(type) { case *PhysicalWindow: if shuffle := optimizeByShuffle4Window(p, ctx); shuffle != nil { - return shuffle.attach2Task(tsk) + return shuffle.Attach2Task(tsk) } case *PhysicalMergeJoin: if shuffle := optimizeByShuffle4MergeJoin(p, ctx); shuffle != nil { - return shuffle.attach2Task(tsk) + return shuffle.Attach2Task(tsk) } case *PhysicalStreamAgg: if shuffle := optimizeByShuffle4StreamAgg(p, ctx); shuffle != nil { - return shuffle.attach2Task(tsk) + return shuffle.Attach2Task(tsk) } } return tsk @@ -270,10 +227,10 @@ type LogicalPlan interface { // PredicatePushDown pushes down the predicates in the where/on/having clauses as deeply as possible. // It will accept a predicate that is an expression slice, and return the expressions that can't be pushed. 
// Because it might change the root if the having clause exists, we need to return a plan that represents a new root. - PredicatePushDown([]expression.Expression, *util.LogicalOptimizeOp) ([]expression.Expression, LogicalPlan) + PredicatePushDown([]expression.Expression, *coreusage.LogicalOptimizeOp) ([]expression.Expression, LogicalPlan) // PruneColumns prunes the unused columns, and return the new logical plan if changed, otherwise it's same. - PruneColumns([]*expression.Column, *util.LogicalOptimizeOp) (LogicalPlan, error) + PruneColumns([]*expression.Column, *coreusage.LogicalOptimizeOp) (LogicalPlan, error) // findBestTask converts the logical plan to the physical plan. It's a new interface. // It is called recursively from the parent to the children to create the result physical plan. @@ -283,7 +240,7 @@ type LogicalPlan interface { // If planCounter > 0, the clock_th plan generated in this function will be returned. // If planCounter = 0, the plan generated in this function will not be considered. // If planCounter = -1, then we will not force plan. - findBestTask(prop *property.PhysicalProperty, planCounter *PlanCounterTp, op *physicalOptimizeOp) (task, int64, error) + findBestTask(prop *property.PhysicalProperty, planCounter *PlanCounterTp, op *coreusage.PhysicalOptimizeOp) (Task, int64, error) // BuildKeyInfo will collect the information of unique keys into schema. // Because this method is also used in cascades planner, we cannot use @@ -292,16 +249,16 @@ type LogicalPlan interface { BuildKeyInfo(selfSchema *expression.Schema, childSchema []*expression.Schema) // pushDownTopN will push down the topN or limit operator during logical optimization. - pushDownTopN(topN *LogicalTopN, opt *util.LogicalOptimizeOp) LogicalPlan + pushDownTopN(topN *LogicalTopN, opt *coreusage.LogicalOptimizeOp) LogicalPlan // deriveTopN derives an implicit TopN from a filter on row_number window function.. 
- deriveTopN(opt *util.LogicalOptimizeOp) LogicalPlan + deriveTopN(opt *coreusage.LogicalOptimizeOp) LogicalPlan // predicateSimplification consolidates different predcicates on a column and its equivalence classes. - predicateSimplification(opt *util.LogicalOptimizeOp) LogicalPlan + predicateSimplification(opt *coreusage.LogicalOptimizeOp) LogicalPlan // constantPropagation generate new constant predicate according to column equivalence relation - constantPropagation(parentPlan LogicalPlan, currentChildIdx int, opt *util.LogicalOptimizeOp) (newRoot LogicalPlan) + constantPropagation(parentPlan LogicalPlan, currentChildIdx int, opt *coreusage.LogicalOptimizeOp) (newRoot LogicalPlan) // pullUpConstantPredicates recursive find constant predicate, used for the constant propagation rule pullUpConstantPredicates() []expression.Expression @@ -325,7 +282,7 @@ type LogicalPlan interface { // valid, but the ordered indices in leaf plan is limited. So we can get all possible order properties by a pre-walking. PreparePossibleProperties(schema *expression.Schema, childrenProperties ...[][]*expression.Column) [][]*expression.Column - // exhaustPhysicalPlans generates all possible plans that can match the required property. + // exhaustPhysicalPlans generates all possible plans that can match the required property. // It will return: // 1. All possible plans that can match the required property. // 2. Whether the SQL hint can work. Return true if there is no hint. @@ -356,120 +313,10 @@ type LogicalPlan interface { ExtractFD() *fd.FDSet } -// PhysicalPlan is a tree of the physical operators. -type PhysicalPlan interface { - Plan - - // getPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost on model ver1. - getPlanCostVer1(taskType property.TaskType, option *PlanCostOption) (float64, error) - - // getPlanCostVer2 calculates the cost of the plan if it has not been calculated yet and returns the cost on model ver2. 
- getPlanCostVer2(taskType property.TaskType, option *PlanCostOption) (costVer2, error) - - // attach2Task makes the current physical plan as the father of task's physicalPlan and updates the cost of - // current task. If the child's task is cop task, some operator may close this task and return a new rootTask. - attach2Task(...task) task - - // ToPB converts physical plan to tipb executor. - ToPB(ctx PlanContext, storeType kv.StoreType) (*tipb.Executor, error) - - // GetChildReqProps gets the required property by child index. - GetChildReqProps(idx int) *property.PhysicalProperty - - // StatsCount returns the count of property.StatsInfo for this plan. - StatsCount() float64 - - // ExtractCorrelatedCols extracts correlated columns inside the PhysicalPlan. - ExtractCorrelatedCols() []*expression.CorrelatedColumn - - // Children get all the children. - Children() []PhysicalPlan - - // SetChildren sets the children for the plan. - SetChildren(...PhysicalPlan) - - // SetChild sets the ith child for the plan. - SetChild(i int, child PhysicalPlan) - - // ResolveIndices resolves the indices for columns. After doing this, the columns can evaluate the rows by their indices. - ResolveIndices() error - - // StatsInfo returns the StatsInfo of the plan. - StatsInfo() *property.StatsInfo - - // SetStats sets basePlan.stats inside the basePhysicalPlan. - SetStats(s *property.StatsInfo) - - // ExplainNormalizedInfo returns operator normalized information for generating digest. - ExplainNormalizedInfo() string - - // Clone clones this physical plan. 
- Clone() (PhysicalPlan, error) - - // appendChildCandidate append child physicalPlan into tracer in order to track each child physicalPlan which can't - // be tracked during findBestTask or enumeratePhysicalPlans4Task - appendChildCandidate(op *physicalOptimizeOp) - - // MemoryUsage return the memory usage of PhysicalPlan - MemoryUsage() int64 - - // Below three methods help to handle the inconsistency between row count in the StatsInfo and the recorded - // actual row count. - // For the children in the inner side (probe side) of Index Join and Apply, the row count in the StatsInfo - // means the estimated row count for a single "probe", but the recorded actual row count is the total row - // count for all "probes". - // To handle this inconsistency without breaking anything else, we added a field `probeParents` of - // type `[]PhysicalPlan` into all PhysicalPlan to record all operators that are (indirect or direct) parents - // of this PhysicalPlan and will cause this inconsistency. - // Using this information, we can convert the row count between the "single probe" row count and "all probes" - // row count freely. - - // setProbeParents sets the above stated `probeParents` field. - setProbeParents([]PhysicalPlan) - // getEstRowCountForDisplay uses the "single probe" row count in StatsInfo and the probeParents to calculate - // the "all probe" row count. - // All places that display the row count for a PhysicalPlan are expected to use this method. - getEstRowCountForDisplay() float64 - // getEstRowCountForDisplay uses the runtime stats and the probeParents to calculate the actual "probe" count. 
- getActualProbeCnt(*execdetails.RuntimeStatsColl) int64 -} - -// NewDefaultPlanCostOption returns PlanCostOption -func NewDefaultPlanCostOption() *PlanCostOption { - return &PlanCostOption{} -} - -// PlanCostOption indicates option during GetPlanCost -type PlanCostOption struct { - CostFlag uint64 - tracer *physicalOptimizeOp -} - -// WithCostFlag set costflag -func (op *PlanCostOption) WithCostFlag(flag uint64) *PlanCostOption { - if op == nil { - return nil - } - op.CostFlag = flag - return op -} - -// WithOptimizeTracer set tracer -func (op *PlanCostOption) WithOptimizeTracer(v *physicalOptimizeOp) *PlanCostOption { - if op == nil { - return nil - } - op.tracer = v - if v != nil && v.tracer != nil { - op.CostFlag |= CostFlagTrace - } - return op -} - type baseLogicalPlan struct { base.Plan - taskMap map[string]task + taskMap map[string]Task // taskMapBak forms a backlog stack of taskMap, used to roll back the taskMap. taskMapBak []string // taskMapBakTS stores the timestamps of logs. @@ -551,10 +398,10 @@ type basePhysicalPlan struct { // used by the new cost interface planCostInit bool planCost float64 - planCostVer2 costVer2 + planCostVer2 coreusage.CostVer2 // probeParents records the IndexJoins and Applys with this operator in their inner children. - // Please see comments in PhysicalPlan for details. + // Please see comments in op.PhysicalPlan for details. probeParents []PhysicalPlan // Only for MPP. If TiFlashFineGrainedShuffleStreamCount > 0: @@ -586,7 +433,7 @@ func (p *basePhysicalPlan) cloneWithSelf(newSelf PhysicalPlan) (*basePhysicalPla return base, nil } -// Clone implements PhysicalPlan interface. +// Clone implements op.PhysicalPlan interface. func (p *basePhysicalPlan) Clone() (PhysicalPlan, error) { return nil, errors.Errorf("%T doesn't support cloning", p.self) } @@ -596,7 +443,7 @@ func (*basePhysicalPlan) ExplainInfo() string { return "" } -// ExplainNormalizedInfo implements PhysicalPlan interface. 
+// ExplainNormalizedInfo implements op.PhysicalPlan interface. func (*basePhysicalPlan) ExplainNormalizedInfo() string { return "" } @@ -605,12 +452,12 @@ func (p *basePhysicalPlan) GetChildReqProps(idx int) *property.PhysicalProperty return p.childrenReqProps[idx] } -// ExtractCorrelatedCols implements PhysicalPlan interface. +// ExtractCorrelatedCols implements op.PhysicalPlan interface. func (*basePhysicalPlan) ExtractCorrelatedCols() []*expression.CorrelatedColumn { return nil } -// MemoryUsage return the memory usage of basePhysicalPlan +// MemoryUsage return the memory usage of basePhysicalPlan func (p *basePhysicalPlan) MemoryUsage() (sum int64) { if p == nil { return @@ -629,21 +476,21 @@ func (p *basePhysicalPlan) MemoryUsage() (sum int64) { return } -func (p *basePhysicalPlan) getEstRowCountForDisplay() float64 { +func (p *basePhysicalPlan) GetEstRowCountForDisplay() float64 { if p == nil { return 0 } return p.StatsInfo().RowCount * getEstimatedProbeCntFromProbeParents(p.probeParents) } -func (p *basePhysicalPlan) getActualProbeCnt(statsColl *execdetails.RuntimeStatsColl) int64 { +func (p *basePhysicalPlan) GetActualProbeCnt(statsColl *execdetails.RuntimeStatsColl) int64 { if p == nil { return 1 } return getActualProbeCntFromProbeParents(p.probeParents, statsColl) } -func (p *basePhysicalPlan) setProbeParents(probeParents []PhysicalPlan) { +func (p *basePhysicalPlan) SetProbeParents(probeParents []PhysicalPlan) { p.probeParents = probeParents } @@ -681,12 +528,12 @@ func (p *baseLogicalPlan) rollBackTaskMap(ts uint64) { } } -func (p *baseLogicalPlan) getTask(prop *property.PhysicalProperty) task { +func (p *baseLogicalPlan) getTask(prop *property.PhysicalProperty) Task { key := prop.HashCode() return p.taskMap[string(key)] } -func (p *baseLogicalPlan) storeTask(prop *property.PhysicalProperty, task task) { +func (p *baseLogicalPlan) storeTask(prop *property.PhysicalProperty, task Task) { key := prop.HashCode() if 
p.SCtx().GetSessionVars().StmtCtx.StmtHints.TaskMapNeedBackUp() { // Empty string for useless change. @@ -755,7 +602,7 @@ func (p *logicalSchemaProducer) BuildKeyInfo(selfSchema *expression.Schema, chil func newBaseLogicalPlan(ctx PlanContext, tp string, self LogicalPlan, qbOffset int) baseLogicalPlan { return baseLogicalPlan{ - taskMap: make(map[string]task), + taskMap: make(map[string]Task), taskMapBak: make([]string, 0, 10), taskMapBakTS: make([]uint64, 0, 10), Plan: base.NewBasePlan(ctx, tp, qbOffset), @@ -775,7 +622,7 @@ func (*baseLogicalPlan) ExtractCorrelatedCols() []*expression.CorrelatedColumn { } // PruneColumns implements LogicalPlan interface. -func (p *baseLogicalPlan) PruneColumns(parentUsedCols []*expression.Column, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (p *baseLogicalPlan) PruneColumns(parentUsedCols []*expression.Column, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { if len(p.children) == 0 { return p.self, nil } @@ -810,7 +657,7 @@ func (p *baseLogicalPlan) Children() []LogicalPlan { return p.children } -// Children implements PhysicalPlan Children interface. +// Children implements op.PhysicalPlan Children interface. func (p *basePhysicalPlan) Children() []PhysicalPlan { return p.children } @@ -820,7 +667,7 @@ func (p *baseLogicalPlan) SetChildren(children ...LogicalPlan) { p.children = children } -// SetChildren implements PhysicalPlan SetChildren interface. +// SetChildren implements op.PhysicalPlan SetChildren interface. func (p *basePhysicalPlan) SetChildren(children ...PhysicalPlan) { p.children = children } @@ -830,7 +677,7 @@ func (p *baseLogicalPlan) SetChild(i int, child LogicalPlan) { p.children[i] = child } -// SetChild implements PhysicalPlan SetChild interface. +// SetChild implements op.PhysicalPlan SetChild interface. 
func (p *basePhysicalPlan) SetChild(i int, child PhysicalPlan) { p.children[i] = child } @@ -860,7 +707,7 @@ func (p *baseLogicalPlan) BuildPlanTrace() *tracing.PlanTrace { return planTrace } -func (p *basePhysicalPlan) appendChildCandidate(op *physicalOptimizeOp) { +func (p *basePhysicalPlan) AppendChildCandidate(op *coreusage.PhysicalOptimizeOp) { if len(p.Children()) < 1 { return } @@ -870,9 +717,9 @@ func (p *basePhysicalPlan) appendChildCandidate(op *physicalOptimizeOp) { PlanTrace: &tracing.PlanTrace{TP: child.TP(), ID: child.ID(), ExplainInfo: child.ExplainInfo()}, } - op.tracer.AppendCandidate(childCandidate) - child.appendChildCandidate(op) + op.AppendCandidate(childCandidate) + child.AppendChildCandidate(op) childrenID = append(childrenID, child.ID()) } - op.tracer.Candidates[p.ID()].PlanTrace.AppendChildrenID(childrenID...) + op.GetTracer().Candidates[p.ID()].PlanTrace.AppendChildrenID(childrenID...) } diff --git a/pkg/planner/core/plan_base.go b/pkg/planner/core/plan_base.go new file mode 100644 index 0000000000000..0f109953480bb --- /dev/null +++ b/pkg/planner/core/plan_base.go @@ -0,0 +1,133 @@ +package core + +import ( + "fmt" + "github.com/pingcap/tidb/pkg/expression" + "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/planner/property" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" + "github.com/pingcap/tidb/pkg/types" + "github.com/pingcap/tidb/pkg/util/execdetails" + "github.com/pingcap/tidb/pkg/util/tracing" + "github.com/pingcap/tipb/go-tipb" +) + +// Plan is the description of an execution flow. +// It is created from ast.Node first, then optimized by the optimizer, +// finally used by the executor to create a Cursor which executes the statement. +type Plan interface { + // Get the schema. + Schema() *expression.Schema + + // Get the ID. + ID() int + + // TP get the plan type. + TP() string + + // Get the ID in explain statement + ExplainID() fmt.Stringer + + // ExplainInfo returns operator information to be explained. 
+ ExplainInfo() string + + // ReplaceExprColumns replace all the column reference in the plan's expression node. + ReplaceExprColumns(replace map[string]*expression.Column) + + SCtx() PlanContext + + // StatsInfo will return the property.StatsInfo for this plan. + StatsInfo() *property.StatsInfo + + // OutputNames returns the outputting names of each column. + OutputNames() types.NameSlice + + // SetOutputNames sets the outputting name by the given slice. + SetOutputNames(names types.NameSlice) + + // QueryBlockOffset is query block offset. + // For example, in query + // `select /*+ use_index(@sel_2 t2, a) */ * from t1, (select a*2 as b from t2) tx where a>b` + // the hint should be applied on the sub-query, whose query block is 2. + QueryBlockOffset() int + + BuildPlanTrace() *tracing.PlanTrace +} + +// PhysicalPlan is a tree of the physical operators. +type PhysicalPlan interface { + Plan + + // GetPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost on model ver1. + GetPlanCostVer1(taskType property.TaskType, option *coreusage.PlanCostOption) (float64, error) + + // GetPlanCostVer2 calculates the cost of the plan if it has not been calculated yet and returns the cost on model ver2. + GetPlanCostVer2(taskType property.TaskType, option *coreusage.PlanCostOption) (coreusage.CostVer2, error) + + // Attach2Task makes the current physical plan as the father of task's physicalPlan and updates the cost of + // current task. If the child's task is cop task, some operator may close this task and return a new rootTask. + Attach2Task(...Task) Task + + // ToPB converts physical plan to tipb executor. + ToPB(ctx PlanContext, storeType kv.StoreType) (*tipb.Executor, error) + + // GetChildReqProps gets the required property by child index. + GetChildReqProps(idx int) *property.PhysicalProperty + + // StatsCount returns the count of property.StatsInfo for this plan. 
+ StatsCount() float64 + + // ExtractCorrelatedCols extracts correlated columns inside the PhysicalPlan. + ExtractCorrelatedCols() []*expression.CorrelatedColumn + + // Children get all the children. + Children() []PhysicalPlan + + // SetChildren sets the children for the plan. + SetChildren(...PhysicalPlan) + + // SetChild sets the ith child for the plan. + SetChild(i int, child PhysicalPlan) + + // ResolveIndices resolves the indices for columns. After doing this, the columns can evaluate the rows by their indices. + ResolveIndices() error + + // StatsInfo returns the StatsInfo of the plan. + StatsInfo() *property.StatsInfo + + // SetStats sets basePlan.stats inside the basePhysicalPlan. + SetStats(s *property.StatsInfo) + + // ExplainNormalizedInfo returns operator normalized information for generating digest. + ExplainNormalizedInfo() string + + // Clone clones this physical plan. + Clone() (PhysicalPlan, error) + + // AppendChildCandidate append child physicalPlan into tracer in order to track each child physicalPlan which can't + // be tracked during findBestTask or enumeratePhysicalPlans4Task + AppendChildCandidate(op *coreusage.PhysicalOptimizeOp) + + // MemoryUsage return the memory usage of PhysicalPlan + MemoryUsage() int64 + + // Below three methods help to handle the inconsistency between row count in the StatsInfo and the recorded + // actual row count. + // For the children in the inner side (probe side) of Index Join and Apply, the row count in the StatsInfo + // means the estimated row count for a single "probe", but the recorded actual row count is the total row + // count for all "probes". + // To handle this inconsistency without breaking anything else, we added a field `probeParents` of + // type `[]PhysicalPlan` into all PhysicalPlan to record all operators that are (indirect or direct) parents + // of this PhysicalPlan and will cause this inconsistency. 
+ // Using this information, we can convert the row count between the "single probe" row count and "all probes" + // row count freely. + + // SetProbeParents sets the above stated `probeParents` field. + SetProbeParents([]PhysicalPlan) + // GetEstRowCountForDisplay uses the "single probe" row count in StatsInfo and the probeParents to calculate + // the "all probe" row count. + // All places that display the row count for a PhysicalPlan are expected to use this method. + GetEstRowCountForDisplay() float64 + // GetActualProbeCnt uses the runtime stats and the probeParents to calculate the actual "probe" count. + GetActualProbeCnt(*execdetails.RuntimeStatsColl) int64 +} diff --git a/pkg/planner/core/plan_cache.go b/pkg/planner/core/plan_cache.go index 51117b0456eae..c40186af81acb 100644 --- a/pkg/planner/core/plan_cache.go +++ b/pkg/planner/core/plan_cache.go @@ -16,7 +16,6 @@ package core import ( "context" - "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/bindinfo" "github.com/pingcap/tidb/pkg/domain" diff --git a/pkg/planner/core/plan_cost_detail.go b/pkg/planner/core/plan_cost_detail.go index 8ba5c98ac0b61..5d17b614b6610 100644 --- a/pkg/planner/core/plan_cost_detail.go +++ b/pkg/planner/core/plan_cost_detail.go @@ -16,6 +16,7 @@ package core import ( "fmt" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/util/tracing" @@ -87,7 +88,7 @@ const ( MemQuotaLbl = "memQuota" ) -func setPointGetPlanCostDetail(p *PointGetPlan, opt *physicalOptimizeOp, +func setPointGetPlanCostDetail(p *PointGetPlan, opt *coreusage.PhysicalOptimizeOp, rowSize, networkFactor, seekFactor float64) { if opt == nil { return @@ -97,10 +98,10 @@ func setPointGetPlanCostDetail(p *PointGetPlan, opt *physicalOptimizeOp, AddParam(NetworkFactorLbl, networkFactor). AddParam(SeekFactorLbl, seekFactor). 
SetDesc(fmt.Sprintf("%s*%s+%s", RowSizeLbl, NetworkFactorLbl, SeekFactorLbl)) - opt.appendPlanCostDetail(detail) + appendPlanCostDetail4PhysicalOptimizeOp(opt, detail) } -func setBatchPointGetPlanCostDetail(p *BatchPointGetPlan, opt *physicalOptimizeOp, +func setBatchPointGetPlanCostDetail(p *BatchPointGetPlan, opt *coreusage.PhysicalOptimizeOp, rowCount, rowSize, networkFactor, seekFactor float64, scanConcurrency int) { if opt == nil { return @@ -113,10 +114,10 @@ func setBatchPointGetPlanCostDetail(p *BatchPointGetPlan, opt *physicalOptimizeO AddParam(ScanConcurrencyLbl, scanConcurrency). SetDesc(fmt.Sprintf("(%s*%s*%s+%s*%s)/%s", RowCountLbl, RowSizeLbl, NetworkFactorLbl, RowCountLbl, SeekFactorLbl, ScanConcurrencyLbl)) - opt.appendPlanCostDetail(detail) + appendPlanCostDetail4PhysicalOptimizeOp(opt, detail) } -func setPhysicalTableOrIndexScanCostDetail(p PhysicalPlan, opt *physicalOptimizeOp, +func setPhysicalTableOrIndexScanCostDetail(p PhysicalPlan, opt *coreusage.PhysicalOptimizeOp, rowCount, rowSize, scanFactor float64, costModelVersion int) { if opt == nil { return @@ -137,10 +138,10 @@ func setPhysicalTableOrIndexScanCostDetail(p PhysicalPlan, opt *physicalOptimize desc = fmt.Sprintf("%s*log2(%s)*%s", RowCountLbl, RowSizeLbl, ScanFactorLbl) } detail.SetDesc(desc) - opt.appendPlanCostDetail(detail) + appendPlanCostDetail4PhysicalOptimizeOp(opt, detail) } -func setPhysicalTableReaderCostDetail(p *PhysicalTableReader, opt *physicalOptimizeOp, +func setPhysicalTableReaderCostDetail(p *PhysicalTableReader, opt *coreusage.PhysicalOptimizeOp, rowCount, rowSize, networkFactor, netSeekCost, tablePlanCost float64, scanConcurrency int, storeType kv.StoreType) { // tracer haven't support non tikv plan for now @@ -156,10 +157,10 @@ func setPhysicalTableReaderCostDetail(p *PhysicalTableReader, opt *physicalOptim AddParam(ScanConcurrencyLbl, scanConcurrency) detail.SetDesc(fmt.Sprintf("(%s+%s*%s*%s+%s)/%s", TablePlanCostLbl, RowCountLbl, RowSizeLbl, NetworkFactorLbl, 
NetSeekCostLbl, ScanConcurrencyLbl)) - opt.appendPlanCostDetail(detail) + appendPlanCostDetail4PhysicalOptimizeOp(opt, detail) } -func setPhysicalIndexReaderCostDetail(p *PhysicalIndexReader, opt *physicalOptimizeOp, +func setPhysicalIndexReaderCostDetail(p *PhysicalIndexReader, opt *coreusage.PhysicalOptimizeOp, rowCount, rowSize, networkFactor, netSeekCost, indexPlanCost float64, scanConcurrency int) { if opt == nil { @@ -174,10 +175,10 @@ func setPhysicalIndexReaderCostDetail(p *PhysicalIndexReader, opt *physicalOptim AddParam(ScanConcurrencyLbl, scanConcurrency) detail.SetDesc(fmt.Sprintf("(%s+%s*%s*%s+%s)/%s", IndexPlanCostLbl, RowCountLbl, RowSizeLbl, NetworkFactorLbl, NetSeekCostLbl, ScanConcurrencyLbl)) - opt.appendPlanCostDetail(detail) + appendPlanCostDetail4PhysicalOptimizeOp(opt, detail) } -func setPhysicalHashJoinCostDetail(p *PhysicalHashJoin, opt *physicalOptimizeOp, spill bool, +func setPhysicalHashJoinCostDetail(p *PhysicalHashJoin, opt *coreusage.PhysicalOptimizeOp, spill bool, buildCnt, probeCnt, cpuFactor, rowSize, numPairs, cpuCost, probeCPUCost, memCost, diskCost, probeDiskCost, diskFactor, memoryFactor, concurrencyFactor float64, @@ -238,7 +239,7 @@ func setPhysicalHashJoinCostDetail(p *PhysicalHashJoin, opt *physicalOptimizeOp, AddParam(ProbeDiskCostDescLbl, diskCostDetail.probeDesc()) detail.SetDesc(fmt.Sprintf("%s+%s+%s+all children cost", CPUCostDetailLbl, MemCostDetailLbl, DiskCostDetailLbl)) - opt.appendPlanCostDetail(detail) + appendPlanCostDetail4PhysicalOptimizeOp(opt, detail) } // HashJoinProbeCostDetail indicates probe cpu cost detail diff --git a/pkg/planner/core/plan_cost_ver1.go b/pkg/planner/core/plan_cost_ver1.go index a0c7fdf80da26..b49a07854616b 100644 --- a/pkg/planner/core/plan_cost_ver1.go +++ b/pkg/planner/core/plan_cost_ver1.go @@ -15,6 +15,7 @@ package core import ( + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "math" "github.com/pingcap/errors" @@ -27,17 +28,6 @@ import ( 
"github.com/pingcap/tidb/pkg/util/paging" ) -const ( - // CostFlagRecalculate indicates the optimizer to ignore cached cost and recalculate it again. - CostFlagRecalculate uint64 = 1 << iota - - // CostFlagUseTrueCardinality indicates the optimizer to use true cardinality to calculate the cost. - CostFlagUseTrueCardinality - - // CostFlagTrace indicates whether to trace the cost calculation. - CostFlagTrace -) - const ( modelVer1 = 1 modelVer2 = 2 @@ -47,16 +37,16 @@ func hasCostFlag(costFlag, flag uint64) bool { return (costFlag & flag) > 0 } -// getPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *basePhysicalPlan) getPlanCostVer1(taskType property.TaskType, option *PlanCostOption) (float64, error) { +// GetPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. +func (p *basePhysicalPlan) GetPlanCostVer1(taskType property.TaskType, option *coreusage.PlanCostOption) (float64, error) { costFlag := option.CostFlag - if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { + if p.planCostInit && !hasCostFlag(costFlag, coreusage.CostFlagRecalculate) { // just calculate the cost once and always reuse it return p.planCost, nil } p.planCost = 0 // the default implementation, the operator have no cost for _, child := range p.children { - childCost, err := child.getPlanCostVer1(taskType, option) + childCost, err := child.GetPlanCostVer1(taskType, option) if err != nil { return 0, err } @@ -66,10 +56,10 @@ func (p *basePhysicalPlan) getPlanCostVer1(taskType property.TaskType, option *P return p.planCost, nil } -// getPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *PhysicalSelection) getPlanCostVer1(taskType property.TaskType, option *PlanCostOption) (float64, error) { +// GetPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. 
+func (p *PhysicalSelection) GetPlanCostVer1(taskType property.TaskType, option *coreusage.PlanCostOption) (float64, error) { costFlag := option.CostFlag - if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { + if p.planCostInit && !hasCostFlag(costFlag, coreusage.CostFlagRecalculate) { return p.planCost, nil } @@ -88,7 +78,7 @@ func (p *PhysicalSelection) getPlanCostVer1(taskType property.TaskType, option * selfCost = 0 // for compatibility, see https://github.com/pingcap/tidb/issues/36243 } - childCost, err := p.children[0].getPlanCostVer1(taskType, option) + childCost, err := p.children[0].GetPlanCostVer1(taskType, option) if err != nil { return 0, err } @@ -110,13 +100,13 @@ func (p *PhysicalProjection) GetCost(count float64) float64 { return cpuCost + concurrencyCost } -// getPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *PhysicalProjection) getPlanCostVer1(taskType property.TaskType, option *PlanCostOption) (float64, error) { +// GetPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. +func (p *PhysicalProjection) GetPlanCostVer1(taskType property.TaskType, option *coreusage.PlanCostOption) (float64, error) { costFlag := option.CostFlag - if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { + if p.planCostInit && !hasCostFlag(costFlag, coreusage.CostFlagRecalculate) { return p.planCost, nil } - childCost, err := p.children[0].getPlanCostVer1(taskType, option) + childCost, err := p.children[0].GetPlanCostVer1(taskType, option) if err != nil { return 0, err } @@ -172,17 +162,17 @@ func (p *PhysicalIndexLookUpReader) GetCost(costFlag uint64) (cost float64) { return } -// getPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. 
-func (p *PhysicalIndexLookUpReader) getPlanCostVer1(_ property.TaskType, option *PlanCostOption) (float64, error) { +// GetPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. +func (p *PhysicalIndexLookUpReader) GetPlanCostVer1(_ property.TaskType, option *coreusage.PlanCostOption) (float64, error) { costFlag := option.CostFlag - if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { + if p.planCostInit && !hasCostFlag(costFlag, coreusage.CostFlagRecalculate) { return p.planCost, nil } p.planCost = 0 // child's cost for _, child := range []PhysicalPlan{p.indexPlan, p.tablePlan} { - childCost, err := child.getPlanCostVer1(property.CopMultiReadTaskType, option) + childCost, err := child.GetPlanCostVer1(property.CopMultiReadTaskType, option) if err != nil { return 0, err } @@ -196,7 +186,7 @@ func (p *PhysicalIndexLookUpReader) getPlanCostVer1(_ property.TaskType, option tmp = tmp.Children()[0] } ts := tmp.(*PhysicalTableScan) - tblCost, err := ts.getPlanCostVer1(property.CopMultiReadTaskType, option) + tblCost, err := ts.GetPlanCostVer1(property.CopMultiReadTaskType, option) if err != nil { return 0, err } @@ -227,17 +217,17 @@ func (p *PhysicalIndexLookUpReader) getPlanCostVer1(_ property.TaskType, option return p.planCost, nil } -// getPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *PhysicalIndexReader) getPlanCostVer1(_ property.TaskType, option *PlanCostOption) (float64, error) { +// GetPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. 
+func (p *PhysicalIndexReader) GetPlanCostVer1(_ property.TaskType, option *coreusage.PlanCostOption) (float64, error) { costFlag := option.CostFlag - if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { + if p.planCostInit && !hasCostFlag(costFlag, coreusage.CostFlagRecalculate) { return p.planCost, nil } var rowCount, rowSize, netFactor, indexPlanCost, netSeekCost float64 sqlScanConcurrency := p.SCtx().GetSessionVars().DistSQLScanConcurrency() // child's cost - childCost, err := p.indexPlan.getPlanCostVer1(property.CopSingleReadTaskType, option) + childCost, err := p.indexPlan.GetPlanCostVer1(property.CopSingleReadTaskType, option) if err != nil { return 0, err } @@ -255,8 +245,8 @@ func (p *PhysicalIndexReader) getPlanCostVer1(_ property.TaskType, option *PlanC // consider concurrency p.planCost /= float64(sqlScanConcurrency) - if option.tracer != nil { - setPhysicalIndexReaderCostDetail(p, option.tracer, rowCount, rowSize, netFactor, netSeekCost, indexPlanCost, sqlScanConcurrency) + if option.GetTracer() != nil { + setPhysicalIndexReaderCostDetail(p, option.GetTracer(), rowCount, rowSize, netFactor, netSeekCost, indexPlanCost, sqlScanConcurrency) } p.planCostInit = true return p.planCost, nil @@ -269,10 +259,10 @@ func (p *PhysicalIndexReader) GetNetDataSize() float64 { return p.indexPlan.StatsCount() * rowSize } -// getPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *PhysicalTableReader) getPlanCostVer1(_ property.TaskType, option *PlanCostOption) (float64, error) { +// GetPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. 
+func (p *PhysicalTableReader) GetPlanCostVer1(_ property.TaskType, option *coreusage.PlanCostOption) (float64, error) { costFlag := option.CostFlag - if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { + if p.planCostInit && !hasCostFlag(costFlag, coreusage.CostFlagRecalculate) { return p.planCost, nil } @@ -284,7 +274,7 @@ func (p *PhysicalTableReader) getPlanCostVer1(_ property.TaskType, option *PlanC switch storeType { case kv.TiKV: // child's cost - childCost, err := p.tablePlan.getPlanCostVer1(property.CopSingleReadTaskType, option) + childCost, err := p.tablePlan.GetPlanCostVer1(property.CopSingleReadTaskType, option) if err != nil { return 0, err } @@ -307,7 +297,7 @@ func (p *PhysicalTableReader) getPlanCostVer1(_ property.TaskType, option *PlanC concurrency = p.SCtx().GetSessionVars().CopTiFlashConcurrencyFactor rowSize = collectRowSizeFromMPPPlan(p.tablePlan) seekCost = accumulateNetSeekCost4MPP(p.tablePlan) - childCost, err := p.tablePlan.getPlanCostVer1(property.MppTaskType, option) + childCost, err := p.tablePlan.GetPlanCostVer1(property.MppTaskType, option) if err != nil { return 0, err } @@ -318,7 +308,7 @@ func (p *PhysicalTableReader) getPlanCostVer1(_ property.TaskType, option *PlanC rowSize = cardinality.GetAvgRowSize(p.SCtx(), getTblStats(p.tablePlan), p.tablePlan.Schema().Columns, false, false) seekCost = estimateNetSeekCost(p.tablePlan) tType := property.CopSingleReadTaskType - childCost, err := p.tablePlan.getPlanCostVer1(tType, option) + childCost, err := p.tablePlan.GetPlanCostVer1(tType, option) if err != nil { return 0, err } @@ -333,12 +323,12 @@ func (p *PhysicalTableReader) getPlanCostVer1(_ property.TaskType, option *PlanC p.planCost /= concurrency // consider tidb_enforce_mpp if isMPP && p.SCtx().GetSessionVars().IsMPPEnforced() && - !hasCostFlag(costFlag, CostFlagRecalculate) { // show the real cost in explain-statements + !hasCostFlag(costFlag, coreusage.CostFlagRecalculate) { // show the real cost in 
explain-statements p.planCost /= 1000000000 } } - if option.tracer != nil { - setPhysicalTableReaderCostDetail(p, option.tracer, + if option.GetTracer() != nil { + setPhysicalTableReaderCostDetail(p, option.GetTracer(), rowCount, rowSize, netFactor, netSeekCost, tableCost, sqlScanConcurrency, storeType) } @@ -352,16 +342,16 @@ func (p *PhysicalTableReader) GetNetDataSize() float64 { return p.tablePlan.StatsCount() * rowSize } -// getPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *PhysicalIndexMergeReader) getPlanCostVer1(_ property.TaskType, option *PlanCostOption) (float64, error) { +// GetPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. +func (p *PhysicalIndexMergeReader) GetPlanCostVer1(_ property.TaskType, option *coreusage.PlanCostOption) (float64, error) { costFlag := option.CostFlag - if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { + if p.planCostInit && !hasCostFlag(costFlag, coreusage.CostFlagRecalculate) { return p.planCost, nil } p.planCost = 0 if tblScan := p.tablePlan; tblScan != nil { - childCost, err := tblScan.getPlanCostVer1(property.CopSingleReadTaskType, option) + childCost, err := tblScan.GetPlanCostVer1(property.CopSingleReadTaskType, option) if err != nil { return 0, err } @@ -372,7 +362,7 @@ func (p *PhysicalIndexMergeReader) getPlanCostVer1(_ property.TaskType, option * p.planCost += getCardinality(tblScan, costFlag) * rowSize * netFactor // net I/O cost } for _, partialScan := range p.partialPlans { - childCost, err := partialScan.getPlanCostVer1(property.CopSingleReadTaskType, option) + childCost, err := partialScan.GetPlanCostVer1(property.CopSingleReadTaskType, option) if err != nil { return 0, err } @@ -421,10 +411,10 @@ func (p *PhysicalIndexMergeReader) GetPartialReaderNetDataSize(plan PhysicalPlan return plan.StatsCount() * cardinality.GetAvgRowSize(p.SCtx(), getTblStats(plan), plan.Schema().Columns, 
isIdxScan, false) } -// getPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *PhysicalTableScan) getPlanCostVer1(_ property.TaskType, option *PlanCostOption) (float64, error) { +// GetPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. +func (p *PhysicalTableScan) GetPlanCostVer1(_ property.TaskType, option *coreusage.PlanCostOption) (float64, error) { costFlag := option.CostFlag - if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { + if p.planCostInit && !hasCostFlag(costFlag, coreusage.CostFlagRecalculate) { return p.planCost, nil } @@ -438,18 +428,18 @@ func (p *PhysicalTableScan) getPlanCostVer1(_ property.TaskType, option *PlanCos rowCount = getCardinality(p, costFlag) rowSize = p.getScanRowSize() selfCost = rowCount * rowSize * scanFactor - if option.tracer != nil { - setPhysicalTableOrIndexScanCostDetail(p, option.tracer, rowCount, rowSize, scanFactor, costModelVersion) + if option.GetTracer() != nil { + setPhysicalTableOrIndexScanCostDetail(p, option.GetTracer(), rowCount, rowSize, scanFactor, costModelVersion) } p.planCost = selfCost p.planCostInit = true return p.planCost, nil } -// getPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *PhysicalIndexScan) getPlanCostVer1(_ property.TaskType, option *PlanCostOption) (float64, error) { +// GetPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. 
+func (p *PhysicalIndexScan) GetPlanCostVer1(_ property.TaskType, option *coreusage.PlanCostOption) (float64, error) { costFlag := option.CostFlag - if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { + if p.planCostInit && !hasCostFlag(costFlag, coreusage.CostFlagRecalculate) { return p.planCost, nil } @@ -463,8 +453,8 @@ func (p *PhysicalIndexScan) getPlanCostVer1(_ property.TaskType, option *PlanCos rowCount = getCardinality(p, costFlag) rowSize = p.getScanRowSize() selfCost = rowCount * rowSize * scanFactor - if option.tracer != nil { - setPhysicalTableOrIndexScanCostDetail(p, option.tracer, rowCount, rowSize, scanFactor, costModelVersion) + if option.GetTracer() != nil { + setPhysicalTableOrIndexScanCostDetail(p, option.GetTracer(), rowCount, rowSize, scanFactor, costModelVersion) } p.planCost = selfCost p.planCostInit = true @@ -509,7 +499,7 @@ func (p *PhysicalIndexJoin) GetCost(outerCnt, innerCnt, outerCost, innerCost flo numPairs = 0 } } - if hasCostFlag(costFlag, CostFlagUseTrueCardinality) { + if hasCostFlag(costFlag, coreusage.CostFlagUseTrueCardinality) { numPairs = getOperatorActRows(p) } probeCost := numPairs * sessVars.GetCPUFactor() @@ -531,24 +521,24 @@ func (p *PhysicalIndexJoin) GetCost(outerCnt, innerCnt, outerCost, innerCost flo return outerCost + innerPlanCost + cpuCost + memoryCost } -// getPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *PhysicalIndexJoin) getPlanCostVer1(taskType property.TaskType, option *PlanCostOption) (float64, error) { +// GetPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. 
+func (p *PhysicalIndexJoin) GetPlanCostVer1(taskType property.TaskType, option *coreusage.PlanCostOption) (float64, error) { costFlag := option.CostFlag - if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { + if p.planCostInit && !hasCostFlag(costFlag, coreusage.CostFlagRecalculate) { return p.planCost, nil } outerChild, innerChild := p.children[1-p.InnerChildIdx], p.children[p.InnerChildIdx] - outerCost, err := outerChild.getPlanCostVer1(taskType, option) + outerCost, err := outerChild.GetPlanCostVer1(taskType, option) if err != nil { return 0, err } - innerCost, err := innerChild.getPlanCostVer1(taskType, option) + innerCost, err := innerChild.GetPlanCostVer1(taskType, option) if err != nil { return 0, err } outerCnt := getCardinality(outerChild, costFlag) innerCnt := getCardinality(innerChild, costFlag) - if hasCostFlag(costFlag, CostFlagUseTrueCardinality) && outerCnt > 0 { + if hasCostFlag(costFlag, coreusage.CostFlagUseTrueCardinality) && outerCnt > 0 { innerCnt /= outerCnt // corresponding to one outer row when calculating IndexJoin costs innerCost /= outerCnt } @@ -596,7 +586,7 @@ func (p *PhysicalIndexHashJoin) GetCost(outerCnt, innerCnt, outerCost, innerCost numPairs = 0 } } - if hasCostFlag(costFlag, CostFlagUseTrueCardinality) { + if hasCostFlag(costFlag, coreusage.CostFlagUseTrueCardinality) { numPairs = getOperatorActRows(p) } // Inner workers do hash join in parallel, but they can only save ONE outer @@ -620,24 +610,24 @@ func (p *PhysicalIndexHashJoin) GetCost(outerCnt, innerCnt, outerCost, innerCost return outerCost + innerPlanCost + cpuCost + memoryCost } -// getPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *PhysicalIndexHashJoin) getPlanCostVer1(taskType property.TaskType, option *PlanCostOption) (float64, error) { +// GetPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. 
+func (p *PhysicalIndexHashJoin) GetPlanCostVer1(taskType property.TaskType, option *coreusage.PlanCostOption) (float64, error) { costFlag := option.CostFlag - if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { + if p.planCostInit && !hasCostFlag(costFlag, coreusage.CostFlagRecalculate) { return p.planCost, nil } outerChild, innerChild := p.children[1-p.InnerChildIdx], p.children[p.InnerChildIdx] - outerCost, err := outerChild.getPlanCostVer1(taskType, option) + outerCost, err := outerChild.GetPlanCostVer1(taskType, option) if err != nil { return 0, err } - innerCost, err := innerChild.getPlanCostVer1(taskType, option) + innerCost, err := innerChild.GetPlanCostVer1(taskType, option) if err != nil { return 0, err } outerCnt := getCardinality(outerChild, costFlag) innerCnt := getCardinality(innerChild, costFlag) - if hasCostFlag(costFlag, CostFlagUseTrueCardinality) && outerCnt > 0 { + if hasCostFlag(costFlag, coreusage.CostFlagUseTrueCardinality) && outerCnt > 0 { innerCnt /= outerCnt // corresponding to one outer row when calculating IndexJoin costs innerCost /= outerCnt } @@ -687,7 +677,7 @@ func (p *PhysicalIndexMergeJoin) GetCost(outerCnt, innerCnt, outerCost, innerCos numPairs = 0 } } - if hasCostFlag(costFlag, CostFlagUseTrueCardinality) { + if hasCostFlag(costFlag, coreusage.CostFlagUseTrueCardinality) { numPairs = getOperatorActRows(p) } avgProbeCnt := numPairs / outerCnt @@ -711,24 +701,24 @@ func (p *PhysicalIndexMergeJoin) GetCost(outerCnt, innerCnt, outerCost, innerCos return outerCost + innerPlanCost + cpuCost + memoryCost } -// getPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *PhysicalIndexMergeJoin) getPlanCostVer1(taskType property.TaskType, option *PlanCostOption) (float64, error) { +// GetPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. 
+func (p *PhysicalIndexMergeJoin) GetPlanCostVer1(taskType property.TaskType, option *coreusage.PlanCostOption) (float64, error) { costFlag := option.CostFlag - if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { + if p.planCostInit && !hasCostFlag(costFlag, coreusage.CostFlagRecalculate) { return p.planCost, nil } outerChild, innerChild := p.children[1-p.InnerChildIdx], p.children[p.InnerChildIdx] - outerCost, err := outerChild.getPlanCostVer1(taskType, option) + outerCost, err := outerChild.GetPlanCostVer1(taskType, option) if err != nil { return 0, err } - innerCost, err := innerChild.getPlanCostVer1(taskType, option) + innerCost, err := innerChild.GetPlanCostVer1(taskType, option) if err != nil { return 0, err } outerCnt := getCardinality(outerChild, costFlag) innerCnt := getCardinality(innerChild, costFlag) - if hasCostFlag(costFlag, CostFlagUseTrueCardinality) && outerCnt > 0 { + if hasCostFlag(costFlag, coreusage.CostFlagUseTrueCardinality) && outerCnt > 0 { innerCnt /= outerCnt // corresponding to one outer row when calculating IndexJoin costs innerCost /= outerCnt } @@ -764,18 +754,18 @@ func (p *PhysicalApply) GetCost(lCount, rCount, lCost, rCost float64) float64 { return cpuCost + lCost + lCount*rCost } -// getPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *PhysicalApply) getPlanCostVer1(taskType property.TaskType, option *PlanCostOption) (float64, error) { +// GetPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. 
+func (p *PhysicalApply) GetPlanCostVer1(taskType property.TaskType, option *coreusage.PlanCostOption) (float64, error) { costFlag := option.CostFlag - if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { + if p.planCostInit && !hasCostFlag(costFlag, coreusage.CostFlagRecalculate) { return p.planCost, nil } outerChild, innerChild := p.children[1-p.InnerChildIdx], p.children[p.InnerChildIdx] - outerCost, err := outerChild.getPlanCostVer1(taskType, option) + outerCost, err := outerChild.GetPlanCostVer1(taskType, option) if err != nil { return 0, err } - innerCost, err := innerChild.getPlanCostVer1(taskType, option) + innerCost, err := innerChild.GetPlanCostVer1(taskType, option) if err != nil { return 0, err } @@ -813,7 +803,7 @@ func (p *PhysicalMergeJoin) GetCost(lCnt, rCnt float64, costFlag uint64) float64 numPairs = 0 } } - if hasCostFlag(costFlag, CostFlagUseTrueCardinality) { + if hasCostFlag(costFlag, coreusage.CostFlagUseTrueCardinality) { numPairs = getOperatorActRows(p) } sessVars := p.SCtx().GetSessionVars() @@ -832,15 +822,15 @@ func (p *PhysicalMergeJoin) GetCost(lCnt, rCnt float64, costFlag uint64) float64 return cpuCost + memoryCost } -// getPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *PhysicalMergeJoin) getPlanCostVer1(taskType property.TaskType, option *PlanCostOption) (float64, error) { +// GetPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. 
+func (p *PhysicalMergeJoin) GetPlanCostVer1(taskType property.TaskType, option *coreusage.PlanCostOption) (float64, error) { costFlag := option.CostFlag - if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { + if p.planCostInit && !hasCostFlag(costFlag, coreusage.CostFlagRecalculate) { return p.planCost, nil } p.planCost = 0 for _, child := range p.children { - childCost, err := child.getPlanCostVer1(taskType, option) + childCost, err := child.GetPlanCostVer1(taskType, option) if err != nil { return 0, err } @@ -852,7 +842,7 @@ func (p *PhysicalMergeJoin) getPlanCostVer1(taskType property.TaskType, option * } // GetCost computes cost of hash join operator itself. -func (p *PhysicalHashJoin) GetCost(lCnt, rCnt float64, _ bool, costFlag uint64, op *physicalOptimizeOp) float64 { +func (p *PhysicalHashJoin) GetCost(lCnt, rCnt float64, _ bool, costFlag uint64, op *coreusage.PhysicalOptimizeOp) float64 { buildCnt, probeCnt := lCnt, rCnt build := p.children[0] // Taking the right as the inner for right join or using the outer to build a hash table. @@ -896,7 +886,7 @@ func (p *PhysicalHashJoin) GetCost(lCnt, rCnt float64, _ bool, costFlag uint64, numPairs = 0 } } - if hasCostFlag(costFlag, CostFlagUseTrueCardinality) { + if hasCostFlag(costFlag, coreusage.CostFlagUseTrueCardinality) { numPairs = getOperatorActRows(p) } // Cost of querying hash table is cheap actually, so we just compute the cost of @@ -939,22 +929,22 @@ func (p *PhysicalHashJoin) GetCost(lCnt, rCnt float64, _ bool, costFlag uint64, return cpuCost + memoryCost + diskCost } -// getPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *PhysicalHashJoin) getPlanCostVer1(taskType property.TaskType, option *PlanCostOption) (float64, error) { +// GetPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. 
+func (p *PhysicalHashJoin) GetPlanCostVer1(taskType property.TaskType, option *coreusage.PlanCostOption) (float64, error) { costFlag := option.CostFlag - if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { + if p.planCostInit && !hasCostFlag(costFlag, coreusage.CostFlagRecalculate) { return p.planCost, nil } p.planCost = 0 for _, child := range p.children { - childCost, err := child.getPlanCostVer1(taskType, option) + childCost, err := child.GetPlanCostVer1(taskType, option) if err != nil { return 0, err } p.planCost += childCost } p.planCost += p.GetCost(getCardinality(p.children[0], costFlag), getCardinality(p.children[1], costFlag), - taskType == property.MppTaskType, costFlag, option.tracer) + taskType == property.MppTaskType, costFlag, option.GetTracer()) p.planCostInit = true return p.planCost, nil } @@ -974,13 +964,13 @@ func (p *PhysicalStreamAgg) GetCost(inputRows float64, isRoot, _ bool, costFlag return cpuCost + memoryCost } -// getPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *PhysicalStreamAgg) getPlanCostVer1(taskType property.TaskType, option *PlanCostOption) (float64, error) { +// GetPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. 
+func (p *PhysicalStreamAgg) GetPlanCostVer1(taskType property.TaskType, option *coreusage.PlanCostOption) (float64, error) { costFlag := option.CostFlag - if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { + if p.planCostInit && !hasCostFlag(costFlag, coreusage.CostFlagRecalculate) { return p.planCost, nil } - childCost, err := p.children[0].getPlanCostVer1(taskType, option) + childCost, err := p.children[0].GetPlanCostVer1(taskType, option) if err != nil { return 0, err } @@ -1015,13 +1005,13 @@ func (p *PhysicalHashAgg) GetCost(inputRows float64, isRoot, isMPP bool, costFla return cpuCost + memoryCost } -// getPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *PhysicalHashAgg) getPlanCostVer1(taskType property.TaskType, option *PlanCostOption) (float64, error) { +// GetPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. +func (p *PhysicalHashAgg) GetPlanCostVer1(taskType property.TaskType, option *coreusage.PlanCostOption) (float64, error) { costFlag := option.CostFlag - if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { + if p.planCostInit && !hasCostFlag(costFlag, coreusage.CostFlagRecalculate) { return p.planCost, nil } - childCost, err := p.children[0].getPlanCostVer1(taskType, option) + childCost, err := p.children[0].GetPlanCostVer1(taskType, option) if err != nil { return 0, err } @@ -1063,13 +1053,13 @@ func (p *PhysicalSort) GetCost(count float64, schema *expression.Schema) float64 return cpuCost + memoryCost + diskCost } -// getPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *PhysicalSort) getPlanCostVer1(taskType property.TaskType, option *PlanCostOption) (float64, error) { +// GetPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. 
+func (p *PhysicalSort) GetPlanCostVer1(taskType property.TaskType, option *coreusage.PlanCostOption) (float64, error) { costFlag := option.CostFlag - if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { + if p.planCostInit && !hasCostFlag(costFlag, coreusage.CostFlagRecalculate) { return p.planCost, nil } - childCost, err := p.children[0].getPlanCostVer1(taskType, option) + childCost, err := p.children[0].GetPlanCostVer1(taskType, option) if err != nil { return 0, err } @@ -1102,13 +1092,13 @@ func (p *PhysicalTopN) GetCost(count float64, isRoot bool) float64 { return cpuCost + memoryCost } -// getPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *PhysicalTopN) getPlanCostVer1(taskType property.TaskType, option *PlanCostOption) (float64, error) { +// GetPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. +func (p *PhysicalTopN) GetPlanCostVer1(taskType property.TaskType, option *coreusage.PlanCostOption) (float64, error) { costFlag := option.CostFlag - if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { + if p.planCostInit && !hasCostFlag(costFlag, coreusage.CostFlagRecalculate) { return p.planCost, nil } - childCost, err := p.children[0].getPlanCostVer1(taskType, option) + childCost, err := p.children[0].GetPlanCostVer1(taskType, option) if err != nil { return 0, err } @@ -1119,7 +1109,7 @@ func (p *PhysicalTopN) getPlanCostVer1(taskType property.TaskType, option *PlanC } // GetCost returns cost of the PointGetPlan. 
-func (p *BatchPointGetPlan) GetCost(opt *physicalOptimizeOp) float64 { +func (p *BatchPointGetPlan) GetCost(opt *coreusage.PhysicalOptimizeOp) float64 { cols := p.accessCols if cols == nil { return 0 // the cost of BatchGet generated in fast plan optimization is always 0 @@ -1146,13 +1136,13 @@ func (p *BatchPointGetPlan) GetCost(opt *physicalOptimizeOp) float64 { return cost } -// getPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *BatchPointGetPlan) getPlanCostVer1(_ property.TaskType, option *PlanCostOption) (float64, error) { +// GetPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. +func (p *BatchPointGetPlan) GetPlanCostVer1(_ property.TaskType, option *coreusage.PlanCostOption) (float64, error) { costFlag := option.CostFlag - if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { + if p.planCostInit && !hasCostFlag(costFlag, coreusage.CostFlagRecalculate) { return p.planCost, nil } - p.planCost = p.GetCost(option.tracer) + p.planCost = p.GetCost(option.GetTracer()) p.planCostInit = true return p.planCost, nil } @@ -1170,7 +1160,7 @@ func (p *BatchPointGetPlan) GetAvgRowSize() float64 { } // GetCost returns cost of the PointGetPlan. -func (p *PointGetPlan) GetCost(opt *physicalOptimizeOp) float64 { +func (p *PointGetPlan) GetCost(opt *coreusage.PhysicalOptimizeOp) float64 { cols := p.accessCols if cols == nil { return 0 // the cost of PointGet generated in fast plan optimization is always 0 @@ -1194,13 +1184,13 @@ func (p *PointGetPlan) GetCost(opt *physicalOptimizeOp) float64 { return cost } -// getPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *PointGetPlan) getPlanCostVer1(_ property.TaskType, option *PlanCostOption) (float64, error) { +// GetPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. 
+func (p *PointGetPlan) GetPlanCostVer1(_ property.TaskType, option *coreusage.PlanCostOption) (float64, error) { costFlag := option.CostFlag - if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { + if p.planCostInit && !hasCostFlag(costFlag, coreusage.CostFlagRecalculate) { return p.planCost, nil } - p.planCost = p.GetCost(option.tracer) + p.planCost = p.GetCost(option.GetTracer()) p.planCostInit = true return p.planCost, nil } @@ -1217,15 +1207,15 @@ func (p *PointGetPlan) GetAvgRowSize() float64 { return cardinality.GetIndexAvgRowSize(p.SCtx(), p.StatsInfo().HistColl, cols, p.IndexInfo.Unique) } -// getPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *PhysicalUnionAll) getPlanCostVer1(taskType property.TaskType, option *PlanCostOption) (float64, error) { +// GetPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. +func (p *PhysicalUnionAll) GetPlanCostVer1(taskType property.TaskType, option *coreusage.PlanCostOption) (float64, error) { costFlag := option.CostFlag - if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { + if p.planCostInit && !hasCostFlag(costFlag, coreusage.CostFlagRecalculate) { return p.planCost, nil } var childMaxCost float64 for _, child := range p.children { - childCost, err := child.getPlanCostVer1(taskType, option) + childCost, err := child.GetPlanCostVer1(taskType, option) if err != nil { return 0, err } @@ -1236,13 +1226,13 @@ func (p *PhysicalUnionAll) getPlanCostVer1(taskType property.TaskType, option *P return p.planCost, nil } -// getPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *PhysicalExchangeReceiver) getPlanCostVer1(taskType property.TaskType, option *PlanCostOption) (float64, error) { +// GetPlanCostVer1 calculates the cost of the plan if it has not been calculated yet and returns the cost. 
+func (p *PhysicalExchangeReceiver) GetPlanCostVer1(taskType property.TaskType, option *coreusage.PlanCostOption) (float64, error) { costFlag := option.CostFlag - if p.planCostInit && !hasCostFlag(costFlag, CostFlagRecalculate) { + if p.planCostInit && !hasCostFlag(costFlag, coreusage.CostFlagRecalculate) { return p.planCost, nil } - childCost, err := p.children[0].getPlanCostVer1(taskType, option) + childCost, err := p.children[0].GetPlanCostVer1(taskType, option) if err != nil { return 0, err } @@ -1271,8 +1261,8 @@ func getOperatorActRows(operator PhysicalPlan) float64 { } func getCardinality(operator PhysicalPlan, costFlag uint64) float64 { - if hasCostFlag(costFlag, CostFlagUseTrueCardinality) { - actualProbeCnt := operator.getActualProbeCnt(operator.SCtx().GetSessionVars().StmtCtx.RuntimeStatsColl) + if hasCostFlag(costFlag, coreusage.CostFlagUseTrueCardinality) { + actualProbeCnt := operator.GetActualProbeCnt(operator.SCtx().GetSessionVars().StmtCtx.RuntimeStatsColl) if actualProbeCnt == 0 { return 0 } diff --git a/pkg/planner/core/plan_cost_ver2.go b/pkg/planner/core/plan_cost_ver2.go index fd44e4aed344d..58e4e717a80fa 100644 --- a/pkg/planner/core/plan_cost_ver2.go +++ b/pkg/planner/core/plan_cost_ver2.go @@ -17,7 +17,6 @@ package core import ( "fmt" "math" - "strconv" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/expression/aggregation" @@ -26,56 +25,57 @@ import ( "github.com/pingcap/tidb/pkg/planner/cardinality" "github.com/pingcap/tidb/pkg/planner/property" "github.com/pingcap/tidb/pkg/planner/util" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/util/paging" "github.com/pingcap/tipb/go-tipb" ) // GetPlanCost returns the cost of this plan. 
-func GetPlanCost(p PhysicalPlan, taskType property.TaskType, option *PlanCostOption) (float64, error) { +func GetPlanCost(p PhysicalPlan, taskType property.TaskType, option *coreusage.PlanCostOption) (float64, error) { return getPlanCost(p, taskType, option) } // GenPlanCostTrace define a hook function to customize the cost calculation. -var GenPlanCostTrace func(p PhysicalPlan, costV *costVer2, taskType property.TaskType, option *PlanCostOption) +var GenPlanCostTrace func(p PhysicalPlan, costV *coreusage.CostVer2, taskType property.TaskType, option *coreusage.PlanCostOption) -func getPlanCost(p PhysicalPlan, taskType property.TaskType, option *PlanCostOption) (float64, error) { +func getPlanCost(p PhysicalPlan, taskType property.TaskType, option *coreusage.PlanCostOption) (float64, error) { if p.SCtx().GetSessionVars().CostModelVersion == modelVer2 { - planCost, err := p.getPlanCostVer2(taskType, option) - if traceCost(option) && GenPlanCostTrace != nil { + planCost, err := p.GetPlanCostVer2(taskType, option) + if coreusage.TraceCost(option) && GenPlanCostTrace != nil { GenPlanCostTrace(p, &planCost, taskType, option) } - return planCost.cost, err + return planCost.GetCost(), err } - return p.getPlanCostVer1(taskType, option) + return p.GetPlanCostVer1(taskType, option) } -// getPlanCostVer2 calculates the cost of the plan if it has not been calculated yet and returns the cost. -func (p *basePhysicalPlan) getPlanCostVer2(taskType property.TaskType, option *PlanCostOption) (costVer2, error) { - if p.planCostInit && !hasCostFlag(option.CostFlag, CostFlagRecalculate) { +// GetPlanCostVer2 calculates the cost of the plan if it has not been calculated yet and returns the cost. 
+func (p *basePhysicalPlan) GetPlanCostVer2(taskType property.TaskType, option *coreusage.PlanCostOption) (coreusage.CostVer2, error) { + if p.planCostInit && !hasCostFlag(option.CostFlag, coreusage.CostFlagRecalculate) { return p.planCostVer2, nil } - childCosts := make([]costVer2, 0, len(p.children)) + childCosts := make([]coreusage.CostVer2, 0, len(p.children)) for _, child := range p.children { - childCost, err := child.getPlanCostVer2(taskType, option) + childCost, err := child.GetPlanCostVer2(taskType, option) if err != nil { - return zeroCostVer2, err + return coreusage.ZeroCostVer2, err } childCosts = append(childCosts, childCost) } if len(childCosts) == 0 { - p.planCostVer2 = newZeroCostVer2(traceCost(option)) + p.planCostVer2 = coreusage.NewZeroCostVer2(coreusage.TraceCost(option)) } else { - p.planCostVer2 = sumCostVer2(childCosts...) + p.planCostVer2 = coreusage.SumCostVer2(childCosts...) } p.planCostInit = true return p.planCostVer2, nil } -// getPlanCostVer2 returns the plan-cost of this sub-plan, which is: +// GetPlanCostVer2 returns the plan-cost of this sub-plan, which is: // plan-cost = child-cost + filter-cost -func (p *PhysicalSelection) getPlanCostVer2(taskType property.TaskType, option *PlanCostOption) (costVer2, error) { - if p.planCostInit && !hasCostFlag(option.CostFlag, CostFlagRecalculate) { +func (p *PhysicalSelection) GetPlanCostVer2(taskType property.TaskType, option *coreusage.PlanCostOption) (coreusage.CostVer2, error) { + if p.planCostInit && !hasCostFlag(option.CostFlag, coreusage.CostFlagRecalculate) { return p.planCostVer2, nil } @@ -84,21 +84,21 @@ func (p *PhysicalSelection) getPlanCostVer2(taskType property.TaskType, option * filterCost := filterCostVer2(option, inputRows, p.Conditions, cpuFactor) - childCost, err := p.children[0].getPlanCostVer2(taskType, option) + childCost, err := p.children[0].GetPlanCostVer2(taskType, option) if err != nil { - return zeroCostVer2, err + return coreusage.ZeroCostVer2, err } - 
p.planCostVer2 = sumCostVer2(filterCost, childCost) + p.planCostVer2 = coreusage.SumCostVer2(filterCost, childCost) p.planCostInit = true return p.planCostVer2, nil } -// getPlanCostVer2 returns the plan-cost of this sub-plan, which is: +// GetPlanCostVer2 returns the plan-cost of this sub-plan, which is: // plan-cost = child-cost + proj-cost / concurrency // proj-cost = input-rows * len(expressions) * cpu-factor -func (p *PhysicalProjection) getPlanCostVer2(taskType property.TaskType, option *PlanCostOption) (costVer2, error) { - if p.planCostInit && !hasCostFlag(option.CostFlag, CostFlagRecalculate) { +func (p *PhysicalProjection) GetPlanCostVer2(taskType property.TaskType, option *coreusage.PlanCostOption) (coreusage.CostVer2, error) { + if p.planCostInit && !hasCostFlag(option.CostFlag, coreusage.CostFlagRecalculate) { return p.planCostVer2, nil } @@ -111,21 +111,21 @@ func (p *PhysicalProjection) getPlanCostVer2(taskType property.TaskType, option projCost := filterCostVer2(option, inputRows, p.Exprs, cpuFactor) - childCost, err := p.children[0].getPlanCostVer2(taskType, option) + childCost, err := p.children[0].GetPlanCostVer2(taskType, option) if err != nil { - return zeroCostVer2, err + return coreusage.ZeroCostVer2, err } - p.planCostVer2 = sumCostVer2(childCost, divCostVer2(projCost, concurrency)) + p.planCostVer2 = coreusage.SumCostVer2(childCost, coreusage.DivCostVer2(projCost, concurrency)) p.planCostInit = true return p.planCostVer2, nil } -// getPlanCostVer2 returns the plan-cost of this sub-plan, which is: +// GetPlanCostVer2 returns the plan-cost of this sub-plan, which is: // plan-cost = rows * log2(row-size) * scan-factor // log2(row-size) is from experiments. 
-func (p *PhysicalIndexScan) getPlanCostVer2(taskType property.TaskType, option *PlanCostOption) (costVer2, error) { - if p.planCostInit && !hasCostFlag(option.CostFlag, CostFlagRecalculate) { +func (p *PhysicalIndexScan) GetPlanCostVer2(taskType property.TaskType, option *coreusage.PlanCostOption) (coreusage.CostVer2, error) { + if p.planCostInit && !hasCostFlag(option.CostFlag, coreusage.CostFlagRecalculate) { return p.planCostVer2, nil } @@ -138,11 +138,11 @@ func (p *PhysicalIndexScan) getPlanCostVer2(taskType property.TaskType, option * return p.planCostVer2, nil } -// getPlanCostVer2 returns the plan-cost of this sub-plan, which is: +// GetPlanCostVer2 returns the plan-cost of this sub-plan, which is: // plan-cost = rows * log2(row-size) * scan-factor // log2(row-size) is from experiments. -func (p *PhysicalTableScan) getPlanCostVer2(taskType property.TaskType, option *PlanCostOption) (costVer2, error) { - if p.planCostInit && !hasCostFlag(option.CostFlag, CostFlagRecalculate) { +func (p *PhysicalTableScan) GetPlanCostVer2(taskType property.TaskType, option *coreusage.PlanCostOption) (coreusage.CostVer2, error) { + if p.planCostInit && !hasCostFlag(option.CostFlag, coreusage.CostFlagRecalculate) { return p.planCostVer2, nil } @@ -160,18 +160,18 @@ func (p *PhysicalTableScan) getPlanCostVer2(taskType property.TaskType, option * // give TiFlash a start-up cost to let the optimizer prefers to use TiKV to process small table scans. 
if p.StoreType == kv.TiFlash { - p.planCostVer2 = sumCostVer2(p.planCostVer2, scanCostVer2(option, 10000, rowSize, scanFactor)) + p.planCostVer2 = coreusage.SumCostVer2(p.planCostVer2, scanCostVer2(option, 10000, rowSize, scanFactor)) } p.planCostInit = true return p.planCostVer2, nil } -// getPlanCostVer2 returns the plan-cost of this sub-plan, which is: +// GetPlanCostVer2 returns the plan-cost of this sub-plan, which is: // plan-cost = (child-cost + net-cost) / concurrency // net-cost = rows * row-size * net-factor -func (p *PhysicalIndexReader) getPlanCostVer2(taskType property.TaskType, option *PlanCostOption) (costVer2, error) { - if p.planCostInit && !hasCostFlag(option.CostFlag, CostFlagRecalculate) { +func (p *PhysicalIndexReader) GetPlanCostVer2(taskType property.TaskType, option *coreusage.PlanCostOption) (coreusage.CostVer2, error) { + if p.planCostInit && !hasCostFlag(option.CostFlag, coreusage.CostFlagRecalculate) { return p.planCostVer2, nil } @@ -182,21 +182,21 @@ func (p *PhysicalIndexReader) getPlanCostVer2(taskType property.TaskType, option netCost := netCostVer2(option, rows, rowSize, netFactor) - childCost, err := p.indexPlan.getPlanCostVer2(property.CopSingleReadTaskType, option) + childCost, err := p.indexPlan.GetPlanCostVer2(property.CopSingleReadTaskType, option) if err != nil { - return zeroCostVer2, err + return coreusage.ZeroCostVer2, err } - p.planCostVer2 = divCostVer2(sumCostVer2(childCost, netCost), concurrency) + p.planCostVer2 = coreusage.DivCostVer2(coreusage.SumCostVer2(childCost, netCost), concurrency) p.planCostInit = true return p.planCostVer2, nil } -// getPlanCostVer2 returns the plan-cost of this sub-plan, which is: +// GetPlanCostVer2 returns the plan-cost of this sub-plan, which is: // plan-cost = (child-cost + net-cost) / concurrency // net-cost = rows * row-size * net-factor -func (p *PhysicalTableReader) getPlanCostVer2(taskType property.TaskType, option *PlanCostOption) (costVer2, error) { - if p.planCostInit && 
!hasCostFlag(option.CostFlag, CostFlagRecalculate) { +func (p *PhysicalTableReader) GetPlanCostVer2(taskType property.TaskType, option *coreusage.PlanCostOption) (coreusage.CostVer2, error) { + if p.planCostInit && !hasCostFlag(option.CostFlag, coreusage.CostFlagRecalculate) { return p.planCostVer2, nil } @@ -211,23 +211,23 @@ func (p *PhysicalTableReader) getPlanCostVer2(taskType property.TaskType, option netCost := netCostVer2(option, rows, rowSize, netFactor) - childCost, err := p.tablePlan.getPlanCostVer2(childType, option) + childCost, err := p.tablePlan.GetPlanCostVer2(childType, option) if err != nil { - return zeroCostVer2, err + return coreusage.ZeroCostVer2, err } - p.planCostVer2 = divCostVer2(sumCostVer2(childCost, netCost), concurrency) + p.planCostVer2 = coreusage.DivCostVer2(coreusage.SumCostVer2(childCost, netCost), concurrency) p.planCostInit = true // consider tidb_enforce_mpp if p.StoreType == kv.TiFlash && p.SCtx().GetSessionVars().IsMPPEnforced() && - !hasCostFlag(option.CostFlag, CostFlagRecalculate) { // show the real cost in explain-statements - p.planCostVer2 = divCostVer2(p.planCostVer2, 1000000000) + !hasCostFlag(option.CostFlag, coreusage.CostFlagRecalculate) { // show the real cost in explain-statements + p.planCostVer2 = coreusage.DivCostVer2(p.planCostVer2, 1000000000) } return p.planCostVer2, nil } -// getPlanCostVer2 returns the plan-cost of this sub-plan, which is: +// GetPlanCostVer2 returns the plan-cost of this sub-plan, which is: // plan-cost = index-side-cost + (table-side-cost + double-read-cost) / double-read-concurrency // index-side-cost = (index-child-cost + index-net-cost) / dist-concurrency # same with IndexReader // table-side-cost = (table-child-cost + table-net-cost) / dist-concurrency # same with TableReader @@ -235,8 +235,8 @@ func (p *PhysicalTableReader) getPlanCostVer2(taskType property.TaskType, option // double-read-request-cost = double-read-tasks * request-factor // double-read-cpu-cost = index-rows * 
cpu-factor // double-read-tasks = index-rows / batch-size * task-per-batch # task-per-batch is a magic number now -func (p *PhysicalIndexLookUpReader) getPlanCostVer2(taskType property.TaskType, option *PlanCostOption) (costVer2, error) { - if p.planCostInit && !hasCostFlag(option.CostFlag, CostFlagRecalculate) { +func (p *PhysicalIndexLookUpReader) GetPlanCostVer2(taskType property.TaskType, option *coreusage.PlanCostOption) (coreusage.CostVer2, error) { + if p.planCostInit && !hasCostFlag(option.CostFlag, coreusage.CostFlagRecalculate) { return p.planCostVer2, nil } @@ -252,83 +252,83 @@ func (p *PhysicalIndexLookUpReader) getPlanCostVer2(taskType property.TaskType, // index-side indexNetCost := netCostVer2(option, indexRows, indexRowSize, netFactor) - indexChildCost, err := p.indexPlan.getPlanCostVer2(property.CopMultiReadTaskType, option) + indexChildCost, err := p.indexPlan.GetPlanCostVer2(property.CopMultiReadTaskType, option) if err != nil { - return zeroCostVer2, err + return coreusage.ZeroCostVer2, err } - indexSideCost := divCostVer2(sumCostVer2(indexNetCost, indexChildCost), distConcurrency) + indexSideCost := coreusage.DivCostVer2(coreusage.SumCostVer2(indexNetCost, indexChildCost), distConcurrency) // table-side tableNetCost := netCostVer2(option, tableRows, tableRowSize, netFactor) - tableChildCost, err := p.tablePlan.getPlanCostVer2(property.CopMultiReadTaskType, option) + tableChildCost, err := p.tablePlan.GetPlanCostVer2(property.CopMultiReadTaskType, option) if err != nil { - return zeroCostVer2, err + return coreusage.ZeroCostVer2, err } - tableSideCost := divCostVer2(sumCostVer2(tableNetCost, tableChildCost), distConcurrency) + tableSideCost := coreusage.DivCostVer2(coreusage.SumCostVer2(tableNetCost, tableChildCost), distConcurrency) doubleReadRows := indexRows - doubleReadCPUCost := newCostVer2(option, cpuFactor, + doubleReadCPUCost := coreusage.NewCostVer2(option, cpuFactor, indexRows*cpuFactor.Value, func() string { return 
fmt.Sprintf("double-read-cpu(%v*%v)", doubleReadRows, cpuFactor) }) batchSize := float64(p.SCtx().GetSessionVars().IndexLookupSize) taskPerBatch := 32.0 // TODO: remove this magic number doubleReadTasks := doubleReadRows / batchSize * taskPerBatch doubleReadRequestCost := doubleReadCostVer2(option, doubleReadTasks, requestFactor) - doubleReadCost := sumCostVer2(doubleReadCPUCost, doubleReadRequestCost) + doubleReadCost := coreusage.SumCostVer2(doubleReadCPUCost, doubleReadRequestCost) - p.planCostVer2 = sumCostVer2(indexSideCost, divCostVer2(sumCostVer2(tableSideCost, doubleReadCost), doubleReadConcurrency)) + p.planCostVer2 = coreusage.SumCostVer2(indexSideCost, coreusage.DivCostVer2(coreusage.SumCostVer2(tableSideCost, doubleReadCost), doubleReadConcurrency)) if p.SCtx().GetSessionVars().EnablePaging && p.expectedCnt > 0 && p.expectedCnt <= paging.Threshold { // if the expectCnt is below the paging threshold, using paging API p.Paging = true // TODO: move this operation from cost model to physical optimization - p.planCostVer2 = mulCostVer2(p.planCostVer2, 0.6) + p.planCostVer2 = coreusage.MulCostVer2(p.planCostVer2, 0.6) } p.planCostInit = true return p.planCostVer2, nil } -// getPlanCostVer2 returns the plan-cost of this sub-plan, which is: +// GetPlanCostVer2 returns the plan-cost of this sub-plan, which is: // plan-cost = table-side-cost + sum(index-side-cost) // index-side-cost = (index-child-cost + index-net-cost) / dist-concurrency # same with IndexReader // table-side-cost = (table-child-cost + table-net-cost) / dist-concurrency # same with TableReader -func (p *PhysicalIndexMergeReader) getPlanCostVer2(taskType property.TaskType, option *PlanCostOption) (costVer2, error) { - if p.planCostInit && !hasCostFlag(option.CostFlag, CostFlagRecalculate) { +func (p *PhysicalIndexMergeReader) GetPlanCostVer2(taskType property.TaskType, option *coreusage.PlanCostOption) (coreusage.CostVer2, error) { + if p.planCostInit && !hasCostFlag(option.CostFlag, 
coreusage.CostFlagRecalculate) { return p.planCostVer2, nil } netFactor := getTaskNetFactorVer2(p, taskType) distConcurrency := float64(p.SCtx().GetSessionVars().DistSQLScanConcurrency()) - var tableSideCost costVer2 + var tableSideCost coreusage.CostVer2 if tablePath := p.tablePlan; tablePath != nil { rows := getCardinality(tablePath, option.CostFlag) rowSize := getAvgRowSize(tablePath.StatsInfo(), tablePath.Schema().Columns) tableNetCost := netCostVer2(option, rows, rowSize, netFactor) - tableChildCost, err := tablePath.getPlanCostVer2(taskType, option) + tableChildCost, err := tablePath.GetPlanCostVer2(taskType, option) if err != nil { - return zeroCostVer2, err + return coreusage.ZeroCostVer2, err } - tableSideCost = divCostVer2(sumCostVer2(tableNetCost, tableChildCost), distConcurrency) + tableSideCost = coreusage.DivCostVer2(coreusage.SumCostVer2(tableNetCost, tableChildCost), distConcurrency) } - indexSideCost := make([]costVer2, 0, len(p.partialPlans)) + indexSideCost := make([]coreusage.CostVer2, 0, len(p.partialPlans)) for _, indexPath := range p.partialPlans { rows := getCardinality(indexPath, option.CostFlag) rowSize := getAvgRowSize(indexPath.StatsInfo(), indexPath.Schema().Columns) indexNetCost := netCostVer2(option, rows, rowSize, netFactor) - indexChildCost, err := indexPath.getPlanCostVer2(taskType, option) + indexChildCost, err := indexPath.GetPlanCostVer2(taskType, option) if err != nil { - return zeroCostVer2, err + return coreusage.ZeroCostVer2, err } indexSideCost = append(indexSideCost, - divCostVer2(sumCostVer2(indexNetCost, indexChildCost), distConcurrency)) + coreusage.DivCostVer2(coreusage.SumCostVer2(indexNetCost, indexChildCost), distConcurrency)) } - sumIndexSideCost := sumCostVer2(indexSideCost...) + sumIndexSideCost := coreusage.SumCostVer2(indexSideCost...) 
- p.planCostVer2 = sumCostVer2(tableSideCost, sumIndexSideCost) + p.planCostVer2 = coreusage.SumCostVer2(tableSideCost, sumIndexSideCost) // give a bias to pushDown limit, since it will get the same cost with NON_PUSH_DOWN_LIMIT case via expect count. // push down limit case may reduce cop request consumption if any in some cases. // @@ -341,13 +341,13 @@ func (p *PhysicalIndexMergeReader) getPlanCostVer2(taskType property.TaskType, o // will have the same cost, actually if limit are more close to the fetch side, the fewer rows that table plan need to read. // todo: refine the cost computation out from cost model. if p.PushedLimit != nil { - p.planCostVer2 = mulCostVer2(p.planCostVer2, 0.99) + p.planCostVer2 = coreusage.MulCostVer2(p.planCostVer2, 0.99) } p.planCostInit = true return p.planCostVer2, nil } -// getPlanCostVer2 returns the plan-cost of this sub-plan, which is: +// GetPlanCostVer2 returns the plan-cost of this sub-plan, which is: // plan-cost = child-cost + sort-cpu-cost + sort-mem-cost + sort-disk-cost // sort-cpu-cost = rows * log2(rows) * len(sort-items) * cpu-factor // if no spill: @@ -356,8 +356,8 @@ func (p *PhysicalIndexMergeReader) getPlanCostVer2(taskType property.TaskType, o // else if spill: // 1. sort-mem-cost = mem-quota * mem-factor // 2. 
sort-disk-cost = rows * row-size * disk-factor -func (p *PhysicalSort) getPlanCostVer2(taskType property.TaskType, option *PlanCostOption) (costVer2, error) { - if p.planCostInit && !hasCostFlag(option.CostFlag, CostFlagRecalculate) { +func (p *PhysicalSort) GetPlanCostVer2(taskType property.TaskType, option *coreusage.PlanCostOption) (coreusage.CostVer2, error) { + if p.planCostInit && !hasCostFlag(option.CostFlag, coreusage.CostFlagRecalculate) { return p.planCostVer2, nil } @@ -375,37 +375,37 @@ func (p *PhysicalSort) getPlanCostVer2(taskType property.TaskType, option *PlanC sortCPUCost := orderCostVer2(option, rows, rows, p.ByItems, cpuFactor) - var sortMemCost, sortDiskCost costVer2 + var sortMemCost, sortDiskCost coreusage.CostVer2 if !spill { - sortMemCost = newCostVer2(option, memFactor, + sortMemCost = coreusage.NewCostVer2(option, memFactor, rows*rowSize*memFactor.Value, func() string { return fmt.Sprintf("sortMem(%v*%v*%v)", rows, rowSize, memFactor) }) - sortDiskCost = zeroCostVer2 + sortDiskCost = coreusage.ZeroCostVer2 } else { - sortMemCost = newCostVer2(option, memFactor, + sortMemCost = coreusage.NewCostVer2(option, memFactor, float64(memQuota)*memFactor.Value, func() string { return fmt.Sprintf("sortMem(%v*%v)", memQuota, memFactor) }) - sortDiskCost = newCostVer2(option, diskFactor, + sortDiskCost = coreusage.NewCostVer2(option, diskFactor, rows*rowSize*diskFactor.Value, func() string { return fmt.Sprintf("sortDisk(%v*%v*%v)", rows, rowSize, diskFactor) }) } - childCost, err := p.children[0].getPlanCostVer2(taskType, option) + childCost, err := p.children[0].GetPlanCostVer2(taskType, option) if err != nil { - return zeroCostVer2, err + return coreusage.ZeroCostVer2, err } - p.planCostVer2 = sumCostVer2(childCost, sortCPUCost, sortMemCost, sortDiskCost) + p.planCostVer2 = coreusage.SumCostVer2(childCost, sortCPUCost, sortMemCost, sortDiskCost) p.planCostInit = true return p.planCostVer2, nil } -// getPlanCostVer2 returns the plan-cost of this 
sub-plan, which is: +// GetPlanCostVer2 returns the plan-cost of this sub-plan, which is: // plan-cost = child-cost + topn-cpu-cost + topn-mem-cost // topn-cpu-cost = rows * log2(N) * len(sort-items) * cpu-factor // topn-mem-cost = N * row-size * mem-factor -func (p *PhysicalTopN) getPlanCostVer2(taskType property.TaskType, option *PlanCostOption) (costVer2, error) { - if p.planCostInit && !hasCostFlag(option.CostFlag, CostFlagRecalculate) { +func (p *PhysicalTopN) GetPlanCostVer2(taskType property.TaskType, option *coreusage.PlanCostOption) (coreusage.CostVer2, error) { + if p.planCostInit && !hasCostFlag(option.CostFlag, coreusage.CostFlagRecalculate) { return p.planCostVer2, nil } @@ -421,24 +421,24 @@ func (p *PhysicalTopN) getPlanCostVer2(taskType property.TaskType, option *PlanC memFactor := getTaskMemFactorVer2(p, taskType) topNCPUCost := orderCostVer2(option, rows, n, p.ByItems, cpuFactor) - topNMemCost := newCostVer2(option, memFactor, + topNMemCost := coreusage.NewCostVer2(option, memFactor, n*rowSize*memFactor.Value, func() string { return fmt.Sprintf("topMem(%v*%v*%v)", n, rowSize, memFactor) }) - childCost, err := p.children[0].getPlanCostVer2(taskType, option) + childCost, err := p.children[0].GetPlanCostVer2(taskType, option) if err != nil { - return zeroCostVer2, err + return coreusage.ZeroCostVer2, err } - p.planCostVer2 = sumCostVer2(childCost, topNCPUCost, topNMemCost) + p.planCostVer2 = coreusage.SumCostVer2(childCost, topNCPUCost, topNMemCost) p.planCostInit = true return p.planCostVer2, nil } -// getPlanCostVer2 returns the plan-cost of this sub-plan, which is: +// GetPlanCostVer2 returns the plan-cost of this sub-plan, which is: // plan-cost = child-cost + agg-cost + group-cost -func (p *PhysicalStreamAgg) getPlanCostVer2(taskType property.TaskType, option *PlanCostOption) (costVer2, error) { - if p.planCostInit && !hasCostFlag(option.CostFlag, CostFlagRecalculate) { +func (p *PhysicalStreamAgg) GetPlanCostVer2(taskType property.TaskType, 
option *coreusage.PlanCostOption) (coreusage.CostVer2, error) { + if p.planCostInit && !hasCostFlag(option.CostFlag, coreusage.CostFlagRecalculate) { return p.planCostVer2, nil } @@ -448,20 +448,20 @@ func (p *PhysicalStreamAgg) getPlanCostVer2(taskType property.TaskType, option * aggCost := aggCostVer2(option, rows, p.AggFuncs, cpuFactor) groupCost := groupCostVer2(option, rows, p.GroupByItems, cpuFactor) - childCost, err := p.children[0].getPlanCostVer2(taskType, option) + childCost, err := p.children[0].GetPlanCostVer2(taskType, option) if err != nil { - return zeroCostVer2, err + return coreusage.ZeroCostVer2, err } - p.planCostVer2 = sumCostVer2(childCost, aggCost, groupCost) + p.planCostVer2 = coreusage.SumCostVer2(childCost, aggCost, groupCost) p.planCostInit = true return p.planCostVer2, nil } -// getPlanCostVer2 returns the plan-cost of this sub-plan, which is: +// GetPlanCostVer2 returns the plan-cost of this sub-plan, which is: // plan-cost = child-cost + (agg-cost + group-cost + hash-build-cost + hash-probe-cost) / concurrency -func (p *PhysicalHashAgg) getPlanCostVer2(taskType property.TaskType, option *PlanCostOption) (costVer2, error) { - if p.planCostInit && !hasCostFlag(option.CostFlag, CostFlagRecalculate) { +func (p *PhysicalHashAgg) GetPlanCostVer2(taskType property.TaskType, option *coreusage.PlanCostOption) (coreusage.CostVer2, error) { + if p.planCostInit && !hasCostFlag(option.CostFlag, coreusage.CostFlagRecalculate) { return p.planCostVer2, nil } @@ -476,24 +476,24 @@ func (p *PhysicalHashAgg) getPlanCostVer2(taskType property.TaskType, option *Pl groupCost := groupCostVer2(option, inputRows, p.GroupByItems, cpuFactor) hashBuildCost := hashBuildCostVer2(option, outputRows, outputRowSize, float64(len(p.GroupByItems)), cpuFactor, memFactor) hashProbeCost := hashProbeCostVer2(option, inputRows, float64(len(p.GroupByItems)), cpuFactor) - startCost := newCostVer2(option, cpuFactor, + startCost := coreusage.NewCostVer2(option, cpuFactor, 
10*3*cpuFactor.Value, // 10rows * 3func * cpuFactor func() string { return fmt.Sprintf("cpu(10*3*%v)", cpuFactor) }) - childCost, err := p.children[0].getPlanCostVer2(taskType, option) + childCost, err := p.children[0].GetPlanCostVer2(taskType, option) if err != nil { - return zeroCostVer2, err + return coreusage.ZeroCostVer2, err } - p.planCostVer2 = sumCostVer2(startCost, childCost, divCostVer2(sumCostVer2(aggCost, groupCost, hashBuildCost, hashProbeCost), concurrency)) + p.planCostVer2 = coreusage.SumCostVer2(startCost, childCost, coreusage.DivCostVer2(coreusage.SumCostVer2(aggCost, groupCost, hashBuildCost, hashProbeCost), concurrency)) p.planCostInit = true return p.planCostVer2, nil } -// getPlanCostVer2 returns the plan-cost of this sub-plan, which is: +// GetPlanCostVer2 returns the plan-cost of this sub-plan, which is: // plan-cost = left-child-cost + right-child-cost + filter-cost + group-cost -func (p *PhysicalMergeJoin) getPlanCostVer2(taskType property.TaskType, option *PlanCostOption) (costVer2, error) { - if p.planCostInit && !hasCostFlag(option.CostFlag, CostFlagRecalculate) { +func (p *PhysicalMergeJoin) GetPlanCostVer2(taskType property.TaskType, option *coreusage.PlanCostOption) (coreusage.CostVer2, error) { + if p.planCostInit && !hasCostFlag(option.CostFlag, coreusage.CostFlagRecalculate) { return p.planCostVer2, nil } @@ -501,31 +501,31 @@ func (p *PhysicalMergeJoin) getPlanCostVer2(taskType property.TaskType, option * rightRows := getCardinality(p.children[1], option.CostFlag) cpuFactor := getTaskCPUFactorVer2(p, taskType) - filterCost := sumCostVer2(filterCostVer2(option, leftRows, p.LeftConditions, cpuFactor), + filterCost := coreusage.SumCostVer2(filterCostVer2(option, leftRows, p.LeftConditions, cpuFactor), filterCostVer2(option, rightRows, p.RightConditions, cpuFactor)) - groupCost := sumCostVer2(groupCostVer2(option, leftRows, cols2Exprs(p.LeftJoinKeys), cpuFactor), + groupCost := coreusage.SumCostVer2(groupCostVer2(option, leftRows, 
cols2Exprs(p.LeftJoinKeys), cpuFactor), groupCostVer2(option, rightRows, cols2Exprs(p.LeftJoinKeys), cpuFactor)) - leftChildCost, err := p.children[0].getPlanCostVer2(taskType, option) + leftChildCost, err := p.children[0].GetPlanCostVer2(taskType, option) if err != nil { - return zeroCostVer2, err + return coreusage.ZeroCostVer2, err } - rightChildCost, err := p.children[1].getPlanCostVer2(taskType, option) + rightChildCost, err := p.children[1].GetPlanCostVer2(taskType, option) if err != nil { - return zeroCostVer2, err + return coreusage.ZeroCostVer2, err } - p.planCostVer2 = sumCostVer2(leftChildCost, rightChildCost, filterCost, groupCost) + p.planCostVer2 = coreusage.SumCostVer2(leftChildCost, rightChildCost, filterCost, groupCost) p.planCostInit = true return p.planCostVer2, nil } -// getPlanCostVer2 returns the plan-cost of this sub-plan, which is: +// GetPlanCostVer2 returns the plan-cost of this sub-plan, which is: // plan-cost = build-child-cost + probe-child-cost + // build-hash-cost + build-filter-cost + // (probe-filter-cost + probe-hash-cost) / concurrency -func (p *PhysicalHashJoin) getPlanCostVer2(taskType property.TaskType, option *PlanCostOption) (costVer2, error) { - if p.planCostInit && !hasCostFlag(option.CostFlag, CostFlagRecalculate) { +func (p *PhysicalHashJoin) GetPlanCostVer2(taskType property.TaskType, option *coreusage.PlanCostOption) (coreusage.CostVer2, error) { + if p.planCostInit && !hasCostFlag(option.CostFlag, coreusage.CostFlagRecalculate) { return p.planCostVer2, nil } @@ -550,31 +550,31 @@ func (p *PhysicalHashJoin) getPlanCostVer2(taskType property.TaskType, option *P probeFilterCost := filterCostVer2(option, probeRows, probeFilters, cpuFactor) probeHashCost := hashProbeCostVer2(option, probeRows, float64(len(probeKeys)), cpuFactor) - buildChildCost, err := build.getPlanCostVer2(taskType, option) + buildChildCost, err := build.GetPlanCostVer2(taskType, option) if err != nil { - return zeroCostVer2, err + return 
coreusage.ZeroCostVer2, err } - probeChildCost, err := probe.getPlanCostVer2(taskType, option) + probeChildCost, err := probe.GetPlanCostVer2(taskType, option) if err != nil { - return zeroCostVer2, err + return coreusage.ZeroCostVer2, err } if taskType == property.MppTaskType { // BCast or Shuffle Join, use mppConcurrency - p.planCostVer2 = sumCostVer2(buildChildCost, probeChildCost, - divCostVer2(sumCostVer2(buildHashCost, buildFilterCost, probeHashCost, probeFilterCost), mppConcurrency)) + p.planCostVer2 = coreusage.SumCostVer2(buildChildCost, probeChildCost, + coreusage.DivCostVer2(coreusage.SumCostVer2(buildHashCost, buildFilterCost, probeHashCost, probeFilterCost), mppConcurrency)) } else { // TiDB HashJoin - startCost := newCostVer2(option, cpuFactor, + startCost := coreusage.NewCostVer2(option, cpuFactor, 10*3*cpuFactor.Value, // 10rows * 3func * cpuFactor func() string { return fmt.Sprintf("cpu(10*3*%v)", cpuFactor) }) - p.planCostVer2 = sumCostVer2(startCost, buildChildCost, probeChildCost, buildHashCost, buildFilterCost, - divCostVer2(sumCostVer2(probeFilterCost, probeHashCost), tidbConcurrency)) + p.planCostVer2 = coreusage.SumCostVer2(startCost, buildChildCost, probeChildCost, buildHashCost, buildFilterCost, + coreusage.DivCostVer2(coreusage.SumCostVer2(probeFilterCost, probeHashCost), tidbConcurrency)) } p.planCostInit = true return p.planCostVer2, nil } -func (p *PhysicalIndexJoin) getIndexJoinCostVer2(taskType property.TaskType, option *PlanCostOption, indexJoinType int) (costVer2, error) { - if p.planCostInit && !hasCostFlag(option.CostFlag, CostFlagRecalculate) { +func (p *PhysicalIndexJoin) getIndexJoinCostVer2(taskType property.TaskType, option *coreusage.PlanCostOption, indexJoinType int) (coreusage.CostVer2, error) { + if p.planCostInit && !hasCostFlag(option.CostFlag, coreusage.CostFlagRecalculate) { return p.planCostVer2, nil } @@ -591,29 +591,29 @@ func (p *PhysicalIndexJoin) getIndexJoinCostVer2(taskType property.TaskType, opt 
requestFactor := getTaskRequestFactorVer2(p, taskType) buildFilterCost := filterCostVer2(option, buildRows, buildFilters, cpuFactor) - buildChildCost, err := build.getPlanCostVer2(taskType, option) + buildChildCost, err := build.GetPlanCostVer2(taskType, option) if err != nil { - return zeroCostVer2, err + return coreusage.ZeroCostVer2, err } - buildTaskCost := newCostVer2(option, cpuFactor, + buildTaskCost := coreusage.NewCostVer2(option, cpuFactor, buildRows*10*cpuFactor.Value, func() string { return fmt.Sprintf("cpu(%v*10*%v)", buildRows, cpuFactor) }) - startCost := newCostVer2(option, cpuFactor, + startCost := coreusage.NewCostVer2(option, cpuFactor, 10*3*cpuFactor.Value, func() string { return fmt.Sprintf("cpu(10*3*%v)", cpuFactor) }) probeFilterCost := filterCostVer2(option, probeRowsTot, probeFilters, cpuFactor) - probeChildCost, err := probe.getPlanCostVer2(taskType, option) + probeChildCost, err := probe.GetPlanCostVer2(taskType, option) if err != nil { - return zeroCostVer2, err + return coreusage.ZeroCostVer2, err } - var hashTableCost costVer2 + var hashTableCost coreusage.CostVer2 switch indexJoinType { case 1: // IndexHashJoin hashTableCost = hashBuildCostVer2(option, buildRows, buildRowSize, float64(len(p.RightJoinKeys)), cpuFactor, memFactor) case 2: // IndexMergeJoin - hashTableCost = newZeroCostVer2(traceCost(option)) + hashTableCost = coreusage.NewZeroCostVer2(coreusage.TraceCost(option)) default: // IndexJoin hashTableCost = hashBuildCostVer2(option, probeRowsTot, probeRowSize, float64(len(p.LeftJoinKeys)), cpuFactor, memFactor) } @@ -623,44 +623,44 @@ func (p *PhysicalIndexJoin) getIndexJoinCostVer2(taskType property.TaskType, opt // Use an empirical value batchRatio to handle this now. // TODO: remove this empirical value. 
batchRatio := 6.0 - probeCost := divCostVer2(mulCostVer2(probeChildCost, buildRows), batchRatio) + probeCost := coreusage.DivCostVer2(coreusage.MulCostVer2(probeChildCost, buildRows), batchRatio) // Double Read Cost - doubleReadCost := newZeroCostVer2(traceCost(option)) + doubleReadCost := coreusage.NewZeroCostVer2(coreusage.TraceCost(option)) if p.SCtx().GetSessionVars().IndexJoinDoubleReadPenaltyCostRate > 0 { batchSize := float64(p.SCtx().GetSessionVars().IndexJoinBatchSize) taskPerBatch := 1024.0 // TODO: remove this magic number doubleReadTasks := buildRows / batchSize * taskPerBatch doubleReadCost = doubleReadCostVer2(option, doubleReadTasks, requestFactor) - doubleReadCost = mulCostVer2(doubleReadCost, p.SCtx().GetSessionVars().IndexJoinDoubleReadPenaltyCostRate) + doubleReadCost = coreusage.MulCostVer2(doubleReadCost, p.SCtx().GetSessionVars().IndexJoinDoubleReadPenaltyCostRate) } - p.planCostVer2 = sumCostVer2(startCost, buildChildCost, buildFilterCost, buildTaskCost, divCostVer2(sumCostVer2(doubleReadCost, probeCost, probeFilterCost, hashTableCost), probeConcurrency)) + p.planCostVer2 = coreusage.SumCostVer2(startCost, buildChildCost, buildFilterCost, buildTaskCost, coreusage.DivCostVer2(coreusage.SumCostVer2(doubleReadCost, probeCost, probeFilterCost, hashTableCost), probeConcurrency)) p.planCostInit = true return p.planCostVer2, nil } -// getPlanCostVer2 returns the plan-cost of this sub-plan, which is: +// GetPlanCostVer2 returns the plan-cost of this sub-plan, which is: // plan-cost = build-child-cost + build-filter-cost + // (probe-cost + probe-filter-cost) / concurrency // probe-cost = probe-child-cost * build-rows / batchRatio -func (p *PhysicalIndexJoin) getPlanCostVer2(taskType property.TaskType, option *PlanCostOption) (costVer2, error) { +func (p *PhysicalIndexJoin) GetPlanCostVer2(taskType property.TaskType, option *coreusage.PlanCostOption) (coreusage.CostVer2, error) { return p.getIndexJoinCostVer2(taskType, option, 0) } -func (p 
*PhysicalIndexHashJoin) getPlanCostVer2(taskType property.TaskType, option *PlanCostOption) (costVer2, error) { +func (p *PhysicalIndexHashJoin) GetPlanCostVer2(taskType property.TaskType, option *coreusage.PlanCostOption) (coreusage.CostVer2, error) { return p.getIndexJoinCostVer2(taskType, option, 1) } -func (p *PhysicalIndexMergeJoin) getPlanCostVer2(taskType property.TaskType, option *PlanCostOption) (costVer2, error) { +func (p *PhysicalIndexMergeJoin) GetPlanCostVer2(taskType property.TaskType, option *coreusage.PlanCostOption) (coreusage.CostVer2, error) { return p.getIndexJoinCostVer2(taskType, option, 2) } -// getPlanCostVer2 returns the plan-cost of this sub-plan, which is: +// GetPlanCostVer2 returns the plan-cost of this sub-plan, which is: // plan-cost = build-child-cost + build-filter-cost + probe-cost + probe-filter-cost // probe-cost = probe-child-cost * build-rows -func (p *PhysicalApply) getPlanCostVer2(taskType property.TaskType, option *PlanCostOption) (costVer2, error) { - if p.planCostInit && !hasCostFlag(option.CostFlag, CostFlagRecalculate) { +func (p *PhysicalApply) GetPlanCostVer2(taskType property.TaskType, option *coreusage.PlanCostOption) (coreusage.CostVer2, error) { + if p.planCostInit && !hasCostFlag(option.CostFlag, coreusage.CostFlagRecalculate) { return p.planCostVer2, nil } @@ -670,48 +670,48 @@ func (p *PhysicalApply) getPlanCostVer2(taskType property.TaskType, option *Plan cpuFactor := getTaskCPUFactorVer2(p, taskType) buildFilterCost := filterCostVer2(option, buildRows, p.LeftConditions, cpuFactor) - buildChildCost, err := p.children[0].getPlanCostVer2(taskType, option) + buildChildCost, err := p.children[0].GetPlanCostVer2(taskType, option) if err != nil { - return zeroCostVer2, err + return coreusage.ZeroCostVer2, err } probeFilterCost := filterCostVer2(option, probeRowsTot, p.RightConditions, cpuFactor) - probeChildCost, err := p.children[1].getPlanCostVer2(taskType, option) + probeChildCost, err := 
p.children[1].GetPlanCostVer2(taskType, option) if err != nil { - return zeroCostVer2, err + return coreusage.ZeroCostVer2, err } - probeCost := mulCostVer2(probeChildCost, buildRows) + probeCost := coreusage.MulCostVer2(probeChildCost, buildRows) - p.planCostVer2 = sumCostVer2(buildChildCost, buildFilterCost, probeCost, probeFilterCost) + p.planCostVer2 = coreusage.SumCostVer2(buildChildCost, buildFilterCost, probeCost, probeFilterCost) p.planCostInit = true return p.planCostVer2, nil } -// getPlanCostVer2 calculates the cost of the plan if it has not been calculated yet and returns the cost. +// GetPlanCostVer2 calculates the cost of the plan if it has not been calculated yet and returns the cost. // plan-cost = sum(child-cost) / concurrency -func (p *PhysicalUnionAll) getPlanCostVer2(taskType property.TaskType, option *PlanCostOption) (costVer2, error) { - if p.planCostInit && !hasCostFlag(option.CostFlag, CostFlagRecalculate) { +func (p *PhysicalUnionAll) GetPlanCostVer2(taskType property.TaskType, option *coreusage.PlanCostOption) (coreusage.CostVer2, error) { + if p.planCostInit && !hasCostFlag(option.CostFlag, coreusage.CostFlagRecalculate) { return p.planCostVer2, nil } concurrency := float64(p.SCtx().GetSessionVars().UnionConcurrency()) - childCosts := make([]costVer2, 0, len(p.children)) + childCosts := make([]coreusage.CostVer2, 0, len(p.children)) for _, child := range p.children { - childCost, err := child.getPlanCostVer2(taskType, option) + childCost, err := child.GetPlanCostVer2(taskType, option) if err != nil { - return zeroCostVer2, err + return coreusage.ZeroCostVer2, err } childCosts = append(childCosts, childCost) } - p.planCostVer2 = divCostVer2(sumCostVer2(childCosts...), concurrency) + p.planCostVer2 = coreusage.DivCostVer2(coreusage.SumCostVer2(childCosts...), concurrency) p.planCostInit = true return p.planCostVer2, nil } -// getPlanCostVer2 returns the plan-cost of this sub-plan, which is: +// GetPlanCostVer2 returns the plan-cost of this 
sub-plan, which is: // plan-cost = child-cost + net-cost -func (p *PhysicalExchangeReceiver) getPlanCostVer2(taskType property.TaskType, option *PlanCostOption) (costVer2, error) { - if p.planCostInit && !hasCostFlag(option.CostFlag, CostFlagRecalculate) { +func (p *PhysicalExchangeReceiver) GetPlanCostVer2(taskType property.TaskType, option *coreusage.PlanCostOption) (coreusage.CostVer2, error) { + if p.planCostInit && !hasCostFlag(option.CostFlag, coreusage.CostFlagRecalculate) { return p.planCostVer2, nil } @@ -726,28 +726,28 @@ func (p *PhysicalExchangeReceiver) getPlanCostVer2(taskType property.TaskType, o netCost := netCostVer2(option, rows, rowSize, netFactor) if isBCast { - netCost = mulCostVer2(netCost, numNode) + netCost = coreusage.MulCostVer2(netCost, numNode) } - childCost, err := p.children[0].getPlanCostVer2(taskType, option) + childCost, err := p.children[0].GetPlanCostVer2(taskType, option) if err != nil { - return zeroCostVer2, err + return coreusage.ZeroCostVer2, err } - p.planCostVer2 = sumCostVer2(childCost, netCost) + p.planCostVer2 = coreusage.SumCostVer2(childCost, netCost) p.planCostInit = true return p.planCostVer2, nil } -// getPlanCostVer2 returns the plan-cost of this sub-plan, which is: -func (p *PointGetPlan) getPlanCostVer2(taskType property.TaskType, option *PlanCostOption) (costVer2, error) { - if p.planCostInit && !hasCostFlag(option.CostFlag, CostFlagRecalculate) { +// GetPlanCostVer2 returns the plan-cost of this sub-plan, which is: +func (p *PointGetPlan) GetPlanCostVer2(taskType property.TaskType, option *coreusage.PlanCostOption) (coreusage.CostVer2, error) { + if p.planCostInit && !hasCostFlag(option.CostFlag, coreusage.CostFlagRecalculate) { return p.planCostVer2, nil } if p.accessCols == nil { // from fast plan code path - p.planCostVer2 = zeroCostVer2 + p.planCostVer2 = coreusage.ZeroCostVer2 p.planCostInit = true - return zeroCostVer2, nil + return coreusage.ZeroCostVer2, nil } rowSize := getAvgRowSize(p.StatsInfo(), 
p.schema.Columns) netFactor := getTaskNetFactorVer2(p, taskType) @@ -757,16 +757,16 @@ func (p *PointGetPlan) getPlanCostVer2(taskType property.TaskType, option *PlanC return p.planCostVer2, nil } -// getPlanCostVer2 returns the plan-cost of this sub-plan, which is: -func (p *BatchPointGetPlan) getPlanCostVer2(taskType property.TaskType, option *PlanCostOption) (costVer2, error) { - if p.planCostInit && !hasCostFlag(option.CostFlag, CostFlagRecalculate) { +// GetPlanCostVer2 returns the plan-cost of this sub-plan, which is: +func (p *BatchPointGetPlan) GetPlanCostVer2(taskType property.TaskType, option *coreusage.PlanCostOption) (coreusage.CostVer2, error) { + if p.planCostInit && !hasCostFlag(option.CostFlag, coreusage.CostFlagRecalculate) { return p.planCostVer2, nil } if p.accessCols == nil { // from fast plan code path - p.planCostVer2 = zeroCostVer2 + p.planCostVer2 = coreusage.ZeroCostVer2 p.planCostInit = true - return zeroCostVer2, nil + return coreusage.ZeroCostVer2, nil } rows := getCardinality(p, option.CostFlag) rowSize := getAvgRowSize(p.StatsInfo(), p.schema.Columns) @@ -777,8 +777,8 @@ func (p *BatchPointGetPlan) getPlanCostVer2(taskType property.TaskType, option * return p.planCostVer2, nil } -func (p *PhysicalCTE) getPlanCostVer2(taskType property.TaskType, option *PlanCostOption) (costVer2, error) { - if p.planCostInit && !hasCostFlag(option.CostFlag, CostFlagRecalculate) { +func (p *PhysicalCTE) GetPlanCostVer2(taskType property.TaskType, option *coreusage.PlanCostOption) (coreusage.CostVer2, error) { + if p.planCostInit && !hasCostFlag(option.CostFlag, coreusage.CostFlagRecalculate) { return p.planCostVer2, nil } @@ -792,39 +792,39 @@ func (p *PhysicalCTE) getPlanCostVer2(taskType property.TaskType, option *PlanCo return p.planCostVer2, nil } -func scanCostVer2(option *PlanCostOption, rows, rowSize float64, scanFactor costVer2Factor) costVer2 { +func scanCostVer2(option *coreusage.PlanCostOption, rows, rowSize float64, scanFactor 
coreusage.CostVer2Factor) coreusage.CostVer2 { if rowSize < 1 { rowSize = 1 } - return newCostVer2(option, scanFactor, + return coreusage.NewCostVer2(option, scanFactor, // rows * log(row-size) * scanFactor, log2 from experiments rows*math.Log2(rowSize)*scanFactor.Value, func() string { return fmt.Sprintf("scan(%v*logrowsize(%v)*%v)", rows, rowSize, scanFactor) }) } -func netCostVer2(option *PlanCostOption, rows, rowSize float64, netFactor costVer2Factor) costVer2 { - return newCostVer2(option, netFactor, +func netCostVer2(option *coreusage.PlanCostOption, rows, rowSize float64, netFactor coreusage.CostVer2Factor) coreusage.CostVer2 { + return coreusage.NewCostVer2(option, netFactor, rows*rowSize*netFactor.Value, func() string { return fmt.Sprintf("net(%v*rowsize(%v)*%v)", rows, rowSize, netFactor) }) } -func filterCostVer2(option *PlanCostOption, rows float64, filters []expression.Expression, cpuFactor costVer2Factor) costVer2 { +func filterCostVer2(option *coreusage.PlanCostOption, rows float64, filters []expression.Expression, cpuFactor coreusage.CostVer2Factor) coreusage.CostVer2 { numFuncs := numFunctions(filters) - return newCostVer2(option, cpuFactor, + return coreusage.NewCostVer2(option, cpuFactor, rows*numFuncs*cpuFactor.Value, func() string { return fmt.Sprintf("cpu(%v*filters(%v)*%v)", rows, numFuncs, cpuFactor) }) } -func aggCostVer2(option *PlanCostOption, rows float64, aggFuncs []*aggregation.AggFuncDesc, cpuFactor costVer2Factor) costVer2 { - return newCostVer2(option, cpuFactor, +func aggCostVer2(option *coreusage.PlanCostOption, rows float64, aggFuncs []*aggregation.AggFuncDesc, cpuFactor coreusage.CostVer2Factor) coreusage.CostVer2 { + return coreusage.NewCostVer2(option, cpuFactor, // TODO: consider types of agg-funcs rows*float64(len(aggFuncs))*cpuFactor.Value, func() string { return fmt.Sprintf("agg(%v*aggs(%v)*%v)", rows, len(aggFuncs), cpuFactor) }) } -func groupCostVer2(option *PlanCostOption, rows float64, groupItems 
[]expression.Expression, cpuFactor costVer2Factor) costVer2 { +func groupCostVer2(option *coreusage.PlanCostOption, rows float64, groupItems []expression.Expression, cpuFactor coreusage.CostVer2Factor) coreusage.CostVer2 { numFuncs := numFunctions(groupItems) - return newCostVer2(option, cpuFactor, + return coreusage.NewCostVer2(option, cpuFactor, rows*numFuncs*cpuFactor.Value, func() string { return fmt.Sprintf("group(%v*cols(%v)*%v)", rows, numFuncs, cpuFactor) }) } @@ -841,106 +841,97 @@ func numFunctions(exprs []expression.Expression) float64 { return num } -func orderCostVer2(option *PlanCostOption, rows, n float64, byItems []*util.ByItems, cpuFactor costVer2Factor) costVer2 { +func orderCostVer2(option *coreusage.PlanCostOption, rows, n float64, byItems []*util.ByItems, cpuFactor coreusage.CostVer2Factor) coreusage.CostVer2 { numFuncs := 0 for _, byItem := range byItems { if _, ok := byItem.Expr.(*expression.ScalarFunction); ok { numFuncs++ } } - exprCost := newCostVer2(option, cpuFactor, + exprCost := coreusage.NewCostVer2(option, cpuFactor, rows*float64(numFuncs)*cpuFactor.Value, func() string { return fmt.Sprintf("exprCPU(%v*%v*%v)", rows, numFuncs, cpuFactor) }) - orderCost := newCostVer2(option, cpuFactor, + orderCost := coreusage.NewCostVer2(option, cpuFactor, rows*math.Log2(n)*cpuFactor.Value, func() string { return fmt.Sprintf("orderCPU(%v*log(%v)*%v)", rows, n, cpuFactor) }) - return sumCostVer2(exprCost, orderCost) + return coreusage.SumCostVer2(exprCost, orderCost) } -func hashBuildCostVer2(option *PlanCostOption, buildRows, buildRowSize, nKeys float64, cpuFactor, memFactor costVer2Factor) costVer2 { +func hashBuildCostVer2(option *coreusage.PlanCostOption, buildRows, buildRowSize, nKeys float64, cpuFactor, memFactor coreusage.CostVer2Factor) coreusage.CostVer2 { // TODO: 1) consider types of keys, 2) dedicated factor for build-probe hash table - hashKeyCost := newCostVer2(option, cpuFactor, + hashKeyCost := coreusage.NewCostVer2(option, cpuFactor, 
buildRows*nKeys*cpuFactor.Value, func() string { return fmt.Sprintf("hashkey(%v*%v*%v)", buildRows, nKeys, cpuFactor) }) - hashMemCost := newCostVer2(option, memFactor, + hashMemCost := coreusage.NewCostVer2(option, memFactor, buildRows*buildRowSize*memFactor.Value, func() string { return fmt.Sprintf("hashmem(%v*%v*%v)", buildRows, buildRowSize, memFactor) }) - hashBuildCost := newCostVer2(option, cpuFactor, + hashBuildCost := coreusage.NewCostVer2(option, cpuFactor, buildRows*cpuFactor.Value, func() string { return fmt.Sprintf("hashbuild(%v*%v)", buildRows, cpuFactor) }) - return sumCostVer2(hashKeyCost, hashMemCost, hashBuildCost) + return coreusage.SumCostVer2(hashKeyCost, hashMemCost, hashBuildCost) } -func hashProbeCostVer2(option *PlanCostOption, probeRows, nKeys float64, cpuFactor costVer2Factor) costVer2 { +func hashProbeCostVer2(option *coreusage.PlanCostOption, probeRows, nKeys float64, cpuFactor coreusage.CostVer2Factor) coreusage.CostVer2 { // TODO: 1) consider types of keys, 2) dedicated factor for build-probe hash table - hashKeyCost := newCostVer2(option, cpuFactor, + hashKeyCost := coreusage.NewCostVer2(option, cpuFactor, probeRows*nKeys*cpuFactor.Value, func() string { return fmt.Sprintf("hashkey(%v*%v*%v)", probeRows, nKeys, cpuFactor) }) - hashProbeCost := newCostVer2(option, cpuFactor, + hashProbeCost := coreusage.NewCostVer2(option, cpuFactor, probeRows*cpuFactor.Value, func() string { return fmt.Sprintf("hashprobe(%v*%v)", probeRows, cpuFactor) }) - return sumCostVer2(hashKeyCost, hashProbeCost) + return coreusage.SumCostVer2(hashKeyCost, hashProbeCost) } // For simplicity and robust, only operators that need double-read like IndexLookup and IndexJoin consider this cost. 
-func doubleReadCostVer2(option *PlanCostOption, numTasks float64, requestFactor costVer2Factor) costVer2 { - return newCostVer2(option, requestFactor, +func doubleReadCostVer2(option *coreusage.PlanCostOption, numTasks float64, requestFactor coreusage.CostVer2Factor) coreusage.CostVer2 { + return coreusage.NewCostVer2(option, requestFactor, numTasks*requestFactor.Value, func() string { return fmt.Sprintf("doubleRead(tasks(%v)*%v)", numTasks, requestFactor) }) } -type costVer2Factor struct { - Name string - Value float64 -} - -func (f costVer2Factor) String() string { - return fmt.Sprintf("%s(%v)", f.Name, f.Value) -} - // In Cost Ver2, we hide cost factors from users and deprecate SQL variables like `tidb_opt_scan_factor`. type costVer2Factors struct { - TiDBTemp costVer2Factor // operations on TiDB temporary table - TiKVScan costVer2Factor // per byte - TiKVDescScan costVer2Factor // per byte - TiFlashScan costVer2Factor // per byte - TiDBCPU costVer2Factor // per column or expression - TiKVCPU costVer2Factor // per column or expression - TiFlashCPU costVer2Factor // per column or expression - TiDB2KVNet costVer2Factor // per byte - TiDB2FlashNet costVer2Factor // per byte - TiFlashMPPNet costVer2Factor // per byte - TiDBMem costVer2Factor // per byte - TiKVMem costVer2Factor // per byte - TiFlashMem costVer2Factor // per byte - TiDBDisk costVer2Factor // per byte - TiDBRequest costVer2Factor // per net request + TiDBTemp coreusage.CostVer2Factor // operations on TiDB temporary table + TiKVScan coreusage.CostVer2Factor // per byte + TiKVDescScan coreusage.CostVer2Factor // per byte + TiFlashScan coreusage.CostVer2Factor // per byte + TiDBCPU coreusage.CostVer2Factor // per column or expression + TiKVCPU coreusage.CostVer2Factor // per column or expression + TiFlashCPU coreusage.CostVer2Factor // per column or expression + TiDB2KVNet coreusage.CostVer2Factor // per byte + TiDB2FlashNet coreusage.CostVer2Factor // per byte + TiFlashMPPNet coreusage.CostVer2Factor 
// per byte + TiDBMem coreusage.CostVer2Factor // per byte + TiKVMem coreusage.CostVer2Factor // per byte + TiFlashMem coreusage.CostVer2Factor // per byte + TiDBDisk coreusage.CostVer2Factor // per byte + TiDBRequest coreusage.CostVer2Factor // per net request } -func (c costVer2Factors) tolist() (l []costVer2Factor) { +func (c costVer2Factors) tolist() (l []coreusage.CostVer2Factor) { return append(l, c.TiDBTemp, c.TiKVScan, c.TiKVDescScan, c.TiFlashScan, c.TiDBCPU, c.TiKVCPU, c.TiFlashCPU, c.TiDB2KVNet, c.TiDB2FlashNet, c.TiFlashMPPNet, c.TiDBMem, c.TiKVMem, c.TiFlashMem, c.TiDBDisk, c.TiDBRequest) } var defaultVer2Factors = costVer2Factors{ - TiDBTemp: costVer2Factor{"tidb_temp_table_factor", 0.00}, - TiKVScan: costVer2Factor{"tikv_scan_factor", 40.70}, - TiKVDescScan: costVer2Factor{"tikv_desc_scan_factor", 61.05}, - TiFlashScan: costVer2Factor{"tiflash_scan_factor", 11.60}, - TiDBCPU: costVer2Factor{"tidb_cpu_factor", 49.90}, - TiKVCPU: costVer2Factor{"tikv_cpu_factor", 49.90}, - TiFlashCPU: costVer2Factor{"tiflash_cpu_factor", 2.40}, - TiDB2KVNet: costVer2Factor{"tidb_kv_net_factor", 3.96}, - TiDB2FlashNet: costVer2Factor{"tidb_flash_net_factor", 2.20}, - TiFlashMPPNet: costVer2Factor{"tiflash_mpp_net_factor", 1.00}, - TiDBMem: costVer2Factor{"tidb_mem_factor", 0.20}, - TiKVMem: costVer2Factor{"tikv_mem_factor", 0.20}, - TiFlashMem: costVer2Factor{"tiflash_mem_factor", 0.05}, - TiDBDisk: costVer2Factor{"tidb_disk_factor", 200.00}, - TiDBRequest: costVer2Factor{"tidb_request_factor", 6000000.00}, + TiDBTemp: coreusage.CostVer2Factor{"tidb_temp_table_factor", 0.00}, + TiKVScan: coreusage.CostVer2Factor{"tikv_scan_factor", 40.70}, + TiKVDescScan: coreusage.CostVer2Factor{"tikv_desc_scan_factor", 61.05}, + TiFlashScan: coreusage.CostVer2Factor{"tiflash_scan_factor", 11.60}, + TiDBCPU: coreusage.CostVer2Factor{"tidb_cpu_factor", 49.90}, + TiKVCPU: coreusage.CostVer2Factor{"tikv_cpu_factor", 49.90}, + TiFlashCPU: coreusage.CostVer2Factor{"tiflash_cpu_factor", 
2.40}, + TiDB2KVNet: coreusage.CostVer2Factor{"tidb_kv_net_factor", 3.96}, + TiDB2FlashNet: coreusage.CostVer2Factor{"tidb_flash_net_factor", 2.20}, + TiFlashMPPNet: coreusage.CostVer2Factor{"tiflash_mpp_net_factor", 1.00}, + TiDBMem: coreusage.CostVer2Factor{"tidb_mem_factor", 0.20}, + TiKVMem: coreusage.CostVer2Factor{"tikv_mem_factor", 0.20}, + TiFlashMem: coreusage.CostVer2Factor{"tiflash_mem_factor", 0.05}, + TiDBDisk: coreusage.CostVer2Factor{"tidb_disk_factor", 200.00}, + TiDBRequest: coreusage.CostVer2Factor{"tidb_request_factor", 6000000.00}, } -func getTaskCPUFactorVer2(_ PhysicalPlan, taskType property.TaskType) costVer2Factor { +func getTaskCPUFactorVer2(_ PhysicalPlan, taskType property.TaskType) coreusage.CostVer2Factor { switch taskType { case property.RootTaskType: // TiDB return defaultVer2Factors.TiDBCPU @@ -951,7 +942,7 @@ func getTaskCPUFactorVer2(_ PhysicalPlan, taskType property.TaskType) costVer2Fa } } -func getTaskMemFactorVer2(_ PhysicalPlan, taskType property.TaskType) costVer2Factor { +func getTaskMemFactorVer2(_ PhysicalPlan, taskType property.TaskType) coreusage.CostVer2Factor { switch taskType { case property.RootTaskType: // TiDB return defaultVer2Factors.TiDBMem @@ -962,7 +953,7 @@ func getTaskMemFactorVer2(_ PhysicalPlan, taskType property.TaskType) costVer2Fa } } -func getTaskScanFactorVer2(p PhysicalPlan, storeType kv.StoreType, taskType property.TaskType) costVer2Factor { +func getTaskScanFactorVer2(p PhysicalPlan, storeType kv.StoreType, taskType property.TaskType) coreusage.CostVer2Factor { if isTemporaryTable(getTableInfo(p)) { return defaultVer2Factors.TiDBTemp } @@ -987,7 +978,7 @@ func getTaskScanFactorVer2(p PhysicalPlan, storeType kv.StoreType, taskType prop } } -func getTaskNetFactorVer2(p PhysicalPlan, _ property.TaskType) costVer2Factor { +func getTaskNetFactorVer2(p PhysicalPlan, _ property.TaskType) coreusage.CostVer2Factor { if isTemporaryTable(getTableInfo(p)) { return defaultVer2Factors.TiDBTemp } @@ -1002,7 
+993,7 @@ func getTaskNetFactorVer2(p PhysicalPlan, _ property.TaskType) costVer2Factor { return defaultVer2Factors.TiDB2KVNet } -func getTaskRequestFactorVer2(p PhysicalPlan, _ property.TaskType) costVer2Factor { +func getTaskRequestFactorVer2(p PhysicalPlan, _ property.TaskType) coreusage.CostVer2Factor { if isTemporaryTable(getTableInfo(p)) { return defaultVer2Factors.TiDBTemp } @@ -1045,85 +1036,3 @@ func cols2Exprs(cols []*expression.Column) []expression.Expression { } return exprs } - -type costTrace struct { - factorCosts map[string]float64 // map[factorName]cost, used to calibrate the cost model - formula string // It used to trace the cost calculation. -} - -type costVer2 struct { - cost float64 - trace *costTrace -} - -func traceCost(option *PlanCostOption) bool { - if option != nil && hasCostFlag(option.CostFlag, CostFlagTrace) { - return true - } - return false -} - -func newZeroCostVer2(trace bool) (ret costVer2) { - if trace { - ret.trace = &costTrace{make(map[string]float64), ""} - } - return -} - -func newCostVer2(option *PlanCostOption, factor costVer2Factor, cost float64, lazyFormula func() string) (ret costVer2) { - ret.cost = cost - if traceCost(option) { - ret.trace = &costTrace{make(map[string]float64), ""} - ret.trace.factorCosts[factor.Name] = cost - ret.trace.formula = lazyFormula() - } - return ret -} - -func sumCostVer2(costs ...costVer2) (ret costVer2) { - if len(costs) == 0 { - return - } - for _, c := range costs { - ret.cost += c.cost - if c.trace != nil { - if ret.trace == nil { // init - ret.trace = &costTrace{make(map[string]float64), ""} - } - for factor, factorCost := range c.trace.factorCosts { - ret.trace.factorCosts[factor] += factorCost - } - if ret.trace.formula != "" { - ret.trace.formula += " + " - } - ret.trace.formula += "(" + c.trace.formula + ")" - } - } - return ret -} - -func divCostVer2(cost costVer2, denominator float64) (ret costVer2) { - ret.cost = cost.cost / denominator - if cost.trace != nil { - ret.trace = 
&costTrace{make(map[string]float64), ""} - for f, c := range cost.trace.factorCosts { - ret.trace.factorCosts[f] = c / denominator - } - ret.trace.formula = "(" + cost.trace.formula + ")/" + strconv.FormatFloat(denominator, 'f', 2, 64) - } - return ret -} - -func mulCostVer2(cost costVer2, scale float64) (ret costVer2) { - ret.cost = cost.cost * scale - if cost.trace != nil { - ret.trace = &costTrace{make(map[string]float64), ""} - for f, c := range cost.trace.factorCosts { - ret.trace.factorCosts[f] = c * scale - } - ret.trace.formula = "(" + cost.trace.formula + ")*" + strconv.FormatFloat(scale, 'f', 2, 64) - } - return ret -} - -var zeroCostVer2 = newZeroCostVer2(false) diff --git a/pkg/planner/core/plan_cost_ver2_test.go b/pkg/planner/core/plan_cost_ver2_test.go index 657092f0e7bf3..de94de53fb242 100644 --- a/pkg/planner/core/plan_cost_ver2_test.go +++ b/pkg/planner/core/plan_cost_ver2_test.go @@ -27,6 +27,7 @@ import ( "github.com/pingcap/tidb/pkg/planner" "github.com/pingcap/tidb/pkg/planner/core" "github.com/pingcap/tidb/pkg/planner/property" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "github.com/pingcap/tidb/pkg/sessiontxn" "github.com/pingcap/tidb/pkg/testkit" "github.com/stretchr/testify/require" @@ -168,13 +169,13 @@ func BenchmarkGetPlanCost(b *testing.B) { b.Fatal(err) } phyPlan := plan.(core.PhysicalPlan) - _, err = core.GetPlanCost(phyPlan, property.RootTaskType, core.NewDefaultPlanCostOption().WithCostFlag(core.CostFlagRecalculate)) + _, err = core.GetPlanCost(phyPlan, property.RootTaskType, coreusage.NewDefaultPlanCostOption().WithCostFlag(coreusage.CostFlagRecalculate)) if err != nil { b.Fatal(err) } b.ResetTimer() for i := 0; i < b.N; i++ { - _, _ = core.GetPlanCost(phyPlan, property.RootTaskType, core.NewDefaultPlanCostOption().WithCostFlag(core.CostFlagRecalculate)) + _, _ = core.GetPlanCost(phyPlan, property.RootTaskType, coreusage.NewDefaultPlanCostOption().WithCostFlag(coreusage.CostFlagRecalculate)) } } diff --git 
a/pkg/planner/core/planbuilder.go b/pkg/planner/core/planbuilder.go index 6991d7eeec93e..7fdb02e1e6ab7 100644 --- a/pkg/planner/core/planbuilder.go +++ b/pkg/planner/core/planbuilder.go @@ -1562,11 +1562,11 @@ func (b *PlanBuilder) buildPhysicalIndexLookUpReader(_ context.Context, dbName m extraHandleCol: extraCol, commonHandleCols: commonCols, } - rootT := cop.convertToRootTask(b.ctx) - if err := rootT.p.ResolveIndices(); err != nil { + rootT := cop.ConvertToRootTask(b.ctx) + if err := rootT.GetPlan().ResolveIndices(); err != nil { return nil, err } - return rootT.p, nil + return rootT.GetPlan(), nil } func getIndexColumnInfos(tblInfo *model.TableInfo, idx *model.IndexInfo) []*model.ColumnInfo { diff --git a/pkg/planner/core/point_get_plan.go b/pkg/planner/core/point_get_plan.go index a49dcdcf00f0e..c7d0cf529975a 100644 --- a/pkg/planner/core/point_get_plan.go +++ b/pkg/planner/core/point_get_plan.go @@ -15,6 +15,7 @@ package core import ( + "github.com/pingcap/tidb/pkg/planner/util/coreusage" math2 "math" "strconv" "strings" @@ -95,7 +96,7 @@ type PointGetPlan struct { // required by cost model planCostInit bool planCost float64 - planCostVer2 costVer2 + planCostVer2 coreusage.CostVer2 // accessCols represents actual columns the PointGet will access, which are used to calculate row-size accessCols []*expression.Column @@ -108,21 +109,21 @@ type PointGetPlan struct { PartitionNames []model.CIStr } -func (p *PointGetPlan) getEstRowCountForDisplay() float64 { +func (p *PointGetPlan) GetEstRowCountForDisplay() float64 { if p == nil { return 0 } return p.StatsInfo().RowCount * getEstimatedProbeCntFromProbeParents(p.probeParents) } -func (p *PointGetPlan) getActualProbeCnt(statsColl *execdetails.RuntimeStatsColl) int64 { +func (p *PointGetPlan) GetActualProbeCnt(statsColl *execdetails.RuntimeStatsColl) int64 { if p == nil { return 1 } return getActualProbeCntFromProbeParents(p.probeParents, statsColl) } -func (p *PointGetPlan) setProbeParents(probeParents 
[]PhysicalPlan) { +func (p *PointGetPlan) SetProbeParents(probeParents []PhysicalPlan) { p.probeParents = probeParents } @@ -148,9 +149,9 @@ func (p *PointGetPlan) SetCost(cost float64) { p.cost = cost } -// attach2Task makes the current physical plan as the father of task's physicalPlan and updates the cost of +// Attach2Task makes the current physical plan as the father of task's physicalPlan and updates the cost of // current task. If the child's task is cop task, some operator may close this task and return a new rootTask. -func (*PointGetPlan) attach2Task(...task) task { +func (*PointGetPlan) Attach2Task(...Task) Task { return nil } @@ -260,7 +261,7 @@ func (p *PointGetPlan) SetOutputNames(names types.NameSlice) { p.outputNames = names } -func (*PointGetPlan) appendChildCandidate(_ *physicalOptimizeOp) {} +func (*PointGetPlan) AppendChildCandidate(_ *coreusage.PhysicalOptimizeOp) {} const emptyPointGetPlanSize = int64(unsafe.Sizeof(PointGetPlan{})) @@ -434,7 +435,7 @@ type BatchPointGetPlan struct { // required by cost model planCostInit bool planCost float64 - planCostVer2 costVer2 + planCostVer2 coreusage.CostVer2 // accessCols represents actual columns the PointGet will access, which are used to calculate row-size accessCols []*expression.Column @@ -445,20 +446,20 @@ type BatchPointGetPlan struct { PartitionNames []model.CIStr } -func (p *BatchPointGetPlan) getEstRowCountForDisplay() float64 { +func (p *BatchPointGetPlan) GetEstRowCountForDisplay() float64 { if p == nil { return 0 } return p.StatsInfo().RowCount * getEstimatedProbeCntFromProbeParents(p.probeParents) } -func (p *BatchPointGetPlan) getActualProbeCnt(statsColl *execdetails.RuntimeStatsColl) int64 { +func (p *BatchPointGetPlan) GetActualProbeCnt(statsColl *execdetails.RuntimeStatsColl) int64 { if p == nil { return 1 } return getActualProbeCntFromProbeParents(p.probeParents, statsColl) } -func (p *BatchPointGetPlan) setProbeParents(probeParents []PhysicalPlan) { +func (p *BatchPointGetPlan) 
SetProbeParents(probeParents []PhysicalPlan) { p.probeParents = probeParents } @@ -482,9 +483,9 @@ func (*BatchPointGetPlan) ExtractCorrelatedCols() []*expression.CorrelatedColumn return nil } -// attach2Task makes the current physical plan as the father of task's physicalPlan and updates the cost of +// Attach2Task makes the current physical plan as the father of task's physicalPlan and updates the cost of // current task. If the child's task is cop task, some operator may close this task and return a new rootTask. -func (*BatchPointGetPlan) attach2Task(...task) task { +func (*BatchPointGetPlan) Attach2Task(...Task) Task { return nil } @@ -571,7 +572,7 @@ func (p *BatchPointGetPlan) SetOutputNames(names types.NameSlice) { p.names = names } -func (*BatchPointGetPlan) appendChildCandidate(_ *physicalOptimizeOp) {} +func (*BatchPointGetPlan) AppendChildCandidate(_ *coreusage.PhysicalOptimizeOp) {} const emptyBatchPointGetPlanSize = int64(unsafe.Sizeof(BatchPointGetPlan{})) diff --git a/pkg/planner/core/rule_aggregation_elimination.go b/pkg/planner/core/rule_aggregation_elimination.go index 0670a1dca0ec5..70e31c18fdc49 100644 --- a/pkg/planner/core/rule_aggregation_elimination.go +++ b/pkg/planner/core/rule_aggregation_elimination.go @@ -17,13 +17,13 @@ package core import ( "context" "fmt" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "math" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/expression/aggregation" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/mysql" - "github.com/pingcap/tidb/pkg/planner/util" "github.com/pingcap/tidb/pkg/types" ) @@ -48,7 +48,7 @@ type aggregationEliminateChecker struct { // e.g. select min(b) from t group by a. If a is a unique key, then this sql is equal to `select b from t group by a`. // For count(expr), sum(expr), avg(expr), count(distinct expr, [expr...]) we may need to rewrite the expr. Details are shown below. 
// If we can eliminate agg successful, we return a projection. Else we return a nil pointer. -func (a *aggregationEliminateChecker) tryToEliminateAggregation(agg *LogicalAggregation, opt *util.LogicalOptimizeOp) *LogicalProjection { +func (a *aggregationEliminateChecker) tryToEliminateAggregation(agg *LogicalAggregation, opt *coreusage.LogicalOptimizeOp) *LogicalProjection { for _, af := range agg.AggFuncs { // TODO(issue #9968): Actually, we can rewrite GROUP_CONCAT when all the // arguments it accepts are promised to be NOT-NULL. @@ -89,7 +89,7 @@ func (a *aggregationEliminateChecker) tryToEliminateAggregation(agg *LogicalAggr // tryToEliminateDistinct will eliminate distinct in the aggregation function if the aggregation args // have unique key column. see detail example in https://github.com/pingcap/tidb/issues/23436 -func (*aggregationEliminateChecker) tryToEliminateDistinct(agg *LogicalAggregation, opt *util.LogicalOptimizeOp) { +func (*aggregationEliminateChecker) tryToEliminateDistinct(agg *LogicalAggregation, opt *coreusage.LogicalOptimizeOp) { for _, af := range agg.AggFuncs { if af.HasDistinct { cols := make([]*expression.Column, 0, len(af.Args)) @@ -129,7 +129,7 @@ func (*aggregationEliminateChecker) tryToEliminateDistinct(agg *LogicalAggregati } } -func appendAggregationEliminateTraceStep(agg *LogicalAggregation, proj *LogicalProjection, uniqueKey expression.KeyInfo, opt *util.LogicalOptimizeOp) { +func appendAggregationEliminateTraceStep(agg *LogicalAggregation, proj *LogicalProjection, uniqueKey expression.KeyInfo, opt *coreusage.LogicalOptimizeOp) { reason := func() string { return fmt.Sprintf("%s is a unique key", uniqueKey.String()) } @@ -141,7 +141,7 @@ func appendAggregationEliminateTraceStep(agg *LogicalAggregation, proj *LogicalP } func appendDistinctEliminateTraceStep(agg *LogicalAggregation, uniqueKey expression.KeyInfo, af *aggregation.AggFuncDesc, - opt *util.LogicalOptimizeOp) { + opt *coreusage.LogicalOptimizeOp) { reason := func() 
string { return fmt.Sprintf("%s is a unique key", uniqueKey.String()) } @@ -254,7 +254,7 @@ func wrapCastFunction(ctx expression.BuildContext, arg expression.Expression, ta return expression.BuildCastFunction(ctx, arg, targetTp) } -func (a *aggregationEliminator) optimize(ctx context.Context, p LogicalPlan, opt *util.LogicalOptimizeOp) (LogicalPlan, bool, error) { +func (a *aggregationEliminator) optimize(ctx context.Context, p LogicalPlan, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, bool, error) { planChanged := false newChildren := make([]LogicalPlan, 0, len(p.Children())) for _, child := range p.Children() { diff --git a/pkg/planner/core/rule_aggregation_push_down.go b/pkg/planner/core/rule_aggregation_push_down.go index 4e4e4e0a72ee7..00267760cce3b 100644 --- a/pkg/planner/core/rule_aggregation_push_down.go +++ b/pkg/planner/core/rule_aggregation_push_down.go @@ -18,6 +18,7 @@ import ( "bytes" "context" "fmt" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/expression/aggregation" @@ -248,7 +249,7 @@ func (*aggregationPushDownSolver) decompose(ctx PlanContext, aggFunc *aggregatio // process it temporarily. If not, We will add additional group by columns and first row functions. We make a new aggregation operator. // If the pushed aggregation is grouped by unique key, it's no need to push it down. 
func (a *aggregationPushDownSolver) tryToPushDownAgg(oldAgg *LogicalAggregation, aggFuncs []*aggregation.AggFuncDesc, gbyCols []*expression.Column, - join *LogicalJoin, childIdx int, blockOffset int, opt *util.LogicalOptimizeOp) (_ LogicalPlan, err error) { + join *LogicalJoin, childIdx int, blockOffset int, opt *coreusage.LogicalOptimizeOp) (_ LogicalPlan, err error) { child := join.children[childIdx] if aggregation.IsAllFirstRow(aggFuncs) { return child, nil @@ -433,13 +434,13 @@ func (*aggregationPushDownSolver) pushAggCrossUnion(agg *LogicalAggregation, uni return newAgg, nil } -func (a *aggregationPushDownSolver) optimize(_ context.Context, p LogicalPlan, opt *util.LogicalOptimizeOp) (LogicalPlan, bool, error) { +func (a *aggregationPushDownSolver) optimize(_ context.Context, p LogicalPlan, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, bool, error) { planChanged := false newLogicalPlan, err := a.aggPushDown(p, opt) return newLogicalPlan, planChanged, err } -func (a *aggregationPushDownSolver) tryAggPushDownForUnion(union *LogicalUnionAll, agg *LogicalAggregation, opt *util.LogicalOptimizeOp) error { +func (a *aggregationPushDownSolver) tryAggPushDownForUnion(union *LogicalUnionAll, agg *LogicalAggregation, opt *coreusage.LogicalOptimizeOp) error { for _, aggFunc := range agg.AggFuncs { if !a.isDecomposableWithUnion(aggFunc) { return nil @@ -474,7 +475,7 @@ func (a *aggregationPushDownSolver) tryAggPushDownForUnion(union *LogicalUnionAl } // aggPushDown tries to push down aggregate functions to join paths. 
-func (a *aggregationPushDownSolver) aggPushDown(p LogicalPlan, opt *util.LogicalOptimizeOp) (_ LogicalPlan, err error) { +func (a *aggregationPushDownSolver) aggPushDown(p LogicalPlan, opt *coreusage.LogicalOptimizeOp) (_ LogicalPlan, err error) { if agg, ok := p.(*LogicalAggregation); ok { proj := a.tryToEliminateAggregation(agg, opt) if proj != nil { @@ -683,7 +684,7 @@ func (*aggregationPushDownSolver) name() string { } func appendAggPushDownAcrossJoinTraceStep(oldAgg, newAgg *LogicalAggregation, aggFuncs []*aggregation.AggFuncDesc, join *LogicalJoin, - childIdx int, opt *util.LogicalOptimizeOp) { + childIdx int, opt *coreusage.LogicalOptimizeOp) { reason := func() string { buffer := bytes.NewBufferString(fmt.Sprintf("%v_%v's functions[", oldAgg.TP(), oldAgg.ID())) for i, aggFunc := range aggFuncs { @@ -708,7 +709,7 @@ func appendAggPushDownAcrossJoinTraceStep(oldAgg, newAgg *LogicalAggregation, ag opt.AppendStepToCurrent(join.ID(), join.TP(), reason, action) } -func appendAggPushDownAcrossProjTraceStep(agg *LogicalAggregation, proj *LogicalProjection, opt *util.LogicalOptimizeOp) { +func appendAggPushDownAcrossProjTraceStep(agg *LogicalAggregation, proj *LogicalProjection, opt *coreusage.LogicalOptimizeOp) { action := func() string { buffer := bytes.NewBufferString(fmt.Sprintf("%v_%v is eliminated, and %v_%v's functions changed into[", proj.TP(), proj.ID(), agg.TP(), agg.ID())) for i, aggFunc := range agg.AggFuncs { @@ -726,7 +727,7 @@ func appendAggPushDownAcrossProjTraceStep(agg *LogicalAggregation, proj *Logical opt.AppendStepToCurrent(agg.ID(), agg.TP(), reason, action) } -func appendAggPushDownAcrossUnionTraceStep(union *LogicalUnionAll, agg *LogicalAggregation, opt *util.LogicalOptimizeOp) { +func appendAggPushDownAcrossUnionTraceStep(union *LogicalUnionAll, agg *LogicalAggregation, opt *coreusage.LogicalOptimizeOp) { reason := func() string { buffer := bytes.NewBufferString(fmt.Sprintf("%v_%v functions[", agg.TP(), agg.ID())) for i, aggFunc := range 
agg.AggFuncs { diff --git a/pkg/planner/core/rule_aggregation_skew_rewrite.go b/pkg/planner/core/rule_aggregation_skew_rewrite.go index 46c251d4bd544..9bd8f29b1a4c7 100644 --- a/pkg/planner/core/rule_aggregation_skew_rewrite.go +++ b/pkg/planner/core/rule_aggregation_skew_rewrite.go @@ -17,11 +17,11 @@ package core import ( "context" "fmt" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/expression/aggregation" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/planner/util" "github.com/pingcap/tidb/pkg/util/intset" ) @@ -47,7 +47,7 @@ type skewDistinctAggRewriter struct { // - The aggregate has 1 and only 1 distinct aggregate function (limited to count, avg, sum) // // This rule is disabled by default. Use tidb_opt_skew_distinct_agg to enable the rule. -func (a *skewDistinctAggRewriter) rewriteSkewDistinctAgg(agg *LogicalAggregation, opt *util.LogicalOptimizeOp) LogicalPlan { +func (a *skewDistinctAggRewriter) rewriteSkewDistinctAgg(agg *LogicalAggregation, opt *coreusage.LogicalOptimizeOp) LogicalPlan { // only group aggregate is applicable if len(agg.GroupByItems) == 0 { return nil @@ -263,7 +263,7 @@ func (*skewDistinctAggRewriter) isQualifiedAgg(aggFunc *aggregation.AggFuncDesc) } } -func appendSkewDistinctAggRewriteTraceStep(agg *LogicalAggregation, result LogicalPlan, opt *util.LogicalOptimizeOp) { +func appendSkewDistinctAggRewriteTraceStep(agg *LogicalAggregation, result LogicalPlan, opt *coreusage.LogicalOptimizeOp) { reason := func() string { return fmt.Sprintf("%v_%v has a distinct agg function", agg.TP(), agg.ID()) } @@ -274,7 +274,7 @@ func appendSkewDistinctAggRewriteTraceStep(agg *LogicalAggregation, result Logic opt.AppendStepToCurrent(agg.ID(), agg.TP(), reason, action) } -func (a *skewDistinctAggRewriter) optimize(ctx context.Context, p LogicalPlan, opt *util.LogicalOptimizeOp) (LogicalPlan, bool, error) { +func (a *skewDistinctAggRewriter) 
optimize(ctx context.Context, p LogicalPlan, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, bool, error) { planChanged := false newChildren := make([]LogicalPlan, 0, len(p.Children())) for _, child := range p.Children() { diff --git a/pkg/planner/core/rule_build_key_info.go b/pkg/planner/core/rule_build_key_info.go index 4bb39c17be1cf..358f01599391c 100644 --- a/pkg/planner/core/rule_build_key_info.go +++ b/pkg/planner/core/rule_build_key_info.go @@ -16,17 +16,17 @@ package core import ( "context" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" - "github.com/pingcap/tidb/pkg/planner/util" ) type buildKeySolver struct{} -func (*buildKeySolver) optimize(_ context.Context, p LogicalPlan, _ *util.LogicalOptimizeOp) (LogicalPlan, bool, error) { +func (*buildKeySolver) optimize(_ context.Context, p LogicalPlan, _ *coreusage.LogicalOptimizeOp) (LogicalPlan, bool, error) { planChanged := false buildKeyInfo(p) return p, planChanged, nil diff --git a/pkg/planner/core/rule_collect_plan_stats.go b/pkg/planner/core/rule_collect_plan_stats.go index 027522a111d7f..53fa8aec89958 100644 --- a/pkg/planner/core/rule_collect_plan_stats.go +++ b/pkg/planner/core/rule_collect_plan_stats.go @@ -16,13 +16,13 @@ package core import ( "context" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "time" "github.com/pingcap/failpoint" "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/parser/model" - "github.com/pingcap/tidb/pkg/planner/util" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/statistics" "github.com/pingcap/tidb/pkg/table" @@ -32,7 +32,7 @@ import ( type collectPredicateColumnsPoint struct{} -func (collectPredicateColumnsPoint) optimize(_ context.Context, plan LogicalPlan, _ *util.LogicalOptimizeOp) (LogicalPlan, 
bool, error) { +func (collectPredicateColumnsPoint) optimize(_ context.Context, plan LogicalPlan, _ *coreusage.LogicalOptimizeOp) (LogicalPlan, bool, error) { planChanged := false if plan.SCtx().GetSessionVars().InRestrictedSQL { return plan, planChanged, nil @@ -78,7 +78,7 @@ func (collectPredicateColumnsPoint) name() string { type syncWaitStatsLoadPoint struct{} -func (syncWaitStatsLoadPoint) optimize(_ context.Context, plan LogicalPlan, _ *util.LogicalOptimizeOp) (LogicalPlan, bool, error) { +func (syncWaitStatsLoadPoint) optimize(_ context.Context, plan LogicalPlan, _ *coreusage.LogicalOptimizeOp) (LogicalPlan, bool, error) { planChanged := false if plan.SCtx().GetSessionVars().InRestrictedSQL { return plan, planChanged, nil diff --git a/pkg/planner/core/rule_column_pruning.go b/pkg/planner/core/rule_column_pruning.go index 36ee0309d9008..cf7e3f85de3e3 100644 --- a/pkg/planner/core/rule_column_pruning.go +++ b/pkg/planner/core/rule_column_pruning.go @@ -26,13 +26,14 @@ import ( "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/planner/util" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "github.com/pingcap/tidb/pkg/planner/util/fixcontrol" ) type columnPruner struct { } -func (*columnPruner) optimize(_ context.Context, lp LogicalPlan, opt *util.LogicalOptimizeOp) (LogicalPlan, bool, error) { +func (*columnPruner) optimize(_ context.Context, lp LogicalPlan, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, bool, error) { planChanged := false lp, err := lp.PruneColumns(lp.Schema().Columns, opt) if err != nil { @@ -74,7 +75,7 @@ func exprHasSetVarOrSleep(expr expression.Expression) bool { // the level projection expressions construction is left to the last logical optimize rule) // // so when do the rule_column_pruning here, we just prune the schema is enough. 
-func (p *LogicalExpand) PruneColumns(parentUsedCols []*expression.Column, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (p *LogicalExpand) PruneColumns(parentUsedCols []*expression.Column, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { // Expand need those extra redundant distinct group by columns projected from underlying projection. // distinct GroupByCol must be used by aggregate above, to make sure this, append distinctGroupByCol again. parentUsedCols = append(parentUsedCols, p.distinctGroupByCol...) @@ -99,7 +100,7 @@ func (p *LogicalExpand) PruneColumns(parentUsedCols []*expression.Column, opt *u // PruneColumns implements LogicalPlan interface. // If any expression has SetVar function or Sleep function, we do not prune it. -func (p *LogicalProjection) PruneColumns(parentUsedCols []*expression.Column, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (p *LogicalProjection) PruneColumns(parentUsedCols []*expression.Column, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { used := expression.GetUsedList(p.SCtx().GetExprCtx().GetEvalCtx(), parentUsedCols, p.schema) prunedColumns := make([]*expression.Column, 0) @@ -123,7 +124,7 @@ func (p *LogicalProjection) PruneColumns(parentUsedCols []*expression.Column, op } // PruneColumns implements LogicalPlan interface. -func (p *LogicalSelection) PruneColumns(parentUsedCols []*expression.Column, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (p *LogicalSelection) PruneColumns(parentUsedCols []*expression.Column, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { child := p.children[0] parentUsedCols = expression.ExtractColumnsFromExpressions(parentUsedCols, p.Conditions, nil) var err error @@ -135,7 +136,7 @@ func (p *LogicalSelection) PruneColumns(parentUsedCols []*expression.Column, opt } // PruneColumns implements LogicalPlan interface. 
-func (la *LogicalAggregation) PruneColumns(parentUsedCols []*expression.Column, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (la *LogicalAggregation) PruneColumns(parentUsedCols []*expression.Column, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { child := la.children[0] used := expression.GetUsedList(la.SCtx().GetExprCtx().GetEvalCtx(), parentUsedCols, la.Schema()) prunedColumns := make([]*expression.Column, 0) @@ -228,7 +229,7 @@ func (la *LogicalAggregation) PruneColumns(parentUsedCols []*expression.Column, return la, nil } -func pruneByItems(p LogicalPlan, old []*util.ByItems, opt *util.LogicalOptimizeOp) (byItems []*util.ByItems, +func pruneByItems(p LogicalPlan, old []*util.ByItems, opt *coreusage.LogicalOptimizeOp) (byItems []*util.ByItems, parentUsedCols []*expression.Column) { prunedByItems := make([]*util.ByItems, 0) byItems = make([]*util.ByItems, 0, len(old)) @@ -262,7 +263,7 @@ func pruneByItems(p LogicalPlan, old []*util.ByItems, opt *util.LogicalOptimizeO // PruneColumns implements LogicalPlan interface. // If any expression can view as a constant in execution stage, such as correlated column, constant, // we do prune them. Note that we can't prune the expressions contain non-deterministic functions, such as rand(). -func (ls *LogicalSort) PruneColumns(parentUsedCols []*expression.Column, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (ls *LogicalSort) PruneColumns(parentUsedCols []*expression.Column, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { var cols []*expression.Column ls.ByItems, cols = pruneByItems(ls, ls.ByItems, opt) parentUsedCols = append(parentUsedCols, cols...) @@ -277,7 +278,7 @@ func (ls *LogicalSort) PruneColumns(parentUsedCols []*expression.Column, opt *ut // PruneColumns implements LogicalPlan interface. // If any expression can view as a constant in execution stage, such as correlated column, constant, // we do prune them. 
Note that we can't prune the expressions contain non-deterministic functions, such as rand(). -func (lt *LogicalTopN) PruneColumns(parentUsedCols []*expression.Column, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (lt *LogicalTopN) PruneColumns(parentUsedCols []*expression.Column, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { child := lt.children[0] var cols []*expression.Column lt.ByItems, cols = pruneByItems(lt, lt.ByItems, opt) @@ -291,7 +292,7 @@ func (lt *LogicalTopN) PruneColumns(parentUsedCols []*expression.Column, opt *ut } // PruneColumns implements LogicalPlan interface. -func (p *LogicalUnionAll) PruneColumns(parentUsedCols []*expression.Column, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (p *LogicalUnionAll) PruneColumns(parentUsedCols []*expression.Column, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { used := expression.GetUsedList(p.SCtx().GetExprCtx().GetEvalCtx(), parentUsedCols, p.schema) hasBeenUsed := false for i := range used { @@ -346,7 +347,7 @@ func (p *LogicalUnionAll) PruneColumns(parentUsedCols []*expression.Column, opt } // PruneColumns implements LogicalPlan interface. -func (p *LogicalUnionScan) PruneColumns(parentUsedCols []*expression.Column, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (p *LogicalUnionScan) PruneColumns(parentUsedCols []*expression.Column, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { for i := 0; i < p.handleCols.NumCols(); i++ { parentUsedCols = append(parentUsedCols, p.handleCols.GetCol(i)) } @@ -366,7 +367,7 @@ func (p *LogicalUnionScan) PruneColumns(parentUsedCols []*expression.Column, opt } // PruneColumns implements LogicalPlan interface. 
-func (ds *DataSource) PruneColumns(parentUsedCols []*expression.Column, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (ds *DataSource) PruneColumns(parentUsedCols []*expression.Column, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { used := expression.GetUsedList(ds.SCtx().GetExprCtx().GetEvalCtx(), parentUsedCols, ds.schema) exprCols := expression.ExtractColumnsFromExpressions(nil, ds.allConds, nil) @@ -418,7 +419,7 @@ func (ds *DataSource) PruneColumns(parentUsedCols []*expression.Column, opt *uti } // PruneColumns implements LogicalPlan interface. -func (p *LogicalMemTable) PruneColumns(parentUsedCols []*expression.Column, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (p *LogicalMemTable) PruneColumns(parentUsedCols []*expression.Column, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { switch p.TableInfo.Name.O { case infoschema.TableStatementsSummary, infoschema.TableStatementsSummaryHistory, @@ -450,7 +451,7 @@ func (p *LogicalMemTable) PruneColumns(parentUsedCols []*expression.Column, opt } // PruneColumns implements LogicalPlan interface. -func (p *LogicalTableDual) PruneColumns(parentUsedCols []*expression.Column, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (p *LogicalTableDual) PruneColumns(parentUsedCols []*expression.Column, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { used := expression.GetUsedList(p.SCtx().GetExprCtx().GetEvalCtx(), parentUsedCols, p.Schema()) prunedColumns := make([]*expression.Column, 0) for i := len(used) - 1; i >= 0; i-- { @@ -496,7 +497,7 @@ func (p *LogicalJoin) mergeSchema() { } // PruneColumns implements LogicalPlan interface. 
-func (p *LogicalJoin) PruneColumns(parentUsedCols []*expression.Column, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (p *LogicalJoin) PruneColumns(parentUsedCols []*expression.Column, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { leftCols, rightCols := p.extractUsedCols(parentUsedCols) var err error @@ -522,7 +523,7 @@ func (p *LogicalJoin) PruneColumns(parentUsedCols []*expression.Column, opt *uti } // PruneColumns implements LogicalPlan interface. -func (la *LogicalApply) PruneColumns(parentUsedCols []*expression.Column, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (la *LogicalApply) PruneColumns(parentUsedCols []*expression.Column, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { leftCols, rightCols := la.extractUsedCols(parentUsedCols) allowEliminateApply := fixcontrol.GetBoolWithDefault(la.SCtx().GetSessionVars().GetOptimizerFixControlMap(), fixcontrol.Fix45822, true) var err error @@ -556,7 +557,7 @@ func (la *LogicalApply) PruneColumns(parentUsedCols []*expression.Column, opt *u } // PruneColumns implements LogicalPlan interface. -func (p *LogicalLock) PruneColumns(parentUsedCols []*expression.Column, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (p *LogicalLock) PruneColumns(parentUsedCols []*expression.Column, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { var err error if !IsSelectForUpdateLockType(p.Lock.LockType) { // when use .baseLogicalPlan to call the PruneColumns, it means current plan itself has @@ -589,7 +590,7 @@ func (p *LogicalLock) PruneColumns(parentUsedCols []*expression.Column, opt *uti } // PruneColumns implements LogicalPlan interface. 
-func (p *LogicalWindow) PruneColumns(parentUsedCols []*expression.Column, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (p *LogicalWindow) PruneColumns(parentUsedCols []*expression.Column, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { windowColumns := p.GetWindowResultColumns() cnt := 0 for _, col := range parentUsedCols { @@ -634,7 +635,7 @@ func (p *LogicalWindow) extractUsedCols(parentUsedCols []*expression.Column) []* } // PruneColumns implements LogicalPlan interface. -func (p *LogicalLimit) PruneColumns(parentUsedCols []*expression.Column, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (p *LogicalLimit) PruneColumns(parentUsedCols []*expression.Column, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { if len(parentUsedCols) == 0 { // happens when LIMIT appears in UPDATE. return p, nil } @@ -676,7 +677,7 @@ func addConstOneForEmptyProjection(p LogicalPlan) { }) } -func appendColumnPruneTraceStep(p LogicalPlan, prunedColumns []*expression.Column, opt *util.LogicalOptimizeOp) { +func appendColumnPruneTraceStep(p LogicalPlan, prunedColumns []*expression.Column, opt *coreusage.LogicalOptimizeOp) { if len(prunedColumns) < 1 { return } @@ -687,7 +688,7 @@ func appendColumnPruneTraceStep(p LogicalPlan, prunedColumns []*expression.Colum appendItemPruneTraceStep(p, "columns", s, opt) } -func appendFunctionPruneTraceStep(p LogicalPlan, prunedFunctions []*aggregation.AggFuncDesc, opt *util.LogicalOptimizeOp) { +func appendFunctionPruneTraceStep(p LogicalPlan, prunedFunctions []*aggregation.AggFuncDesc, opt *coreusage.LogicalOptimizeOp) { if len(prunedFunctions) < 1 { return } @@ -698,7 +699,7 @@ func appendFunctionPruneTraceStep(p LogicalPlan, prunedFunctions []*aggregation. 
appendItemPruneTraceStep(p, "aggregation functions", s, opt) } -func appendByItemsPruneTraceStep(p LogicalPlan, prunedByItems []*util.ByItems, opt *util.LogicalOptimizeOp) { +func appendByItemsPruneTraceStep(p LogicalPlan, prunedByItems []*util.ByItems, opt *coreusage.LogicalOptimizeOp) { if len(prunedByItems) < 1 { return } @@ -709,7 +710,7 @@ func appendByItemsPruneTraceStep(p LogicalPlan, prunedByItems []*util.ByItems, o appendItemPruneTraceStep(p, "byItems", s, opt) } -func appendGroupByItemsPruneTraceStep(p LogicalPlan, prunedGroupByItems []expression.Expression, opt *util.LogicalOptimizeOp) { +func appendGroupByItemsPruneTraceStep(p LogicalPlan, prunedGroupByItems []expression.Expression, opt *coreusage.LogicalOptimizeOp) { if len(prunedGroupByItems) < 1 { return } @@ -720,7 +721,7 @@ func appendGroupByItemsPruneTraceStep(p LogicalPlan, prunedGroupByItems []expres appendItemPruneTraceStep(p, "groupByItems", s, opt) } -func appendItemPruneTraceStep(p LogicalPlan, itemType string, prunedObjects []fmt.Stringer, opt *util.LogicalOptimizeOp) { +func appendItemPruneTraceStep(p LogicalPlan, itemType string, prunedObjects []fmt.Stringer, opt *coreusage.LogicalOptimizeOp) { if len(prunedObjects) < 1 { return } @@ -768,12 +769,12 @@ func preferKeyColumnFromTable(dataSource *DataSource, originColumns []*expressio // PruneColumns implements the interface of LogicalPlan. // LogicalCTE just do a empty function call. It's logical optimize is indivisual phase. -func (p *LogicalCTE) PruneColumns(_ []*expression.Column, _ *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (p *LogicalCTE) PruneColumns(_ []*expression.Column, _ *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { return p, nil } // PruneColumns implements the interface of LogicalPlan. 
-func (p *LogicalSequence) PruneColumns(parentUsedCols []*expression.Column, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (p *LogicalSequence) PruneColumns(parentUsedCols []*expression.Column, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { var err error p.children[len(p.children)-1], err = p.children[len(p.children)-1].PruneColumns(parentUsedCols, opt) if err != nil { @@ -782,7 +783,7 @@ func (p *LogicalSequence) PruneColumns(parentUsedCols []*expression.Column, opt return p, nil } -func applyEliminateTraceStep(lp LogicalPlan, opt *util.LogicalOptimizeOp) { +func applyEliminateTraceStep(lp LogicalPlan, opt *coreusage.LogicalOptimizeOp) { action := func() string { buffer := bytes.NewBufferString( fmt.Sprintf("%v_%v is eliminated.", lp.TP(), lp.ID())) diff --git a/pkg/planner/core/rule_constant_propagation.go b/pkg/planner/core/rule_constant_propagation.go index ee53f78e218f9..8ef968eb2fa1a 100644 --- a/pkg/planner/core/rule_constant_propagation.go +++ b/pkg/planner/core/rule_constant_propagation.go @@ -16,10 +16,10 @@ package core import ( "context" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/planner/util" ) // constantPropagationSolver can support constant propagated cross-query block. @@ -51,7 +51,7 @@ type constantPropagationSolver struct { // which is mainly implemented in the interface "constantPropagation" of LogicalPlan. // Currently only the Logical Join implements this function. (Used for the subquery in FROM List) // In the future, the Logical Apply will implements this function. 
(Used for the subquery in WHERE or SELECT list) -func (cp *constantPropagationSolver) optimize(_ context.Context, p LogicalPlan, opt *util.LogicalOptimizeOp) (LogicalPlan, bool, error) { +func (cp *constantPropagationSolver) optimize(_ context.Context, p LogicalPlan, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, bool, error) { planChanged := false // constant propagation root plan newRoot := p.constantPropagation(nil, 0, opt) @@ -68,7 +68,7 @@ func (cp *constantPropagationSolver) optimize(_ context.Context, p LogicalPlan, } // execOptimize optimize constant propagation exclude root plan node -func (cp *constantPropagationSolver) execOptimize(currentPlan LogicalPlan, parentPlan LogicalPlan, currentChildIdx int, opt *util.LogicalOptimizeOp) { +func (cp *constantPropagationSolver) execOptimize(currentPlan LogicalPlan, parentPlan LogicalPlan, currentChildIdx int, opt *coreusage.LogicalOptimizeOp) { if parentPlan == nil { // Attention: The function 'execOptimize' could not handle the root plan, so the parent plan could not be nil. 
return @@ -85,7 +85,7 @@ func (*constantPropagationSolver) name() string { return "constant_propagation" } -func (*baseLogicalPlan) constantPropagation(_ LogicalPlan, _ int, _ *util.LogicalOptimizeOp) (newRoot LogicalPlan) { +func (*baseLogicalPlan) constantPropagation(_ LogicalPlan, _ int, _ *coreusage.LogicalOptimizeOp) (newRoot LogicalPlan) { // Only LogicalJoin can apply constant propagation // Other Logical plan do nothing return nil @@ -143,7 +143,7 @@ func (*baseLogicalPlan) constantPropagation(_ LogicalPlan, _ int, _ *util.Logica */ // Return nil if the root of plan has not been changed // Return new root if the root of plan is changed to selection -func (logicalJoin *LogicalJoin) constantPropagation(parentPlan LogicalPlan, currentChildIdx int, opt *util.LogicalOptimizeOp) (newRoot LogicalPlan) { +func (logicalJoin *LogicalJoin) constantPropagation(parentPlan LogicalPlan, currentChildIdx int, opt *coreusage.LogicalOptimizeOp) (newRoot LogicalPlan) { // step1: get constant predicate from left or right according to the JoinType var getConstantPredicateFromLeft bool var getConstantPredicateFromRight bool @@ -268,7 +268,7 @@ func validCompareConstantPredicate(candidatePredicate expression.Expression) boo // If the currentPlan at the top of query plan, return new root plan (selection) // Else return nil func addCandidateSelection(currentPlan LogicalPlan, currentChildIdx int, parentPlan LogicalPlan, - candidatePredicates []expression.Expression, opt *util.LogicalOptimizeOp) (newRoot LogicalPlan) { + candidatePredicates []expression.Expression, opt *coreusage.LogicalOptimizeOp) (newRoot LogicalPlan) { // generate a new selection for candidatePredicates selection := LogicalSelection{Conditions: candidatePredicates}.Init(currentPlan.SCtx(), currentPlan.QueryBlockOffset()) // add selection above of p diff --git a/pkg/planner/core/rule_decorrelate.go b/pkg/planner/core/rule_decorrelate.go index 48450912d1757..e27ea34618b65 100644 --- 
a/pkg/planner/core/rule_decorrelate.go +++ b/pkg/planner/core/rule_decorrelate.go @@ -18,13 +18,13 @@ import ( "bytes" "context" "fmt" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "math" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/expression/aggregation" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/mysql" - "github.com/pingcap/tidb/pkg/planner/util" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/plancodec" ) @@ -194,7 +194,7 @@ func (*decorrelateSolver) aggDefaultValueMap(agg *LogicalAggregation) map[int]*e } // optimize implements logicalOptRule interface. -func (s *decorrelateSolver) optimize(ctx context.Context, p LogicalPlan, opt *util.LogicalOptimizeOp) (LogicalPlan, bool, error) { +func (s *decorrelateSolver) optimize(ctx context.Context, p LogicalPlan, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, bool, error) { planChanged := false if apply, ok := p.(*LogicalApply); ok { outerPlan := apply.children[0] @@ -464,7 +464,7 @@ func (*decorrelateSolver) name() string { return "decorrelate" } -func appendApplySimplifiedTraceStep(p *LogicalApply, j *LogicalJoin, opt *util.LogicalOptimizeOp) { +func appendApplySimplifiedTraceStep(p *LogicalApply, j *LogicalJoin, opt *coreusage.LogicalOptimizeOp) { action := func() string { return fmt.Sprintf("%v_%v simplified into %v_%v", plancodec.TypeApply, p.ID(), plancodec.TypeJoin, j.ID()) } @@ -474,7 +474,7 @@ func appendApplySimplifiedTraceStep(p *LogicalApply, j *LogicalJoin, opt *util.L opt.AppendStepToCurrent(p.ID(), p.TP(), reason, action) } -func appendRemoveSelectionTraceStep(p LogicalPlan, s *LogicalSelection, opt *util.LogicalOptimizeOp) { +func appendRemoveSelectionTraceStep(p LogicalPlan, s *LogicalSelection, opt *coreusage.LogicalOptimizeOp) { action := func() string { return fmt.Sprintf("%v_%v removed from plan tree", s.TP(), s.ID()) } @@ -484,7 +484,7 @@ func appendRemoveSelectionTraceStep(p LogicalPlan, s 
*LogicalSelection, opt *uti opt.AppendStepToCurrent(s.ID(), s.TP(), reason, action) } -func appendRemoveMaxOneRowTraceStep(m *LogicalMaxOneRow, opt *util.LogicalOptimizeOp) { +func appendRemoveMaxOneRowTraceStep(m *LogicalMaxOneRow, opt *coreusage.LogicalOptimizeOp) { action := func() string { return fmt.Sprintf("%v_%v removed from plan tree", m.TP(), m.ID()) } @@ -494,7 +494,7 @@ func appendRemoveMaxOneRowTraceStep(m *LogicalMaxOneRow, opt *util.LogicalOptimi opt.AppendStepToCurrent(m.ID(), m.TP(), reason, action) } -func appendRemoveLimitTraceStep(limit *LogicalLimit, opt *util.LogicalOptimizeOp) { +func appendRemoveLimitTraceStep(limit *LogicalLimit, opt *coreusage.LogicalOptimizeOp) { action := func() string { return fmt.Sprintf("%v_%v removed from plan tree", limit.TP(), limit.ID()) } @@ -504,7 +504,7 @@ func appendRemoveLimitTraceStep(limit *LogicalLimit, opt *util.LogicalOptimizeOp opt.AppendStepToCurrent(limit.ID(), limit.TP(), reason, action) } -func appendRemoveProjTraceStep(p *LogicalApply, proj *LogicalProjection, opt *util.LogicalOptimizeOp) { +func appendRemoveProjTraceStep(p *LogicalApply, proj *LogicalProjection, opt *coreusage.LogicalOptimizeOp) { action := func() string { return fmt.Sprintf("%v_%v removed from plan tree", proj.TP(), proj.ID()) } @@ -514,7 +514,7 @@ func appendRemoveProjTraceStep(p *LogicalApply, proj *LogicalProjection, opt *ut opt.AppendStepToCurrent(proj.ID(), proj.TP(), reason, action) } -func appendMoveProjTraceStep(p *LogicalApply, np LogicalPlan, proj *LogicalProjection, opt *util.LogicalOptimizeOp) { +func appendMoveProjTraceStep(p *LogicalApply, np LogicalPlan, proj *LogicalProjection, opt *coreusage.LogicalOptimizeOp) { action := func() string { return fmt.Sprintf("%v_%v is moved as %v_%v's parent", proj.TP(), proj.ID(), np.TP(), np.ID()) } @@ -524,7 +524,7 @@ func appendMoveProjTraceStep(p *LogicalApply, np LogicalPlan, proj *LogicalProje opt.AppendStepToCurrent(proj.ID(), proj.TP(), reason, action) } -func 
appendRemoveSortTraceStep(sort *LogicalSort, opt *util.LogicalOptimizeOp) { +func appendRemoveSortTraceStep(sort *LogicalSort, opt *coreusage.LogicalOptimizeOp) { action := func() string { return fmt.Sprintf("%v_%v removed from plan tree", sort.TP(), sort.ID()) } @@ -534,7 +534,7 @@ func appendRemoveSortTraceStep(sort *LogicalSort, opt *util.LogicalOptimizeOp) { opt.AppendStepToCurrent(sort.ID(), sort.TP(), reason, action) } -func appendPullUpAggTraceStep(p *LogicalApply, np LogicalPlan, agg *LogicalAggregation, opt *util.LogicalOptimizeOp) { +func appendPullUpAggTraceStep(p *LogicalApply, np LogicalPlan, agg *LogicalAggregation, opt *coreusage.LogicalOptimizeOp) { action := func() string { return fmt.Sprintf("%v_%v pulled up as %v_%v's parent, and %v_%v's join type becomes %v", agg.TP(), agg.ID(), np.TP(), np.ID(), p.TP(), p.ID(), p.JoinType.String()) @@ -546,7 +546,7 @@ func appendPullUpAggTraceStep(p *LogicalApply, np LogicalPlan, agg *LogicalAggre opt.AppendStepToCurrent(agg.ID(), agg.TP(), reason, action) } -func appendAddProjTraceStep(p *LogicalApply, proj *LogicalProjection, opt *util.LogicalOptimizeOp) { +func appendAddProjTraceStep(p *LogicalApply, proj *LogicalProjection, opt *coreusage.LogicalOptimizeOp) { action := func() string { return fmt.Sprintf("%v_%v is added as %v_%v's parent", proj.TP(), proj.ID(), p.TP(), p.ID()) } @@ -558,7 +558,7 @@ func appendAddProjTraceStep(p *LogicalApply, proj *LogicalProjection, opt *util. 
func appendModifyAggTraceStep(outerPlan LogicalPlan, p *LogicalApply, agg *LogicalAggregation, sel *LogicalSelection, appendedGroupByCols *expression.Schema, appendedAggFuncs []*aggregation.AggFuncDesc, - eqCondWithCorCol []*expression.ScalarFunction, opt *util.LogicalOptimizeOp) { + eqCondWithCorCol []*expression.ScalarFunction, opt *coreusage.LogicalOptimizeOp) { action := func() string { buffer := bytes.NewBufferString(fmt.Sprintf("%v_%v's groupby items added [", agg.TP(), agg.ID())) for i, col := range appendedGroupByCols.Columns { diff --git a/pkg/planner/core/rule_derive_topn_from_window.go b/pkg/planner/core/rule_derive_topn_from_window.go index b2ca744023308..9e98985d486d6 100644 --- a/pkg/planner/core/rule_derive_topn_from_window.go +++ b/pkg/planner/core/rule_derive_topn_from_window.go @@ -17,6 +17,7 @@ package core import ( "context" "fmt" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/kv" @@ -28,7 +29,7 @@ import ( type deriveTopNFromWindow struct { } -func appendDerivedTopNTrace(topN LogicalPlan, opt *util.LogicalOptimizeOp) { +func appendDerivedTopNTrace(topN LogicalPlan, opt *coreusage.LogicalOptimizeOp) { child := topN.Children()[0] action := func() string { return fmt.Sprintf("%v_%v top N added below %v_%v ", topN.TP(), topN.ID(), child.TP(), child.ID()) @@ -116,12 +117,12 @@ func windowIsTopN(p *LogicalSelection) (bool, uint64) { return false, 0 } -func (*deriveTopNFromWindow) optimize(_ context.Context, p LogicalPlan, opt *util.LogicalOptimizeOp) (LogicalPlan, bool, error) { +func (*deriveTopNFromWindow) optimize(_ context.Context, p LogicalPlan, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, bool, error) { planChanged := false return p.deriveTopN(opt), planChanged, nil } -func (s *baseLogicalPlan) deriveTopN(opt *util.LogicalOptimizeOp) LogicalPlan { +func (s *baseLogicalPlan) deriveTopN(opt *coreusage.LogicalOptimizeOp) LogicalPlan { p := s.self if 
p.SCtx().GetSessionVars().AllowDeriveTopN { for i, child := range p.Children() { @@ -132,7 +133,7 @@ func (s *baseLogicalPlan) deriveTopN(opt *util.LogicalOptimizeOp) LogicalPlan { return p } -func (s *LogicalSelection) deriveTopN(opt *util.LogicalOptimizeOp) LogicalPlan { +func (s *LogicalSelection) deriveTopN(opt *coreusage.LogicalOptimizeOp) LogicalPlan { p := s.self.(*LogicalSelection) windowIsTopN, limitValue := windowIsTopN(p) if windowIsTopN { diff --git a/pkg/planner/core/rule_eliminate_projection.go b/pkg/planner/core/rule_eliminate_projection.go index f821ac440154e..98d1b40db3f21 100644 --- a/pkg/planner/core/rule_eliminate_projection.go +++ b/pkg/planner/core/rule_eliminate_projection.go @@ -18,12 +18,12 @@ import ( "bytes" "context" "fmt" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "github.com/pingcap/failpoint" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/parser/mysql" - "github.com/pingcap/tidb/pkg/planner/util" ) // canProjectionBeEliminatedLoose checks whether a projection can be eliminated, @@ -168,14 +168,14 @@ type projectionEliminator struct { } // optimize implements the logicalOptRule interface. -func (pe *projectionEliminator) optimize(_ context.Context, lp LogicalPlan, opt *util.LogicalOptimizeOp) (LogicalPlan, bool, error) { +func (pe *projectionEliminator) optimize(_ context.Context, lp LogicalPlan, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, bool, error) { planChanged := false root := pe.eliminate(lp, make(map[string]*expression.Column), false, opt) return root, planChanged, nil } // eliminate eliminates the redundant projection in a logical plan. 
-func (pe *projectionEliminator) eliminate(p LogicalPlan, replace map[string]*expression.Column, canEliminate bool, opt *util.LogicalOptimizeOp) LogicalPlan { +func (pe *projectionEliminator) eliminate(p LogicalPlan, replace map[string]*expression.Column, canEliminate bool, opt *coreusage.LogicalOptimizeOp) LogicalPlan { // LogicalCTE's logical optimization is independent. if _, ok := p.(*LogicalCTE); ok { return p @@ -339,7 +339,7 @@ func (*projectionEliminator) name() string { return "projection_eliminate" } -func appendDupProjEliminateTraceStep(parent, child *LogicalProjection, opt *util.LogicalOptimizeOp) { +func appendDupProjEliminateTraceStep(parent, child *LogicalProjection, opt *coreusage.LogicalOptimizeOp) { action := func() string { buffer := bytes.NewBufferString( fmt.Sprintf("%v_%v is eliminated, %v_%v's expressions changed into[", child.TP(), child.ID(), parent.TP(), parent.ID())) @@ -358,7 +358,7 @@ func appendDupProjEliminateTraceStep(parent, child *LogicalProjection, opt *util opt.AppendStepToCurrent(child.ID(), child.TP(), reason, action) } -func appendProjEliminateTraceStep(proj *LogicalProjection, opt *util.LogicalOptimizeOp) { +func appendProjEliminateTraceStep(proj *LogicalProjection, opt *coreusage.LogicalOptimizeOp) { reason := func() string { return fmt.Sprintf("%v_%v's Exprs are all Columns", proj.TP(), proj.ID()) } diff --git a/pkg/planner/core/rule_generate_column_substitute.go b/pkg/planner/core/rule_generate_column_substitute.go index 46f9906fd0935..f7ce622f44925 100644 --- a/pkg/planner/core/rule_generate_column_substitute.go +++ b/pkg/planner/core/rule_generate_column_substitute.go @@ -17,10 +17,10 @@ package core import ( "bytes" "context" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/planner/util" "github.com/pingcap/tidb/pkg/types" h "github.com/pingcap/tidb/pkg/util/hint" ) @@ -38,7 +38,7 @@ type ExprColumnMap 
map[expression.Expression]*expression.Column // For example: select a+1 from t order by a+1, with a virtual generate column c as (a+1) and // an index on c. We need to replace a+1 with c so that we can use the index on c. // See also https://dev.mysql.com/doc/refman/8.0/en/generated-column-index-optimizations.html -func (gc *gcSubstituter) optimize(ctx context.Context, lp LogicalPlan, opt *util.LogicalOptimizeOp) (LogicalPlan, bool, error) { +func (gc *gcSubstituter) optimize(ctx context.Context, lp LogicalPlan, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, bool, error) { planChanged := false exprToColumn := make(ExprColumnMap) collectGenerateColumn(lp, exprToColumn) @@ -85,7 +85,7 @@ func collectGenerateColumn(lp LogicalPlan, exprToColumn ExprColumnMap) { } } -func tryToSubstituteExpr(expr *expression.Expression, lp LogicalPlan, candidateExpr expression.Expression, tp types.EvalType, schema *expression.Schema, col *expression.Column, opt *util.LogicalOptimizeOp) bool { +func tryToSubstituteExpr(expr *expression.Expression, lp LogicalPlan, candidateExpr expression.Expression, tp types.EvalType, schema *expression.Schema, col *expression.Column, opt *coreusage.LogicalOptimizeOp) bool { changed := false if (*expr).Equal(lp.SCtx().GetExprCtx().GetEvalCtx(), candidateExpr) && candidateExpr.GetType().EvalType() == tp && schema.ColumnIndex(col) != -1 { @@ -96,7 +96,7 @@ func tryToSubstituteExpr(expr *expression.Expression, lp LogicalPlan, candidateE return changed } -func appendSubstituteColumnStep(lp LogicalPlan, candidateExpr expression.Expression, col *expression.Column, opt *util.LogicalOptimizeOp) { +func appendSubstituteColumnStep(lp LogicalPlan, candidateExpr expression.Expression, col *expression.Column, opt *coreusage.LogicalOptimizeOp) { reason := func() string { return "" } action := func() string { buffer := bytes.NewBufferString("expression:") @@ -110,11 +110,11 @@ func appendSubstituteColumnStep(lp LogicalPlan, candidateExpr expression.Express } // 
SubstituteExpression is Exported for bench -func SubstituteExpression(cond expression.Expression, lp LogicalPlan, exprToColumn ExprColumnMap, schema *expression.Schema, opt *util.LogicalOptimizeOp) bool { +func SubstituteExpression(cond expression.Expression, lp LogicalPlan, exprToColumn ExprColumnMap, schema *expression.Schema, opt *coreusage.LogicalOptimizeOp) bool { return substituteExpression(cond, lp, exprToColumn, schema, opt) } -func substituteExpression(cond expression.Expression, lp LogicalPlan, exprToColumn ExprColumnMap, schema *expression.Schema, opt *util.LogicalOptimizeOp) bool { +func substituteExpression(cond expression.Expression, lp LogicalPlan, exprToColumn ExprColumnMap, schema *expression.Schema, opt *coreusage.LogicalOptimizeOp) bool { sf, ok := cond.(*expression.ScalarFunction) if !ok { return false @@ -173,7 +173,7 @@ func substituteExpression(cond expression.Expression, lp LogicalPlan, exprToColu return changed } -func (gc *gcSubstituter) substitute(ctx context.Context, lp LogicalPlan, exprToColumn ExprColumnMap, opt *util.LogicalOptimizeOp) LogicalPlan { +func (gc *gcSubstituter) substitute(ctx context.Context, lp LogicalPlan, exprToColumn ExprColumnMap, opt *coreusage.LogicalOptimizeOp) LogicalPlan { var tp types.EvalType switch x := lp.(type) { case *LogicalSelection: diff --git a/pkg/planner/core/rule_join_elimination.go b/pkg/planner/core/rule_join_elimination.go index 2a71714a78a48..b9476394230bf 100644 --- a/pkg/planner/core/rule_join_elimination.go +++ b/pkg/planner/core/rule_join_elimination.go @@ -18,10 +18,10 @@ import ( "bytes" "context" "fmt" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/planner/util" "github.com/pingcap/tidb/pkg/util/set" ) @@ -35,7 +35,7 @@ type outerJoinEliminator struct { // 2. outer join elimination with duplicate agnostic aggregate functions: For example left outer join. 
// If the parent only use the columns from left table with 'distinct' label. The left outer join can // be eliminated. -func (o *outerJoinEliminator) tryToEliminateOuterJoin(p *LogicalJoin, aggCols []*expression.Column, parentCols []*expression.Column, opt *util.LogicalOptimizeOp) (LogicalPlan, bool, error) { +func (o *outerJoinEliminator) tryToEliminateOuterJoin(p *LogicalJoin, aggCols []*expression.Column, parentCols []*expression.Column, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, bool, error) { var innerChildIdx int switch p.JoinType { case LeftOuterJoin: @@ -192,7 +192,7 @@ func GetDupAgnosticAggCols( return true, newAggCols } -func (o *outerJoinEliminator) doOptimize(p LogicalPlan, aggCols []*expression.Column, parentCols []*expression.Column, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (o *outerJoinEliminator) doOptimize(p LogicalPlan, aggCols []*expression.Column, parentCols []*expression.Column, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { // CTE's logical optimization is independent. 
if _, ok := p.(*LogicalCTE); ok { return p, nil @@ -246,7 +246,7 @@ func (o *outerJoinEliminator) doOptimize(p LogicalPlan, aggCols []*expression.Co return p, nil } -func (o *outerJoinEliminator) optimize(_ context.Context, p LogicalPlan, opt *util.LogicalOptimizeOp) (LogicalPlan, bool, error) { +func (o *outerJoinEliminator) optimize(_ context.Context, p LogicalPlan, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, bool, error) { planChanged := false p, err := o.doOptimize(p, nil, nil, opt) return p, planChanged, err @@ -257,7 +257,7 @@ func (*outerJoinEliminator) name() string { } func appendOuterJoinEliminateTraceStep(join *LogicalJoin, outerPlan LogicalPlan, parentCols []*expression.Column, - innerJoinKeys *expression.Schema, opt *util.LogicalOptimizeOp) { + innerJoinKeys *expression.Schema, opt *coreusage.LogicalOptimizeOp) { reason := func() string { buffer := bytes.NewBufferString("The columns[") for i, col := range parentCols { @@ -282,7 +282,7 @@ func appendOuterJoinEliminateTraceStep(join *LogicalJoin, outerPlan LogicalPlan, opt.AppendStepToCurrent(join.ID(), join.TP(), reason, action) } -func appendOuterJoinEliminateAggregationTraceStep(join *LogicalJoin, outerPlan LogicalPlan, aggCols []*expression.Column, opt *util.LogicalOptimizeOp) { +func appendOuterJoinEliminateAggregationTraceStep(join *LogicalJoin, outerPlan LogicalPlan, aggCols []*expression.Column, opt *coreusage.LogicalOptimizeOp) { reason := func() string { buffer := bytes.NewBufferString("The columns[") for i, col := range aggCols { diff --git a/pkg/planner/core/rule_join_reorder.go b/pkg/planner/core/rule_join_reorder.go index 6acdb08a21381..09fd05d018405 100644 --- a/pkg/planner/core/rule_join_reorder.go +++ b/pkg/planner/core/rule_join_reorder.go @@ -18,11 +18,11 @@ import ( "bytes" "context" "fmt" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "slices" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/planner/util" h 
"github.com/pingcap/tidb/pkg/util/hint" "github.com/pingcap/tidb/pkg/util/plancodec" "github.com/pingcap/tidb/pkg/util/tracing" @@ -223,7 +223,7 @@ type joinTypeWithExtMsg struct { outerBindCondition []expression.Expression } -func (s *joinReOrderSolver) optimize(_ context.Context, p LogicalPlan, opt *util.LogicalOptimizeOp) (LogicalPlan, bool, error) { +func (s *joinReOrderSolver) optimize(_ context.Context, p LogicalPlan, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, bool, error) { planChanged := false tracer := &joinReorderTrace{cost: map[string]float64{}, opt: opt} tracer.traceJoinReorder(p) @@ -663,7 +663,7 @@ func (*joinReOrderSolver) name() string { return "join_reorder" } -func appendJoinReorderTraceStep(tracer *joinReorderTrace, plan LogicalPlan, opt *util.LogicalOptimizeOp) { +func appendJoinReorderTraceStep(tracer *joinReorderTrace, plan LogicalPlan, opt *coreusage.LogicalOptimizeOp) { if len(tracer.initial) < 1 || len(tracer.final) < 1 { return } @@ -773,7 +773,7 @@ func findRoots(t *tracing.PlanTrace) []*tracing.PlanTrace { } type joinReorderTrace struct { - opt *util.LogicalOptimizeOp + opt *coreusage.LogicalOptimizeOp initial string final string cost map[string]float64 diff --git a/pkg/planner/core/rule_max_min_eliminate.go b/pkg/planner/core/rule_max_min_eliminate.go index 9f523d469ccb7..329020d77c13d 100644 --- a/pkg/planner/core/rule_max_min_eliminate.go +++ b/pkg/planner/core/rule_max_min_eliminate.go @@ -18,6 +18,7 @@ import ( "bytes" "context" "fmt" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/expression/aggregation" @@ -36,13 +37,13 @@ import ( type maxMinEliminator struct { } -func (a *maxMinEliminator) optimize(_ context.Context, p LogicalPlan, opt *util.LogicalOptimizeOp) (LogicalPlan, bool, error) { +func (a *maxMinEliminator) optimize(_ context.Context, p LogicalPlan, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, bool, error) { planChanged := false return 
a.eliminateMaxMin(p, opt), planChanged, nil } // composeAggsByInnerJoin composes the scalar aggregations by cartesianJoin. -func (*maxMinEliminator) composeAggsByInnerJoin(originAgg *LogicalAggregation, aggs []*LogicalAggregation, opt *util.LogicalOptimizeOp) (plan LogicalPlan) { +func (*maxMinEliminator) composeAggsByInnerJoin(originAgg *LogicalAggregation, aggs []*LogicalAggregation, opt *coreusage.LogicalOptimizeOp) (plan LogicalPlan) { plan = aggs[0] sctx := plan.SCtx() joins := make([]*LogicalJoin, 0) @@ -138,7 +139,7 @@ func (a *maxMinEliminator) cloneSubPlans(plan LogicalPlan) LogicalPlan { // `select max(a) from t` + `select min(a) from t` + `select max(b) from t`. // Then we check whether `a` and `b` have indices. If any of the used column has no index, we cannot eliminate // this aggregation. -func (a *maxMinEliminator) splitAggFuncAndCheckIndices(agg *LogicalAggregation, opt *util.LogicalOptimizeOp) (aggs []*LogicalAggregation, canEliminate bool) { +func (a *maxMinEliminator) splitAggFuncAndCheckIndices(agg *LogicalAggregation, opt *coreusage.LogicalOptimizeOp) (aggs []*LogicalAggregation, canEliminate bool) { for _, f := range agg.AggFuncs { // We must make sure the args of max/min is a simple single column. col, ok := f.Args[0].(*expression.Column) @@ -170,7 +171,7 @@ func (a *maxMinEliminator) splitAggFuncAndCheckIndices(agg *LogicalAggregation, } // eliminateSingleMaxMin tries to convert a single max/min to Limit+Sort operators. -func (*maxMinEliminator) eliminateSingleMaxMin(agg *LogicalAggregation, opt *util.LogicalOptimizeOp) *LogicalAggregation { +func (*maxMinEliminator) eliminateSingleMaxMin(agg *LogicalAggregation, opt *coreusage.LogicalOptimizeOp) *LogicalAggregation { f := agg.AggFuncs[0] child := agg.Children()[0] ctx := agg.SCtx() @@ -211,7 +212,7 @@ func (*maxMinEliminator) eliminateSingleMaxMin(agg *LogicalAggregation, opt *uti } // eliminateMaxMin tries to convert max/min to Limit+Sort operators. 
-func (a *maxMinEliminator) eliminateMaxMin(p LogicalPlan, opt *util.LogicalOptimizeOp) LogicalPlan { +func (a *maxMinEliminator) eliminateMaxMin(p LogicalPlan, opt *coreusage.LogicalOptimizeOp) LogicalPlan { // CTE's logical optimization is indenpent. if _, ok := p.(*LogicalCTE); ok { return p @@ -261,7 +262,7 @@ func (*maxMinEliminator) name() string { return "max_min_eliminate" } -func appendEliminateSingleMaxMinTrace(agg *LogicalAggregation, sel *LogicalSelection, sort *LogicalSort, limit *LogicalLimit, opt *util.LogicalOptimizeOp) { +func appendEliminateSingleMaxMinTrace(agg *LogicalAggregation, sel *LogicalSelection, sort *LogicalSort, limit *LogicalLimit, opt *coreusage.LogicalOptimizeOp) { action := func() string { buffer := bytes.NewBufferString("") if sel != nil { @@ -286,7 +287,7 @@ func appendEliminateSingleMaxMinTrace(agg *LogicalAggregation, sel *LogicalSelec opt.AppendStepToCurrent(agg.ID(), agg.TP(), reason, action) } -func appendEliminateMultiMinMaxTraceStep(originAgg *LogicalAggregation, aggs []*LogicalAggregation, joins []*LogicalJoin, opt *util.LogicalOptimizeOp) { +func appendEliminateMultiMinMaxTraceStep(originAgg *LogicalAggregation, aggs []*LogicalAggregation, joins []*LogicalJoin, opt *coreusage.LogicalOptimizeOp) { action := func() string { buffer := bytes.NewBufferString(fmt.Sprintf("%v_%v splited into [", originAgg.TP(), originAgg.ID())) for i, agg := range aggs { diff --git a/pkg/planner/core/rule_partition_processor.go b/pkg/planner/core/rule_partition_processor.go index 017538239f55e..6d6d4cf949231 100644 --- a/pkg/planner/core/rule_partition_processor.go +++ b/pkg/planner/core/rule_partition_processor.go @@ -19,6 +19,7 @@ import ( "cmp" "context" "fmt" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "math" "slices" "sort" @@ -29,7 +30,6 @@ import ( "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" - "github.com/pingcap/tidb/pkg/planner/util" 
"github.com/pingcap/tidb/pkg/table" "github.com/pingcap/tidb/pkg/table/tables" "github.com/pingcap/tidb/pkg/types" @@ -62,13 +62,13 @@ const FullRange = -1 // partitionProcessor is here because it's easier to prune partition after predicate push down. type partitionProcessor struct{} -func (s *partitionProcessor) optimize(_ context.Context, lp LogicalPlan, opt *util.LogicalOptimizeOp) (LogicalPlan, bool, error) { +func (s *partitionProcessor) optimize(_ context.Context, lp LogicalPlan, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, bool, error) { planChanged := false p, err := s.rewriteDataSource(lp, opt) return p, planChanged, err } -func (s *partitionProcessor) rewriteDataSource(lp LogicalPlan, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (s *partitionProcessor) rewriteDataSource(lp LogicalPlan, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { // Assert there will not be sel -> sel in the ast. switch p := lp.(type) { case *DataSource: @@ -502,7 +502,7 @@ func (*partitionProcessor) reconstructTableColNames(ds *DataSource) ([]*types.Fi return names, nil } -func (s *partitionProcessor) processHashOrKeyPartition(ds *DataSource, pi *model.PartitionInfo, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (s *partitionProcessor) processHashOrKeyPartition(ds *DataSource, pi *model.PartitionInfo, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { names, err := s.reconstructTableColNames(ds) if err != nil { return nil, err @@ -824,7 +824,7 @@ func (s *partitionProcessor) pruneListPartition(ctx PlanContext, tbl table.Table return used, nil } -func (s *partitionProcessor) prune(ds *DataSource, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (s *partitionProcessor) prune(ds *DataSource, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { pi := ds.tableInfo.GetPartitionInfo() if pi == nil { return ds, nil @@ -1037,7 +1037,7 @@ func (s *partitionProcessor) pruneRangePartition(ctx PlanContext, pi *model.Part return result, nil } 
-func (s *partitionProcessor) processRangePartition(ds *DataSource, pi *model.PartitionInfo, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (s *partitionProcessor) processRangePartition(ds *DataSource, pi *model.PartitionInfo, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { used, err := s.pruneRangePartition(ds.SCtx(), pi, ds.table.(table.PartitionedTable), ds.allConds, ds.TblCols, ds.names) if err != nil { return nil, err @@ -1045,7 +1045,7 @@ func (s *partitionProcessor) processRangePartition(ds *DataSource, pi *model.Par return s.makeUnionAllChildren(ds, pi, used, opt) } -func (s *partitionProcessor) processListPartition(ds *DataSource, pi *model.PartitionInfo, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (s *partitionProcessor) processListPartition(ds *DataSource, pi *model.PartitionInfo, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { used, err := s.pruneListPartition(ds.SCtx(), ds.table, ds.partitionNames, ds.allConds, ds.TblCols) if err != nil { return nil, err @@ -1767,7 +1767,7 @@ func (*partitionProcessor) checkHintsApplicable(ds *DataSource, partitionSet set appendWarnForUnknownPartitions(ds.SCtx(), h.HintReadFromStorage, unknownPartitions) } -func (s *partitionProcessor) makeUnionAllChildren(ds *DataSource, pi *model.PartitionInfo, or partitionRangeOR, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func (s *partitionProcessor) makeUnionAllChildren(ds *DataSource, pi *model.PartitionInfo, or partitionRangeOR, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { children := make([]LogicalPlan, 0, len(pi.Definitions)) partitionNameSet := make(set.StringSet) usedDefinition := make(map[int64]model.PartitionDefinition) @@ -2003,7 +2003,7 @@ func (p *rangeColumnsPruner) pruneUseBinarySearch(sctx PlanContext, op string, d return start, end } -func appendMakeUnionAllChildrenTranceStep(origin *DataSource, usedMap map[int64]model.PartitionDefinition, plan LogicalPlan, children []LogicalPlan, opt 
*util.LogicalOptimizeOp) { +func appendMakeUnionAllChildrenTranceStep(origin *DataSource, usedMap map[int64]model.PartitionDefinition, plan LogicalPlan, children []LogicalPlan, opt *coreusage.LogicalOptimizeOp) { if opt.TracerIsNil() { return } @@ -2059,7 +2059,7 @@ func appendMakeUnionAllChildrenTranceStep(origin *DataSource, usedMap map[int64] opt.AppendStepToCurrent(origin.ID(), origin.TP(), reason, action) } -func appendNoPartitionChildTraceStep(ds *DataSource, dual LogicalPlan, opt *util.LogicalOptimizeOp) { +func appendNoPartitionChildTraceStep(ds *DataSource, dual LogicalPlan, opt *coreusage.LogicalOptimizeOp) { action := func() string { return fmt.Sprintf("%v_%v becomes %v_%v", ds.TP(), ds.ID(), dual.TP(), dual.ID()) } diff --git a/pkg/planner/core/rule_predicate_push_down.go b/pkg/planner/core/rule_predicate_push_down.go index 9561108586257..9f4f73753599e 100644 --- a/pkg/planner/core/rule_predicate_push_down.go +++ b/pkg/planner/core/rule_predicate_push_down.go @@ -18,6 +18,7 @@ import ( "bytes" "context" "fmt" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/kv" @@ -41,13 +42,13 @@ type exprPrefixAdder struct { lengths []int } -func (*ppdSolver) optimize(_ context.Context, lp LogicalPlan, opt *util.LogicalOptimizeOp) (LogicalPlan, bool, error) { +func (*ppdSolver) optimize(_ context.Context, lp LogicalPlan, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, bool, error) { planChanged := false _, p := lp.PredicatePushDown(nil, opt) return p, planChanged, nil } -func addSelection(p LogicalPlan, child LogicalPlan, conditions []expression.Expression, chIdx int, opt *util.LogicalOptimizeOp) { +func addSelection(p LogicalPlan, child LogicalPlan, conditions []expression.Expression, chIdx int, opt *coreusage.LogicalOptimizeOp) { if len(conditions) == 0 { p.Children()[chIdx] = child return @@ -73,7 +74,7 @@ func addSelection(p LogicalPlan, child LogicalPlan, conditions []expression.Expr } // 
PredicatePushDown implements LogicalPlan interface. -func (p *baseLogicalPlan) PredicatePushDown(predicates []expression.Expression, opt *util.LogicalOptimizeOp) ([]expression.Expression, LogicalPlan) { +func (p *baseLogicalPlan) PredicatePushDown(predicates []expression.Expression, opt *coreusage.LogicalOptimizeOp) ([]expression.Expression, LogicalPlan) { if len(p.children) == 0 { return predicates, p.self } @@ -97,7 +98,7 @@ func splitSetGetVarFunc(filters []expression.Expression) ([]expression.Expressio } // PredicatePushDown implements LogicalPlan PredicatePushDown interface. -func (p *LogicalSelection) PredicatePushDown(predicates []expression.Expression, opt *util.LogicalOptimizeOp) ([]expression.Expression, LogicalPlan) { +func (p *LogicalSelection) PredicatePushDown(predicates []expression.Expression, opt *coreusage.LogicalOptimizeOp) ([]expression.Expression, LogicalPlan) { predicates = DeleteTrueExprs(p, predicates) p.Conditions = DeleteTrueExprs(p, p.Conditions) var child LogicalPlan @@ -123,7 +124,7 @@ func (p *LogicalSelection) PredicatePushDown(predicates []expression.Expression, } // PredicatePushDown implements LogicalPlan PredicatePushDown interface. -func (p *LogicalUnionScan) PredicatePushDown(predicates []expression.Expression, opt *util.LogicalOptimizeOp) ([]expression.Expression, LogicalPlan) { +func (p *LogicalUnionScan) PredicatePushDown(predicates []expression.Expression, opt *coreusage.LogicalOptimizeOp) ([]expression.Expression, LogicalPlan) { retainedPredicates, _ := p.children[0].PredicatePushDown(predicates, opt) p.conditions = make([]expression.Expression, 0, len(predicates)) p.conditions = append(p.conditions, predicates...) @@ -132,7 +133,7 @@ func (p *LogicalUnionScan) PredicatePushDown(predicates []expression.Expression, } // PredicatePushDown implements LogicalPlan PredicatePushDown interface. 
-func (ds *DataSource) PredicatePushDown(predicates []expression.Expression, opt *util.LogicalOptimizeOp) ([]expression.Expression, LogicalPlan) { +func (ds *DataSource) PredicatePushDown(predicates []expression.Expression, opt *coreusage.LogicalOptimizeOp) ([]expression.Expression, LogicalPlan) { predicates = expression.PropagateConstant(ds.SCtx().GetExprCtx(), predicates) predicates = DeleteTrueExprs(ds, predicates) // Add tidb_shard() prefix to the condtion for shard index in some scenarios @@ -145,12 +146,12 @@ func (ds *DataSource) PredicatePushDown(predicates []expression.Expression, opt } // PredicatePushDown implements LogicalPlan PredicatePushDown interface. -func (p *LogicalTableDual) PredicatePushDown(predicates []expression.Expression, _ *util.LogicalOptimizeOp) ([]expression.Expression, LogicalPlan) { +func (p *LogicalTableDual) PredicatePushDown(predicates []expression.Expression, _ *coreusage.LogicalOptimizeOp) ([]expression.Expression, LogicalPlan) { return predicates, p } // PredicatePushDown implements LogicalPlan PredicatePushDown interface. -func (p *LogicalJoin) PredicatePushDown(predicates []expression.Expression, opt *util.LogicalOptimizeOp) (ret []expression.Expression, retPlan LogicalPlan) { +func (p *LogicalJoin) PredicatePushDown(predicates []expression.Expression, opt *coreusage.LogicalOptimizeOp) (ret []expression.Expression, retPlan LogicalPlan) { simplifyOuterJoin(p, predicates) var equalCond []*expression.ScalarFunction var leftPushCond, rightPushCond, otherCond, leftCond, rightCond []expression.Expression @@ -493,7 +494,7 @@ func specialNullRejectedCase1(ctx PlanContext, schema *expression.Schema, expr e } // PredicatePushDown implements LogicalPlan PredicatePushDown interface. 
-func (p *LogicalExpand) PredicatePushDown(predicates []expression.Expression, opt *util.LogicalOptimizeOp) (ret []expression.Expression, retPlan LogicalPlan) { +func (p *LogicalExpand) PredicatePushDown(predicates []expression.Expression, opt *coreusage.LogicalOptimizeOp) (ret []expression.Expression, retPlan LogicalPlan) { // Note that, grouping column related predicates can't be pushed down, since grouping column has nullability change after Expand OP itself. // condition related with grouping column shouldn't be pushed down through it. // currently, since expand is adjacent to aggregate, any filter above aggregate wanted to be push down through expand only have two cases: @@ -505,7 +506,7 @@ func (p *LogicalExpand) PredicatePushDown(predicates []expression.Expression, op } // PredicatePushDown implements LogicalPlan PredicatePushDown interface. -func (p *LogicalProjection) PredicatePushDown(predicates []expression.Expression, opt *util.LogicalOptimizeOp) (ret []expression.Expression, retPlan LogicalPlan) { +func (p *LogicalProjection) PredicatePushDown(predicates []expression.Expression, opt *coreusage.LogicalOptimizeOp) (ret []expression.Expression, retPlan LogicalPlan) { canBePushed := make([]expression.Expression, 0, len(predicates)) canNotBePushed := make([]expression.Expression, 0, len(predicates)) for _, expr := range p.Exprs { @@ -528,7 +529,7 @@ func (p *LogicalProjection) PredicatePushDown(predicates []expression.Expression } // PredicatePushDown implements LogicalPlan PredicatePushDown interface. 
-func (p *LogicalUnionAll) PredicatePushDown(predicates []expression.Expression, opt *util.LogicalOptimizeOp) (ret []expression.Expression, retPlan LogicalPlan) { +func (p *LogicalUnionAll) PredicatePushDown(predicates []expression.Expression, opt *coreusage.LogicalOptimizeOp) (ret []expression.Expression, retPlan LogicalPlan) { for i, proj := range p.children { newExprs := make([]expression.Expression, 0, len(predicates)) newExprs = append(newExprs, predicates...) @@ -629,7 +630,7 @@ func (la *LogicalAggregation) pushDownDNFPredicatesForAggregation(cond expressio } // PredicatePushDown implements LogicalPlan PredicatePushDown interface. -func (la *LogicalAggregation) PredicatePushDown(predicates []expression.Expression, opt *util.LogicalOptimizeOp) (ret []expression.Expression, retPlan LogicalPlan) { +func (la *LogicalAggregation) PredicatePushDown(predicates []expression.Expression, opt *coreusage.LogicalOptimizeOp) (ret []expression.Expression, retPlan LogicalPlan) { var condsToPush []expression.Expression exprsOriginal := make([]expression.Expression, 0, len(la.AggFuncs)) for _, fun := range la.AggFuncs { @@ -651,14 +652,14 @@ func (la *LogicalAggregation) PredicatePushDown(predicates []expression.Expressi } // PredicatePushDown implements LogicalPlan PredicatePushDown interface. -func (p *LogicalLimit) PredicatePushDown(predicates []expression.Expression, opt *util.LogicalOptimizeOp) ([]expression.Expression, LogicalPlan) { +func (p *LogicalLimit) PredicatePushDown(predicates []expression.Expression, opt *coreusage.LogicalOptimizeOp) ([]expression.Expression, LogicalPlan) { // Limit forbids any condition to push down. p.baseLogicalPlan.PredicatePushDown(nil, opt) return predicates, p } // PredicatePushDown implements LogicalPlan PredicatePushDown interface. 
-func (p *LogicalMaxOneRow) PredicatePushDown(predicates []expression.Expression, opt *util.LogicalOptimizeOp) ([]expression.Expression, LogicalPlan) { +func (p *LogicalMaxOneRow) PredicatePushDown(predicates []expression.Expression, opt *coreusage.LogicalOptimizeOp) ([]expression.Expression, LogicalPlan) { // MaxOneRow forbids any condition to push down. p.baseLogicalPlan.PredicatePushDown(nil, opt) return predicates, p @@ -809,7 +810,7 @@ func (p *LogicalWindow) GetPartitionByCols() []*expression.Column { } // PredicatePushDown implements LogicalPlan PredicatePushDown interface. -func (p *LogicalWindow) PredicatePushDown(predicates []expression.Expression, opt *util.LogicalOptimizeOp) ([]expression.Expression, LogicalPlan) { +func (p *LogicalWindow) PredicatePushDown(predicates []expression.Expression, opt *coreusage.LogicalOptimizeOp) ([]expression.Expression, LogicalPlan) { canBePushed := make([]expression.Expression, 0, len(predicates)) canNotBePushed := make([]expression.Expression, 0, len(predicates)) partitionCols := expression.NewSchema(p.GetPartitionByCols()...) @@ -827,7 +828,7 @@ func (p *LogicalWindow) PredicatePushDown(predicates []expression.Expression, op } // PredicatePushDown implements LogicalPlan PredicatePushDown interface. 
-func (p *LogicalMemTable) PredicatePushDown(predicates []expression.Expression, _ *util.LogicalOptimizeOp) ([]expression.Expression, LogicalPlan) { +func (p *LogicalMemTable) PredicatePushDown(predicates []expression.Expression, _ *coreusage.LogicalOptimizeOp) ([]expression.Expression, LogicalPlan) { if p.Extractor != nil { predicates = p.Extractor.Extract(p.SCtx(), p.schema, p.names, predicates) } @@ -838,7 +839,7 @@ func (*ppdSolver) name() string { return "predicate_push_down" } -func appendTableDualTraceStep(replaced LogicalPlan, dual LogicalPlan, conditions []expression.Expression, opt *util.LogicalOptimizeOp) { +func appendTableDualTraceStep(replaced LogicalPlan, dual LogicalPlan, conditions []expression.Expression, opt *coreusage.LogicalOptimizeOp) { action := func() string { return fmt.Sprintf("%v_%v is replaced by %v_%v", replaced.TP(), replaced.ID(), dual.TP(), dual.ID()) } @@ -856,7 +857,7 @@ func appendTableDualTraceStep(replaced LogicalPlan, dual LogicalPlan, conditions opt.AppendStepToCurrent(dual.ID(), dual.TP(), reason, action) } -func appendSelectionPredicatePushDownTraceStep(p *LogicalSelection, conditions []expression.Expression, opt *util.LogicalOptimizeOp) { +func appendSelectionPredicatePushDownTraceStep(p *LogicalSelection, conditions []expression.Expression, opt *coreusage.LogicalOptimizeOp) { action := func() string { return fmt.Sprintf("%v_%v is removed", p.TP(), p.ID()) } @@ -879,7 +880,7 @@ func appendSelectionPredicatePushDownTraceStep(p *LogicalSelection, conditions [ opt.AppendStepToCurrent(p.ID(), p.TP(), reason, action) } -func appendDataSourcePredicatePushDownTraceStep(ds *DataSource, opt *util.LogicalOptimizeOp) { +func appendDataSourcePredicatePushDownTraceStep(ds *DataSource, opt *coreusage.LogicalOptimizeOp) { if len(ds.pushedDownConds) < 1 { return } @@ -900,7 +901,7 @@ func appendDataSourcePredicatePushDownTraceStep(ds *DataSource, opt *util.Logica opt.AppendStepToCurrent(ds.ID(), ds.TP(), reason, action) } -func 
appendAddSelectionTraceStep(p LogicalPlan, child LogicalPlan, sel *LogicalSelection, opt *util.LogicalOptimizeOp) { +func appendAddSelectionTraceStep(p LogicalPlan, child LogicalPlan, sel *LogicalSelection, opt *coreusage.LogicalOptimizeOp) { reason := func() string { return "" } @@ -1031,7 +1032,7 @@ func (adder *exprPrefixAdder) addExprPrefix4DNFCond(condition *expression.Scalar } // PredicatePushDown implements LogicalPlan PredicatePushDown interface. -func (p *LogicalCTE) PredicatePushDown(predicates []expression.Expression, _ *util.LogicalOptimizeOp) ([]expression.Expression, LogicalPlan) { +func (p *LogicalCTE) PredicatePushDown(predicates []expression.Expression, _ *coreusage.LogicalOptimizeOp) ([]expression.Expression, LogicalPlan) { if p.cte.recursivePartLogicalPlan != nil { // Doesn't support recursive CTE yet. return predicates, p.self @@ -1067,7 +1068,7 @@ func (p *LogicalCTE) PredicatePushDown(predicates []expression.Expression, _ *ut // PredicatePushDown implements the LogicalPlan interface. // Currently, we only maintain the main query tree. 
-func (p *LogicalSequence) PredicatePushDown(predicates []expression.Expression, op *util.LogicalOptimizeOp) ([]expression.Expression, LogicalPlan) { +func (p *LogicalSequence) PredicatePushDown(predicates []expression.Expression, op *coreusage.LogicalOptimizeOp) ([]expression.Expression, LogicalPlan) { lastIdx := len(p.children) - 1 remained, newLastChild := p.children[lastIdx].PredicatePushDown(predicates, op) p.SetChild(lastIdx, newLastChild) diff --git a/pkg/planner/core/rule_predicate_simplification.go b/pkg/planner/core/rule_predicate_simplification.go index 354d8bb428c96..0b26f734a1d42 100644 --- a/pkg/planner/core/rule_predicate_simplification.go +++ b/pkg/planner/core/rule_predicate_simplification.go @@ -17,11 +17,11 @@ package core import ( "context" "errors" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "slices" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/planner/util" ) // predicateSimplification consolidates different predcicates on a column and its equivalence classes. 
Initial out is for @@ -65,12 +65,12 @@ func findPredicateType(expr expression.Expression) (*expression.Column, predicat return nil, otherPredicate } -func (*predicateSimplification) optimize(_ context.Context, p LogicalPlan, opt *util.LogicalOptimizeOp) (LogicalPlan, bool, error) { +func (*predicateSimplification) optimize(_ context.Context, p LogicalPlan, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, bool, error) { planChanged := false return p.predicateSimplification(opt), planChanged, nil } -func (s *baseLogicalPlan) predicateSimplification(opt *util.LogicalOptimizeOp) LogicalPlan { +func (s *baseLogicalPlan) predicateSimplification(opt *coreusage.LogicalOptimizeOp) LogicalPlan { p := s.self for i, child := range p.Children() { newChild := child.predicateSimplification(opt) @@ -155,7 +155,7 @@ func applyPredicateSimplification(sctx PlanContext, predicates []expression.Expr return newValues } -func (ds *DataSource) predicateSimplification(*util.LogicalOptimizeOp) LogicalPlan { +func (ds *DataSource) predicateSimplification(*coreusage.LogicalOptimizeOp) LogicalPlan { p := ds.self.(*DataSource) p.pushedDownConds = applyPredicateSimplification(p.SCtx(), p.pushedDownConds) p.allConds = applyPredicateSimplification(p.SCtx(), p.allConds) diff --git a/pkg/planner/core/rule_push_down_sequence.go b/pkg/planner/core/rule_push_down_sequence.go index 17964353c5b52..fc248c3744b51 100644 --- a/pkg/planner/core/rule_push_down_sequence.go +++ b/pkg/planner/core/rule_push_down_sequence.go @@ -17,7 +17,7 @@ package core import ( "context" - "github.com/pingcap/tidb/pkg/planner/util" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" ) type pushDownSequenceSolver struct { @@ -27,7 +27,7 @@ func (*pushDownSequenceSolver) name() string { return "push_down_sequence" } -func (pdss *pushDownSequenceSolver) optimize(_ context.Context, lp LogicalPlan, _ *util.LogicalOptimizeOp) (LogicalPlan, bool, error) { +func (pdss *pushDownSequenceSolver) optimize(_ context.Context, lp 
LogicalPlan, _ *coreusage.LogicalOptimizeOp) (LogicalPlan, bool, error) { planChanged := false return pdss.recursiveOptimize(nil, lp), planChanged, nil } diff --git a/pkg/planner/core/rule_resolve_grouping_expand.go b/pkg/planner/core/rule_resolve_grouping_expand.go index 53e5892aaea05..563d7fd7650b1 100644 --- a/pkg/planner/core/rule_resolve_grouping_expand.go +++ b/pkg/planner/core/rule_resolve_grouping_expand.go @@ -16,8 +16,7 @@ package core import ( "context" - - "github.com/pingcap/tidb/pkg/planner/util" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" ) // For normal rollup Expand construction, its logical Expand should be bound @@ -74,7 +73,7 @@ type resolveExpand struct { // (upper required) (grouping sets columns appended) // // Expand operator itself is kind like a projection, while difference is that it has a multi projection list, named as leveled projection. -func (*resolveExpand) optimize(_ context.Context, p LogicalPlan, opt *util.LogicalOptimizeOp) (LogicalPlan, bool, error) { +func (*resolveExpand) optimize(_ context.Context, p LogicalPlan, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, bool, error) { planChanged := false // As you see, Expand's leveled projection should be built after all column-prune is done. So we just make generating-leveled-projection // as the last rule of logical optimization, which is more clear. 
(spark has column prune action before building expand) @@ -86,7 +85,7 @@ func (*resolveExpand) name() string { return "resolve_expand" } -func genExpand(p LogicalPlan, opt *util.LogicalOptimizeOp) (LogicalPlan, error) { +func genExpand(p LogicalPlan, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, error) { for i, child := range p.Children() { np, err := genExpand(child, opt) if err != nil { diff --git a/pkg/planner/core/rule_result_reorder.go b/pkg/planner/core/rule_result_reorder.go index b503a6ede7326..42764f028cb86 100644 --- a/pkg/planner/core/rule_result_reorder.go +++ b/pkg/planner/core/rule_result_reorder.go @@ -16,6 +16,7 @@ package core import ( "context" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/planner/util" @@ -39,7 +40,7 @@ This rule reorders results by modifying or injecting a Sort operator: type resultReorder struct { } -func (rs *resultReorder) optimize(_ context.Context, lp LogicalPlan, _ *util.LogicalOptimizeOp) (LogicalPlan, bool, error) { +func (rs *resultReorder) optimize(_ context.Context, lp LogicalPlan, _ *coreusage.LogicalOptimizeOp) (LogicalPlan, bool, error) { planChanged := false ordered := rs.completeSort(lp) if !ordered { diff --git a/pkg/planner/core/rule_semi_join_rewrite.go b/pkg/planner/core/rule_semi_join_rewrite.go index 0c78e6ec6e921..fc9222b9784b1 100644 --- a/pkg/planner/core/rule_semi_join_rewrite.go +++ b/pkg/planner/core/rule_semi_join_rewrite.go @@ -16,11 +16,11 @@ package core import ( "context" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/expression/aggregation" "github.com/pingcap/tidb/pkg/parser/ast" - "github.com/pingcap/tidb/pkg/planner/util" h "github.com/pingcap/tidb/pkg/util/hint" ) @@ -37,7 +37,7 @@ import ( type semiJoinRewriter struct { } -func (smj *semiJoinRewriter) optimize(_ context.Context, p LogicalPlan, _ *util.LogicalOptimizeOp) (LogicalPlan, 
bool, error) { +func (smj *semiJoinRewriter) optimize(_ context.Context, p LogicalPlan, _ *coreusage.LogicalOptimizeOp) (LogicalPlan, bool, error) { planChanged := false newLogicalPlan, err := smj.recursivePlan(p) return newLogicalPlan, planChanged, err diff --git a/pkg/planner/core/rule_topn_push_down.go b/pkg/planner/core/rule_topn_push_down.go index 830995f55387d..d1802374b7f4f 100644 --- a/pkg/planner/core/rule_topn_push_down.go +++ b/pkg/planner/core/rule_topn_push_down.go @@ -18,6 +18,7 @@ import ( "bytes" "context" "fmt" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/planner/util" @@ -27,12 +28,12 @@ import ( type pushDownTopNOptimizer struct { } -func (*pushDownTopNOptimizer) optimize(_ context.Context, p LogicalPlan, opt *util.LogicalOptimizeOp) (LogicalPlan, bool, error) { +func (*pushDownTopNOptimizer) optimize(_ context.Context, p LogicalPlan, opt *coreusage.LogicalOptimizeOp) (LogicalPlan, bool, error) { planChanged := false return p.pushDownTopN(nil, opt), planChanged, nil } -func (s *baseLogicalPlan) pushDownTopN(topN *LogicalTopN, opt *util.LogicalOptimizeOp) LogicalPlan { +func (s *baseLogicalPlan) pushDownTopN(topN *LogicalTopN, opt *coreusage.LogicalOptimizeOp) LogicalPlan { p := s.self for i, child := range p.Children() { p.Children()[i] = child.pushDownTopN(nil, opt) @@ -43,7 +44,7 @@ func (s *baseLogicalPlan) pushDownTopN(topN *LogicalTopN, opt *util.LogicalOptim return p } -func (p *LogicalCTE) pushDownTopN(topN *LogicalTopN, opt *util.LogicalOptimizeOp) LogicalPlan { +func (p *LogicalCTE) pushDownTopN(topN *LogicalTopN, opt *coreusage.LogicalOptimizeOp) LogicalPlan { if topN != nil { return topN.setChild(p, opt) } @@ -51,7 +52,7 @@ func (p *LogicalCTE) pushDownTopN(topN *LogicalTopN, opt *util.LogicalOptimizeOp } // setChild set p as topn's child. 
-func (lt *LogicalTopN) setChild(p LogicalPlan, opt *util.LogicalOptimizeOp) LogicalPlan { +func (lt *LogicalTopN) setChild(p LogicalPlan, opt *coreusage.LogicalOptimizeOp) LogicalPlan { // Remove this TopN if its child is a TableDual. dual, isDual := p.(*LogicalTableDual) if isDual { @@ -81,7 +82,7 @@ func (lt *LogicalTopN) setChild(p LogicalPlan, opt *util.LogicalOptimizeOp) Logi return lt } -func (ls *LogicalSort) pushDownTopN(topN *LogicalTopN, opt *util.LogicalOptimizeOp) LogicalPlan { +func (ls *LogicalSort) pushDownTopN(topN *LogicalTopN, opt *coreusage.LogicalOptimizeOp) LogicalPlan { if topN == nil { return ls.baseLogicalPlan.pushDownTopN(nil, opt) } else if topN.isLimit() { @@ -93,13 +94,13 @@ func (ls *LogicalSort) pushDownTopN(topN *LogicalTopN, opt *util.LogicalOptimize return ls.children[0].pushDownTopN(topN, opt) } -func (p *LogicalLimit) convertToTopN(opt *util.LogicalOptimizeOp) *LogicalTopN { +func (p *LogicalLimit) convertToTopN(opt *coreusage.LogicalOptimizeOp) *LogicalTopN { topn := LogicalTopN{Offset: p.Offset, Count: p.Count, PreferLimitToCop: p.PreferLimitToCop}.Init(p.SCtx(), p.QueryBlockOffset()) appendConvertTopNTraceStep(p, topn, opt) return topn } -func (p *LogicalLimit) pushDownTopN(topN *LogicalTopN, opt *util.LogicalOptimizeOp) LogicalPlan { +func (p *LogicalLimit) pushDownTopN(topN *LogicalTopN, opt *coreusage.LogicalOptimizeOp) LogicalPlan { child := p.children[0].pushDownTopN(p.convertToTopN(opt), opt) if topN != nil { return topN.setChild(child, opt) @@ -107,7 +108,7 @@ func (p *LogicalLimit) pushDownTopN(topN *LogicalTopN, opt *util.LogicalOptimize return child } -func (p *LogicalUnionAll) pushDownTopN(topN *LogicalTopN, opt *util.LogicalOptimizeOp) LogicalPlan { +func (p *LogicalUnionAll) pushDownTopN(topN *LogicalTopN, opt *coreusage.LogicalOptimizeOp) LogicalPlan { for i, child := range p.children { var newTopN *LogicalTopN if topN != nil { @@ -126,7 +127,7 @@ func (p *LogicalUnionAll) pushDownTopN(topN *LogicalTopN, opt 
*util.LogicalOptim return p } -func (p *LogicalProjection) pushDownTopN(topN *LogicalTopN, opt *util.LogicalOptimizeOp) LogicalPlan { +func (p *LogicalProjection) pushDownTopN(topN *LogicalTopN, opt *coreusage.LogicalOptimizeOp) LogicalPlan { for _, expr := range p.Exprs { if expression.HasAssignSetVarFunc(expr) { return p.baseLogicalPlan.pushDownTopN(topN, opt) @@ -164,7 +165,7 @@ func (p *LogicalProjection) pushDownTopN(topN *LogicalTopN, opt *util.LogicalOpt return p } -func (p *LogicalLock) pushDownTopN(topN *LogicalTopN, opt *util.LogicalOptimizeOp) LogicalPlan { +func (p *LogicalLock) pushDownTopN(topN *LogicalTopN, opt *coreusage.LogicalOptimizeOp) LogicalPlan { if topN != nil { p.children[0] = p.children[0].pushDownTopN(topN, opt) } @@ -172,7 +173,7 @@ func (p *LogicalLock) pushDownTopN(topN *LogicalTopN, opt *util.LogicalOptimizeO } // pushDownTopNToChild will push a topN to one child of join. The idx stands for join child index. 0 is for left child. -func (p *LogicalJoin) pushDownTopNToChild(topN *LogicalTopN, idx int, opt *util.LogicalOptimizeOp) LogicalPlan { +func (p *LogicalJoin) pushDownTopNToChild(topN *LogicalTopN, idx int, opt *coreusage.LogicalOptimizeOp) LogicalPlan { if topN == nil { return p.children[idx].pushDownTopN(nil, opt) } @@ -198,7 +199,7 @@ func (p *LogicalJoin) pushDownTopNToChild(topN *LogicalTopN, idx int, opt *util. 
return p.children[idx].pushDownTopN(newTopN, opt) } -func (p *LogicalJoin) pushDownTopN(topN *LogicalTopN, opt *util.LogicalOptimizeOp) LogicalPlan { +func (p *LogicalJoin) pushDownTopN(topN *LogicalTopN, opt *coreusage.LogicalOptimizeOp) LogicalPlan { switch p.JoinType { case LeftOuterJoin, LeftOuterSemiJoin, AntiLeftOuterSemiJoin: p.children[0] = p.pushDownTopNToChild(topN, 0, opt) @@ -221,7 +222,7 @@ func (*pushDownTopNOptimizer) name() string { return "topn_push_down" } -func appendTopNPushDownTraceStep(parent LogicalPlan, child LogicalPlan, opt *util.LogicalOptimizeOp) { +func appendTopNPushDownTraceStep(parent LogicalPlan, child LogicalPlan, opt *coreusage.LogicalOptimizeOp) { action := func() string { return fmt.Sprintf("%v_%v is added as %v_%v's parent", parent.TP(), parent.ID(), child.TP(), child.ID()) } @@ -231,7 +232,7 @@ func appendTopNPushDownTraceStep(parent LogicalPlan, child LogicalPlan, opt *uti opt.AppendStepToCurrent(parent.ID(), parent.TP(), reason, action) } -func appendTopNPushDownJoinTraceStep(p *LogicalJoin, topN *LogicalTopN, idx int, opt *util.LogicalOptimizeOp) { +func appendTopNPushDownJoinTraceStep(p *LogicalJoin, topN *LogicalTopN, idx int, opt *coreusage.LogicalOptimizeOp) { action := func() string { buffer := bytes.NewBufferString(fmt.Sprintf("%v_%v is added and pushed into %v_%v's ", topN.TP(), topN.ID(), p.TP(), p.ID())) @@ -263,7 +264,7 @@ func appendTopNPushDownJoinTraceStep(p *LogicalJoin, topN *LogicalTopN, idx int, opt.AppendStepToCurrent(p.ID(), p.TP(), reason, action) } -func appendSortPassByItemsTraceStep(sort *LogicalSort, topN *LogicalTopN, opt *util.LogicalOptimizeOp) { +func appendSortPassByItemsTraceStep(sort *LogicalSort, topN *LogicalTopN, opt *coreusage.LogicalOptimizeOp) { action := func() string { buffer := bytes.NewBufferString(fmt.Sprintf("%v_%v passes ByItems[", sort.TP(), sort.ID())) for i, item := range sort.ByItems { @@ -281,7 +282,7 @@ func appendSortPassByItemsTraceStep(sort *LogicalSort, topN 
*LogicalTopN, opt *u opt.AppendStepToCurrent(sort.ID(), sort.TP(), reason, action) } -func appendNewTopNTraceStep(topN *LogicalTopN, union *LogicalUnionAll, opt *util.LogicalOptimizeOp) { +func appendNewTopNTraceStep(topN *LogicalTopN, union *LogicalUnionAll, opt *coreusage.LogicalOptimizeOp) { reason := func() string { return "" } @@ -291,7 +292,7 @@ func appendNewTopNTraceStep(topN *LogicalTopN, union *LogicalUnionAll, opt *util opt.AppendStepToCurrent(topN.ID(), topN.TP(), reason, action) } -func appendConvertTopNTraceStep(p LogicalPlan, topN *LogicalTopN, opt *util.LogicalOptimizeOp) { +func appendConvertTopNTraceStep(p LogicalPlan, topN *LogicalTopN, opt *coreusage.LogicalOptimizeOp) { reason := func() string { return "" } diff --git a/pkg/planner/core/task.go b/pkg/planner/core/task.go index 966d4ec4172c0..23207012aee89 100644 --- a/pkg/planner/core/task.go +++ b/pkg/planner/core/task.go @@ -41,22 +41,8 @@ import ( "go.uber.org/zap" ) -var ( - _ task = &copTask{} - _ task = &rootTask{} - _ task = &mppTask{} -) - -// task is a new version of `PhysicalPlanInfo`. It stores cost information for a task. -// A task may be CopTask, RootTask, MPPTaskMeta or a ParallelTask. -type task interface { - count() float64 - copy() task - plan() PhysicalPlan - invalid() bool - convertToRootTask(ctx PlanContext) *rootTask - MemoryUsage() int64 -} +var _ Task = &copTask{} +var _ Task = &mppTask{} // copTask is a task that runs in a distributed kv store. // TODO: In future, we should split copTask to indexTask and tableTask. 
@@ -98,36 +84,32 @@ type copTask struct { expectCnt uint64 } -func (t *copTask) invalid() bool { +func (t *copTask) Invalid() bool { return t.tablePlan == nil && t.indexPlan == nil && len(t.idxMergePartPlans) == 0 } -func (t *rootTask) invalid() bool { - return t.p == nil -} - -func (t *copTask) count() float64 { +func (t *copTask) Count() float64 { if t.indexPlanFinished { return t.tablePlan.StatsInfo().RowCount } return t.indexPlan.StatsInfo().RowCount } -func (t *copTask) copy() task { +func (t *copTask) Copy() Task { nt := *t return &nt } // copTask plan should be careful with indexMergeReader, whose real plan is stored in // idxMergePartPlans, when its indexPlanFinished is marked with false. -func (t *copTask) plan() PhysicalPlan { +func (t *copTask) Plan() PhysicalPlan { if t.indexPlanFinished { return t.tablePlan } return t.indexPlan } -func attachPlan2Task(p PhysicalPlan, t task) task { +func attachPlan2Task(p PhysicalPlan, t Task) Task { switch v := t.(type) { case *copTask: if v.indexPlanFinished { @@ -137,9 +119,9 @@ func attachPlan2Task(p PhysicalPlan, t task) task { p.SetChildren(v.indexPlan) v.indexPlan = p } - case *rootTask: - p.SetChildren(v.p) - v.p = p + case *RootTask: + p.SetChildren(v.GetPlan()) + v.SetPlan(p) case *mppTask: p.SetChildren(v.p) v.p = p @@ -219,89 +201,85 @@ func (t *copTask) MemoryUsage() (sum int64) { return } -func (p *basePhysicalPlan) attach2Task(tasks ...task) task { - t := tasks[0].convertToRootTask(p.SCtx()) +func (p *basePhysicalPlan) Attach2Task(tasks ...Task) Task { + t := tasks[0].ConvertToRootTask(p.SCtx()) return attachPlan2Task(p.self, t) } -func (p *PhysicalUnionScan) attach2Task(tasks ...task) task { +func (p *PhysicalUnionScan) Attach2Task(tasks ...Task) Task { // We need to pull the projection under unionScan upon unionScan. // Since the projection only prunes columns, it's ok the put it upon unionScan. 
- if sel, ok := tasks[0].plan().(*PhysicalSelection); ok { + if sel, ok := tasks[0].Plan().(*PhysicalSelection); ok { if pj, ok := sel.children[0].(*PhysicalProjection); ok { // Convert unionScan->selection->projection to projection->unionScan->selection. sel.SetChildren(pj.children...) p.SetChildren(sel) - p.SetStats(tasks[0].plan().StatsInfo()) - rt, _ := tasks[0].(*rootTask) - rt.p = p + p.SetStats(tasks[0].Plan().StatsInfo()) + rt, _ := tasks[0].(*RootTask) + rt.SetPlan(p) pj.SetChildren(p) return pj.attach2Task(tasks...) } } - if pj, ok := tasks[0].plan().(*PhysicalProjection); ok { + if pj, ok := tasks[0].Plan().(*PhysicalProjection); ok { // Convert unionScan->projection to projection->unionScan, because unionScan can't handle projection as its children. p.SetChildren(pj.children...) - p.SetStats(tasks[0].plan().StatsInfo()) - rt, _ := tasks[0].(*rootTask) - rt.p = pj.children[0] + p.SetStats(tasks[0].Plan().StatsInfo()) + rt, _ := tasks[0].(*RootTask) + rt.SetPlan(pj.children[0]) pj.SetChildren(p) - return pj.attach2Task(p.basePhysicalPlan.attach2Task(tasks...)) + return pj.attach2Task(p.basePhysicalPlan.Attach2Task(tasks...)) } - p.SetStats(tasks[0].plan().StatsInfo()) - return p.basePhysicalPlan.attach2Task(tasks...) + p.SetStats(tasks[0].Plan().StatsInfo()) + return p.basePhysicalPlan.Attach2Task(tasks...) 
} -func (p *PhysicalApply) attach2Task(tasks ...task) task { - lTask := tasks[0].convertToRootTask(p.SCtx()) - rTask := tasks[1].convertToRootTask(p.SCtx()) - p.SetChildren(lTask.plan(), rTask.plan()) +func (p *PhysicalApply) Attach2Task(tasks ...Task) Task { + lTask := tasks[0].ConvertToRootTask(p.SCtx()) + rTask := tasks[1].ConvertToRootTask(p.SCtx()) + p.SetChildren(lTask.Plan(), rTask.Plan()) p.schema = BuildPhysicalJoinSchema(p.JoinType, p) - t := &rootTask{ - p: p, - } + t := &RootTask{} + t.SetPlan(p) return t } -func (p *PhysicalIndexMergeJoin) attach2Task(tasks ...task) task { +func (p *PhysicalIndexMergeJoin) Attach2Task(tasks ...Task) Task { innerTask := p.innerTask - outerTask := tasks[1-p.InnerChildIdx].convertToRootTask(p.SCtx()) + outerTask := tasks[1-p.InnerChildIdx].ConvertToRootTask(p.SCtx()) if p.InnerChildIdx == 1 { - p.SetChildren(outerTask.plan(), innerTask.plan()) + p.SetChildren(outerTask.Plan(), innerTask.Plan()) } else { - p.SetChildren(innerTask.plan(), outerTask.plan()) - } - t := &rootTask{ - p: p, + p.SetChildren(innerTask.Plan(), outerTask.Plan()) } + t := &RootTask{} + t.SetPlan(p) return t } -func (p *PhysicalIndexHashJoin) attach2Task(tasks ...task) task { +func (p *PhysicalIndexHashJoin) attach2Task(tasks ...Task) Task { innerTask := p.innerTask - outerTask := tasks[1-p.InnerChildIdx].convertToRootTask(p.SCtx()) + outerTask := tasks[1-p.InnerChildIdx].ConvertToRootTask(p.SCtx()) if p.InnerChildIdx == 1 { - p.SetChildren(outerTask.plan(), innerTask.plan()) + p.SetChildren(outerTask.Plan(), innerTask.Plan()) } else { - p.SetChildren(innerTask.plan(), outerTask.plan()) - } - t := &rootTask{ - p: p, + p.SetChildren(innerTask.Plan(), outerTask.Plan()) } + t := &RootTask{} + t.SetPlan(p) return t } -func (p *PhysicalIndexJoin) attach2Task(tasks ...task) task { +func (p *PhysicalIndexJoin) Attach2Task(tasks ...Task) Task { innerTask := p.innerTask - outerTask := tasks[1-p.InnerChildIdx].convertToRootTask(p.SCtx()) + outerTask := 
tasks[1-p.InnerChildIdx].ConvertToRootTask(p.SCtx()) if p.InnerChildIdx == 1 { - p.SetChildren(outerTask.plan(), innerTask.plan()) + p.SetChildren(outerTask.Plan(), innerTask.Plan()) } else { - p.SetChildren(innerTask.plan(), outerTask.plan()) - } - t := &rootTask{ - p: p, + p.SetChildren(innerTask.Plan(), outerTask.Plan()) } + t := &RootTask{} + t.SetPlan(p) return t } @@ -318,16 +296,15 @@ func getAvgRowSize(stats *property.StatsInfo, cols []*expression.Column) (size f return } -func (p *PhysicalHashJoin) attach2Task(tasks ...task) task { +func (p *PhysicalHashJoin) attach2Task(tasks ...Task) Task { if p.storeTp == kv.TiFlash { return p.attach2TaskForTiFlash(tasks...) } - lTask := tasks[0].convertToRootTask(p.SCtx()) - rTask := tasks[1].convertToRootTask(p.SCtx()) - p.SetChildren(lTask.plan(), rTask.plan()) - task := &rootTask{ - p: p, - } + lTask := tasks[0].ConvertToRootTask(p.SCtx()) + rTask := tasks[1].ConvertToRootTask(p.SCtx()) + p.SetChildren(lTask.Plan(), rTask.Plan()) + task := &RootTask{} + task.SetPlan(p) return task } @@ -482,7 +459,7 @@ func (p *PhysicalHashJoin) convertPartitionKeysIfNeed(lTask, rTask *mppTask) (*m } // if left or right child changes, we need to add enforcer. 
if lChanged { - nlTask := lTask.copy().(*mppTask) + nlTask := lTask.Copy().(*mppTask) nlTask.p = lProj nlTask = nlTask.enforceExchanger(&property.PhysicalProperty{ TaskTp: property.MppTaskType, @@ -492,7 +469,7 @@ func (p *PhysicalHashJoin) convertPartitionKeysIfNeed(lTask, rTask *mppTask) (*m lTask = nlTask } if rChanged { - nrTask := rTask.copy().(*mppTask) + nrTask := rTask.Copy().(*mppTask) nrTask.p = rProj nrTask = nrTask.enforceExchanger(&property.PhysicalProperty{ TaskTp: property.MppTaskType, @@ -504,7 +481,7 @@ func (p *PhysicalHashJoin) convertPartitionKeysIfNeed(lTask, rTask *mppTask) (*m return lTask, rTask } -func (p *PhysicalHashJoin) attach2TaskForMpp(tasks ...task) task { +func (p *PhysicalHashJoin) attach2TaskForMpp(tasks ...Task) Task { lTask, lok := tasks[0].(*mppTask) rTask, rok := tasks[1].(*mppTask) if !lok || !rok { @@ -517,7 +494,7 @@ func (p *PhysicalHashJoin) attach2TaskForMpp(tasks ...task) task { } lTask, rTask = p.convertPartitionKeysIfNeed(lTask, rTask) } - p.SetChildren(lTask.plan(), rTask.plan()) + p.SetChildren(lTask.Plan(), rTask.Plan()) p.schema = BuildPhysicalJoinSchema(p.JoinType, p) // outer task is the task that will pass its MPPPartitionType to the join result @@ -547,13 +524,13 @@ func (p *PhysicalHashJoin) attach2TaskForMpp(tasks ...task) task { return task } -func (p *PhysicalHashJoin) attach2TaskForTiFlash(tasks ...task) task { +func (p *PhysicalHashJoin) attach2TaskForTiFlash(tasks ...Task) Task { lTask, lok := tasks[0].(*copTask) rTask, rok := tasks[1].(*copTask) if !lok || !rok { return p.attach2TaskForMpp(tasks...) 
} - p.SetChildren(lTask.plan(), rTask.plan()) + p.SetChildren(lTask.Plan(), rTask.Plan()) p.schema = BuildPhysicalJoinSchema(p.JoinType, p) if !lTask.indexPlanFinished { lTask.finishIndexPlan() @@ -570,18 +547,17 @@ func (p *PhysicalHashJoin) attach2TaskForTiFlash(tasks ...task) task { return task } -func (p *PhysicalMergeJoin) attach2Task(tasks ...task) task { - lTask := tasks[0].convertToRootTask(p.SCtx()) - rTask := tasks[1].convertToRootTask(p.SCtx()) - p.SetChildren(lTask.plan(), rTask.plan()) - t := &rootTask{ - p: p, - } +func (p *PhysicalMergeJoin) attach2Task(tasks ...Task) Task { + lTask := tasks[0].ConvertToRootTask(p.SCtx()) + rTask := tasks[1].ConvertToRootTask(p.SCtx()) + p.SetChildren(lTask.Plan(), rTask.Plan()) + t := &RootTask{} + t.SetPlan(p) return t } -func buildIndexLookUpTask(ctx PlanContext, t *copTask) *rootTask { - newTask := &rootTask{} +func buildIndexLookUpTask(ctx PlanContext, t *copTask) *RootTask { + newTask := &RootTask{} p := PhysicalIndexLookUpReader{ tablePlan: t.tablePlan, indexPlan: t.indexPlan, @@ -608,9 +584,9 @@ func buildIndexLookUpTask(ctx PlanContext, t *copTask) *rootTask { proj := PhysicalProjection{Exprs: expression.Column2Exprs(schema.Columns)}.Init(ctx, p.StatsInfo(), t.tablePlan.QueryBlockOffset(), nil) proj.SetSchema(schema) proj.SetChildren(p) - newTask.p = proj + newTask.SetPlan(proj) } else { - newTask.p = p + newTask.SetPlan(p) } return newTask } @@ -649,16 +625,12 @@ func calcPagingCost(ctx PlanContext, indexPlan PhysicalPlan, expectCnt uint64) f return math.Max(pagingCst-sessVars.GetSeekFactor(nil), 0) } -func (t *rootTask) convertToRootTask(_ PlanContext) *rootTask { - return t.copy().(*rootTask) -} - -func (t *copTask) convertToRootTask(ctx PlanContext) *rootTask { +func (t *copTask) ConvertToRootTask(ctx PlanContext) *RootTask { // copy one to avoid changing itself. 
- return t.copy().(*copTask).convertToRootTaskImpl(ctx) + return t.Copy().(*copTask).convertToRootTaskImpl(ctx) } -func (t *copTask) convertToRootTaskImpl(ctx PlanContext) *rootTask { +func (t *copTask) convertToRootTaskImpl(ctx PlanContext) *RootTask { // copTasks are run in parallel, to make the estimated cost closer to execution time, we amortize // the cost to cop iterator workers. According to `CopClient::Send`, the concurrency // is Min(DistSQLScanConcurrency, numRegionsInvolvedInScan), since we cannot infer @@ -685,7 +657,7 @@ func (t *copTask) convertToRootTaskImpl(ctx PlanContext) *rootTask { t.originSchema = prevSchema } } - newTask := &rootTask{} + newTask := &RootTask{} if t.idxMergePartPlans != nil { p := PhysicalIndexMergeReader{ partialPlans: t.idxMergePartPlans, @@ -696,14 +668,14 @@ func (t *copTask) convertToRootTaskImpl(ctx PlanContext) *rootTask { }.Init(ctx, t.idxMergePartPlans[0].QueryBlockOffset()) p.PlanPartInfo = t.physPlanPartInfo setTableScanToTableRowIDScan(p.tablePlan) - newTask.p = p + newTask.SetPlan(p) t.handleRootTaskConds(ctx, newTask) if t.needExtraProj { schema := t.originSchema proj := PhysicalProjection{Exprs: expression.Column2Exprs(schema.Columns)}.Init(ctx, p.StatsInfo(), t.idxMergePartPlans[0].QueryBlockOffset(), nil) proj.SetSchema(schema) proj.SetChildren(p) - newTask.p = proj + newTask.SetPlan(proj) } return newTask } @@ -713,7 +685,7 @@ func (t *copTask) convertToRootTaskImpl(ctx PlanContext) *rootTask { p := PhysicalIndexReader{indexPlan: t.indexPlan}.Init(ctx, t.indexPlan.QueryBlockOffset()) p.PlanPartInfo = t.physPlanPartInfo p.SetStats(t.indexPlan.StatsInfo()) - newTask.p = p + newTask.SetPlan(p) } else { tp := t.tablePlan for len(tp.Children()) > 0 { @@ -749,9 +721,9 @@ func (t *copTask) convertToRootTaskImpl(ctx PlanContext) *rootTask { proj := PhysicalProjection{Exprs: expression.Column2Exprs(t.originSchema.Columns)}.Init(ts.SCtx(), ts.StatsInfo(), ts.QueryBlockOffset(), nil) proj.SetSchema(t.originSchema) 
proj.SetChildren(p) - newTask.p = proj + newTask.SetPlan(proj) } else { - newTask.p = p + newTask.SetPlan(p) } } @@ -759,17 +731,17 @@ func (t *copTask) convertToRootTaskImpl(ctx PlanContext) *rootTask { return newTask } -func (t *copTask) handleRootTaskConds(ctx PlanContext, newTask *rootTask) { +func (t *copTask) handleRootTaskConds(ctx PlanContext, newTask *RootTask) { if len(t.rootTaskConds) > 0 { selectivity, _, err := cardinality.Selectivity(ctx, t.tblColHists, t.rootTaskConds, nil) if err != nil { logutil.BgLogger().Debug("calculate selectivity failed, use selection factor", zap.Error(err)) selectivity = SelectionFactor } - sel := PhysicalSelection{Conditions: t.rootTaskConds}.Init(ctx, newTask.p.StatsInfo().Scale(selectivity), newTask.p.QueryBlockOffset()) + sel := PhysicalSelection{Conditions: t.rootTaskConds}.Init(ctx, newTask.GetPlan().StatsInfo().Scale(selectivity), newTask.GetPlan().QueryBlockOffset()) sel.fromDataSource = true - sel.SetChildren(newTask.p) - newTask.p = sel + sel.SetChildren(newTask.GetPlan()) + newTask.SetPlan(sel) } } @@ -784,40 +756,6 @@ func setTableScanToTableRowIDScan(p PhysicalPlan) { } } -// rootTask is the final sink node of a plan graph. It should be a single goroutine on tidb. -type rootTask struct { - p PhysicalPlan - isEmpty bool // isEmpty indicates if this task contains a dual table and returns empty data. - // TODO: The flag 'isEmpty' is only checked by Projection and UnionAll. We should support more cases in the future. -} - -func (t *rootTask) copy() task { - return &rootTask{ - p: t.p, - } -} - -func (t *rootTask) count() float64 { - return t.p.StatsInfo().RowCount -} - -func (t *rootTask) plan() PhysicalPlan { - return t.p -} - -// MemoryUsage return the memory usage of rootTask -func (t *rootTask) MemoryUsage() (sum int64) { - if t == nil { - return - } - - sum = size.SizeOfInterface + size.SizeOfBool - if t.p != nil { - sum += t.p.MemoryUsage() - } - return sum -} - // attach2Task attach limit to different cases. 
// For Normal Index Lookup // 1: attach the limit to table side or index side of normal index lookup cop task. (normal case, old code, no more @@ -834,8 +772,8 @@ func (t *rootTask) MemoryUsage() (sum int64) { // // 4: attach the limit to the TOP of root index merge operator if there is some root condition exists for index merge // intersection/union case. -func (p *PhysicalLimit) attach2Task(tasks ...task) task { - t := tasks[0].copy() +func (p *PhysicalLimit) attach2Task(tasks ...Task) Task { + t := tasks[0].Copy() newPartitionBy := make([]property.SortItem, 0, len(p.GetPartitionBy())) for _, expr := range p.GetPartitionBy() { newPartitionBy = append(newPartitionBy, expr.Clone()) @@ -853,7 +791,7 @@ func (p *PhysicalLimit) attach2Task(tasks ...task) task { cop.tablePlan = pushedDownLimit // Don't use clone() so that Limit and its children share the same schema. Otherwise, the virtual generated column may not be resolved right. pushedDownLimit.SetSchema(pushedDownLimit.children[0].Schema()) - t = cop.convertToRootTask(p.SCtx()) + t = cop.ConvertToRootTask(p.SCtx()) } if len(cop.idxMergePartPlans) == 0 { // For double read which requires order being kept, the limit cannot be pushed down to the table side, @@ -861,7 +799,7 @@ func (p *PhysicalLimit) attach2Task(tasks ...task) task { if (!cop.keepOrder || !cop.indexPlanFinished || cop.indexPlan == nil) && len(cop.rootTaskConds) == 0 { // When limit is pushed down, we should remove its offset. newCount := p.Offset + p.Count - childProfile := cop.plan().StatsInfo() + childProfile := cop.Plan().StatsInfo() // Strictly speaking, for the row count of stats, we should multiply newCount with "regionNum", // but "regionNum" is unknown since the copTask can be a double read, so we ignore it now. stats := deriveLimitStats(childProfile, float64(newCount)) @@ -870,7 +808,7 @@ func (p *PhysicalLimit) attach2Task(tasks ...task) task { // Don't use clone() so that Limit and its children share the same schema. 
Otherwise the virtual generated column may not be resolved right. pushedDownLimit.SetSchema(pushedDownLimit.children[0].Schema()) } - t = cop.convertToRootTask(p.SCtx()) + t = cop.ConvertToRootTask(p.SCtx()) sunk = p.sinkIntoIndexLookUp(t) } else if !cop.idxMergeIsIntersection { // We only support push part of the order prop down to index merge build case. @@ -891,12 +829,12 @@ func (p *PhysicalLimit) attach2Task(tasks ...task) task { limitChildren = append(limitChildren, pushedDownLimit) } cop.idxMergePartPlans = limitChildren - t = cop.convertToRootTask(p.SCtx()) + t = cop.ConvertToRootTask(p.SCtx()) sunk = p.sinkIntoIndexMerge(t) } } else { // when there are some root conditions, just sink the limit upon the index merge reader. - t = cop.convertToRootTask(p.SCtx()) + t = cop.ConvertToRootTask(p.SCtx()) sunk = p.sinkIntoIndexMerge(t) } } else if cop.idxMergeIsIntersection { @@ -909,26 +847,26 @@ func (p *PhysicalLimit) attach2Task(tasks ...task) task { // indicates the table side is not a pure table-scan, so we could only append the limit upon the table plan. suspendLimitAboveTablePlan() } else { - t = cop.convertToRootTask(p.SCtx()) + t = cop.ConvertToRootTask(p.SCtx()) sunk = p.sinkIntoIndexMerge(t) } } else { // Otherwise, suspend the limit out of index merge reader. - t = cop.convertToRootTask(p.SCtx()) + t = cop.ConvertToRootTask(p.SCtx()) sunk = p.sinkIntoIndexMerge(t) } } else { // Whatever the remained case is, we directly convert to it to root task. 
- t = cop.convertToRootTask(p.SCtx()) + t = cop.ConvertToRootTask(p.SCtx()) } } else if mpp, ok := t.(*mppTask); ok { newCount := p.Offset + p.Count - childProfile := mpp.plan().StatsInfo() + childProfile := mpp.Plan().StatsInfo() stats := deriveLimitStats(childProfile, float64(newCount)) pushedDownLimit := PhysicalLimit{Count: newCount, PartitionBy: newPartitionBy}.Init(p.SCtx(), stats, p.QueryBlockOffset()) mpp = attachPlan2Task(pushedDownLimit, mpp).(*mppTask) pushedDownLimit.SetSchema(pushedDownLimit.children[0].Schema()) - t = mpp.convertToRootTask(p.SCtx()) + t = mpp.ConvertToRootTask(p.SCtx()) } if sunk { return t @@ -941,10 +879,10 @@ func (p *PhysicalLimit) attach2Task(tasks ...task) task { return attachPlan2Task(p, t) } -func (p *PhysicalLimit) sinkIntoIndexLookUp(t task) bool { - root := t.(*rootTask) - reader, isDoubleRead := root.p.(*PhysicalIndexLookUpReader) - proj, isProj := root.p.(*PhysicalProjection) +func (p *PhysicalLimit) sinkIntoIndexLookUp(t Task) bool { + root := t.(*RootTask) + reader, isDoubleRead := root.GetPlan().(*PhysicalIndexLookUpReader) + proj, isProj := root.GetPlan().(*PhysicalProjection) if !isDoubleRead && !isProj { return false } @@ -972,8 +910,8 @@ func (p *PhysicalLimit) sinkIntoIndexLookUp(t task) bool { }.Init(p.SCtx(), p.StatsInfo(), p.QueryBlockOffset(), nil) extraProj.SetSchema(p.schema) // If the root.p is already a Projection. We left the optimization for the later Projection Elimination. 
- extraProj.SetChildren(root.p) - root.p = extraProj + extraProj.SetChildren(root.GetPlan()) + root.SetPlan(extraProj) } reader.PushedLimit = &PushedDownLimit{ @@ -993,10 +931,10 @@ func (p *PhysicalLimit) sinkIntoIndexLookUp(t task) bool { return true } -func (p *PhysicalLimit) sinkIntoIndexMerge(t task) bool { - root := t.(*rootTask) - imReader, isIm := root.p.(*PhysicalIndexMergeReader) - proj, isProj := root.p.(*PhysicalProjection) +func (p *PhysicalLimit) sinkIntoIndexMerge(t Task) bool { + root := t.(*RootTask) + imReader, isIm := root.GetPlan().(*PhysicalIndexMergeReader) + proj, isProj := root.GetPlan().(*PhysicalProjection) if !isIm && !isProj { return false } @@ -1025,10 +963,10 @@ func (p *PhysicalLimit) sinkIntoIndexMerge(t task) bool { ts.StatsInfo().RowCount = originStats.RowCount } } - needProj := p.schema.Len() != root.p.Schema().Len() + needProj := p.schema.Len() != root.GetPlan().Schema().Len() if !needProj { for i := 0; i < p.schema.Len(); i++ { - if !p.schema.Columns[i].EqualColumn(root.p.Schema().Columns[i]) { + if !p.schema.Columns[i].EqualColumn(root.GetPlan().Schema().Columns[i]) { needProj = true break } @@ -1040,23 +978,23 @@ func (p *PhysicalLimit) sinkIntoIndexMerge(t task) bool { }.Init(p.SCtx(), p.StatsInfo(), p.QueryBlockOffset(), nil) extraProj.SetSchema(p.schema) // If the root.p is already a Projection. We left the optimization for the later Projection Elimination. 
- extraProj.SetChildren(root.p) - root.p = extraProj + extraProj.SetChildren(root.GetPlan()) + root.SetPlan(extraProj) } return true } -func (p *PhysicalSort) attach2Task(tasks ...task) task { - t := tasks[0].copy() +func (p *PhysicalSort) attach2Task(tasks ...Task) Task { + t := tasks[0].Copy() t = attachPlan2Task(p, t) return t } -func (p *NominalSort) attach2Task(tasks ...task) task { +func (p *NominalSort) attach2Task(tasks ...Task) Task { if p.OnlyColumn { return tasks[0] } - t := tasks[0].copy() + t := tasks[0].Copy() t = attachPlan2Task(p, t) return t } @@ -1149,7 +1087,7 @@ func (p *PhysicalTopN) canPushDownToTiKV(copTask *copTask) bool { return false } } - } else if p.containVirtualColumn(copTask.plan().Schema().Columns) { + } else if p.containVirtualColumn(copTask.Plan().Schema().Columns) { return false } return true @@ -1160,14 +1098,14 @@ func (p *PhysicalTopN) canPushDownToTiFlash(mppTask *mppTask) bool { if !p.canExpressionConvertedToPB(kv.TiFlash) { return false } - if p.containVirtualColumn(mppTask.plan().Schema().Columns) { + if p.containVirtualColumn(mppTask.Plan().Schema().Columns) { return false } return true } -func (p *PhysicalTopN) attach2Task(tasks ...task) task { - t := tasks[0].copy() +func (p *PhysicalTopN) attach2Task(tasks ...Task) Task { + t := tasks[0].Copy() cols := make([]*expression.Column, 0, len(p.ByItems)) for _, item := range p.ByItems { cols = append(cols, expression.ExtractColumns(item.Expr)...) @@ -1190,7 +1128,7 @@ func (p *PhysicalTopN) attach2Task(tasks ...task) task { pushedDownTopN := p.getPushedDownTopN(mppTask.p) mppTask.p = pushedDownTopN } - rootTask := t.convertToRootTask(p.SCtx()) + rootTask := t.ConvertToRootTask(p.SCtx()) // Skip TopN with partition on the root. This is a derived topN and window function // will take care of the filter. 
if len(p.GetPartitionBy()) > 0 { @@ -1199,8 +1137,8 @@ func (p *PhysicalTopN) attach2Task(tasks ...task) task { return attachPlan2Task(p, rootTask) } -func (p *PhysicalExpand) attach2Task(tasks ...task) task { - t := tasks[0].copy() +func (p *PhysicalExpand) attach2Task(tasks ...Task) Task { + t := tasks[0].Copy() // current expand can only be run in MPP TiFlash mode. if mpp, ok := t.(*mppTask); ok { p.SetChildren(mpp.p) @@ -1210,8 +1148,8 @@ func (p *PhysicalExpand) attach2Task(tasks ...task) task { return invalidTask } -func (p *PhysicalProjection) attach2Task(tasks ...task) task { - t := tasks[0].copy() +func (p *PhysicalProjection) attach2Task(tasks ...Task) Task { + t := tasks[0].Copy() if cop, ok := t.(*copTask); ok { if (len(cop.rootTaskConds) == 0 && len(cop.idxMergePartPlans) == 0) && expression.CanExprsPushDown(GetPushDownCtx(p.SCtx()), p.Exprs, cop.getStoreType()) { copTask := attachPlan2Task(p, cop) @@ -1224,21 +1162,21 @@ func (p *PhysicalProjection) attach2Task(tasks ...task) task { return mpp } } - t = t.convertToRootTask(p.SCtx()) + t = t.ConvertToRootTask(p.SCtx()) t = attachPlan2Task(p, t) - if root, ok := tasks[0].(*rootTask); ok && root.isEmpty { - t.(*rootTask).isEmpty = true + if root, ok := tasks[0].(*RootTask); ok && root.IsEmpty() { + t.(*RootTask).SetEmpty(true) } return t } -func (p *PhysicalUnionAll) attach2MppTasks(tasks ...task) task { +func (p *PhysicalUnionAll) attach2MppTasks(tasks ...Task) Task { t := &mppTask{p: p} childPlans := make([]PhysicalPlan, 0, len(tasks)) for _, tk := range tasks { - if mpp, ok := tk.(*mppTask); ok && !tk.invalid() { - childPlans = append(childPlans, mpp.plan()) - } else if root, ok := tk.(*rootTask); ok && root.isEmpty { + if mpp, ok := tk.(*mppTask); ok && !tk.Invalid() { + childPlans = append(childPlans, mpp.Plan()) + } else if root, ok := tk.(*RootTask); ok && root.IsEmpty() { continue } else { return invalidTask @@ -1251,7 +1189,7 @@ func (p *PhysicalUnionAll) attach2MppTasks(tasks ...task) task { 
return t } -func (p *PhysicalUnionAll) attach2Task(tasks ...task) task { +func (p *PhysicalUnionAll) attach2Task(tasks ...Task) Task { for _, t := range tasks { if _, ok := t.(*mppTask); ok { if p.TP() == plancodec.TypePartitionUnion { @@ -1263,23 +1201,24 @@ func (p *PhysicalUnionAll) attach2Task(tasks ...task) task { return p.attach2MppTasks(tasks...) } } - t := &rootTask{p: p} + t := &RootTask{} + t.SetPlan(p) childPlans := make([]PhysicalPlan, 0, len(tasks)) for _, task := range tasks { - task = task.convertToRootTask(p.SCtx()) - childPlans = append(childPlans, task.plan()) + task = task.ConvertToRootTask(p.SCtx()) + childPlans = append(childPlans, task.Plan()) } p.SetChildren(childPlans...) return t } -func (sel *PhysicalSelection) attach2Task(tasks ...task) task { +func (sel *PhysicalSelection) attach2Task(tasks ...Task) Task { if mppTask, _ := tasks[0].(*mppTask); mppTask != nil { // always push to mpp task. if expression.CanExprsPushDown(GetPushDownCtx(sel.SCtx()), sel.Conditions, kv.TiFlash) { - return attachPlan2Task(sel, mppTask.copy()) + return attachPlan2Task(sel, mppTask.Copy()) } } - t := tasks[0].convertToRootTask(sel.SCtx()) + t := tasks[0].ConvertToRootTask(sel.SCtx()) return attachPlan2Task(sel, t) } @@ -1927,8 +1866,8 @@ func computePartialCursorOffset(name string) int { return offset } -func (p *PhysicalStreamAgg) attach2Task(tasks ...task) task { - t := tasks[0].copy() +func (p *PhysicalStreamAgg) attach2Task(tasks ...Task) Task { + t := tasks[0].Copy() if cop, ok := t.(*copTask); ok { // We should not push agg down across // 1. double read, since the data of second read is ordered by handle instead of index. The `extraHandleCol` is added @@ -1937,7 +1876,7 @@ func (p *PhysicalStreamAgg) attach2Task(tasks ...task) task { // 2. the case that there's filters should be calculated on TiDB side. // 3. 
the case of index merge if (cop.indexPlan != nil && cop.tablePlan != nil && cop.keepOrder) || len(cop.rootTaskConds) > 0 || len(cop.idxMergePartPlans) > 0 { - t = cop.convertToRootTask(p.SCtx()) + t = cop.ConvertToRootTask(p.SCtx()) attachPlan2Task(p, t) } else { storeType := cop.getStoreType() @@ -1963,11 +1902,11 @@ func (p *PhysicalStreamAgg) attach2Task(tasks ...task) task { cop.indexPlan = partialAgg } } - t = cop.convertToRootTask(p.SCtx()) + t = cop.ConvertToRootTask(p.SCtx()) attachPlan2Task(finalAgg, t) } } else if mpp, ok := t.(*mppTask); ok { - t = mpp.convertToRootTask(p.SCtx()) + t = mpp.ConvertToRootTask(p.SCtx()) attachPlan2Task(p, t) } else { attachPlan2Task(p, t) @@ -1993,7 +1932,7 @@ func (p *PhysicalHashAgg) cpuCostDivisor(hasDistinct bool) (divisor, con float64 return math.Min(float64(finalCon), float64(partialCon)), float64(finalCon + partialCon) } -func (p *PhysicalHashAgg) attach2TaskForMpp1Phase(mpp *mppTask) task { +func (p *PhysicalHashAgg) attach2TaskForMpp1Phase(mpp *mppTask) Task { // 1-phase agg: when the partition columns can be satisfied, where the plan does not need to enforce Exchange // only push down the original agg proj := p.convertAvgForMPP() @@ -2303,8 +2242,8 @@ func (p *PhysicalHashAgg) adjust3StagePhaseAgg(partialAgg, finalAgg PhysicalPlan return finalHashAgg, middleHashAgg, partialHashAgg, proj4Partial, nil } -func (p *PhysicalHashAgg) attach2TaskForMpp(tasks ...task) task { - t := tasks[0].copy() +func (p *PhysicalHashAgg) attach2TaskForMpp(tasks ...Task) Task { + t := tasks[0].Copy() mpp, ok := t.(*mppTask) if !ok { return invalidTask @@ -2344,7 +2283,7 @@ func (p *PhysicalHashAgg) attach2TaskForMpp(tasks ...task) task { } prop := &property.PhysicalProperty{TaskTp: property.MppTaskType, ExpectedCnt: math.MaxFloat64, MPPPartitionTp: property.HashType, MPPPartitionCols: partitionCols} newMpp := mpp.enforceExchangerImpl(prop) - if newMpp.invalid() { + if newMpp.Invalid() { return newMpp } attachPlan2Task(finalAgg, newMpp) 
@@ -2358,7 +2297,7 @@ func (p *PhysicalHashAgg) attach2TaskForMpp(tasks ...task) task { if partialAgg != nil { attachPlan2Task(partialAgg, mpp) } - t = mpp.convertToRootTask(p.SCtx()) + t = mpp.ConvertToRootTask(p.SCtx()) attachPlan2Task(finalAgg, t) return t case MppScalar: @@ -2430,8 +2369,8 @@ func (p *PhysicalHashAgg) attach2TaskForMpp(tasks ...task) task { } } -func (p *PhysicalHashAgg) attach2Task(tasks ...task) task { - t := tasks[0].copy() +func (p *PhysicalHashAgg) attach2Task(tasks ...Task) Task { + t := tasks[0].Copy() if cop, ok := t.(*copTask); ok { if len(cop.rootTaskConds) == 0 && len(cop.idxMergePartPlans) == 0 { copTaskType := cop.getStoreType() @@ -2459,10 +2398,10 @@ func (p *PhysicalHashAgg) attach2Task(tasks ...task) task { // column may be independent of the column used for region distribution, so a closer // estimation of network cost for hash aggregation may multiply the number of // regions involved in the `partialAgg`, which is unknown however. - t = cop.convertToRootTask(p.SCtx()) + t = cop.ConvertToRootTask(p.SCtx()) attachPlan2Task(finalAgg, t) } else { - t = cop.convertToRootTask(p.SCtx()) + t = cop.ConvertToRootTask(p.SCtx()) attachPlan2Task(p, t) } } else if _, ok := t.(*mppTask); ok { @@ -2473,15 +2412,15 @@ func (p *PhysicalHashAgg) attach2Task(tasks ...task) task { return t } -func (p *PhysicalWindow) attach2TaskForMPP(mpp *mppTask) task { +func (p *PhysicalWindow) attach2TaskForMPP(mpp *mppTask) Task { // FIXME: currently, tiflash's join has different schema with TiDB, // so we have to rebuild the schema of join and operators which may inherit schema from join. // for window, we take the sub-plan's schema, and the schema generated by windowDescs. 
columns := p.Schema().Clone().Columns[len(p.Schema().Columns)-len(p.WindowFuncDescs):] - p.schema = expression.MergeSchema(mpp.plan().Schema(), expression.NewSchema(columns...)) + p.schema = expression.MergeSchema(mpp.Plan().Schema(), expression.NewSchema(columns...)) failpoint.Inject("CheckMPPWindowSchemaLength", func() { - if len(p.Schema().Columns) != len(mpp.plan().Schema().Columns)+len(p.WindowFuncDescs) { + if len(p.Schema().Columns) != len(mpp.Plan().Schema().Columns)+len(p.WindowFuncDescs) { panic("mpp physical window has incorrect schema length") } }) @@ -2489,18 +2428,18 @@ func (p *PhysicalWindow) attach2TaskForMPP(mpp *mppTask) task { return attachPlan2Task(p, mpp) } -func (p *PhysicalWindow) attach2Task(tasks ...task) task { - if mpp, ok := tasks[0].copy().(*mppTask); ok && p.storeTp == kv.TiFlash { +func (p *PhysicalWindow) attach2Task(tasks ...Task) Task { + if mpp, ok := tasks[0].Copy().(*mppTask); ok && p.storeTp == kv.TiFlash { return p.attach2TaskForMPP(mpp) } - t := tasks[0].convertToRootTask(p.SCtx()) + t := tasks[0].ConvertToRootTask(p.SCtx()) return attachPlan2Task(p.self, t) } -func (p *PhysicalCTEStorage) attach2Task(tasks ...task) task { - t := tasks[0].copy() +func (p *PhysicalCTEStorage) attach2Task(tasks ...Task) Task { + t := tasks[0].Copy() if mpp, ok := t.(*mppTask); ok { - p.SetChildren(t.plan()) + p.SetChildren(t.Plan()) return &mppTask{ p: p, partTp: mpp.partTp, @@ -2508,14 +2447,14 @@ func (p *PhysicalCTEStorage) attach2Task(tasks ...task) task { tblColHists: mpp.tblColHists, } } - t.convertToRootTask(p.SCtx()) - p.SetChildren(t.plan()) - return &rootTask{ - p: p, - } + t.ConvertToRootTask(p.SCtx()) + p.SetChildren(t.Plan()) + ta := &RootTask{} + ta.SetPlan(p) + return ta } -func (p *PhysicalSequence) attach2Task(tasks ...task) task { +func (p *PhysicalSequence) attach2Task(tasks ...Task) Task { for _, t := range tasks { _, isMpp := t.(*mppTask) if !isMpp { @@ -2527,7 +2466,7 @@ func (p *PhysicalSequence) attach2Task(tasks 
...task) task { children := make([]PhysicalPlan, 0, len(tasks)) for _, t := range tasks { - children = append(children, t.plan()) + children = append(children, t.Plan()) } p.SetChildren(children...) @@ -2566,25 +2505,25 @@ type mppTask struct { tblColHists *statistics.HistColl } -func (t *mppTask) count() float64 { +func (t *mppTask) Count() float64 { return t.p.StatsInfo().RowCount } -func (t *mppTask) copy() task { +func (t *mppTask) Copy() Task { nt := *t return &nt } -func (t *mppTask) plan() PhysicalPlan { +func (t *mppTask) Plan() PhysicalPlan { return t.p } -func (t *mppTask) invalid() bool { +func (t *mppTask) Invalid() bool { return t.p == nil } -func (t *mppTask) convertToRootTask(ctx PlanContext) *rootTask { - return t.copy().(*mppTask).convertToRootTaskImpl(ctx) +func (t *mppTask) ConvertToRootTask(ctx PlanContext) *RootTask { + return t.Copy().(*mppTask).ConvertToRootTaskImpl(ctx) } // MemoryUsage return the memory usage of mppTask @@ -2638,7 +2577,7 @@ func tryExpandVirtualColumn(p PhysicalPlan) { } } -func (t *mppTask) convertToRootTaskImpl(ctx PlanContext) *rootTask { +func (t *mppTask) ConvertToRootTaskImpl(ctx PlanContext) *RootTask { // In disaggregated-tiflash mode, need to consider generated column. 
tryExpandVirtualColumn(t.p) sender := PhysicalExchangeSender{ @@ -2652,9 +2591,8 @@ func (t *mppTask) convertToRootTaskImpl(ctx PlanContext) *rootTask { }.Init(ctx, t.p.QueryBlockOffset()) p.SetStats(t.p.StatsInfo()) collectPartitionInfosFromMPPPlan(p, t.p) - rt := &rootTask{ - p: p, - } + rt := &RootTask{} + rt.SetPlan(p) if len(t.rootTaskConds) > 0 { // Some Filter cannot be pushed down to TiFlash, need to add Selection in rootTask, @@ -2676,10 +2614,10 @@ func (t *mppTask) convertToRootTaskImpl(ctx PlanContext) *rootTask { logutil.BgLogger().Debug("calculate selectivity failed, use selection factor", zap.Error(err)) selectivity = SelectionFactor } - sel := PhysicalSelection{Conditions: t.rootTaskConds}.Init(ctx, rt.p.StatsInfo().Scale(selectivity), rt.p.QueryBlockOffset()) + sel := PhysicalSelection{Conditions: t.rootTaskConds}.Init(ctx, rt.GetPlan().StatsInfo().Scale(selectivity), rt.GetPlan().QueryBlockOffset()) sel.fromDataSource = true - sel.SetChildren(rt.p) - rt.p = sel + sel.SetChildren(rt.GetPlan()) + rt.SetPlan(sel) } return rt } @@ -2715,7 +2653,7 @@ func (t *mppTask) enforceExchanger(prop *property.PhysicalProperty) *mppTask { if !t.needEnforceExchanger(prop) { return t } - return t.copy().(*mppTask).enforceExchangerImpl(prop) + return t.Copy().(*mppTask).enforceExchangerImpl(prop) } func (t *mppTask) enforceExchangerImpl(prop *property.PhysicalProperty) *mppTask { diff --git a/pkg/planner/core/task_base.go b/pkg/planner/core/task_base.go new file mode 100644 index 0000000000000..fef9b6f38b171 --- /dev/null +++ b/pkg/planner/core/task_base.go @@ -0,0 +1,77 @@ +package core + +import ( + "github.com/pingcap/tidb/pkg/util/size" +) + +var ( + _ Task = &RootTask{} +) + +// Task is a new version of `PhysicalPlanInfo`. It stores cost information for a task. +// A task may be CopTask, RootTask, MPPTaskMeta or a ParallelTask. 
+type Task interface { + Count() float64 + Copy() Task + Plan() PhysicalPlan + Invalid() bool + ConvertToRootTask(ctx PlanContext) *RootTask + MemoryUsage() int64 +} + +// rootTask is the final sink node of a plan graph. It should be a single goroutine on tidb. +type RootTask struct { + p PhysicalPlan + isEmpty bool // isEmpty indicates if this task contains a dual table and returns empty data. + // TODO: The flag 'isEmpty' is only checked by Projection and UnionAll. We should support more cases in the future. +} + +func (t *RootTask) GetPlan() PhysicalPlan { + return t.p +} + +func (t *RootTask) SetPlan(p PhysicalPlan) { + t.p = p +} + +func (t *RootTask) IsEmpty() bool { + return t.isEmpty +} + +func (t *RootTask) SetEmpty(x bool) { + t.isEmpty = x +} + +func (t *RootTask) Copy() Task { + return &RootTask{ + p: t.p, + } +} + +func (t *RootTask) ConvertToRootTask(_ PlanContext) *RootTask { + return t.Copy().(*RootTask) +} + +func (t *RootTask) Invalid() bool { + return t.p == nil +} + +func (t *RootTask) Count() float64 { + return t.p.StatsInfo().RowCount +} + +func (t *RootTask) Plan() PhysicalPlan { + return t.p +} + +// MemoryUsage return the memory usage of rootTask +func (t *RootTask) MemoryUsage() (sum int64) { + if t == nil { + return + } + sum = size.SizeOfInterface + size.SizeOfBool + if t.p != nil { + sum += t.p.MemoryUsage() + } + return sum +} diff --git a/pkg/planner/core/util.go b/pkg/planner/core/util.go index 012d0b47ebf1f..ad3dd1a25b4cb 100644 --- a/pkg/planner/core/util.go +++ b/pkg/planner/core/util.go @@ -16,6 +16,7 @@ package core import ( "fmt" + "github.com/pingcap/tidb/pkg/planner/util/coreusage" "slices" "strings" @@ -25,7 +26,6 @@ import ( "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/planner/core/internal/base" - "github.com/pingcap/tidb/pkg/planner/util" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/table" 
"github.com/pingcap/tidb/pkg/tablecodec" @@ -137,7 +137,7 @@ func (s *logicalSchemaProducer) setSchemaAndNames(schema *expression.Schema, nam } // inlineProjection prunes unneeded columns inline a executor. -func (s *logicalSchemaProducer) inlineProjection(parentUsedCols []*expression.Column, opt *util.LogicalOptimizeOp) { +func (s *logicalSchemaProducer) inlineProjection(parentUsedCols []*expression.Column, opt *coreusage.LogicalOptimizeOp) { prunedColumns := make([]*expression.Column, 0) used := expression.GetUsedList(s.SCtx().GetExprCtx().GetEvalCtx(), parentUsedCols, s.Schema()) for i := len(used) - 1; i >= 0; i-- { diff --git a/pkg/planner/util/coreusage/costMisc.go b/pkg/planner/util/coreusage/costMisc.go new file mode 100644 index 0000000000000..cfdb599878f0a --- /dev/null +++ b/pkg/planner/util/coreusage/costMisc.go @@ -0,0 +1,128 @@ +package coreusage + +import ( + "fmt" + "strconv" +) + +const ( + // CostFlagRecalculate indicates the optimizer to ignore cached cost and recalculate it again. + CostFlagRecalculate uint64 = 1 << iota + + // CostFlagUseTrueCardinality indicates the optimizer to use true cardinality to calculate the cost. + CostFlagUseTrueCardinality + + // CostFlagTrace indicates whether to trace the cost calculation. + CostFlagTrace +) + +type CostVer2 struct { + cost float64 + trace *CostTrace +} + +func (c *CostVer2) GetCost() float64 { + return c.cost +} + +func (c *CostVer2) GetTrace() *CostTrace { + return c.trace +} + +type CostTrace struct { + factorCosts map[string]float64 // map[factorName]cost, used to calibrate the cost model + formula string // It used to trace the cost calculation. 
+} + +func (c *CostTrace) GetFormula() string { + return c.formula +} + +func (c *CostTrace) GetFactorCosts() map[string]float64 { + return c.factorCosts +} + +func NewZeroCostVer2(trace bool) (ret CostVer2) { + if trace { + ret.trace = &CostTrace{make(map[string]float64), ""} + } + return +} + +func hasCostFlag(costFlag, flag uint64) bool { + return (costFlag & flag) > 0 +} + +func TraceCost(option *PlanCostOption) bool { + if option != nil && hasCostFlag(option.CostFlag, CostFlagTrace) { + return true + } + return false +} + +func NewCostVer2(option *PlanCostOption, factor CostVer2Factor, cost float64, lazyFormula func() string) (ret CostVer2) { + ret.cost = cost + if TraceCost(option) { + ret.trace = &CostTrace{make(map[string]float64), ""} + ret.trace.factorCosts[factor.Name] = cost + ret.trace.formula = lazyFormula() + } + return ret +} + +type CostVer2Factor struct { + Name string + Value float64 +} + +func (f CostVer2Factor) String() string { + return fmt.Sprintf("%s(%v)", f.Name, f.Value) +} + +func SumCostVer2(costs ...CostVer2) (ret CostVer2) { + if len(costs) == 0 { + return + } + for _, c := range costs { + ret.cost += c.cost + if c.trace != nil { + if ret.trace == nil { // init + ret.trace = &CostTrace{make(map[string]float64), ""} + } + for factor, factorCost := range c.trace.factorCosts { + ret.trace.factorCosts[factor] += factorCost + } + if ret.trace.formula != "" { + ret.trace.formula += " + " + } + ret.trace.formula += "(" + c.trace.formula + ")" + } + } + return ret +} + +func DivCostVer2(cost CostVer2, denominator float64) (ret CostVer2) { + ret.cost = cost.cost / denominator + if cost.trace != nil { + ret.trace = &CostTrace{make(map[string]float64), ""} + for f, c := range cost.trace.factorCosts { + ret.trace.factorCosts[f] = c / denominator + } + ret.trace.formula = "(" + cost.trace.formula + ")/" + strconv.FormatFloat(denominator, 'f', 2, 64) + } + return ret +} + +func MulCostVer2(cost CostVer2, scale float64) (ret CostVer2) { + ret.cost = 
cost.cost * scale + if cost.trace != nil { + ret.trace = &CostTrace{make(map[string]float64), ""} + for f, c := range cost.trace.factorCosts { + ret.trace.factorCosts[f] = c * scale + } + ret.trace.formula = "(" + cost.trace.formula + ")*" + strconv.FormatFloat(scale, 'f', 2, 64) + } + return ret +} + +var ZeroCostVer2 = NewZeroCostVer2(false) diff --git a/pkg/planner/util/optTracer.go b/pkg/planner/util/coreusage/optTracer.go similarity index 58% rename from pkg/planner/util/optTracer.go rename to pkg/planner/util/coreusage/optTracer.go index 79fa275f93ff8..f1df95cc1df37 100644 --- a/pkg/planner/util/optTracer.go +++ b/pkg/planner/util/coreusage/optTracer.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package util +package coreusage import "github.com/pingcap/tidb/pkg/util/tracing" @@ -20,6 +20,7 @@ import "github.com/pingcap/tidb/pkg/util/tracing" // logicalOptRule inside the accommodated pkg `util` should only be depended on by logical `rule` pkg. // // rule related -----> core/util +//**************************************** below logical optimize trace related ****************************************** // LogicalOptimizeOp is logical optimizing option for tracing. type LogicalOptimizeOp struct { @@ -66,3 +67,64 @@ func (op *LogicalOptimizeOp) RecordFinalLogicalPlan(build func() *tracing.PlanTr } op.tracer.RecordFinalLogicalPlan(build()) } + +//**************************************** below physical optimize trace related ****************************************** + +// PhysicalOptimizeOp is logical optimizing option for tracing. 
+type PhysicalOptimizeOp struct { + // tracer is goring to track optimize steps during physical optimizing + tracer *tracing.PhysicalOptimizeTracer +} + +func DefaultPhysicalOptimizeOption() *PhysicalOptimizeOp { + return &PhysicalOptimizeOp{} +} + +func (op *PhysicalOptimizeOp) WithEnableOptimizeTracer(tracer *tracing.PhysicalOptimizeTracer) *PhysicalOptimizeOp { + op.tracer = tracer + return op +} + +func (op *PhysicalOptimizeOp) AppendCandidate(c *tracing.CandidatePlanTrace) { + op.tracer.AppendCandidate(c) +} + +func (op *PhysicalOptimizeOp) GetTracer() *tracing.PhysicalOptimizeTracer { + return op.tracer +} + +// NewDefaultPlanCostOption returns PlanCostOption +func NewDefaultPlanCostOption() *PlanCostOption { + return &PlanCostOption{} +} + +// PlanCostOption indicates option during GetPlanCost +type PlanCostOption struct { + CostFlag uint64 + tracer *PhysicalOptimizeOp +} + +func (op *PlanCostOption) GetTracer() *PhysicalOptimizeOp { + return op.tracer +} + +// WithCostFlag set cost flag +func (op *PlanCostOption) WithCostFlag(flag uint64) *PlanCostOption { + if op == nil { + return nil + } + op.CostFlag = flag + return op +} + +// WithOptimizeTracer set tracer +func (op *PlanCostOption) WithOptimizeTracer(v *PhysicalOptimizeOp) *PlanCostOption { + if op == nil { + return nil + } + op.tracer = v + if v != nil && v.tracer != nil { + op.CostFlag |= CostFlagTrace + } + return op +}