From e5004b77446497de5351f676cd93da97e6e130f1 Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Tue, 6 Aug 2024 16:57:56 +0800 Subject: [PATCH 1/5] . Signed-off-by: AilinKid <314806019@qq.com> --- pkg/planner/core/BUILD.bazel | 2 +- pkg/planner/core/base/BUILD.bazel | 1 + pkg/planner/core/base/rule_base.go | 33 ++++++++ pkg/planner/core/logical_datasource.go | 3 +- pkg/planner/core/logical_index_scan.go | 3 +- pkg/planner/core/logical_join.go | 2 +- pkg/planner/core/logical_selection.go | 2 +- pkg/planner/core/optimizer.go | 82 ++++++++----------- pkg/planner/core/partition_prune.go | 2 +- pkg/planner/core/rule/BUILD.bazel | 17 ++++ pkg/planner/core/rule/rule_build_key_info.go | 54 ++++++++++++ pkg/planner/core/rule/rule_init.go | 12 +++ pkg/planner/core/rule/util/BUILD.bazel | 12 ++- .../util/build_key_info_misc.go} | 54 ++---------- .../core/rule_aggregation_elimination.go | 11 ++- .../core/rule_aggregation_push_down.go | 50 +++++------ .../core/rule_aggregation_skew_rewrite.go | 15 ++-- pkg/planner/core/rule_collect_plan_stats.go | 17 ++-- pkg/planner/core/rule_column_pruning.go | 9 +- pkg/planner/core/rule_constant_propagation.go | 12 +-- pkg/planner/core/rule_decorrelate.go | 31 +++---- .../core/rule_derive_topn_from_window.go | 10 ++- pkg/planner/core/rule_eliminate_projection.go | 13 +-- .../core/rule_generate_column_substitute.go | 11 ++- pkg/planner/core/rule_join_elimination.go | 19 +++-- pkg/planner/core/rule_join_reorder.go | 11 ++- pkg/planner/core/rule_max_min_eliminate.go | 22 ++--- pkg/planner/core/rule_outer_to_inner_join.go | 10 ++- pkg/planner/core/rule_partition_processor.go | 66 +++++++-------- pkg/planner/core/rule_predicate_push_down.go | 8 +- .../core/rule_predicate_simplification.go | 10 ++- pkg/planner/core/rule_push_down_sequence.go | 11 ++- .../core/rule_resolve_grouping_expand.go | 10 ++- pkg/planner/core/rule_result_reorder.go | 18 ++-- pkg/planner/core/rule_semi_join_rewrite.go | 12 +-- pkg/planner/core/rule_topn_push_down.go | 10 ++- 36 files changed, 398 insertions(+), 267 deletions(-) create mode 100644 pkg/planner/core/base/rule_base.go create mode 100644 pkg/planner/core/rule/BUILD.bazel create mode 100644 pkg/planner/core/rule/rule_build_key_info.go create mode 100644 pkg/planner/core/rule/rule_init.go rename pkg/planner/core/{rule_build_key_info.go => rule/util/build_key_info_misc.go} (50%) diff --git a/pkg/planner/core/BUILD.bazel b/pkg/planner/core/BUILD.bazel index e48babee0aef1..11fdb8a600bf7 100644 --- a/pkg/planner/core/BUILD.bazel +++ b/pkg/planner/core/BUILD.bazel @@ -69,7 +69,6 @@ go_library( "rule_aggregation_elimination.go", "rule_aggregation_push_down.go", "rule_aggregation_skew_rewrite.go", - "rule_build_key_info.go", "rule_collect_plan_stats.go", "rule_column_pruning.go", "rule_constant_propagation.go", @@ -141,6 +140,7 @@ go_library( "//pkg/planner/core/metrics", "//pkg/planner/core/operator/baseimpl", "//pkg/planner/core/operator/logicalop", + "//pkg/planner/core/rule", "//pkg/planner/core/rule/util", "//pkg/planner/funcdep", "//pkg/planner/property", diff --git a/pkg/planner/core/base/BUILD.bazel b/pkg/planner/core/base/BUILD.bazel index 27fbeb99b650b..0e62b3f62f896 100644 --- a/pkg/planner/core/base/BUILD.bazel +++ b/pkg/planner/core/base/BUILD.bazel @@ -6,6 +6,7 @@ go_library( "doc.go", "misc_base.go", "plan_base.go", + "rule_base.go", "task_base.go", ], importpath = "github.com/pingcap/tidb/pkg/planner/core/base", diff --git a/pkg/planner/core/base/rule_base.go b/pkg/planner/core/base/rule_base.go new file mode 
100644 index 0000000000000..ccaf143df64a0 --- /dev/null +++ b/pkg/planner/core/base/rule_base.go @@ -0,0 +1,33 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package base + +import ( + "context" + + "github.com/pingcap/tidb/pkg/planner/util/optimizetrace" +) + +// LogicalOptRule means a logical optimization rule, such as decorrelation, predicate push-down (ppd), and column pruning. +type LogicalOptRule interface { + // Optimize returns the following: + // 1. base.LogicalPlan: the optimized base.LogicalPlan after the rule is applied + // 2. bool: used to indicate whether the plan is changed by this rule. + // It is true if the plan is changed. + // The default value is false, which means that no interaction rule will be triggered. + // 3. error: any error that occurs while the rule is applied + Optimize(context.Context, LogicalPlan, *optimizetrace.LogicalOptimizeOp) (LogicalPlan, bool, error) + Name() string +} diff --git a/pkg/planner/core/logical_datasource.go b/pkg/planner/core/logical_datasource.go index 1b04d75c087bd..cdaa2401401e4 100644 --- a/pkg/planner/core/logical_datasource.go +++ b/pkg/planner/core/logical_datasource.go @@ -29,6 +29,7 @@ import ( "github.com/pingcap/tidb/pkg/planner/core/base" "github.com/pingcap/tidb/pkg/planner/core/cost" "github.com/pingcap/tidb/pkg/planner/core/operator/logicalop" + ruleutil "github.com/pingcap/tidb/pkg/planner/core/rule/util" fd "github.com/pingcap/tidb/pkg/planner/funcdep" "github.com/pingcap/tidb/pkg/planner/property" "github.com/pingcap/tidb/pkg/planner/util" @@ -257,7 +258,7 @@ func (ds *DataSource) BuildKeyInfo(selfSchema *expression.Schema, _ []*expressio } else if index.State != model.StatePublic { continue } - if uniqueKey, newKey := checkIndexCanBeKey(index, ds.Columns, selfSchema); newKey != nil { + if uniqueKey, newKey := ruleutil.CheckIndexCanBeKey(index, ds.Columns, selfSchema); newKey != nil { selfSchema.Keys = append(selfSchema.Keys, newKey) } else if uniqueKey != nil { selfSchema.UniqueKeys = append(selfSchema.UniqueKeys, uniqueKey) diff --git a/pkg/planner/core/logical_index_scan.go b/pkg/planner/core/logical_index_scan.go index 508c2d1c905eb..e400c6733221b 100644 --- a/pkg/planner/core/logical_index_scan.go +++ b/pkg/planner/core/logical_index_scan.go @@ -23,6 +23,7 @@ import ( "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/planner/core/base" "github.com/pingcap/tidb/pkg/planner/core/operator/logicalop" + ruleutil "github.com/pingcap/tidb/pkg/planner/core/rule/util" "github.com/pingcap/tidb/pkg/planner/property" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/plancodec" @@ -98,7 +99,7 @@ func (is *LogicalIndexScan) BuildKeyInfo(selfSchema *expression.Schema, _ []*exp if path.IsTablePath() { continue } - if uniqueKey, newKey := ruleutil.CheckIndexCanBeKey(path.Index, is.Columns,
selfSchema); newKey != nil { selfSchema.Keys = append(selfSchema.Keys, newKey) } else if uniqueKey != nil { selfSchema.UniqueKeys = append(selfSchema.UniqueKeys, uniqueKey) diff --git a/pkg/planner/core/logical_join.go b/pkg/planner/core/logical_join.go index b322f0190ac6d..99dd7b4b97aa5 100644 --- a/pkg/planner/core/logical_join.go +++ b/pkg/planner/core/logical_join.go @@ -288,7 +288,7 @@ func (p *LogicalJoin) PredicatePushDown(predicates []expression.Expression, opt utilfuncp.AddSelection(p, lCh, leftRet, 0, opt) utilfuncp.AddSelection(p, rCh, rightRet, 1, opt) p.updateEQCond() - buildKeyInfo(p) + ruleutil.BuildKeyInfoPortal(p) return ret, p.Self() } diff --git a/pkg/planner/core/logical_selection.go b/pkg/planner/core/logical_selection.go index 1541709f1a37d..461911c5ae6a9 100644 --- a/pkg/planner/core/logical_selection.go +++ b/pkg/planner/core/logical_selection.go @@ -152,7 +152,7 @@ func (p *LogicalSelection) BuildKeyInfo(selfSchema *expression.Schema, childSche } } } - p.SetMaxOneRow(checkMaxOneRowCond(eqCols, childSchema[0])) + p.SetMaxOneRow(ruleutil.CheckMaxOneRowCond(eqCols, childSchema[0])) } // PushDownTopN inherits BaseLogicalPlan.<5th> implementation. diff --git a/pkg/planner/core/optimizer.go b/pkg/planner/core/optimizer.go index bea428e35c6eb..ebdd4706dcd18 100644 --- a/pkg/planner/core/optimizer.go +++ b/pkg/planner/core/optimizer.go @@ -39,6 +39,7 @@ import ( "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/planner/core/base" + "github.com/pingcap/tidb/pkg/planner/core/rule" "github.com/pingcap/tidb/pkg/planner/property" "github.com/pingcap/tidb/pkg/planner/util/debugtrace" "github.com/pingcap/tidb/pkg/planner/util/optimizetrace" @@ -96,32 +97,32 @@ const ( flagResolveExpand ) -var optRuleList = []logicalOptRule{ - &gcSubstituter{}, - &columnPruner{}, - &resultReorder{}, - &buildKeySolver{}, - &decorrelateSolver{}, - &semiJoinRewriter{}, - &aggregationEliminator{}, - &skewDistinctAggRewriter{}, - &projectionEliminator{}, - &maxMinEliminator{}, - &constantPropagationSolver{}, - &convertOuterToInnerJoin{}, - &ppdSolver{}, - &outerJoinEliminator{}, - &partitionProcessor{}, - &collectPredicateColumnsPoint{}, - &aggregationPushDownSolver{}, - &deriveTopNFromWindow{}, - &predicateSimplification{}, - &pushDownTopNOptimizer{}, - &syncWaitStatsLoadPoint{}, - &joinReOrderSolver{}, - &columnPruner{}, // column pruning again at last, note it will mess up the results of buildKeySolver - &pushDownSequenceSolver{}, - &resolveExpand{}, +var optRuleList = []base.LogicalOptRule{ + &GcSubstituter{}, + &ColumnPruner{}, + &ResultReorder{}, + &rule.BuildKeySolver{}, + &DecorrelateSolver{}, + &SemiJoinRewriter{}, + &AggregationEliminator{}, + &SkewDistinctAggRewriter{}, + &ProjectionEliminator{}, + &MaxMinEliminator{}, + &ConstantPropagationSolver{}, + &ConvertOuterToInnerJoin{}, + &PPDSolver{}, + &OuterJoinEliminator{}, + &PartitionProcessor{}, + &CollectPredicateColumnsPoint{}, + &AggregationPushDownSolver{}, + &DeriveTopNFromWindow{}, + &PredicateSimplification{}, + &PushDownTopNOptimizer{}, + &SyncWaitStatsLoadPoint{}, + &JoinReOrderSolver{}, + &ColumnPruner{}, // column pruning again at last, note it will mess up the results of buildKeySolver + &PushDownSequenceSolver{}, + &ResolveExpand{}, } // Interaction Rule List @@ -129,20 +130,7 @@ var optRuleList = []logicalOptRule{ 1. The related rule has been trigger and changed the plan 2. 
The interaction rule is enabled */ -var optInteractionRuleList = map[logicalOptRule]logicalOptRule{} - -// logicalOptRule means a logical optimizing rule, which contains decorrelate, ppd, column pruning, etc. -type logicalOptRule interface { - /* Return Parameters: - 1. base.LogicalPlan: The optimized base.LogicalPlan after rule is applied - 2. bool: Used to judge whether the plan is changed or not by logical rule. - If the plan is changed, it will return true. - The default value is false. It means that no interaction rule will be triggered. - 3. error: If there is error during the rule optimizer, it will be thrown - */ - optimize(context.Context, base.LogicalPlan, *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) - name() string -} +var optInteractionRuleList = map[base.LogicalOptRule]base.LogicalOptRule{} // BuildLogicalPlanForTest builds a logical plan for testing purpose from ast.Node. func BuildLogicalPlanForTest(ctx context.Context, sctx sessionctx.Context, node ast.Node, infoSchema infoschema.InfoSchema) (base.Plan, error) { @@ -999,7 +987,7 @@ func logicalOptimize(ctx context.Context, flag uint64, logic base.LogicalPlan) ( }() } var err error - var againRuleList []logicalOptRule + var againRuleList []base.LogicalOptRule for i, rule := range optRuleList { // The order of flags is same as the order of optRule in the list. // We use a bitmask to record which opt rules should be used. If the i-th bit is 1, it means we should @@ -1007,9 +995,9 @@ func logicalOptimize(ctx context.Context, flag uint64, logic base.LogicalPlan) ( if flag&(1< interface. +func (*BuildKeySolver) Name() string { + return "build_keys" +} + +// Optimize implements base.LogicalOptRule.<1st> interface. +func (*BuildKeySolver) Optimize(_ context.Context, p base.LogicalPlan, _ *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) { + planChanged := false + buildKeyInfo(p) + return p, planChanged, nil +} + +// **************************** end implementation of LogicalOptRule interface **************************** + +// buildKeyInfo recursively calls base.LogicalPlan's BuildKeyInfo method. +func buildKeyInfo(lp base.LogicalPlan) { + for _, child := range lp.Children() { + buildKeyInfo(child) + } + childSchema := make([]*expression.Schema, len(lp.Children())) + for i, child := range lp.Children() { + childSchema[i] = child.Schema() + } + lp.BuildKeyInfo(lp.Schema(), childSchema) +} diff --git a/pkg/planner/core/rule/rule_init.go b/pkg/planner/core/rule/rule_init.go new file mode 100644 index 0000000000000..8a7d54907c261 --- /dev/null +++ b/pkg/planner/core/rule/rule_init.go @@ -0,0 +1,12 @@ +package rule + +import "github.com/pingcap/tidb/pkg/planner/core/rule/util" + +// rule/pkg should rely on operator/pkg to do type check and dig in and out, +// rule/util doesn't have to rely on rule/pkg, but it can be put with rule +// handling logic, and be referenced by operator/pkg. +// the core usage only care and call about the rule/pkg and operator/pkg. 
+ +func init() { + util.BuildKeyInfoPortal = buildKeyInfo +} diff --git a/pkg/planner/core/rule/util/BUILD.bazel b/pkg/planner/core/rule/util/BUILD.bazel index ba785f55f0b7f..0aa9d23ab8723 100644 --- a/pkg/planner/core/rule/util/BUILD.bazel +++ b/pkg/planner/core/rule/util/BUILD.bazel @@ -2,8 +2,16 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "util", - srcs = ["misc.go"], + srcs = [ + "build_key_info_misc.go", + "misc.go", + ], importpath = "github.com/pingcap/tidb/pkg/planner/core/rule/util", visibility = ["//visibility:public"], - deps = ["//pkg/expression"], + deps = [ + "//pkg/expression", + "//pkg/parser/model", + "//pkg/parser/mysql", + "//pkg/planner/core/base", + ], ) diff --git a/pkg/planner/core/rule_build_key_info.go b/pkg/planner/core/rule/util/build_key_info_misc.go similarity index 50% rename from pkg/planner/core/rule_build_key_info.go rename to pkg/planner/core/rule/util/build_key_info_misc.go index f96271fe6ae2a..02150281b49a2 100644 --- a/pkg/planner/core/rule_build_key_info.go +++ b/pkg/planner/core/rule/util/build_key_info_misc.go @@ -1,52 +1,15 @@ -// Copyright 2017 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package core +package util import ( - "context" - "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/planner/core/base" - "github.com/pingcap/tidb/pkg/planner/util/optimizetrace" ) -type buildKeySolver struct{} - -func (*buildKeySolver) optimize(_ context.Context, p base.LogicalPlan, _ *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) { - planChanged := false - buildKeyInfo(p) - return p, planChanged, nil -} - -// buildKeyInfo recursively calls base.LogicalPlan's BuildKeyInfo method. -func buildKeyInfo(lp base.LogicalPlan) { - for _, child := range lp.Children() { - buildKeyInfo(child) - } - childSchema := make([]*expression.Schema, len(lp.Children())) - for i, child := range lp.Children() { - childSchema[i] = child.Schema() - } - lp.BuildKeyInfo(lp.Schema(), childSchema) -} - -// If a condition is the form of (uniqueKey = constant) or (uniqueKey = Correlated column), it returns at most one row. -// This function will check it. -func checkMaxOneRowCond(eqColIDs map[int64]struct{}, childSchema *expression.Schema) bool { +// CheckMaxOneRowCond checks whether a condition is in the form of (uniqueKey = constant) or (uniqueKey = +// correlated column), which guarantees that at most one row is returned. +func CheckMaxOneRowCond(eqColIDs map[int64]struct{}, childSchema *expression.Schema) bool { if len(eqColIDs) == 0 { return false } @@ -70,8 +33,8 @@ func checkMaxOneRowCond(eqColIDs map[int64]struct{}, childSchema *expression.Sch return false } -// checkIndexCanBeKey checks whether an Index can be a Key in schema.
-func checkIndexCanBeKey(idx *model.IndexInfo, columns []*model.ColumnInfo, schema *expression.Schema) (uniqueKey, newKey expression.KeyInfo) { +// CheckIndexCanBeKey checks whether an Index can be a Key in schema. +func CheckIndexCanBeKey(idx *model.IndexInfo, columns []*model.ColumnInfo, schema *expression.Schema) (uniqueKey, newKey expression.KeyInfo) { if !idx.Unique { return nil, nil } @@ -110,6 +73,5 @@ func checkIndexCanBeKey(idx *model.IndexInfo, columns []*model.ColumnInfo, schem return nil, nil } -func (*buildKeySolver) name() string { - return "build_keys" -} +// BuildKeyInfoPortal is a hook for other packages to build key info for logical plan. +var BuildKeyInfoPortal func(lp base.LogicalPlan) diff --git a/pkg/planner/core/rule_aggregation_elimination.go b/pkg/planner/core/rule_aggregation_elimination.go index 3aaca664aabc5..02e806535a9f5 100644 --- a/pkg/planner/core/rule_aggregation_elimination.go +++ b/pkg/planner/core/rule_aggregation_elimination.go @@ -29,7 +29,8 @@ import ( "github.com/pingcap/tidb/pkg/types" ) -type aggregationEliminator struct { +// AggregationEliminator is used to eliminate aggregation grouped by unique key. +type AggregationEliminator struct { aggregationEliminateChecker } @@ -256,11 +257,12 @@ func wrapCastFunction(ctx expression.BuildContext, arg expression.Expression, ta return expression.BuildCastFunction(ctx, arg, targetTp) } -func (a *aggregationEliminator) optimize(ctx context.Context, p base.LogicalPlan, opt *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) { +// Optimize implements the base.LogicalOptRule.<0th> interface. +func (a *AggregationEliminator) Optimize(ctx context.Context, p base.LogicalPlan, opt *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) { planChanged := false newChildren := make([]base.LogicalPlan, 0, len(p.Children())) for _, child := range p.Children() { - newChild, planChanged, err := a.optimize(ctx, child, opt) + newChild, planChanged, err := a.Optimize(ctx, child, opt) if err != nil { return nil, planChanged, err } @@ -278,6 +280,7 @@ func (a *aggregationEliminator) optimize(ctx context.Context, p base.LogicalPlan return p, planChanged, nil } -func (*aggregationEliminator) name() string { +// Name implements the base.LogicalOptRule.<1st> interface. +func (*AggregationEliminator) Name() string { return "aggregation_eliminate" } diff --git a/pkg/planner/core/rule_aggregation_push_down.go b/pkg/planner/core/rule_aggregation_push_down.go index 7b20f831163f6..bf55073000b33 100644 --- a/pkg/planner/core/rule_aggregation_push_down.go +++ b/pkg/planner/core/rule_aggregation_push_down.go @@ -26,12 +26,14 @@ import ( "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/planner/core/base" "github.com/pingcap/tidb/pkg/planner/core/operator/logicalop" + ruleutil "github.com/pingcap/tidb/pkg/planner/core/rule/util" "github.com/pingcap/tidb/pkg/planner/util" "github.com/pingcap/tidb/pkg/planner/util/optimizetrace" "github.com/pingcap/tidb/pkg/types" ) -type aggregationPushDownSolver struct { +// AggregationPushDownSolver is a rule that pushes down aggregation functions to the child of LogicalJoin. +type AggregationPushDownSolver struct { aggregationEliminateChecker } @@ -42,7 +44,7 @@ type aggregationPushDownSolver struct { // It's easy to see that max, min, first row is decomposable, no matter whether it's distinct, but sum(distinct) and // count(distinct) is not. // Currently we don't support avg and concat. 
-func (*aggregationPushDownSolver) isDecomposableWithJoin(fun *aggregation.AggFuncDesc) bool { +func (*AggregationPushDownSolver) isDecomposableWithJoin(fun *aggregation.AggFuncDesc) bool { if len(fun.OrderByItems) > 0 { return false } @@ -59,7 +61,7 @@ func (*aggregationPushDownSolver) isDecomposableWithJoin(fun *aggregation.AggFun } } -func (*aggregationPushDownSolver) isDecomposableWithUnion(fun *aggregation.AggFuncDesc) bool { +func (*AggregationPushDownSolver) isDecomposableWithUnion(fun *aggregation.AggFuncDesc) bool { if len(fun.OrderByItems) > 0 { return false } @@ -77,7 +79,7 @@ func (*aggregationPushDownSolver) isDecomposableWithUnion(fun *aggregation.AggFu // getAggFuncChildIdx gets which children it belongs to. // 0 stands for left, 1 stands for right, -1 stands for both, 2 stands for neither (e.g. count(*), sum(1) ...) -func (*aggregationPushDownSolver) getAggFuncChildIdx(aggFunc *aggregation.AggFuncDesc, lSchema, rSchema *expression.Schema) int { +func (*AggregationPushDownSolver) getAggFuncChildIdx(aggFunc *aggregation.AggFuncDesc, lSchema, rSchema *expression.Schema) int { fromLeft, fromRight := false, false var cols []*expression.Column cols = expression.ExtractColumnsFromExpressions(cols, aggFunc.Args, nil) @@ -102,7 +104,7 @@ func (*aggregationPushDownSolver) getAggFuncChildIdx(aggFunc *aggregation.AggFun // collectAggFuncs collects all aggregate functions and splits them into two parts: "leftAggFuncs" and "rightAggFuncs" whose // arguments are all from left child or right child separately. If some aggregate functions have the arguments that have // columns both from left and right children, the whole aggregation is forbidden to push down. -func (a *aggregationPushDownSolver) collectAggFuncs(agg *LogicalAggregation, join *LogicalJoin) (valid bool, leftAggFuncs, rightAggFuncs []*aggregation.AggFuncDesc) { +func (a *AggregationPushDownSolver) collectAggFuncs(agg *LogicalAggregation, join *LogicalJoin) (valid bool, leftAggFuncs, rightAggFuncs []*aggregation.AggFuncDesc) { valid = true leftChild := join.Children()[0] rightChild := join.Children()[1] @@ -145,7 +147,7 @@ func (a *aggregationPushDownSolver) collectAggFuncs(agg *LogicalAggregation, joi // query should be "SELECT SUM(B.agg) FROM A, (SELECT SUM(id) as agg, c1, c2, c3 FROM B GROUP BY id, c1, c2, c3) as B // WHERE A.c1 = B.c1 AND A.c2 != B.c2 GROUP BY B.c3". As you see, all the columns appearing in join-conditions should be // treated as group by columns in join subquery. -func (a *aggregationPushDownSolver) collectGbyCols(agg *LogicalAggregation, join *LogicalJoin) (leftGbyCols, rightGbyCols []*expression.Column) { +func (a *AggregationPushDownSolver) collectGbyCols(agg *LogicalAggregation, join *LogicalJoin) (leftGbyCols, rightGbyCols []*expression.Column) { leftChild := join.Children()[0] ctx := agg.SCtx() for _, gbyExpr := range agg.GroupByItems { @@ -184,7 +186,7 @@ func (a *aggregationPushDownSolver) collectGbyCols(agg *LogicalAggregation, join return } -func (a *aggregationPushDownSolver) splitAggFuncsAndGbyCols(agg *LogicalAggregation, join *LogicalJoin) (valid bool, +func (a *AggregationPushDownSolver) splitAggFuncsAndGbyCols(agg *LogicalAggregation, join *LogicalJoin) (valid bool, leftAggFuncs, rightAggFuncs []*aggregation.AggFuncDesc, leftGbyCols, rightGbyCols []*expression.Column) { valid, leftAggFuncs, rightAggFuncs = a.collectAggFuncs(agg, join) @@ -196,7 +198,7 @@ func (a *aggregationPushDownSolver) splitAggFuncsAndGbyCols(agg *LogicalAggregat } // addGbyCol adds a column to gbyCols. 
If a group by column has existed, it will not be added repeatedly. -func (*aggregationPushDownSolver) addGbyCol(ctx base.PlanContext, gbyCols []*expression.Column, cols ...*expression.Column) []*expression.Column { +func (*AggregationPushDownSolver) addGbyCol(ctx base.PlanContext, gbyCols []*expression.Column, cols ...*expression.Column) []*expression.Column { for _, c := range cols { duplicate := false for _, gbyCol := range gbyCols { @@ -213,13 +215,13 @@ func (*aggregationPushDownSolver) addGbyCol(ctx base.PlanContext, gbyCols []*exp } // checkValidJoin checks if this join should be pushed across. -func (*aggregationPushDownSolver) checkValidJoin(join *LogicalJoin) bool { +func (*AggregationPushDownSolver) checkValidJoin(join *LogicalJoin) bool { return join.JoinType == InnerJoin || join.JoinType == LeftOuterJoin || join.JoinType == RightOuterJoin } // decompose splits an aggregate function to two parts: a final mode function and a partial mode function. Currently // there are no differences between partial mode and complete mode, so we can confuse them. -func (*aggregationPushDownSolver) decompose(ctx base.PlanContext, aggFunc *aggregation.AggFuncDesc, +func (*AggregationPushDownSolver) decompose(ctx base.PlanContext, aggFunc *aggregation.AggFuncDesc, schema *expression.Schema, nullGenerating bool) ([]*aggregation.AggFuncDesc, *expression.Schema) { // Result is a slice because avg should be decomposed to sum and count. Currently we don't process this case. result := []*aggregation.AggFuncDesc{aggFunc.Clone()} @@ -251,7 +253,7 @@ func (*aggregationPushDownSolver) decompose(ctx base.PlanContext, aggFunc *aggre // tryToPushDownAgg tries to push down an aggregate function into a join path. If all aggFuncs are first row, we won't // process it temporarily. If not, We will add additional group by columns and first row functions. We make a new aggregation operator. // If the pushed aggregation is grouped by unique key, it's no need to push it down. 
-func (a *aggregationPushDownSolver) tryToPushDownAgg(oldAgg *LogicalAggregation, aggFuncs []*aggregation.AggFuncDesc, gbyCols []*expression.Column, +func (a *AggregationPushDownSolver) tryToPushDownAgg(oldAgg *LogicalAggregation, aggFuncs []*aggregation.AggFuncDesc, gbyCols []*expression.Column, join *LogicalJoin, childIdx int, blockOffset int, opt *optimizetrace.LogicalOptimizeOp) (_ base.LogicalPlan, err error) { child := join.Children()[childIdx] if aggregation.IsAllFirstRow(aggFuncs) { @@ -292,7 +294,7 @@ func (a *aggregationPushDownSolver) tryToPushDownAgg(oldAgg *LogicalAggregation, return agg, nil } -func (*aggregationPushDownSolver) getDefaultValues(agg *LogicalAggregation) ([]types.Datum, bool) { +func (*AggregationPushDownSolver) getDefaultValues(agg *LogicalAggregation) ([]types.Datum, bool) { defaultValues := make([]types.Datum, 0, agg.Schema().Len()) for _, aggFunc := range agg.AggFuncs { value, existsDefaultValue := aggFunc.EvalNullValueInOuterJoin(agg.SCtx().GetExprCtx(), agg.Children()[0].Schema()) @@ -304,7 +306,7 @@ func (*aggregationPushDownSolver) getDefaultValues(agg *LogicalAggregation) ([]t return defaultValues, true } -func (*aggregationPushDownSolver) checkAnyCountAndSum(aggFuncs []*aggregation.AggFuncDesc) bool { +func (*AggregationPushDownSolver) checkAnyCountAndSum(aggFuncs []*aggregation.AggFuncDesc) bool { for _, fun := range aggFuncs { if fun.Name == ast.AggFuncSum || fun.Name == ast.AggFuncCount { return true @@ -315,7 +317,7 @@ func (*aggregationPushDownSolver) checkAnyCountAndSum(aggFuncs []*aggregation.Ag // checkAllArgsColumn checks whether the args in function are dedicated columns // eg: count(*) or sum(a+1) will return false while count(a) or sum(a) will return true -func (*aggregationPushDownSolver) checkAllArgsColumn(fun *aggregation.AggFuncDesc) bool { +func (*AggregationPushDownSolver) checkAllArgsColumn(fun *aggregation.AggFuncDesc) bool { for _, arg := range fun.Args { _, ok := arg.(*expression.Column) if !ok { @@ -328,7 +330,7 @@ func (*aggregationPushDownSolver) checkAllArgsColumn(fun *aggregation.AggFuncDes // TODO: // 1. https://github.com/pingcap/tidb/issues/16355, push avg & distinct functions across join // 2. remove this method and use splitPartialAgg instead for clean code. -func (a *aggregationPushDownSolver) makeNewAgg(ctx base.PlanContext, aggFuncs []*aggregation.AggFuncDesc, +func (a *AggregationPushDownSolver) makeNewAgg(ctx base.PlanContext, aggFuncs []*aggregation.AggFuncDesc, gbyCols []*expression.Column, preferAggType uint, preferAggToCop bool, blockOffset int, nullGenerating bool) (*LogicalAggregation, error) { agg := LogicalAggregation{ GroupByItems: expression.Column2Exprs(gbyCols), @@ -360,7 +362,7 @@ func (a *aggregationPushDownSolver) makeNewAgg(ctx base.PlanContext, aggFuncs [] return agg, nil } -func (*aggregationPushDownSolver) splitPartialAgg(agg *LogicalAggregation) (pushedAgg *LogicalAggregation) { +func (*AggregationPushDownSolver) splitPartialAgg(agg *LogicalAggregation) (pushedAgg *LogicalAggregation) { partial, final, _ := BuildFinalModeAggregation(agg.SCtx(), &AggInfo{ AggFuncs: agg.AggFuncs, GroupByItems: agg.GroupByItems, @@ -386,7 +388,7 @@ func (*aggregationPushDownSolver) splitPartialAgg(agg *LogicalAggregation) (push // pushAggCrossUnion will try to push the agg down to the union. If the new aggregation's group-by columns doesn't contain unique key. // We will return the new aggregation. Otherwise we will transform the aggregation to projection. 
-func (*aggregationPushDownSolver) pushAggCrossUnion(agg *LogicalAggregation, unionSchema *expression.Schema, unionChild base.LogicalPlan) (base.LogicalPlan, error) { +func (*AggregationPushDownSolver) pushAggCrossUnion(agg *LogicalAggregation, unionSchema *expression.Schema, unionChild base.LogicalPlan) (base.LogicalPlan, error) { ctx := agg.SCtx() newAgg := LogicalAggregation{ AggFuncs: make([]*aggregation.AggFuncDesc, 0, len(agg.AggFuncs)), @@ -437,13 +439,14 @@ func (*aggregationPushDownSolver) pushAggCrossUnion(agg *LogicalAggregation, uni return newAgg, nil } -func (a *aggregationPushDownSolver) optimize(_ context.Context, p base.LogicalPlan, opt *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) { +// Optimize implements the base.LogicalOptRule.<0th> interface. +func (a *AggregationPushDownSolver) Optimize(_ context.Context, p base.LogicalPlan, opt *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) { planChanged := false newLogicalPlan, err := a.aggPushDown(p, opt) return newLogicalPlan, planChanged, err } -func (a *aggregationPushDownSolver) tryAggPushDownForUnion(union *LogicalUnionAll, agg *LogicalAggregation, opt *optimizetrace.LogicalOptimizeOp) error { +func (a *AggregationPushDownSolver) tryAggPushDownForUnion(union *LogicalUnionAll, agg *LogicalAggregation, opt *optimizetrace.LogicalOptimizeOp) error { for _, aggFunc := range agg.AggFuncs { if !a.isDecomposableWithUnion(aggFunc) { return nil @@ -478,7 +481,7 @@ func (a *aggregationPushDownSolver) tryAggPushDownForUnion(union *LogicalUnionAl } // aggPushDown tries to push down aggregate functions to join paths. -func (a *aggregationPushDownSolver) aggPushDown(p base.LogicalPlan, opt *optimizetrace.LogicalOptimizeOp) (_ base.LogicalPlan, err error) { +func (a *AggregationPushDownSolver) aggPushDown(p base.LogicalPlan, opt *optimizetrace.LogicalOptimizeOp) (_ base.LogicalPlan, err error) { if agg, ok := p.(*LogicalAggregation); ok { proj := a.tryToEliminateAggregation(agg, opt) if proj != nil { @@ -518,7 +521,7 @@ func (a *aggregationPushDownSolver) aggPushDown(p base.LogicalPlan, opt *optimiz } else if join.JoinType == RightOuterJoin { util.ResetNotNullFlag(join.Schema(), 0, lChild.Schema().Len()) } - buildKeyInfo(join) + ruleutil.BuildKeyInfoPortal(join) // count(a) -> ifnull(col#x, 0, 1) in rewriteExpr of agg function, since col#x is already the final // pushed-down aggregation's result, we don't need to take every row as count 1 when they don't have // not-null flag in a.tryToEliminateAggregation(oldAgg, opt), which is not suitable here. @@ -550,7 +553,7 @@ func (a *aggregationPushDownSolver) aggPushDown(p base.LogicalPlan, opt *optimiz } if changed { join.SetChildren(lChild, rChild) - buildKeyInfo(join) + ruleutil.BuildKeyInfoPortal(join) } } } else if proj, ok1 := child.(*logicalop.LogicalProjection); ok1 { @@ -683,7 +686,8 @@ func (a *aggregationPushDownSolver) aggPushDown(p base.LogicalPlan, opt *optimiz return p, nil } -func (*aggregationPushDownSolver) name() string { +// Name implements the base.LogicalOptRule.<1st> interface. 
+func (*AggregationPushDownSolver) Name() string { return "aggregation_push_down" } diff --git a/pkg/planner/core/rule_aggregation_skew_rewrite.go b/pkg/planner/core/rule_aggregation_skew_rewrite.go index fca63bc93bcc2..1f11b68996fc9 100644 --- a/pkg/planner/core/rule_aggregation_skew_rewrite.go +++ b/pkg/planner/core/rule_aggregation_skew_rewrite.go @@ -27,7 +27,8 @@ import ( "github.com/pingcap/tidb/pkg/util/intset" ) -type skewDistinctAggRewriter struct { +// SkewDistinctAggRewriter rewrites group distinct aggregate into 2 level aggregates. +type SkewDistinctAggRewriter struct { } // skewDistinctAggRewriter will rewrite group distinct aggregate into 2 level aggregates, e.g.: @@ -49,7 +50,7 @@ type skewDistinctAggRewriter struct { // - The aggregate has 1 and only 1 distinct aggregate function (limited to count, avg, sum) // // This rule is disabled by default. Use tidb_opt_skew_distinct_agg to enable the rule. -func (a *skewDistinctAggRewriter) rewriteSkewDistinctAgg(agg *LogicalAggregation, opt *optimizetrace.LogicalOptimizeOp) base.LogicalPlan { +func (a *SkewDistinctAggRewriter) rewriteSkewDistinctAgg(agg *LogicalAggregation, opt *optimizetrace.LogicalOptimizeOp) base.LogicalPlan { // only group aggregate is applicable if len(agg.GroupByItems) == 0 { return nil @@ -237,7 +238,7 @@ func (a *skewDistinctAggRewriter) rewriteSkewDistinctAgg(agg *LogicalAggregation return proj } -func (*skewDistinctAggRewriter) isQualifiedAgg(aggFunc *aggregation.AggFuncDesc) bool { +func (*SkewDistinctAggRewriter) isQualifiedAgg(aggFunc *aggregation.AggFuncDesc) bool { if aggFunc.Mode != aggregation.CompleteMode { return false } @@ -276,11 +277,12 @@ func appendSkewDistinctAggRewriteTraceStep(agg *LogicalAggregation, result base. opt.AppendStepToCurrent(agg.ID(), agg.TP(), reason, action) } -func (a *skewDistinctAggRewriter) optimize(ctx context.Context, p base.LogicalPlan, opt *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) { +// Optimize implements base.LogicalOptRule.<0th> interface. +func (a *SkewDistinctAggRewriter) Optimize(ctx context.Context, p base.LogicalPlan, opt *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) { planChanged := false newChildren := make([]base.LogicalPlan, 0, len(p.Children())) for _, child := range p.Children() { - newChild, planChanged, err := a.optimize(ctx, child, opt) + newChild, planChanged, err := a.Optimize(ctx, child, opt) if err != nil { return nil, planChanged, err } @@ -297,6 +299,7 @@ func (a *skewDistinctAggRewriter) optimize(ctx context.Context, p base.LogicalPl return p, planChanged, nil } -func (*skewDistinctAggRewriter) name() string { +// Name implements base.LogicalOptRule.<1st> interface. +func (*SkewDistinctAggRewriter) Name() string { return "skew_distinct_agg_rewrite" } diff --git a/pkg/planner/core/rule_collect_plan_stats.go b/pkg/planner/core/rule_collect_plan_stats.go index 2983390083932..46674b36976b0 100644 --- a/pkg/planner/core/rule_collect_plan_stats.go +++ b/pkg/planner/core/rule_collect_plan_stats.go @@ -31,9 +31,11 @@ import ( "go.uber.org/zap" ) -type collectPredicateColumnsPoint struct{} +// CollectPredicateColumnsPoint collects the columns that are used in the predicates. +type CollectPredicateColumnsPoint struct{} -func (collectPredicateColumnsPoint) optimize(_ context.Context, plan base.LogicalPlan, _ *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) { +// Optimize implements LogicalOptRule.<0th> interface. 
+func (CollectPredicateColumnsPoint) Optimize(_ context.Context, plan base.LogicalPlan, _ *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) { planChanged := false if plan.SCtx().GetSessionVars().InRestrictedSQL { return plan, planChanged, nil @@ -72,13 +74,15 @@ func (collectPredicateColumnsPoint) optimize(_ context.Context, plan base.Logica return plan, planChanged, nil } -func (collectPredicateColumnsPoint) name() string { +// Name implements the base.LogicalOptRule.<1st> interface. +func (CollectPredicateColumnsPoint) Name() string { return "collect_predicate_columns_point" } -type syncWaitStatsLoadPoint struct{} +type SyncWaitStatsLoadPoint struct{} -func (syncWaitStatsLoadPoint) optimize(_ context.Context, plan base.LogicalPlan, _ *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) { +// Optimize implements the base.LogicalOptRule.<0th> interface. +func (SyncWaitStatsLoadPoint) Optimize(_ context.Context, plan base.LogicalPlan, _ *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) { planChanged := false if plan.SCtx().GetSessionVars().InRestrictedSQL { return plan, planChanged, nil @@ -90,7 +94,8 @@ func (syncWaitStatsLoadPoint) optimize(_ context.Context, plan base.LogicalPlan, return plan, planChanged, err } -func (syncWaitStatsLoadPoint) name() string { +// Name implements the base.LogicalOptRule.<1st> interface. +func (SyncWaitStatsLoadPoint) Name() string { return "sync_wait_stats_load_point" } diff --git a/pkg/planner/core/rule_column_pruning.go b/pkg/planner/core/rule_column_pruning.go index 58a3507427d2b..43d23b9eaacae 100644 --- a/pkg/planner/core/rule_column_pruning.go +++ b/pkg/planner/core/rule_column_pruning.go @@ -30,10 +30,12 @@ import ( "github.com/pingcap/tidb/pkg/util/intest" ) -type columnPruner struct { +// ColumnPruner is used to prune unnecessary columns. +type ColumnPruner struct { } -func (*columnPruner) optimize(_ context.Context, lp base.LogicalPlan, opt *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) { +// Optimize implements base.LogicalOptRule.<0th> interface. +func (*ColumnPruner) Optimize(_ context.Context, lp base.LogicalPlan, opt *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) { planChanged := false lp, err := lp.PruneColumns(slices.Clone(lp.Schema().Columns), opt) if err != nil { @@ -93,7 +95,8 @@ func pruneByItems(p base.LogicalPlan, old []*util.ByItems, opt *optimizetrace.Lo return } -func (*columnPruner) name() string { +// Name implements base.LogicalOptRule.<1st> interface. +func (*ColumnPruner) Name() string { return "column_prune" } diff --git a/pkg/planner/core/rule_constant_propagation.go b/pkg/planner/core/rule_constant_propagation.go index 437c085d801e9..c9a883dfe5a8f 100644 --- a/pkg/planner/core/rule_constant_propagation.go +++ b/pkg/planner/core/rule_constant_propagation.go @@ -23,7 +23,7 @@ import ( "github.com/pingcap/tidb/pkg/planner/util/optimizetrace" ) -// constantPropagationSolver can support constant propagated cross-query block. +// ConstantPropagationSolver can support constant propagated cross-query block. // This is a logical optimize rule. // It mainly used for the sub query in FromList and propagated the constant predicate // from sub query to outer query. @@ -41,9 +41,10 @@ import ( // Steps 1 and 2 will be called recursively // 3. (ppdSolver in rule_predicate_push_down.go) Push down constant predicate // and propagate constant predicate into other side. 
't.id>1' -type constantPropagationSolver struct { +type ConstantPropagationSolver struct { } +// Optimize implements base.LogicalOptRule.<0th> interface. // **Preorder traversal** of logic tree // Step1: constant propagation current plan node // Step2: optimize all of child @@ -52,7 +53,7 @@ type constantPropagationSolver struct { // which is mainly implemented in the interface "constantPropagation" of LogicalPlan. // Currently only the Logical Join implements this function. (Used for the subquery in FROM List) // In the future, the Logical Apply will implements this function. (Used for the subquery in WHERE or SELECT list) -func (cp *constantPropagationSolver) optimize(_ context.Context, p base.LogicalPlan, opt *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) { +func (cp *ConstantPropagationSolver) Optimize(_ context.Context, p base.LogicalPlan, opt *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) { planChanged := false // constant propagation root plan newRoot := p.ConstantPropagation(nil, 0, opt) @@ -69,7 +70,7 @@ func (cp *constantPropagationSolver) optimize(_ context.Context, p base.LogicalP } // execOptimize optimize constant propagation exclude root plan node -func (cp *constantPropagationSolver) execOptimize(currentPlan base.LogicalPlan, parentPlan base.LogicalPlan, currentChildIdx int, opt *optimizetrace.LogicalOptimizeOp) { +func (cp *ConstantPropagationSolver) execOptimize(currentPlan base.LogicalPlan, parentPlan base.LogicalPlan, currentChildIdx int, opt *optimizetrace.LogicalOptimizeOp) { if parentPlan == nil { // Attention: The function 'execOptimize' could not handle the root plan, so the parent plan could not be nil. return @@ -82,7 +83,8 @@ func (cp *constantPropagationSolver) execOptimize(currentPlan base.LogicalPlan, } } -func (*constantPropagationSolver) name() string { +// Name implements base.LogicalOptRule.<1st> interface. +func (*ConstantPropagationSolver) Name() string { return "constant_propagation" } diff --git a/pkg/planner/core/rule_decorrelate.go b/pkg/planner/core/rule_decorrelate.go index c9e6a2724fe3f..634f435b3e006 100644 --- a/pkg/planner/core/rule_decorrelate.go +++ b/pkg/planner/core/rule_decorrelate.go @@ -105,10 +105,10 @@ func extractOuterApplyCorrelatedColsHelper(p base.PhysicalPlan, outerSchemas []* return newCorCols } -// decorrelateSolver tries to convert apply plan to join plan. -type decorrelateSolver struct{} +// DecorrelateSolver tries to convert apply plan to join plan. +type DecorrelateSolver struct{} -func (*decorrelateSolver) aggDefaultValueMap(agg *LogicalAggregation) map[int]*expression.Constant { +func (*DecorrelateSolver) aggDefaultValueMap(agg *LogicalAggregation) map[int]*expression.Constant { defaultValueMap := make(map[int]*expression.Constant, len(agg.AggFuncs)) for i, f := range agg.AggFuncs { switch f.Name { @@ -121,8 +121,8 @@ func (*decorrelateSolver) aggDefaultValueMap(agg *LogicalAggregation) map[int]*e return defaultValueMap } -// optimize implements logicalOptRule interface. -func (s *decorrelateSolver) optimize(ctx context.Context, p base.LogicalPlan, opt *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) { +// Optimize implements base.LogicalOptRule.<0th> interface. 
+func (s *DecorrelateSolver) Optimize(ctx context.Context, p base.LogicalPlan, opt *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) { planChanged := false if apply, ok := p.(*LogicalApply); ok { outerPlan := apply.Children()[0] @@ -148,13 +148,13 @@ func (s *decorrelateSolver) optimize(ctx context.Context, p base.LogicalPlan, op innerPlan = sel.Children()[0] apply.SetChildren(outerPlan, innerPlan) appendRemoveSelectionTraceStep(apply, sel, opt) - return s.optimize(ctx, p, opt) + return s.Optimize(ctx, p, opt) } else if m, ok := innerPlan.(*logicalop.LogicalMaxOneRow); ok { if m.Children()[0].MaxOneRow() { innerPlan = m.Children()[0] apply.SetChildren(outerPlan, innerPlan) appendRemoveMaxOneRowTraceStep(m, opt) - return s.optimize(ctx, p, opt) + return s.Optimize(ctx, p, opt) } } else if proj, ok := innerPlan.(*logicalop.LogicalProjection); ok { // After the column pruning, some expressions in the projection operator may be pruned. @@ -202,7 +202,7 @@ func (s *decorrelateSolver) optimize(ctx context.Context, p base.LogicalPlan, op proj.SetSchema(apply.Schema()) proj.Exprs = append(expression.Column2Exprs(outerPlan.Schema().Clone().Columns), proj.Exprs...) apply.SetSchema(expression.MergeSchema(outerPlan.Schema(), innerPlan.Schema())) - np, planChanged, err := s.optimize(ctx, p, opt) + np, planChanged, err := s.Optimize(ctx, p, opt) if err != nil { return nil, planChanged, err } @@ -211,7 +211,7 @@ func (s *decorrelateSolver) optimize(ctx context.Context, p base.LogicalPlan, op return proj, planChanged, nil } appendRemoveProjTraceStep(apply, proj, opt) - return s.optimize(ctx, p, opt) + return s.Optimize(ctx, p, opt) } else if li, ok := innerPlan.(*logicalop.LogicalLimit); ok { // The presence of 'limit' in 'exists' will make the plan not optimal, so we need to decorrelate the 'limit' of subquery in optimization. // e.g. select count(*) from test t1 where exists (select value from test t2 where t1.id = t2.id limit 1); When using 'limit' in subquery, the plan will not optimal. 
@@ -228,7 +228,7 @@ func (s *decorrelateSolver) optimize(ctx context.Context, p base.LogicalPlan, op innerPlan = li.Children()[0] apply.SetChildren(outerPlan, innerPlan) appendRemoveLimitTraceStep(li, opt) - return s.optimize(ctx, p, opt) + return s.Optimize(ctx, p, opt) } } else if agg, ok := innerPlan.(*LogicalAggregation); ok { if apply.CanPullUpAgg() && agg.canPullUp() { @@ -278,7 +278,7 @@ func (s *decorrelateSolver) optimize(ctx context.Context, p base.LogicalPlan, op newAggFuncs = append(newAggFuncs, desc) } agg.AggFuncs = newAggFuncs - np, planChanged, err := s.optimize(ctx, p, opt) + np, planChanged, err := s.Optimize(ctx, p, opt) if err != nil { return nil, planChanged, err } @@ -356,7 +356,7 @@ func (s *decorrelateSolver) optimize(ctx context.Context, p base.LogicalPlan, op appendAddProjTraceStep(apply, proj, opt) } appendModifyAggTraceStep(outerPlan, apply, agg, sel, appendedGroupByCols, appendedAggFuncs, eqCondWithCorCol, opt) - return s.optimize(ctx, p, opt) + return s.Optimize(ctx, p, opt) } sel.Conditions = originalExpr apply.CorCols = coreusage.ExtractCorColumnsBySchema4LogicalPlan(apply.Children()[1], apply.Children()[0].Schema()) @@ -368,7 +368,7 @@ func (s *decorrelateSolver) optimize(ctx context.Context, p base.LogicalPlan, op innerPlan = sort.Children()[0] apply.SetChildren(outerPlan, innerPlan) appendRemoveSortTraceStep(sort, opt) - return s.optimize(ctx, p, opt) + return s.Optimize(ctx, p, opt) } } NoOptimize: @@ -378,7 +378,7 @@ NoOptimize: } newChildren := make([]base.LogicalPlan, 0, len(p.Children())) for _, child := range p.Children() { - np, planChanged, err := s.optimize(ctx, child, opt) + np, planChanged, err := s.Optimize(ctx, child, opt) if err != nil { return nil, planChanged, err } @@ -388,7 +388,8 @@ NoOptimize: return p, planChanged, nil } -func (*decorrelateSolver) name() string { +// Name implements base.LogicalOptRule.<1st> interface. +func (*DecorrelateSolver) Name() string { return "decorrelate" } diff --git a/pkg/planner/core/rule_derive_topn_from_window.go b/pkg/planner/core/rule_derive_topn_from_window.go index 5b0ce8e4de7a7..061d54016568b 100644 --- a/pkg/planner/core/rule_derive_topn_from_window.go +++ b/pkg/planner/core/rule_derive_topn_from_window.go @@ -25,8 +25,8 @@ import ( "github.com/pingcap/tidb/pkg/planner/util/optimizetrace" ) -// deriveTopNFromWindow pushes down the topN or limit. In the future we will remove the limit from `requiredProperty` in CBO phase. -type deriveTopNFromWindow struct { +// DeriveTopNFromWindow pushes down the topN or limit. In the future we will remove the limit from `requiredProperty` in CBO phase. +type DeriveTopNFromWindow struct { } func appendDerivedTopNTrace(topN base.LogicalPlan, opt *optimizetrace.LogicalOptimizeOp) { @@ -117,11 +117,13 @@ func windowIsTopN(p *LogicalSelection) (bool, uint64) { return false, 0 } -func (*deriveTopNFromWindow) optimize(_ context.Context, p base.LogicalPlan, opt *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) { +// Optimize implements base.LogicalOptRule.<0th> interface. +func (*DeriveTopNFromWindow) Optimize(_ context.Context, p base.LogicalPlan, opt *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) { planChanged := false return p.DeriveTopN(opt), planChanged, nil } -func (*deriveTopNFromWindow) name() string { +// Name implements base.LogicalOptRule.<1st> interface. 
+func (*DeriveTopNFromWindow) Name() string { return "derive_topn_from_window" } diff --git a/pkg/planner/core/rule_eliminate_projection.go b/pkg/planner/core/rule_eliminate_projection.go index f05e3bea3818f..5020525c48723 100644 --- a/pkg/planner/core/rule_eliminate_projection.go +++ b/pkg/planner/core/rule_eliminate_projection.go @@ -144,21 +144,21 @@ func eliminatePhysicalProjection(p base.PhysicalPlan) base.PhysicalPlan { // The projection eliminate in logical optimize will optimize the projection under the projection, window, agg // The projection eliminate in post optimize will optimize other projection -// For update stmt +// ProjectionEliminator is for update stmt // The projection eliminate in logical optimize has been forbidden. // The projection eliminate in post optimize will optimize the projection under the projection, window, agg (the condition is same as logical optimize) -type projectionEliminator struct { +type ProjectionEliminator struct { } -// optimize implements the logicalOptRule interface. -func (pe *projectionEliminator) optimize(_ context.Context, lp base.LogicalPlan, opt *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) { +// Optimize implements the logicalOptRule interface. +func (pe *ProjectionEliminator) Optimize(_ context.Context, lp base.LogicalPlan, opt *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) { planChanged := false root := pe.eliminate(lp, make(map[string]*expression.Column), false, opt) return root, planChanged, nil } // eliminate eliminates the redundant projection in a logical plan. -func (pe *projectionEliminator) eliminate(p base.LogicalPlan, replace map[string]*expression.Column, canEliminate bool, opt *optimizetrace.LogicalOptimizeOp) base.LogicalPlan { +func (pe *ProjectionEliminator) eliminate(p base.LogicalPlan, replace map[string]*expression.Column, canEliminate bool, opt *optimizetrace.LogicalOptimizeOp) base.LogicalPlan { // LogicalCTE's logical optimization is independent. if _, ok := p.(*LogicalCTE); ok { return p @@ -233,7 +233,8 @@ func ReplaceColumnOfExpr(expr expression.Expression, proj *logicalop.LogicalProj return expr } -func (*projectionEliminator) name() string { +// Name implements the logicalOptRule.<1st> interface. +func (*ProjectionEliminator) Name() string { return "projection_eliminate" } diff --git a/pkg/planner/core/rule_generate_column_substitute.go b/pkg/planner/core/rule_generate_column_substitute.go index 61ddd472612e0..d4b0115225687 100644 --- a/pkg/planner/core/rule_generate_column_substitute.go +++ b/pkg/planner/core/rule_generate_column_substitute.go @@ -28,7 +28,8 @@ import ( h "github.com/pingcap/tidb/pkg/util/hint" ) -type gcSubstituter struct { +// GcSubstituter is used to substitute the expression to indexed virtual generated column in where, group by, order by, and field clause. +type GcSubstituter struct { } // ExprColumnMap is used to store all expressions of indexed generated columns in a table, @@ -36,12 +37,13 @@ type gcSubstituter struct { // thus we can substitute the expression in a query to an indexed generated column. type ExprColumnMap map[expression.Expression]*expression.Column +// Optimize implements base.LogicalOptRule.<0th> interface. // optimize try to replace the expression to indexed virtual generate column in where, group by, order by, and field clause // so that we can use the index on expression. // For example: select a+1 from t order by a+1, with a virtual generate column c as (a+1) and // an index on c. 
We need to replace a+1 with c so that we can use the index on c. // See also https://dev.mysql.com/doc/refman/8.0/en/generated-column-index-optimizations.html -func (gc *gcSubstituter) optimize(ctx context.Context, lp base.LogicalPlan, opt *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) { +func (gc *GcSubstituter) Optimize(ctx context.Context, lp base.LogicalPlan, opt *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) { planChanged := false exprToColumn := make(ExprColumnMap) collectGenerateColumn(lp, exprToColumn) @@ -180,7 +182,7 @@ func substituteExpression(cond expression.Expression, lp base.LogicalPlan, exprT return changed } -func (gc *gcSubstituter) substitute(ctx context.Context, lp base.LogicalPlan, exprToColumn ExprColumnMap, opt *optimizetrace.LogicalOptimizeOp) base.LogicalPlan { +func (gc *GcSubstituter) substitute(ctx context.Context, lp base.LogicalPlan, exprToColumn ExprColumnMap, opt *optimizetrace.LogicalOptimizeOp) base.LogicalPlan { var tp types.EvalType ectx := lp.SCtx().GetExprCtx().GetEvalCtx() switch x := lp.(type) { @@ -232,6 +234,7 @@ func (gc *gcSubstituter) substitute(ctx context.Context, lp base.LogicalPlan, ex return lp } -func (*gcSubstituter) name() string { +// Name implements base.LogicalOptRule.<1st> interface. +func (*GcSubstituter) Name() string { return "generate_column_substitute" } diff --git a/pkg/planner/core/rule_join_elimination.go b/pkg/planner/core/rule_join_elimination.go index 60d6b547ea09f..16b9183f25352 100644 --- a/pkg/planner/core/rule_join_elimination.go +++ b/pkg/planner/core/rule_join_elimination.go @@ -28,7 +28,8 @@ import ( "github.com/pingcap/tidb/pkg/util/set" ) -type outerJoinEliminator struct { +// OuterJoinEliminator is used to eliminate outer join. +type OuterJoinEliminator struct { } // tryToEliminateOuterJoin will eliminate outer join plan base on the following rules @@ -38,7 +39,7 @@ type outerJoinEliminator struct { // 2. outer join elimination with duplicate agnostic aggregate functions: For example left outer join. // If the parent only use the columns from left table with 'distinct' label. The left outer join can // be eliminated. 
-func (o *outerJoinEliminator) tryToEliminateOuterJoin(p *LogicalJoin, aggCols []*expression.Column, parentCols []*expression.Column, opt *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) { +func (o *OuterJoinEliminator) tryToEliminateOuterJoin(p *LogicalJoin, aggCols []*expression.Column, parentCols []*expression.Column, opt *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) { var innerChildIdx int switch p.JoinType { case LeftOuterJoin: @@ -96,7 +97,7 @@ func (o *outerJoinEliminator) tryToEliminateOuterJoin(p *LogicalJoin, aggCols [] } // extract join keys as a schema for inner child of a outer join -func (*outerJoinEliminator) extractInnerJoinKeys(join *LogicalJoin, innerChildIdx int) *expression.Schema { +func (*OuterJoinEliminator) extractInnerJoinKeys(join *LogicalJoin, innerChildIdx int) *expression.Schema { joinKeys := make([]*expression.Column, 0, len(join.EqualConditions)) for _, eqCond := range join.EqualConditions { joinKeys = append(joinKeys, eqCond.GetArgs()[innerChildIdx].(*expression.Column)) @@ -121,7 +122,7 @@ func IsColsAllFromOuterTable(cols []*expression.Column, outerUniqueIDs set.Int64 } // check whether one of unique keys sets is contained by inner join keys -func (*outerJoinEliminator) isInnerJoinKeysContainUniqueKey(innerPlan base.LogicalPlan, joinKeys *expression.Schema) (bool, error) { +func (*OuterJoinEliminator) isInnerJoinKeysContainUniqueKey(innerPlan base.LogicalPlan, joinKeys *expression.Schema) (bool, error) { for _, keyInfo := range innerPlan.Schema().Keys { joinKeysContainKeyInfo := true for _, col := range keyInfo { @@ -138,7 +139,7 @@ func (*outerJoinEliminator) isInnerJoinKeysContainUniqueKey(innerPlan base.Logic } // check whether one of index sets is contained by inner join index -func (*outerJoinEliminator) isInnerJoinKeysContainIndex(innerPlan base.LogicalPlan, joinKeys *expression.Schema) (bool, error) { +func (*OuterJoinEliminator) isInnerJoinKeysContainIndex(innerPlan base.LogicalPlan, joinKeys *expression.Schema) (bool, error) { ds, ok := innerPlan.(*DataSource) if !ok { return false, nil @@ -195,7 +196,7 @@ func GetDupAgnosticAggCols( return true, newAggCols } -func (o *outerJoinEliminator) doOptimize(p base.LogicalPlan, aggCols []*expression.Column, parentCols []*expression.Column, opt *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, error) { +func (o *OuterJoinEliminator) doOptimize(p base.LogicalPlan, aggCols []*expression.Column, parentCols []*expression.Column, opt *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, error) { // CTE's logical optimization is independent. if _, ok := p.(*LogicalCTE); ok { return p, nil @@ -249,13 +250,15 @@ func (o *outerJoinEliminator) doOptimize(p base.LogicalPlan, aggCols []*expressi return p, nil } -func (o *outerJoinEliminator) optimize(_ context.Context, p base.LogicalPlan, opt *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) { +// Optimize implements base.LogicalOptRule.<0th> interface. +func (o *OuterJoinEliminator) Optimize(_ context.Context, p base.LogicalPlan, opt *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) { planChanged := false p, err := o.doOptimize(p, nil, nil, opt) return p, planChanged, err } -func (*outerJoinEliminator) name() string { +// Name implements base.LogicalOptRule.<1st> interface. 
+func (*OuterJoinEliminator) Name() string { return "outer_join_eliminate" } diff --git a/pkg/planner/core/rule_join_reorder.go b/pkg/planner/core/rule_join_reorder.go index 64425c64c76e1..8ee1672787c88 100644 --- a/pkg/planner/core/rule_join_reorder.go +++ b/pkg/planner/core/rule_join_reorder.go @@ -212,7 +212,8 @@ func extractJoinGroup(p base.LogicalPlan) *joinGroupResult { } } -type joinReOrderSolver struct { +// JoinReOrderSolver is used to reorder the join nodes in a logical plan. +type JoinReOrderSolver struct { } type jrNode struct { @@ -225,7 +226,8 @@ type joinTypeWithExtMsg struct { outerBindCondition []expression.Expression } -func (s *joinReOrderSolver) optimize(_ context.Context, p base.LogicalPlan, opt *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) { +// Optimize implements the base.LogicalOptRule.<0th> interface. +func (s *JoinReOrderSolver) Optimize(_ context.Context, p base.LogicalPlan, opt *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) { planChanged := false tracer := &joinReorderTrace{cost: map[string]float64{}, opt: opt} tracer.traceJoinReorder(p) @@ -236,7 +238,7 @@ func (s *joinReOrderSolver) optimize(_ context.Context, p base.LogicalPlan, opt } // optimizeRecursive recursively collects join groups and applies join reorder algorithm for each group. -func (s *joinReOrderSolver) optimizeRecursive(ctx base.PlanContext, p base.LogicalPlan, tracer *joinReorderTrace) (base.LogicalPlan, error) { +func (s *JoinReOrderSolver) optimizeRecursive(ctx base.PlanContext, p base.LogicalPlan, tracer *joinReorderTrace) (base.LogicalPlan, error) { if _, ok := p.(*LogicalCTE); ok { return p, nil } @@ -687,7 +689,8 @@ func (*baseSingleGroupJoinOrderSolver) calcJoinCumCost(join base.LogicalPlan, lN return join.StatsInfo().RowCount + lNode.cumCost + rNode.cumCost } -func (*joinReOrderSolver) name() string { +// Name implements the base.LogicalOptRule.<1st> interface. +func (*JoinReOrderSolver) Name() string { return "join_reorder" } diff --git a/pkg/planner/core/rule_max_min_eliminate.go b/pkg/planner/core/rule_max_min_eliminate.go index c24f388942b9f..58a509a05ebf6 100644 --- a/pkg/planner/core/rule_max_min_eliminate.go +++ b/pkg/planner/core/rule_max_min_eliminate.go @@ -32,20 +32,21 @@ import ( "github.com/pingcap/tidb/pkg/util/ranger" ) -// maxMinEliminator tries to eliminate max/min aggregate function. +// MaxMinEliminator tries to eliminate max/min aggregate function. // For SQL like `select max(id) from t;`, we could optimize it to `select max(id) from (select id from t order by id desc limit 1 where id is not null) t;`. // For SQL like `select min(id) from t;`, we could optimize it to `select min(id) from (select id from t order by id limit 1 where id is not null) t;`. // For SQL like `select max(id), min(id) from t;`, we could optimize it to the cartesianJoin result of the two queries above if `id` has an index. -type maxMinEliminator struct { +type MaxMinEliminator struct { } -func (a *maxMinEliminator) optimize(_ context.Context, p base.LogicalPlan, opt *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) { +// Optimize implements base.LogicalOptRule.<0th> interface. +func (a *MaxMinEliminator) Optimize(_ context.Context, p base.LogicalPlan, opt *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) { planChanged := false return a.eliminateMaxMin(p, opt), planChanged, nil } // composeAggsByInnerJoin composes the scalar aggregations by cartesianJoin. 
-func (*maxMinEliminator) composeAggsByInnerJoin(originAgg *LogicalAggregation, aggs []*LogicalAggregation, opt *optimizetrace.LogicalOptimizeOp) (plan base.LogicalPlan) { +func (*MaxMinEliminator) composeAggsByInnerJoin(originAgg *LogicalAggregation, aggs []*LogicalAggregation, opt *optimizetrace.LogicalOptimizeOp) (plan base.LogicalPlan) { plan = aggs[0] sctx := plan.SCtx() joins := make([]*LogicalJoin, 0) @@ -64,7 +65,7 @@ func (*maxMinEliminator) composeAggsByInnerJoin(originAgg *LogicalAggregation, a // checkColCanUseIndex checks whether there is an AccessPath satisfy the conditions: // 1. all of the selection's condition can be pushed down as AccessConds of the path. // 2. the path can keep order for `col` after pushing down the conditions. -func (a *maxMinEliminator) checkColCanUseIndex(plan base.LogicalPlan, col *expression.Column, conditions []expression.Expression) bool { +func (a *MaxMinEliminator) checkColCanUseIndex(plan base.LogicalPlan, col *expression.Column, conditions []expression.Expression) bool { switch p := plan.(type) { case *LogicalSelection: conditions = append(conditions, p.Conditions...) @@ -108,7 +109,7 @@ func (a *maxMinEliminator) checkColCanUseIndex(plan base.LogicalPlan, col *expre // cloneSubPlans shallow clones the subPlan. We only consider `Selection` and `DataSource` here, // because we have restricted the subPlan in `checkColCanUseIndex`. -func (a *maxMinEliminator) cloneSubPlans(plan base.LogicalPlan) base.LogicalPlan { +func (a *MaxMinEliminator) cloneSubPlans(plan base.LogicalPlan) base.LogicalPlan { switch p := plan.(type) { case *LogicalSelection: newConditions := make([]expression.Expression, len(p.Conditions)) @@ -141,7 +142,7 @@ func (a *maxMinEliminator) cloneSubPlans(plan base.LogicalPlan) base.LogicalPlan // `select max(a) from t` + `select min(a) from t` + `select max(b) from t`. // Then we check whether `a` and `b` have indices. If any of the used column has no index, we cannot eliminate // this aggregation. -func (a *maxMinEliminator) splitAggFuncAndCheckIndices(agg *LogicalAggregation, opt *optimizetrace.LogicalOptimizeOp) (aggs []*LogicalAggregation, canEliminate bool) { +func (a *MaxMinEliminator) splitAggFuncAndCheckIndices(agg *LogicalAggregation, opt *optimizetrace.LogicalOptimizeOp) (aggs []*LogicalAggregation, canEliminate bool) { for _, f := range agg.AggFuncs { // We must make sure the args of max/min is a simple single column. col, ok := f.Args[0].(*expression.Column) @@ -173,7 +174,7 @@ func (a *maxMinEliminator) splitAggFuncAndCheckIndices(agg *LogicalAggregation, } // eliminateSingleMaxMin tries to convert a single max/min to Limit+Sort operators. -func (*maxMinEliminator) eliminateSingleMaxMin(agg *LogicalAggregation, opt *optimizetrace.LogicalOptimizeOp) *LogicalAggregation { +func (*MaxMinEliminator) eliminateSingleMaxMin(agg *LogicalAggregation, opt *optimizetrace.LogicalOptimizeOp) *LogicalAggregation { f := agg.AggFuncs[0] child := agg.Children()[0] ctx := agg.SCtx() @@ -214,7 +215,7 @@ func (*maxMinEliminator) eliminateSingleMaxMin(agg *LogicalAggregation, opt *opt } // eliminateMaxMin tries to convert max/min to Limit+Sort operators. -func (a *maxMinEliminator) eliminateMaxMin(p base.LogicalPlan, opt *optimizetrace.LogicalOptimizeOp) base.LogicalPlan { +func (a *MaxMinEliminator) eliminateMaxMin(p base.LogicalPlan, opt *optimizetrace.LogicalOptimizeOp) base.LogicalPlan { // CTE's logical optimization is indenpent. 
if _, ok := p.(*LogicalCTE); ok { return p @@ -260,7 +261,8 @@ func (a *maxMinEliminator) eliminateMaxMin(p base.LogicalPlan, opt *optimizetrac return p } -func (*maxMinEliminator) name() string { +// Name implements base.LogicalOptRule.<1st> interface. +func (*MaxMinEliminator) Name() string { return "max_min_eliminate" } diff --git a/pkg/planner/core/rule_outer_to_inner_join.go b/pkg/planner/core/rule_outer_to_inner_join.go index 433739752992c..4ef8860014560 100644 --- a/pkg/planner/core/rule_outer_to_inner_join.go +++ b/pkg/planner/core/rule_outer_to_inner_join.go @@ -35,10 +35,11 @@ func mergeOnClausePredicates(p *LogicalJoin, predicates []expression.Expression) return combinedCond } -// convertOuterToInnerJoin converts outer to inner joins if the unmtaching rows are filtered. -type convertOuterToInnerJoin struct { +// ConvertOuterToInnerJoin converts outer to inner joins if the unmatched rows are filtered. +type ConvertOuterToInnerJoin struct { } +// Optimize implements base.LogicalOptRule.<0th> interface. // convertOuterToInnerJoin is refactoring of the outer to inner join logic that used to be part of predicate push down. // The rewrite passes down predicates from selection (WHERE clause) and join predicates (ON clause). // All nodes except LogicalJoin are pass through where the rewrite is done for the child and nothing for the node itself. @@ -50,7 +51,7 @@ type convertOuterToInnerJoin struct { // - For inner/semi joins, the ON clause can be applied on both children // - For anti semi joins, ON clause applied only on left side // - For all other cases, do not pass ON clause. -func (*convertOuterToInnerJoin) optimize(_ context.Context, p base.LogicalPlan, _ *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) { +func (*ConvertOuterToInnerJoin) Optimize(_ context.Context, p base.LogicalPlan, _ *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) { planChanged := false return p.ConvertOuterToInnerJoin(nil), planChanged, nil } @@ -59,6 +60,7 @@ func (*convertOuterToInnerJoin) optimize(_ context.Context, p base.LogicalPlan, // Also, predicates involving aggregate expressions are not null filtering. IsNullReject always returns // false for those cases. -func (*convertOuterToInnerJoin) name() string { +// Name implements base.LogicalOptRule.<1st> interface. +func (*ConvertOuterToInnerJoin) Name() string { return "convert_outer_to_inner_joins" } diff --git a/pkg/planner/core/rule_partition_processor.go b/pkg/planner/core/rule_partition_processor.go index c066ad9751ae0..ef16fd88ff399 100644 --- a/pkg/planner/core/rule_partition_processor.go +++ b/pkg/planner/core/rule_partition_processor.go @@ -47,7 +47,7 @@ import ( // FullRange represent used all partitions. const FullRange = -1 -// partitionProcessor rewrites the ast for table partition. +// PartitionProcessor rewrites the ast for table partition. // Used by static partition prune mode. /* // create table t (id int) partition by range (id) @@ -61,16 +61,22 @@ const FullRange = -1 // select * from p2 where id < 20 // select * from p3 where id < 30) */ -// partitionProcessor is here because it's easier to prune partition after predicate push down. -type partitionProcessor struct{} +// PartitionProcessor is here because it's easier to prune partition after predicate push down.
+type PartitionProcessor struct{} -func (s *partitionProcessor) optimize(_ context.Context, lp base.LogicalPlan, opt *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) { +// Optimize implements the LogicalOptRule.<0th> interface. +func (s *PartitionProcessor) Optimize(_ context.Context, lp base.LogicalPlan, opt *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) { planChanged := false p, err := s.rewriteDataSource(lp, opt) return p, planChanged, err } -func (s *partitionProcessor) rewriteDataSource(lp base.LogicalPlan, opt *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, error) { +// Name implements the LogicalOptRule.<1st> interface. +func (*PartitionProcessor) Name() string { + return "partition_processor" +} + +func (s *PartitionProcessor) rewriteDataSource(lp base.LogicalPlan, opt *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, error) { // Assert there will not be sel -> sel in the ast. switch p := lp.(type) { case *DataSource: @@ -147,7 +153,7 @@ func getPartColumnsForHashPartition(hashExpr expression.Expression) ([]*expressi return partCols, colLen } -func (s *partitionProcessor) getUsedHashPartitions(ctx base.PlanContext, +func (s *PartitionProcessor) getUsedHashPartitions(ctx base.PlanContext, tbl table.Table, partitionNames []model.CIStr, columns []*expression.Column, conds []expression.Expression, names types.NameSlice) ([]int, error) { pi := tbl.Meta().Partition @@ -265,7 +271,7 @@ func (s *partitionProcessor) getUsedHashPartitions(ctx base.PlanContext, return used, nil } -func (s *partitionProcessor) getUsedKeyPartitions(ctx base.PlanContext, +func (s *PartitionProcessor) getUsedKeyPartitions(ctx base.PlanContext, tbl table.Table, partitionNames []model.CIStr, columns []*expression.Column, conds []expression.Expression, _ types.NameSlice) ([]int, error) { pi := tbl.Meta().Partition @@ -373,7 +379,7 @@ func (s *partitionProcessor) getUsedKeyPartitions(ctx base.PlanContext, } // getUsedPartitions is used to get used partitions for hash or key partition tables -func (s *partitionProcessor) getUsedPartitions(ctx base.PlanContext, tbl table.Table, +func (s *PartitionProcessor) getUsedPartitions(ctx base.PlanContext, tbl table.Table, partitionNames []model.CIStr, columns []*expression.Column, conds []expression.Expression, names types.NameSlice, partType model.PartitionType) ([]int, error) { if partType == model.PartitionTypeHash { @@ -384,7 +390,7 @@ func (s *partitionProcessor) getUsedPartitions(ctx base.PlanContext, tbl table.T // findUsedPartitions is used to get used partitions for hash or key partition tables. // The first returning is the used partition index set pruned by `conds`. 
-func (s *partitionProcessor) findUsedPartitions(ctx base.PlanContext, +func (s *PartitionProcessor) findUsedPartitions(ctx base.PlanContext, tbl table.Table, partitionNames []model.CIStr, conds []expression.Expression, columns []*expression.Column, names types.NameSlice) ([]int, error) { pi := tbl.Meta().Partition @@ -407,7 +413,7 @@ func (s *partitionProcessor) findUsedPartitions(ctx base.PlanContext, return ret, nil } -func (s *partitionProcessor) convertToIntSlice(or partitionRangeOR, pi *model.PartitionInfo, partitionNames []model.CIStr) []int { +func (s *PartitionProcessor) convertToIntSlice(or partitionRangeOR, pi *model.PartitionInfo, partitionNames []model.CIStr) []int { if len(or) == 1 && or[0].start == 0 && or[0].end == len(pi.Definitions) { if len(partitionNames) == 0 { if len(pi.Definitions) == 1 { @@ -441,7 +447,7 @@ func convertToRangeOr(used []int, pi *model.PartitionInfo) partitionRangeOR { } // pruneHashOrKeyPartition is used to prune hash or key partition tables -func (s *partitionProcessor) pruneHashOrKeyPartition(ctx base.PlanContext, tbl table.Table, partitionNames []model.CIStr, +func (s *PartitionProcessor) pruneHashOrKeyPartition(ctx base.PlanContext, tbl table.Table, partitionNames []model.CIStr, conds []expression.Expression, columns []*expression.Column, names types.NameSlice) ([]int, error) { used, err := s.findUsedPartitions(ctx, tbl, partitionNames, conds, columns, names) if err != nil { @@ -453,7 +459,7 @@ func (s *partitionProcessor) pruneHashOrKeyPartition(ctx base.PlanContext, tbl t // reconstructTableColNames reconstructs FieldsNames according to ds.TblCols. // ds.names may not match ds.TblCols since ds.names is pruned while ds.TblCols contains all original columns. // please see https://github.com/pingcap/tidb/issues/22635 for more details. -func (*partitionProcessor) reconstructTableColNames(ds *DataSource) ([]*types.FieldName, error) { +func (*PartitionProcessor) reconstructTableColNames(ds *DataSource) ([]*types.FieldName, error) { names := make([]*types.FieldName, 0, len(ds.TblCols)) // Use DeletableCols to get all the columns. colsInfo := ds.table.DeletableCols() @@ -497,7 +503,7 @@ func (*partitionProcessor) reconstructTableColNames(ds *DataSource) ([]*types.Fi return names, nil } -func (s *partitionProcessor) processHashOrKeyPartition(ds *DataSource, pi *model.PartitionInfo, opt *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, error) { +func (s *PartitionProcessor) processHashOrKeyPartition(ds *DataSource, pi *model.PartitionInfo, opt *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, error) { names, err := s.reconstructTableColNames(ds) if err != nil { return nil, err @@ -519,7 +525,7 @@ func (s *partitionProcessor) processHashOrKeyPartition(ds *DataSource, pi *model // listPartitionPruner uses to prune partition for list partition. 
type listPartitionPruner struct { - *partitionProcessor + *PartitionProcessor ctx base.PlanContext pi *model.PartitionInfo partitionNames []model.CIStr @@ -527,7 +533,7 @@ type listPartitionPruner struct { listPrune *tables.ForListPruning } -func newListPartitionPruner(ctx base.PlanContext, tbl table.Table, partitionNames []model.CIStr, s *partitionProcessor, pruneList *tables.ForListPruning, columns []*expression.Column) *listPartitionPruner { +func newListPartitionPruner(ctx base.PlanContext, tbl table.Table, partitionNames []model.CIStr, s *PartitionProcessor, pruneList *tables.ForListPruning, columns []*expression.Column) *listPartitionPruner { pruneList = pruneList.Clone() for i := range pruneList.PruneExprCols { for j := range columns { @@ -548,7 +554,7 @@ func newListPartitionPruner(ctx base.PlanContext, tbl table.Table, partitionName fullRange := make(map[int]struct{}) fullRange[FullRange] = struct{}{} return &listPartitionPruner{ - partitionProcessor: s, + PartitionProcessor: s, ctx: ctx, pi: tbl.Meta().Partition, partitionNames: partitionNames, @@ -782,7 +788,7 @@ func (l *listPartitionPruner) findUsedListPartitions(conds []expression.Expressi return used, nil } -func (s *partitionProcessor) findUsedListPartitions(ctx base.PlanContext, tbl table.Table, partitionNames []model.CIStr, +func (s *PartitionProcessor) findUsedListPartitions(ctx base.PlanContext, tbl table.Table, partitionNames []model.CIStr, conds []expression.Expression, columns []*expression.Column) ([]int, error) { pi := tbl.Meta().Partition partExpr := tbl.(partitionTable).PartitionExpr() @@ -810,7 +816,7 @@ func (s *partitionProcessor) findUsedListPartitions(ctx base.PlanContext, tbl ta return ret, nil } -func (s *partitionProcessor) pruneListPartition(ctx base.PlanContext, tbl table.Table, partitionNames []model.CIStr, +func (s *PartitionProcessor) pruneListPartition(ctx base.PlanContext, tbl table.Table, partitionNames []model.CIStr, conds []expression.Expression, columns []*expression.Column) ([]int, error) { used, err := s.findUsedListPartitions(ctx, tbl, partitionNames, conds, columns) if err != nil { @@ -819,7 +825,7 @@ func (s *partitionProcessor) pruneListPartition(ctx base.PlanContext, tbl table. return used, nil } -func (s *partitionProcessor) prune(ds *DataSource, opt *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, error) { +func (s *PartitionProcessor) prune(ds *DataSource, opt *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, error) { pi := ds.TableInfo.GetPartitionInfo() if pi == nil { return ds, nil @@ -848,7 +854,7 @@ func (s *partitionProcessor) prune(ds *DataSource, opt *optimizetrace.LogicalOpt } // findByName checks whether object name exists in list. 
-func (*partitionProcessor) findByName(partitionNames []model.CIStr, partitionName string) bool { +func (*PartitionProcessor) findByName(partitionNames []model.CIStr, partitionName string) bool { for _, s := range partitionNames { if s.L == partitionName { return true @@ -857,10 +863,6 @@ func (*partitionProcessor) findByName(partitionNames []model.CIStr, partitionNam return false } -func (*partitionProcessor) name() string { - return "partition_processor" -} - type lessThanDataInt struct { data []int64 unsigned bool @@ -996,7 +998,7 @@ func intersectionRange(start, end, newStart, newEnd int) (s int, e int) { return s, e } -func (s *partitionProcessor) pruneRangePartition(ctx base.PlanContext, pi *model.PartitionInfo, tbl table.PartitionedTable, conds []expression.Expression, +func (s *PartitionProcessor) pruneRangePartition(ctx base.PlanContext, pi *model.PartitionInfo, tbl table.PartitionedTable, conds []expression.Expression, columns []*expression.Column, names types.NameSlice) (partitionRangeOR, error) { partExpr := tbl.(partitionTable).PartitionExpr() @@ -1032,7 +1034,7 @@ func (s *partitionProcessor) pruneRangePartition(ctx base.PlanContext, pi *model return result, nil } -func (s *partitionProcessor) processRangePartition(ds *DataSource, pi *model.PartitionInfo, opt *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, error) { +func (s *PartitionProcessor) processRangePartition(ds *DataSource, pi *model.PartitionInfo, opt *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, error) { used, err := s.pruneRangePartition(ds.SCtx(), pi, ds.table.(table.PartitionedTable), ds.AllConds, ds.TblCols, ds.OutputNames()) if err != nil { return nil, err @@ -1040,7 +1042,7 @@ func (s *partitionProcessor) processRangePartition(ds *DataSource, pi *model.Par return s.makeUnionAllChildren(ds, pi, used, opt) } -func (s *partitionProcessor) processListPartition(ds *DataSource, pi *model.PartitionInfo, opt *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, error) { +func (s *PartitionProcessor) processListPartition(ds *DataSource, pi *model.PartitionInfo, opt *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, error) { used, err := s.pruneListPartition(ds.SCtx(), ds.table, ds.PartitionNames, ds.AllConds, ds.TblCols) if err != nil { return nil, err @@ -1647,7 +1649,7 @@ func pruneUseBinarySearch(lessThan lessThanDataInt, data dataForPrune) (start in return start, end } -func (*partitionProcessor) resolveAccessPaths(ds *DataSource) error { +func (*PartitionProcessor) resolveAccessPaths(ds *DataSource) error { possiblePaths, err := getPossibleAccessPaths( ds.SCtx(), &h.PlanHints{IndexMergeHintList: ds.IndexMergeHints, IndexHintList: ds.IndexHints}, ds.AstIndexHints, ds.table, ds.DBName, ds.TableInfo.Name, ds.IsForUpdateRead, true) @@ -1662,7 +1664,7 @@ func (*partitionProcessor) resolveAccessPaths(ds *DataSource) error { return nil } -func (s *partitionProcessor) resolveOptimizeHint(ds *DataSource, partitionName model.CIStr) error { +func (s *PartitionProcessor) resolveOptimizeHint(ds *DataSource, partitionName model.CIStr) error { // index hint if len(ds.IndexHints) > 0 { newIndexHint := make([]h.HintedIndex, 0, len(ds.IndexHints)) @@ -1747,7 +1749,7 @@ func appendWarnForUnknownPartitions(ctx base.PlanContext, hintName string, unkno ctx.GetSessionVars().StmtCtx.SetHintWarningFromError(warning) } -func (*partitionProcessor) checkHintsApplicable(ds *DataSource, partitionSet set.StringSet) { +func (*PartitionProcessor) checkHintsApplicable(ds *DataSource, partitionSet set.StringSet) { for _, idxHint := 
range ds.IndexHints { unknownPartitions := checkTableHintsApplicableForPartition(idxHint.Partitions, partitionSet) appendWarnForUnknownPartitions(ds.SCtx(), h.Restore2IndexHint(idxHint.HintTypeString(), idxHint), unknownPartitions) @@ -1762,7 +1764,7 @@ func (*partitionProcessor) checkHintsApplicable(ds *DataSource, partitionSet set appendWarnForUnknownPartitions(ds.SCtx(), h.HintReadFromStorage, unknownPartitions) } -func (s *partitionProcessor) makeUnionAllChildren(ds *DataSource, pi *model.PartitionInfo, or partitionRangeOR, opt *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, error) { +func (s *PartitionProcessor) makeUnionAllChildren(ds *DataSource, pi *model.PartitionInfo, or partitionRangeOR, opt *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, error) { children := make([]base.LogicalPlan, 0, len(pi.Definitions)) partitionNameSet := make(set.StringSet) usedDefinition := make(map[int64]model.PartitionDefinition) @@ -1819,7 +1821,7 @@ func (s *partitionProcessor) makeUnionAllChildren(ds *DataSource, pi *model.Part return unionAll, nil } -func (*partitionProcessor) pruneRangeColumnsPartition(ctx base.PlanContext, conds []expression.Expression, pi *model.PartitionInfo, pe *tables.PartitionExpr, columns []*expression.Column) (partitionRangeOR, error) { +func (*PartitionProcessor) pruneRangeColumnsPartition(ctx base.PlanContext, conds []expression.Expression, pi *model.PartitionInfo, pe *tables.PartitionExpr, columns []*expression.Column) (partitionRangeOR, error) { result := fullRange(len(pi.Definitions)) if len(pi.Columns) < 1 { diff --git a/pkg/planner/core/rule_predicate_push_down.go b/pkg/planner/core/rule_predicate_push_down.go index de20629cda035..99eaefc3e2429 100644 --- a/pkg/planner/core/rule_predicate_push_down.go +++ b/pkg/planner/core/rule_predicate_push_down.go @@ -30,7 +30,7 @@ import ( "github.com/pingcap/tidb/pkg/util/ranger" ) -type ppdSolver struct{} +type PPDSolver struct{} // exprPrefixAdder is the wrapper struct to add tidb_shard(x) = val for `OrigConds` // `cols` is the index columns for a unique shard index @@ -41,7 +41,8 @@ type exprPrefixAdder struct { lengths []int } -func (*ppdSolver) optimize(_ context.Context, lp base.LogicalPlan, opt *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) { +// Optimize implements base.LogicalOptRule.<0th> interface. +func (*PPDSolver) Optimize(_ context.Context, lp base.LogicalPlan, opt *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) { planChanged := false _, p := lp.PredicatePushDown(nil, opt) return p, planChanged, nil @@ -195,7 +196,8 @@ func DeleteTrueExprs(p base.LogicalPlan, conds []expression.Expression) []expres return newConds } -func (*ppdSolver) name() string { +// Name implements base.LogicalOptRule.<1st> interface. +func (*PPDSolver) Name() string { return "predicate_push_down" } diff --git a/pkg/planner/core/rule_predicate_simplification.go b/pkg/planner/core/rule_predicate_simplification.go index cd663bf003804..dd0c801489b8f 100644 --- a/pkg/planner/core/rule_predicate_simplification.go +++ b/pkg/planner/core/rule_predicate_simplification.go @@ -24,9 +24,9 @@ import ( "github.com/pingcap/tidb/pkg/planner/util/optimizetrace" ) -// predicateSimplification consolidates different predcicates on a column and its equivalence classes. Initial out is for +// PredicateSimplification consolidates different predicates on a column and its equivalence classes. Initial out is for // in-list and not equal list intersection.
-type predicateSimplification struct { +type PredicateSimplification struct { } type predicateType = byte @@ -65,7 +65,8 @@ func findPredicateType(expr expression.Expression) (*expression.Column, predicat return nil, otherPredicate } -func (*predicateSimplification) optimize(_ context.Context, p base.LogicalPlan, opt *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) { +// Optimize implements base.LogicalOptRule.<0th> interface. +func (*PredicateSimplification) Optimize(_ context.Context, p base.LogicalPlan, opt *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) { planChanged := false return p.PredicateSimplification(opt), planChanged, nil } @@ -146,6 +147,7 @@ func applyPredicateSimplification(sctx base.PlanContext, predicates []expression return newValues } -func (*predicateSimplification) name() string { +// Name implements base.LogicalOptRule.<1st> interface. +func (*PredicateSimplification) Name() string { return "predicate_simplification" } diff --git a/pkg/planner/core/rule_push_down_sequence.go b/pkg/planner/core/rule_push_down_sequence.go index 7fcffce8b1e50..69d7f97c9a7cb 100644 --- a/pkg/planner/core/rule_push_down_sequence.go +++ b/pkg/planner/core/rule_push_down_sequence.go @@ -22,19 +22,22 @@ import ( "github.com/pingcap/tidb/pkg/planner/util/optimizetrace" ) -type pushDownSequenceSolver struct { +// PushDownSequenceSolver is used to push down sequence. +type PushDownSequenceSolver struct { } -func (*pushDownSequenceSolver) name() string { +// Name implements the base.LogicalOptRule.<1st> interface. +func (*PushDownSequenceSolver) Name() string { return "push_down_sequence" } -func (pdss *pushDownSequenceSolver) optimize(_ context.Context, lp base.LogicalPlan, _ *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) { +// Optimize implements the base.LogicalOptRule.<0th> interface. +func (pdss *PushDownSequenceSolver) Optimize(_ context.Context, lp base.LogicalPlan, _ *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) { planChanged := false return pdss.recursiveOptimize(nil, lp), planChanged, nil } -func (pdss *pushDownSequenceSolver) recursiveOptimize(pushedSequence *logicalop.LogicalSequence, lp base.LogicalPlan) base.LogicalPlan { +func (pdss *PushDownSequenceSolver) recursiveOptimize(pushedSequence *logicalop.LogicalSequence, lp base.LogicalPlan) base.LogicalPlan { _, ok := lp.(*logicalop.LogicalSequence) if !ok && pushedSequence == nil { newChildren := make([]base.LogicalPlan, 0, len(lp.Children())) diff --git a/pkg/planner/core/rule_resolve_grouping_expand.go b/pkg/planner/core/rule_resolve_grouping_expand.go index 27d886583d2ec..363eca21fa10b 100644 --- a/pkg/planner/core/rule_resolve_grouping_expand.go +++ b/pkg/planner/core/rule_resolve_grouping_expand.go @@ -51,10 +51,11 @@ import ( // to achieve this similar effect, put it in the last logical optimizing phase is much // more reasonable. -// resolveExpand generating Expand projection list when all the logical optimization is done. -type resolveExpand struct { +// ResolveExpand generating Expand projection list when all the logical optimization is done. +type ResolveExpand struct { } +// Optimize implements the base.LogicalOptRule.<0th> interface. // By now, rollup syntax will build a LogicalExpand from bottom up. 
In LogicalExpand itself, its schema out should be 3 parts: // // +---------------------------------------------------------------------+ @@ -75,7 +76,7 @@ type resolveExpand struct { // (upper required) (grouping sets columns appended) // // Expand operator itself is kind like a projection, while difference is that it has a multi projection list, named as leveled projection. -func (*resolveExpand) optimize(_ context.Context, p base.LogicalPlan, opt *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) { +func (*ResolveExpand) Optimize(_ context.Context, p base.LogicalPlan, opt *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) { planChanged := false // As you see, Expand's leveled projection should be built after all column-prune is done. So we just make generating-leveled-projection // as the last rule of logical optimization, which is more clear. (spark has column prune action before building expand) @@ -83,7 +84,8 @@ func (*resolveExpand) optimize(_ context.Context, p base.LogicalPlan, opt *optim return newLogicalPlan, planChanged, err } -func (*resolveExpand) name() string { +// Name implements the base.LogicalOptRule.<1st> interface. +func (*ResolveExpand) Name() string { return "resolve_expand" } diff --git a/pkg/planner/core/rule_result_reorder.go b/pkg/planner/core/rule_result_reorder.go index 484acd562a87a..28eca5ffc5850 100644 --- a/pkg/planner/core/rule_result_reorder.go +++ b/pkg/planner/core/rule_result_reorder.go @@ -25,7 +25,7 @@ import ( ) /* -resultReorder reorder query results. +ResultReorder reorder query results. NOTE: it's not a common rule for all queries, it's specially implemented for a few customers. Results of some queries are not ordered, for example: @@ -39,10 +39,11 @@ This rule reorders results by modifying or injecting a Sort operator: 2.1. if it's a Sort, update it by appending all output columns into its order-by list, 2.2. otherwise, inject a new Sort upon this operator. */ -type resultReorder struct { +type ResultReorder struct { } -func (rs *resultReorder) optimize(_ context.Context, lp base.LogicalPlan, _ *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) { +// Optimize implements base.LogicalOptRule.<0th> interface. 
+func (rs *ResultReorder) Optimize(_ context.Context, lp base.LogicalPlan, _ *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) { planChanged := false ordered := rs.completeSort(lp) if !ordered { @@ -51,7 +52,7 @@ func (rs *resultReorder) optimize(_ context.Context, lp base.LogicalPlan, _ *opt return lp, planChanged, nil } -func (rs *resultReorder) completeSort(lp base.LogicalPlan) bool { +func (rs *ResultReorder) completeSort(lp base.LogicalPlan) bool { if rs.isInputOrderKeeper(lp) { if len(lp.Children()) == 0 { return true @@ -79,7 +80,7 @@ func (rs *resultReorder) completeSort(lp base.LogicalPlan) bool { return false } -func (rs *resultReorder) injectSort(lp base.LogicalPlan) base.LogicalPlan { +func (rs *ResultReorder) injectSort(lp base.LogicalPlan) base.LogicalPlan { if rs.isInputOrderKeeper(lp) { lp.SetChildren(rs.injectSort(lp.Children()[0])) return lp @@ -100,7 +101,7 @@ func (rs *resultReorder) injectSort(lp base.LogicalPlan) base.LogicalPlan { return sort } -func (*resultReorder) isInputOrderKeeper(lp base.LogicalPlan) bool { +func (*ResultReorder) isInputOrderKeeper(lp base.LogicalPlan) bool { switch lp.(type) { case *LogicalSelection, *logicalop.LogicalProjection, *logicalop.LogicalLimit, *logicalop.LogicalTableDual: return true @@ -109,7 +110,7 @@ func (*resultReorder) isInputOrderKeeper(lp base.LogicalPlan) bool { } // extractHandleCols does the best effort to get the handle column. -func (rs *resultReorder) extractHandleCol(lp base.LogicalPlan) *expression.Column { +func (rs *ResultReorder) extractHandleCol(lp base.LogicalPlan) *expression.Column { switch x := lp.(type) { case *LogicalSelection, *logicalop.LogicalLimit: handleCol := rs.extractHandleCol(lp.Children()[0]) @@ -133,6 +134,7 @@ func (rs *resultReorder) extractHandleCol(lp base.LogicalPlan) *expression.Colum return nil } -func (*resultReorder) name() string { +// Name implements base.LogicalOptRule.<1st> interface. +func (*ResultReorder) Name() string { return "result_reorder" } diff --git a/pkg/planner/core/rule_semi_join_rewrite.go b/pkg/planner/core/rule_semi_join_rewrite.go index c651073f0c7d0..d29d16946d7f1 100644 --- a/pkg/planner/core/rule_semi_join_rewrite.go +++ b/pkg/planner/core/rule_semi_join_rewrite.go @@ -26,7 +26,7 @@ import ( h "github.com/pingcap/tidb/pkg/util/hint" ) -// semiJoinRewriter rewrites semi join to inner join with aggregation. +// SemiJoinRewriter rewrites semi join to inner join with aggregation. // Note: This rewriter is only used for exists subquery. // And it also requires the hint `SEMI_JOIN_REWRITE` to be set. // For example: @@ -36,20 +36,22 @@ import ( // will be rewriten to: // // select * from t join (select a from s group by a) s on t.a = s.a; -type semiJoinRewriter struct { +type SemiJoinRewriter struct { } -func (smj *semiJoinRewriter) optimize(_ context.Context, p base.LogicalPlan, _ *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) { +// Optimize implements base.LogicalOptRule.<0th> interface. +func (smj *SemiJoinRewriter) Optimize(_ context.Context, p base.LogicalPlan, _ *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) { planChanged := false newLogicalPlan, err := smj.recursivePlan(p) return newLogicalPlan, planChanged, err } -func (*semiJoinRewriter) name() string { +// Name implements base.LogicalOptRule.<1st> interface. 
+func (*SemiJoinRewriter) Name() string { return "semi_join_rewrite" } -func (smj *semiJoinRewriter) recursivePlan(p base.LogicalPlan) (base.LogicalPlan, error) { +func (smj *SemiJoinRewriter) recursivePlan(p base.LogicalPlan) (base.LogicalPlan, error) { if _, ok := p.(*LogicalCTE); ok { return p, nil } diff --git a/pkg/planner/core/rule_topn_push_down.go b/pkg/planner/core/rule_topn_push_down.go index b7723fd5fe378..cf28cf0cfaf6c 100644 --- a/pkg/planner/core/rule_topn_push_down.go +++ b/pkg/planner/core/rule_topn_push_down.go @@ -25,11 +25,12 @@ import ( "github.com/pingcap/tidb/pkg/planner/util/optimizetrace" ) -// pushDownTopNOptimizer pushes down the topN or limit. In the future we will remove the limit from `requiredProperty` in CBO phase. -type pushDownTopNOptimizer struct { +// PushDownTopNOptimizer pushes down the topN or limit. In the future we will remove the limit from `requiredProperty` in CBO phase. +type PushDownTopNOptimizer struct { } -func (*pushDownTopNOptimizer) optimize(_ context.Context, p base.LogicalPlan, opt *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) { +// Optimize implements the base.LogicalOptRule.<0th> interface. +func (*PushDownTopNOptimizer) Optimize(_ context.Context, p base.LogicalPlan, opt *optimizetrace.LogicalOptimizeOp) (base.LogicalPlan, bool, error) { planChanged := false return p.PushDownTopN(nil, opt), planChanged, nil } @@ -52,7 +53,8 @@ func pushDownTopNForBaseLogicalPlan(lp base.LogicalPlan, topNLogicalPlan base.Lo return p } -func (*pushDownTopNOptimizer) name() string { +// Name implements the base.LogicalOptRule.<1st> interface. +func (*PushDownTopNOptimizer) Name() string { return "topn_push_down" } From 6fa30c1ffdd5683768c79a0b008919ae4618e0f2 Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Tue, 6 Aug 2024 17:16:02 +0800 Subject: [PATCH 2/5] . Signed-off-by: AilinKid <314806019@qq.com> --- pkg/planner/core/rule/util/build_key_info_misc.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/pkg/planner/core/rule/util/build_key_info_misc.go b/pkg/planner/core/rule/util/build_key_info_misc.go index 02150281b49a2..8d566868a54e9 100644 --- a/pkg/planner/core/rule/util/build_key_info_misc.go +++ b/pkg/planner/core/rule/util/build_key_info_misc.go @@ -1,3 +1,17 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package util import ( From b54dd0c77285d94be318c26f42eaf36e2a4b94e8 Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Wed, 7 Aug 2024 11:44:51 +0800 Subject: [PATCH 3/5] . 
Signed-off-by: AilinKid <314806019@qq.com> --- pkg/planner/core/rule_collect_plan_stats.go | 1 + pkg/planner/core/rule_predicate_push_down.go | 1 + 2 files changed, 2 insertions(+) diff --git a/pkg/planner/core/rule_collect_plan_stats.go b/pkg/planner/core/rule_collect_plan_stats.go index 46674b36976b0..e9047782c5095 100644 --- a/pkg/planner/core/rule_collect_plan_stats.go +++ b/pkg/planner/core/rule_collect_plan_stats.go @@ -79,6 +79,7 @@ func (CollectPredicateColumnsPoint) Name() string { return "collect_predicate_columns_point" } +// SyncWaitStatsLoadPoint sync-wait for stats load point. type SyncWaitStatsLoadPoint struct{} // Optimize implements the base.LogicalOptRule.<0th> interface. diff --git a/pkg/planner/core/rule_predicate_push_down.go b/pkg/planner/core/rule_predicate_push_down.go index 99eaefc3e2429..3df2c9723a4bc 100644 --- a/pkg/planner/core/rule_predicate_push_down.go +++ b/pkg/planner/core/rule_predicate_push_down.go @@ -30,6 +30,7 @@ import ( "github.com/pingcap/tidb/pkg/util/ranger" ) +// PPDSolver stands for Predicate Push Down. type PPDSolver struct{} // exprPrefixAdder is the wrapper struct to add tidb_shard(x) = val for `OrigConds` From 1b481ee10fd1a0e7ce7e4dfcd100f536a32cafee Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Wed, 7 Aug 2024 14:13:37 +0800 Subject: [PATCH 4/5] add licence header Signed-off-by: AilinKid <314806019@qq.com> --- pkg/planner/core/rule/rule_init.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/pkg/planner/core/rule/rule_init.go b/pkg/planner/core/rule/rule_init.go index 8a7d54907c261..d10d7f754c429 100644 --- a/pkg/planner/core/rule/rule_init.go +++ b/pkg/planner/core/rule/rule_init.go @@ -1,3 +1,17 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package rule import "github.com/pingcap/tidb/pkg/planner/core/rule/util" From c96cad2d912cae3c279ca7b38e19e5663db2fe22 Mon Sep 17 00:00:00 2001 From: AilinKid <314806019@qq.com> Date: Wed, 7 Aug 2024 14:15:04 +0800 Subject: [PATCH 5/5] . Signed-off-by: AilinKid <314806019@qq.com> --- pkg/planner/core/rule/rule_build_key_info.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/planner/core/rule/rule_build_key_info.go b/pkg/planner/core/rule/rule_build_key_info.go index 711f5fbbf2552..b5231c90bf14d 100644 --- a/pkg/planner/core/rule/rule_build_key_info.go +++ b/pkg/planner/core/rule/rule_build_key_info.go @@ -1,4 +1,4 @@ -// Copyright 2017 PingCAP, Inc. +// Copyright 2024 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License.