diff --git a/planner/core/find_best_task.go b/planner/core/find_best_task.go
index c00d94cc10337..469922857525e 100644
--- a/planner/core/find_best_task.go
+++ b/planner/core/find_best_task.go
@@ -635,6 +635,10 @@ func compareCandidates(lhs, rhs *candidatePath) int {
 }
 
 func (ds *DataSource) isMatchProp(path *util.AccessPath, prop *property.PhysicalProperty) bool {
+	if ds.table.Type().IsClusterTable() && !prop.IsSortItemEmpty() {
+		// A TableScan on a cluster table can't keep order.
+		return false
+	}
 	var isMatchProp bool
 	if path.IsIntHandlePath {
 		pkCol := ds.getPKIsHandleCol()
diff --git a/planner/core/task.go b/planner/core/task.go
index c805f7b7718d6..a838de8704985 100644
--- a/planner/core/task.go
+++ b/planner/core/task.go
@@ -21,6 +21,7 @@ import (
 	"github.com/pingcap/failpoint"
 	"github.com/pingcap/tidb/expression"
 	"github.com/pingcap/tidb/expression/aggregation"
+	"github.com/pingcap/tidb/infoschema"
 	"github.com/pingcap/tidb/kv"
 	"github.com/pingcap/tidb/parser/ast"
 	"github.com/pingcap/tidb/parser/charset"
@@ -1039,6 +1040,62 @@ func (p *PhysicalTopN) canPushDownToTiFlash(mppTask *mppTask) bool {
 	return true
 }
 
+// For https://github.com/pingcap/tidb/issues/51723,
+// this function only supports `CLUSTER_SLOW_QUERY`.
+// It changes the plan from
+// TopN -> TableReader -> TableFullScan[cop] to
+// TopN -> TableReader -> Limit[cop] -> TableFullScan[cop] + keepOrder.
+func (p *PhysicalTopN) pushLimitDownToTiDBCop(copTsk *copTask) (task, bool) {
+	if copTsk.indexPlan != nil || copTsk.tablePlan == nil {
+		return nil, false
+	}
+
+	var (
+		tblScan *PhysicalTableScan
+		err     error
+		ok      bool
+	)
+
+	copTsk.tablePlan, err = copTsk.tablePlan.Clone()
+	if err != nil {
+		return nil, false
+	}
+	finalTblScanPlan := copTsk.tablePlan
+	for len(finalTblScanPlan.Children()) > 0 {
+		finalTblScanPlan = finalTblScanPlan.Children()[0]
+	}
+
+	if tblScan, ok = finalTblScanPlan.(*PhysicalTableScan); !ok {
+		return nil, false
+	}
+
+	// Check whether the table is `CLUSTER_SLOW_QUERY`.
+	if tblScan.Table.Name.O != infoschema.ClusterTableSlowLog {
+		return nil, false
+	}
+
+	colsProp, ok := GetPropByOrderByItems(p.ByItems)
+	if !ok {
+		return nil, false
+	}
+	if len(colsProp.SortItems) != 1 || !colsProp.SortItems[0].Col.Equal(p.SCtx(), tblScan.HandleCols.GetCol(0)) {
+		return nil, false
+	}
+	tblScan.Desc = colsProp.SortItems[0].Desc
+	tblScan.KeepOrder = true
+
+	childProfile := copTsk.plan().statsInfo()
+	newCount := p.Offset + p.Count
+	stats := deriveLimitStats(childProfile, float64(newCount))
+	pushedLimit := PhysicalLimit{
+		Count: newCount,
+	}.Init(p.SCtx(), stats, p.SelectBlockOffset())
+	pushedLimit.SetSchema(copTsk.tablePlan.Schema())
+	copTsk = attachPlan2Task(pushedLimit, copTsk).(*copTask)
+	rootTask := copTsk.convertToRootTask(p.SCtx())
+	return attachPlan2Task(p, rootTask), true
+}
+
 func (p *PhysicalTopN) attach2Task(tasks ...task) task {
 	t := tasks[0].copy()
 	cols := make([]*expression.Column, 0, len(p.ByItems))
@@ -1046,6 +1103,12 @@ func (p *PhysicalTopN) attach2Task(tasks ...task) task {
 	for _, item := range p.ByItems {
 		cols = append(cols, expression.ExtractColumns(item.Expr)...)
 	}
 	needPushDown := len(cols) > 0
+	if copTask, ok := t.(*copTask); ok && needPushDown && copTask.getStoreType() == kv.TiDB && len(copTask.rootTaskConds) == 0 {
+		newTask, changed := p.pushLimitDownToTiDBCop(copTask)
+		if changed {
+			return newTask
+		}
+	}
 	if copTask, ok := t.(*copTask); ok && needPushDown && p.canPushDownToTiKV(copTask) && len(copTask.rootTaskConds) == 0 {
 		newTask, changed := p.pushPartialTopNDownToCop(copTask)
 		if changed {
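
As a side note (not part of the patch, and not TiDB code): below is a minimal, self-contained Go sketch of the correctness argument behind the rewrite. When the scan already returns rows in the TopN order (KeepOrder), reading only the first Offset+Count rows through the pushed-down Limit yields the same rows as fully scanning and then sorting. The function names and the int-slice data are purely illustrative assumptions, not TiDB APIs.

package main

import (
	"fmt"
	"sort"
)

// topNAfterFullScan models the old plan: TableFullScan reads every row,
// then TopN sorts all of them and keeps rows [offset, offset+count).
func topNAfterFullScan(rows []int, offset, count int, desc bool) []int {
	sorted := append([]int(nil), rows...)
	sort.Slice(sorted, func(i, j int) bool {
		if desc {
			return sorted[i] > sorted[j]
		}
		return sorted[i] < sorted[j]
	})
	end := offset + count
	if end > len(sorted) {
		end = len(sorted)
	}
	if offset > end {
		offset = end
	}
	return sorted[offset:end]
}

// topNAfterOrderedLimit models the new plan: the scan keeps order, the
// pushed-down Limit stops after offset+count rows, and the remaining TopN
// in the root task only has to drop the first offset rows.
func topNAfterOrderedLimit(orderedRows []int, offset, count int) []int {
	end := offset + count
	if end > len(orderedRows) {
		end = len(orderedRows)
	}
	limited := orderedRows[:end] // pushed-down Limit[cop]
	if offset > len(limited) {
		offset = len(limited)
	}
	return limited[offset:] // offset handled by the TopN left in the root task
}

func main() {
	rows := []int{7, 3, 9, 1, 5}
	orderedDesc := []int{9, 7, 5, 3, 1} // what a keep-order descending scan returns

	fmt.Println(topNAfterFullScan(rows, 1, 2, true))      // [7 5]
	fmt.Println(topNAfterOrderedLimit(orderedDesc, 1, 2)) // [7 5]
}

Note that the patch keeps the original TopN above the pushed Limit (attachPlan2Task(p, rootTask)), so the offset is still applied in the root task and correctness does not depend on the cop side returning exactly Offset+Count rows.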