From 8ac35f2057d68b9469cdd648530d9d246f0f5d91 Mon Sep 17 00:00:00 2001 From: zyguan Date: Wed, 18 Dec 2024 02:00:57 +0000 Subject: [PATCH 1/2] planner,util/ranger: reduce mem allocs on rebuildRange Signed-off-by: zyguan --- pkg/kv/key.go | 20 ++ pkg/planner/core/plan_cache_rebuild.go | 27 ++- pkg/session/BUILD.bazel | 1 + pkg/session/contextimpl.go | 8 + pkg/util/ranger/BUILD.bazel | 1 + pkg/util/ranger/bench_test.go | 4 +- pkg/util/ranger/detacher.go | 58 +++--- pkg/util/ranger/points.go | 252 ++++++++++++------------- pkg/util/ranger/ranger.go | 174 +++++++++-------- pkg/util/ranger/ranger_test.go | 102 +++++++--- pkg/util/ranger/types.go | 102 +++++++++- 11 files changed, 469 insertions(+), 280 deletions(-) diff --git a/pkg/kv/key.go b/pkg/kv/key.go index 927fa81060096..e2273a5d50a87 100644 --- a/pkg/kv/key.go +++ b/pkg/kv/key.go @@ -67,6 +67,26 @@ func (k Key) PrefixNext() Key { return buf } +// PrefixNextInPlace tries to modifies the key itself to the next prefix key, returns false when lack of space (in which +// case the key is kept unchanged and the caller can append 0 to the key). +func (k Key) PrefixNextInPlace() bool { + var i int + for i = len(k) - 1; i >= 0; i-- { + k[i]++ + if k[i] != 0 { + break + } + } + if i == -1 { + // revert the key + for i := range k { + k[i]-- + } + return false + } + return true +} + // Cmp returns the comparison result of two key. // The result will be 0 if a==b, -1 if a < b, and +1 if a > b. 
func (k Key) Cmp(another Key) int { diff --git a/pkg/planner/core/plan_cache_rebuild.go b/pkg/planner/core/plan_cache_rebuild.go index 3a274caf106c6..6bbf0268378e8 100644 --- a/pkg/planner/core/plan_cache_rebuild.go +++ b/pkg/planner/core/plan_cache_rebuild.go @@ -182,6 +182,7 @@ func convertConstant2Datum(ctx base.PlanContext, con *expression.Constant, targe } func buildRangeForTableScan(sctx base.PlanContext, ts *PhysicalTableScan) (err error) { + allocator := tryGetRangerAllocator(sctx) if ts.Table.IsCommonHandle { pk := tables.FindPrimaryIndex(ts.Table) pkCols := make([]*expression.Column, 0, len(pk.Columns)) @@ -202,7 +203,7 @@ func buildRangeForTableScan(sctx base.PlanContext, ts *PhysicalTableScan) (err e } } if len(pkCols) > 0 { - res, err := ranger.DetachCondAndBuildRangeForIndex(sctx.GetRangerCtx(), ts.AccessCondition, pkCols, pkColsLen, 0) + res, err := ranger.DetachCondAndBuildRangeForIndexWithAllocator(sctx.GetRangerCtx(), allocator, ts.AccessCondition, pkCols, pkColsLen, 0) if err != nil { return err } @@ -224,7 +225,7 @@ func buildRangeForTableScan(sctx base.PlanContext, ts *PhysicalTableScan) (err e } } if pkCol != nil { - ranges, accessConds, remainingConds, err := ranger.BuildTableRange(ts.AccessCondition, sctx.GetRangerCtx(), pkCol.RetType, 0) + ranges, accessConds, remainingConds, err := ranger.BuildTableRangeWithAllocator(ts.AccessCondition, sctx.GetRangerCtx(), allocator, pkCol.RetType, 0) if err != nil { return err } @@ -254,8 +255,9 @@ func buildRangesForPointGet(sctx base.PlanContext, x *PointGetPlan) (err error) } // if access condition is not nil, which means it's a point get generated by cbo. 
if x.AccessConditions != nil { + allocator := tryGetRangerAllocator(sctx) if x.IndexInfo != nil { - ranges, err := ranger.DetachCondAndBuildRangeForIndex(x.SCtx().GetRangerCtx(), x.AccessConditions, x.IdxCols, x.IdxColLens, 0) + ranges, err := ranger.DetachCondAndBuildRangeForIndexWithAllocator(x.SCtx().GetRangerCtx(), allocator, x.AccessConditions, x.IdxCols, x.IdxColLens, 0) if err != nil { return err } @@ -277,7 +279,7 @@ func buildRangesForPointGet(sctx base.PlanContext, x *PointGetPlan) (err error) } } if pkCol != nil { - ranges, accessConds, remainingConds, err := ranger.BuildTableRange(x.AccessConditions, x.SCtx().GetRangerCtx(), pkCol.RetType, 0) + ranges, accessConds, remainingConds, err := ranger.BuildTableRangeWithAllocator(x.AccessConditions, x.SCtx().GetRangerCtx(), allocator, pkCol.RetType, 0) if err != nil { return err } @@ -322,8 +324,9 @@ func buildRangesForBatchGet(sctx base.PlanContext, x *BatchPointGetPlan) (err er } // if access condition is not nil, which means it's a point get generated by cbo. 
if x.AccessConditions != nil { + allocator := tryGetRangerAllocator(sctx) if x.IndexInfo != nil { - ranges, err := ranger.DetachCondAndBuildRangeForIndex(x.ctx.GetRangerCtx(), x.AccessConditions, x.IdxCols, x.IdxColLens, 0) + ranges, err := ranger.DetachCondAndBuildRangeForIndexWithAllocator(x.ctx.GetRangerCtx(), allocator, x.AccessConditions, x.IdxCols, x.IdxColLens, 0) if err != nil { return err } @@ -345,7 +348,7 @@ func buildRangesForBatchGet(sctx base.PlanContext, x *BatchPointGetPlan) (err er } } if pkCol != nil { - ranges, accessConds, remainingConds, err := ranger.BuildTableRange(x.AccessConditions, x.ctx.GetRangerCtx(), pkCol.RetType, 0) + ranges, accessConds, remainingConds, err := ranger.BuildTableRangeWithAllocator(x.AccessConditions, x.ctx.GetRangerCtx(), allocator, pkCol.RetType, 0) if err != nil { return err } @@ -411,7 +414,8 @@ func buildRangeForIndexScan(sctx base.PlanContext, is *PhysicalIndexScan) (err e return errors.New("unexpected range for PhysicalIndexScan") } - res, err := ranger.DetachCondAndBuildRangeForIndex(sctx.GetRangerCtx(), is.AccessCondition, is.IdxCols, is.IdxColLens, 0) + allocator := tryGetRangerAllocator(sctx) + res, err := ranger.DetachCondAndBuildRangeForIndexWithAllocator(sctx.GetRangerCtx(), allocator, is.AccessCondition, is.IdxCols, is.IdxColLens, 0) if err != nil { return err } @@ -444,3 +448,12 @@ func isSafeRange(accessConds []expression.Expression, rebuiltResult *ranger.Deta return true } + +func tryGetRangerAllocator(ctx base.PlanContext) *ranger.Allocator { + if p, ok := ctx.(ranger.AllocatorProvider); ok { + allocator := p.GetRangerAllocator() + allocator.Reset() + return allocator + } + return nil +} diff --git a/pkg/session/BUILD.bazel b/pkg/session/BUILD.bazel index f2da24851b1d8..b00a7f5af3b54 100644 --- a/pkg/session/BUILD.bazel +++ b/pkg/session/BUILD.bazel @@ -109,6 +109,7 @@ go_library( "//pkg/util/logutil/consistency", "//pkg/util/memory", "//pkg/util/parser", + "//pkg/util/ranger", 
"//pkg/util/ranger/context", "//pkg/util/redact", "//pkg/util/sem", diff --git a/pkg/session/contextimpl.go b/pkg/session/contextimpl.go index 9ddb247e23f80..4ee3f91823283 100644 --- a/pkg/session/contextimpl.go +++ b/pkg/session/contextimpl.go @@ -17,6 +17,7 @@ package session import ( "github.com/pingcap/tidb/pkg/planner/planctx" "github.com/pingcap/tidb/pkg/planner/plannersession" + "github.com/pingcap/tidb/pkg/util/ranger" ) var _ planctx.PlanContext = &planContextImpl{} @@ -27,6 +28,8 @@ var _ planctx.PlanContext = &planContextImpl{} type planContextImpl struct { *session *plannersession.PlanCtxExtended + + allocator ranger.Allocator } // NewPlanContextImpl creates a new PlanContextImpl. @@ -36,3 +39,8 @@ func newPlanContextImpl(s *session) *planContextImpl { PlanCtxExtended: plannersession.NewPlanCtxExtended(s), } } + +// GetRangerAllocator implements the `ranger.AllocatorProvider` interface. +func (ctx *planContextImpl) GetRangerAllocator() *ranger.Allocator { + return &ctx.allocator +} diff --git a/pkg/util/ranger/BUILD.bazel b/pkg/util/ranger/BUILD.bazel index dc4d0a202d6fe..b63e9bd43e940 100644 --- a/pkg/util/ranger/BUILD.bazel +++ b/pkg/util/ranger/BUILD.bazel @@ -32,6 +32,7 @@ go_library( "//pkg/util/dbterror/plannererrors", "//pkg/util/hack", "//pkg/util/ranger/context", + "//pkg/util/zeropool", "@com_github_pingcap_errors//:errors", ], ) diff --git a/pkg/util/ranger/bench_test.go b/pkg/util/ranger/bench_test.go index b07adb7203932..9b865dfb78d6a 100644 --- a/pkg/util/ranger/bench_test.go +++ b/pkg/util/ranger/bench_test.go @@ -130,8 +130,10 @@ WHERE b.ResetTimer() pctx := sctx.GetPlanCtx() + allocator := new(ranger.Allocator) for range b.N { - _, err = ranger.DetachCondAndBuildRangeForIndex(pctx.GetRangerCtx(), conds, cols, lengths, 0) + allocator.Reset() + _, err = ranger.DetachCondAndBuildRangeForIndexWithAllocator(pctx.GetRangerCtx(), allocator, conds, cols, lengths, 0) require.NoError(b, err) } b.StopTimer() diff --git 
a/pkg/util/ranger/detacher.go b/pkg/util/ranger/detacher.go index be18e026af25a..76ceb36c2dfcb 100644 --- a/pkg/util/ranger/detacher.go +++ b/pkg/util/ranger/detacher.go @@ -303,7 +303,7 @@ func mergeTwoCNFRanges(sctx *rangerctx.RangerContext, cond expression.Expression // item ranges. // e.g, for input CNF expressions ((a,b) in ((1,1),(2,2))) and a > 1 and ((a,b,c) in (1,1,1),(2,2,2)) // ((a,b,c) in (1,1,1),(2,2,2)) would be extracted. -func extractBestCNFItemRanges(sctx *rangerctx.RangerContext, conds []expression.Expression, cols []*expression.Column, +func extractBestCNFItemRanges(sctx *rangerctx.RangerContext, allocator *Allocator, conds []expression.Expression, cols []*expression.Column, lengths []int, rangeMaxSize int64, convertToSortKey bool) (*cnfItemRangeResult, []*valueInfo, error) { if len(conds) < 2 { return nil, nil, nil @@ -323,7 +323,7 @@ func extractBestCNFItemRanges(sctx *rangerctx.RangerContext, conds []expression. // We build ranges for `(a,b) in ((1,1),(1,2))` and get `[1 1, 1 1] [1 2, 1 2]`, which are point ranges and we can // append `c = 1` to the point ranges. However, if we choose to merge consecutive ranges here, we get `[1 1, 1 2]`, // which are not point ranges, and we cannot append `c = 1` anymore. - res, err := detachCondAndBuildRange(sctx, tmpConds, cols, lengths, rangeMaxSize, convertToSortKey, false) + res, err := detachCondAndBuildRange(sctx, allocator, tmpConds, cols, lengths, rangeMaxSize, convertToSortKey, false) if err != nil { return nil, nil, err } @@ -389,7 +389,7 @@ func chooseBetweenRangeAndPoint(sctx *rangerctx.RangerContext, r1 *DetachRangeRe // detachCNFCondAndBuildRangeForIndex will detach the index filters from table filters. These conditions are connected with `and` // It will first find the point query column and then extract the range query column. // considerDNF is true means it will try to extract access conditions from the DNF expressions. 
-func (d *rangeDetacher) detachCNFCondAndBuildRangeForIndex(conditions []expression.Expression, newTpSlice []*types.FieldType, considerDNF bool) (*DetachRangeResult, error) { +func (d *rangeDetacher) detachCNFCondAndBuildRangeForIndex(conditions []expression.Expression, newTpSlice []types.FieldType, considerDNF bool) (*DetachRangeResult, error) { var ( eqCount int ranges Ranges @@ -397,7 +397,7 @@ func (d *rangeDetacher) detachCNFCondAndBuildRangeForIndex(conditions []expressi ) res := &DetachRangeResult{} - accessConds, filterConds, newConditions, columnValues, emptyRange := ExtractEqAndInCondition(d.sctx, conditions, d.cols, d.lengths) + accessConds, filterConds, newConditions, columnValues, emptyRange := extractEqAndInCondition(d.sctx, d.allocator, conditions, d.cols, d.lengths) if emptyRange { return res, nil } @@ -462,7 +462,7 @@ func (d *rangeDetacher) detachCNFCondAndBuildRangeForIndex(conditions []expressi ctx: d.sctx.ExprCtx.GetEvalCtx(), } if considerDNF { - bestCNFItemRes, columnValues, err := extractBestCNFItemRanges(d.sctx, conditions, d.cols, d.lengths, d.rangeMaxSize, d.convertToSortKey) + bestCNFItemRes, columnValues, err := extractBestCNFItemRanges(d.sctx, d.allocator, conditions, d.cols, d.lengths, d.rangeMaxSize, d.convertToSortKey) if err != nil { return nil, err } @@ -503,7 +503,7 @@ func (d *rangeDetacher) detachCNFCondAndBuildRangeForIndex(conditions []expressi if eqOrInCount > 0 { newCols := d.cols[eqOrInCount:] newLengths := d.lengths[eqOrInCount:] - tailRes, err := detachCondAndBuildRange(d.sctx, newConditions, newCols, newLengths, d.rangeMaxSize, d.convertToSortKey, d.mergeConsecutive) + tailRes, err := detachCondAndBuildRange(d.sctx, d.allocator, newConditions, newCols, newLengths, d.rangeMaxSize, d.convertToSortKey, d.mergeConsecutive) if err != nil { return nil, err } @@ -722,15 +722,22 @@ func extractValueInfo(expr expression.Expression) *valueInfo { // columnValues: the constant column values for all index columns. 
columnValues[i] is nil if cols[i] is not constant. // bool: indicate whether there's nil range when merging eq and in conditions. func ExtractEqAndInCondition(sctx *rangerctx.RangerContext, conditions []expression.Expression, cols []*expression.Column, + lengths []int) ([]expression.Expression, []expression.Expression, []expression.Expression, []*valueInfo, bool) { + return extractEqAndInCondition(sctx, nil, conditions, cols, lengths) +} + +// extractEqAndInCondition like ExtractEqAndInCondition, but it uses the given allocator. +func extractEqAndInCondition(sctx *rangerctx.RangerContext, allocator *Allocator, conditions []expression.Expression, cols []*expression.Column, lengths []int) ([]expression.Expression, []expression.Expression, []expression.Expression, []*valueInfo, bool) { var filters []expression.Expression - rb := builder{sctx: sctx} + rb := builder{sctx: sctx, allocator: allocator} accesses := make([]expression.Expression, len(cols)) points := make([][]*point, len(cols)) mergedAccesses := make([]expression.Expression, len(cols)) newConditions := make([]expression.Expression, 0, len(conditions)) columnValues := make([]*valueInfo, len(cols)) offsets := make([]int, len(conditions)) + var newTpVal types.FieldType for i, cond := range conditions { offset := getPotentialEqOrInColOffset(sctx, cond, cols) offsets[i] = offset @@ -743,16 +750,16 @@ func ExtractEqAndInCondition(sctx *rangerctx.RangerContext, conditions []express } // Multiple Eq/In conditions for one column in CNF, apply intersection on them // Lazily compute the points for the previously visited Eq/In - newTp := newFieldType(cols[offset].GetType(sctx.ExprCtx.GetEvalCtx())) + newTpVal = newFieldTypeValue(cols[offset].GetType(sctx.ExprCtx.GetEvalCtx())) collator := collate.GetCollator(cols[offset].GetType(sctx.ExprCtx.GetEvalCtx()).GetCollate()) if mergedAccesses[offset] == nil { mergedAccesses[offset] = accesses[offset] // Note that this is a relatively special usage of build(). 
We will restore the points back to Expression for // later use and may build the Expression to points again. // We need to keep the original value here, which means we neither cut prefix nor convert to sort key. - points[offset] = rb.build(accesses[offset], newTp, types.UnspecifiedLength, false) + points[offset] = rb.build(accesses[offset], &newTpVal, types.UnspecifiedLength, false) } - points[offset] = rb.intersection(points[offset], rb.build(cond, newTp, types.UnspecifiedLength, false), collator) + points[offset] = rb.intersection(points[offset], rb.build(cond, &newTpVal, types.UnspecifiedLength, false), collator) if len(points[offset]) == 0 { // Early termination if false expression found if expression.MaybeOverOptimized4PlanCache(sctx.ExprCtx, conditions) { // `a>@x and a<@y` --> `invalid-range if @x>=@y` @@ -835,7 +842,7 @@ func ExtractEqAndInCondition(sctx *rangerctx.RangerContext, conditions []express // We will detach the conditions of every DNF items, then compose them to a DNF. 
func (d *rangeDetacher) detachDNFCondAndBuildRangeForIndex( condition *expression.ScalarFunction, - newTpSlice []*types.FieldType, + newTpSlice []types.FieldType, ) ( Ranges, []expression.Expression, @@ -850,7 +857,7 @@ func (d *rangeDetacher) detachDNFCondAndBuildRangeForIndex( optPrefixIndexSingleScan: d.sctx.OptPrefixIndexSingleScan, ctx: d.sctx.ExprCtx.GetEvalCtx(), } - rb := builder{sctx: d.sctx} + rb := builder{sctx: d.sctx, allocator: d.allocator} dnfItems := expression.FlattenDNFConditions(condition) newAccessItems := make([]expression.Expression, 0, len(dnfItems)) minAccessConds := -1 @@ -918,13 +925,13 @@ func (d *rangeDetacher) detachDNFCondAndBuildRangeForIndex( if shouldReserve { hasResidual = true } - points := rb.build(item, newTpSlice[0], d.lengths[0], d.convertToSortKey) - tmpNewTp := newTpSlice[0] + points := rb.build(item, &newTpSlice[0], d.lengths[0], d.convertToSortKey) + tmpNewTp := &newTpSlice[0] if d.convertToSortKey { tmpNewTp = convertStringFTToBinaryCollate(tmpNewTp) } // TODO: restrict the mem usage of ranges - ranges, rangeFallback, err := points2Ranges(d.sctx, points, tmpNewTp, d.rangeMaxSize) + ranges, rangeFallback, err := points2Ranges(&rb, points, tmpNewTp, d.rangeMaxSize) if err != nil { return nil, nil, nil, false, -1, errors.Trace(err) } @@ -1020,8 +1027,15 @@ type DetachRangeResult struct { // The returned values are encapsulated into a struct DetachRangeResult, see its comments for explanation. func DetachCondAndBuildRangeForIndex(sctx *rangerctx.RangerContext, conditions []expression.Expression, cols []*expression.Column, lengths []int, rangeMaxSize int64) (*DetachRangeResult, error) { + return DetachCondAndBuildRangeForIndexWithAllocator(sctx, nil, conditions, cols, lengths, rangeMaxSize) +} + +// DetachCondAndBuildRangeForIndexWithAllocator likes DetachCondAndBuildRangeForIndex, but it uses the given allocator. 
+func DetachCondAndBuildRangeForIndexWithAllocator(sctx *rangerctx.RangerContext, allocator *Allocator, conditions []expression.Expression, + cols []*expression.Column, lengths []int, rangeMaxSize int64) (*DetachRangeResult, error) { d := &rangeDetacher{ sctx: sctx, + allocator: allocator, allConds: conditions, cols: cols, lengths: lengths, @@ -1033,10 +1047,11 @@ func DetachCondAndBuildRangeForIndex(sctx *rangerctx.RangerContext, conditions [ } // detachCondAndBuildRange detaches the index filters from table filters and uses them to build ranges. -func detachCondAndBuildRange(sctx *rangerctx.RangerContext, conditions []expression.Expression, cols []*expression.Column, +func detachCondAndBuildRange(sctx *rangerctx.RangerContext, allocator *Allocator, conditions []expression.Expression, cols []*expression.Column, lengths []int, rangeMaxSize int64, convertToSortKey bool, mergeConsecutive bool) (*DetachRangeResult, error) { d := &rangeDetacher{ sctx: sctx, + allocator: allocator, allConds: conditions, cols: cols, lengths: lengths, @@ -1053,11 +1068,12 @@ func detachCondAndBuildRange(sctx *rangerctx.RangerContext, conditions []express // The returned values are encapsulated into a struct DetachRangeResult, see its comments for explanation. 
func DetachCondAndBuildRangeForPartition(sctx *rangerctx.RangerContext, conditions []expression.Expression, cols []*expression.Column, lengths []int, rangeMaxSize int64) (*DetachRangeResult, error) { - return detachCondAndBuildRange(sctx, conditions, cols, lengths, rangeMaxSize, false, false) + return detachCondAndBuildRange(sctx, nil, conditions, cols, lengths, rangeMaxSize, false, false) } type rangeDetacher struct { sctx *rangerctx.RangerContext + allocator *Allocator allConds []expression.Expression cols []*expression.Column lengths []int @@ -1068,9 +1084,9 @@ type rangeDetacher struct { func (d *rangeDetacher) detachCondAndBuildRangeForCols() (*DetachRangeResult, error) { res := &DetachRangeResult{} - newTpSlice := make([]*types.FieldType, 0, len(d.cols)) + newTpSlice := make([]types.FieldType, 0, len(d.cols)) for _, col := range d.cols { - newTpSlice = append(newTpSlice, newFieldType(col.RetType)) + newTpSlice = append(newTpSlice, newFieldTypeValue(col.RetType)) } if len(d.allConds) == 1 { if sf, ok := d.allConds[0].(*expression.ScalarFunction); ok && sf.FuncName.L == ast.LogicOr { @@ -1103,9 +1119,9 @@ func (d *rangeDetacher) detachCondAndBuildRangeForCols() (*DetachRangeResult, er // for building ranges, set rangeMemQuota to 0 to avoid range fallback. 
func DetachSimpleCondAndBuildRangeForIndex(sctx *rangerctx.RangerContext, conditions []expression.Expression, cols []*expression.Column, lengths []int, rangeMaxSize int64) (Ranges, []expression.Expression, error) { - newTpSlice := make([]*types.FieldType, 0, len(cols)) + newTpSlice := make([]types.FieldType, 0, len(cols)) for _, col := range cols { - newTpSlice = append(newTpSlice, newFieldType(col.RetType)) + newTpSlice = append(newTpSlice, newFieldTypeValue(col.RetType)) } d := &rangeDetacher{ sctx: sctx, diff --git a/pkg/util/ranger/points.go b/pkg/util/ranger/points.go index d664d15636c8a..5875284516afe 100644 --- a/pkg/util/ranger/points.go +++ b/pkg/util/ranger/points.go @@ -71,14 +71,6 @@ func (rp *point) String() string { return fmt.Sprintf("%v%s", val, symbol) } -func (rp *point) Clone(value types.Datum) *point { - return &point{ - value: value, - excl: rp.excl, - start: rp.start, - } -} - type pointSorter struct { err error collator collate.Collator @@ -130,7 +122,7 @@ func rangePointEqualValueLess(a, b *point) bool { return a.excl && !b.excl } -func pointsConvertToSortKey(sctx *rangerctx.RangerContext, inputPs []*point, newTp *types.FieldType) ([]*point, error) { +func pointsConvertToSortKey(r *builder, inputPs []*point, newTp *types.FieldType) ([]*point, error) { // Only handle normal string type here. // Currently, set won't be pushed down and it shouldn't reach here in theory. // For enum, we have separate logic for it, like handleEnumFromBinOp(). 
For now, it only supports point range, @@ -142,7 +134,7 @@ func pointsConvertToSortKey(sctx *rangerctx.RangerContext, inputPs []*point, new } ps := make([]*point, 0, len(inputPs)) for _, p := range inputPs { - np, err := pointConvertToSortKey(sctx, p, newTp, true) + np, err := pointConvertToSortKey(r, p, newTp, true) if err != nil { return nil, err } @@ -152,12 +144,12 @@ func pointsConvertToSortKey(sctx *rangerctx.RangerContext, inputPs []*point, new } func pointConvertToSortKey( - sctx *rangerctx.RangerContext, + r *builder, inputP *point, newTp *types.FieldType, trimTrailingSpace bool, ) (*point, error) { - p, err := convertPoint(sctx, inputP, newTp) + p, err := convertPoint(r, inputP, newTp) if err != nil { return nil, err } @@ -171,7 +163,7 @@ func pointConvertToSortKey( sortKey = collate.GetCollator(newTp.GetCollate()).Key(string(hack.String(sortKey))) } - return &point{value: types.NewBytesDatum(sortKey), excl: p.excl, start: p.start}, nil + return r.alloc(types.NewBytesDatum(sortKey), p.excl, p.start), nil } func (r *pointSorter) Swap(i, j int) { @@ -182,18 +174,18 @@ func (r *pointSorter) Swap(i, j int) { * If use []point, fullRange will be copied when used. * So for keep this behaver, getFullRange function is introduced. */ -func getFullRange() []*point { - return []*point{ - {start: true}, - {value: types.MaxValueDatum()}, - } +func getFullRange(allocator *Allocator) []*point { + return allocator.points2( + allocator.newPoint(types.Datum{}, false, true), + allocator.newPoint(types.MaxValueDatum(), false, false), + ) } -func getNotNullFullRange() []*point { - return []*point{ - {value: types.MinNotNullDatum(), start: true}, - {value: types.MaxValueDatum()}, - } +func getNotNullFullRange(allocator *Allocator) []*point { + return allocator.points2( + allocator.newPoint(types.MinNotNullDatum(), false, true), + allocator.newPoint(types.MaxValueDatum(), false, false), + ) } // FullIntRange is used for table range. 
Since table range cannot accept MaxValueDatum as the max value. @@ -242,8 +234,13 @@ func NullRange() Ranges { // builder is the range builder struct. type builder struct { - err error - sctx *rangerctx.RangerContext + err error + sctx *rangerctx.RangerContext + allocator *Allocator +} + +func (r *builder) alloc(value types.Datum, excl bool, start bool) *point { + return r.allocator.newPoint(value, excl, start) } // build converts Expression on one column into point, which can be further built into Range. @@ -268,7 +265,7 @@ func (r *builder) build( return r.buildFromConstant(x) } - return getFullRange() + return getFullRange(r.allocator) } func (r *builder) buildFromConstant(expr *expression.Constant) []*point { @@ -291,18 +288,18 @@ func (r *builder) buildFromConstant(expr *expression.Constant) []*point { if val == 0 { return nil } - return getFullRange() + return getFullRange(r.allocator) } -func (*builder) buildFromColumn() []*point { +func (r *builder) buildFromColumn() []*point { // column name expression is equivalent to column name is true. 
- startPoint1 := &point{value: types.MinNotNullDatum(), start: true} - endPoint1 := &point{excl: true} + startPoint1 := r.alloc(types.MinNotNullDatum(), false, true) + endPoint1 := r.alloc(types.Datum{}, true, false) endPoint1.value.SetInt64(0) - startPoint2 := &point{excl: true, start: true} + startPoint2 := r.alloc(types.Datum{}, true, true) startPoint2.value.SetInt64(0) - endPoint2 := &point{value: types.MaxValueDatum()} - return []*point{startPoint1, endPoint1, startPoint2, endPoint2} + endPoint2 := r.alloc(types.MaxValueDatum(), false, false) + return r.allocator.points4(startPoint1, endPoint1, startPoint2, endPoint2) } func (r *builder) buildFromBinOp( @@ -403,7 +400,7 @@ func (r *builder) buildFromBinOp( if err != nil { if op == ast.NE { // col != an impossible value (not valid year) - return getNotNullFullRange() + return getNotNullFullRange(r.allocator) } // col = an impossible value (not valid year) return nil @@ -420,50 +417,50 @@ func (r *builder) buildFromBinOp( } if ft.GetType() == mysql.TypeEnum && ft.EvalType() == types.ETString { - return handleEnumFromBinOp(tc, ft, value, op) + return handleEnumFromBinOp(r.allocator, tc, ft, value, op) } var res []*point switch op { case ast.NullEQ: if value.IsNull() { - res = []*point{{start: true}, {}} // [null, null] + res = r.allocator.points2(r.alloc(types.Datum{}, false, true), r.alloc(types.Datum{}, false, false)) // [null, null] break } fallthrough case ast.EQ: - startPoint := &point{value: value, start: true} - endPoint := &point{value: value} - res = []*point{startPoint, endPoint} + startPoint := r.alloc(value, false, true) + endPoint := r.alloc(value, false, false) + res = r.allocator.points2(startPoint, endPoint) case ast.NE: - startPoint1 := &point{value: types.MinNotNullDatum(), start: true} - endPoint1 := &point{value: value, excl: true} - startPoint2 := &point{value: value, start: true, excl: true} - endPoint2 := &point{value: types.MaxValueDatum()} - res = []*point{startPoint1, endPoint1, 
startPoint2, endPoint2} + startPoint1 := r.alloc(types.MinNotNullDatum(), false, true) + endPoint1 := r.alloc(value, true, false) + startPoint2 := r.alloc(value, true, true) + endPoint2 := r.alloc(types.MaxValueDatum(), false, false) + res = r.allocator.points4(startPoint1, endPoint1, startPoint2, endPoint2) case ast.LT: - startPoint := &point{value: types.MinNotNullDatum(), start: true} - endPoint := &point{value: value, excl: true} - res = []*point{startPoint, endPoint} + startPoint := r.alloc(types.MinNotNullDatum(), false, true) + endPoint := r.alloc(value, true, false) + res = r.allocator.points2(startPoint, endPoint) case ast.LE: - startPoint := &point{value: types.MinNotNullDatum(), start: true} - endPoint := &point{value: value} - res = []*point{startPoint, endPoint} + startPoint := r.alloc(types.MinNotNullDatum(), false, true) + endPoint := r.alloc(value, false, false) + res = r.allocator.points2(startPoint, endPoint) case ast.GT: - startPoint := &point{value: value, start: true, excl: true} - endPoint := &point{value: types.MaxValueDatum()} - res = []*point{startPoint, endPoint} + startPoint := r.alloc(value, true, true) + endPoint := r.alloc(types.MaxValueDatum(), false, false) + res = r.allocator.points2(startPoint, endPoint) case ast.GE: - startPoint := &point{value: value, start: true} - endPoint := &point{value: types.MaxValueDatum()} - res = []*point{startPoint, endPoint} + startPoint := r.alloc(value, false, true) + endPoint := r.alloc(types.MaxValueDatum(), false, false) + res = r.allocator.points2(startPoint, endPoint) } cutPrefixForPoints(res, prefixLen, ft) if convertToSortKey { - res, err = pointsConvertToSortKey(r.sctx, res, newTp) + res, err = pointsConvertToSortKey(r, res, newTp) if err != nil { r.err = err - return getFullRange() + return getFullRange(r.allocator) } } return res @@ -547,15 +544,14 @@ func handleBoundCol(ft *types.FieldType, val types.Datum, op string) (types.Datu return val, op, true } -func handleEnumFromBinOp(tc 
types.Context, ft *types.FieldType, val types.Datum, op string) []*point { +func handleEnumFromBinOp(allocator *Allocator, tc types.Context, ft *types.FieldType, val types.Datum, op string) []*point { res := make([]*point, 0, len(ft.GetElems())*2) appendPointFunc := func(d types.Datum) { - res = append(res, &point{value: d, excl: false, start: true}) - res = append(res, &point{value: d, excl: false, start: false}) + res = append(res, allocator.newPoint(d, false, true), allocator.newPoint(d, false, false)) } if op == ast.NullEQ && val.IsNull() { - res = append(res, &point{start: true}, &point{}) // null point + res = append(res, allocator.newPoint(types.Datum{}, false, true), allocator.newPoint(types.Datum{}, false, false)) } tmpEnum := types.Enum{} @@ -600,52 +596,52 @@ func handleEnumFromBinOp(tc types.Context, ft *types.FieldType, val types.Datum, return res } -func (*builder) buildFromIsTrue(_ *expression.ScalarFunction, isNot int, keepNull bool) []*point { +func (r *builder) buildFromIsTrue(_ *expression.ScalarFunction, isNot int, keepNull bool) []*point { if isNot == 1 { if keepNull { // Range is {[0, 0]} - startPoint := &point{start: true} + startPoint := r.alloc(types.Datum{}, false, true) startPoint.value.SetInt64(0) - endPoint := &point{} + endPoint := r.alloc(types.Datum{}, false, false) endPoint.value.SetInt64(0) - return []*point{startPoint, endPoint} + return r.allocator.points2(startPoint, endPoint) } // NOT TRUE range is {[null null] [0, 0]} - startPoint1 := &point{start: true} - endPoint1 := &point{} - startPoint2 := &point{start: true} + startPoint1 := r.alloc(types.Datum{}, false, true) + endPoint1 := r.alloc(types.Datum{}, false, false) + startPoint2 := r.alloc(types.Datum{}, false, true) startPoint2.value.SetInt64(0) - endPoint2 := &point{} + endPoint2 := r.alloc(types.Datum{}, false, false) endPoint2.value.SetInt64(0) - return []*point{startPoint1, endPoint1, startPoint2, endPoint2} + return r.allocator.points4(startPoint1, endPoint1, 
startPoint2, endPoint2) } // TRUE range is {[-inf 0) (0 +inf]} - startPoint1 := &point{value: types.MinNotNullDatum(), start: true} - endPoint1 := &point{excl: true} + startPoint1 := r.alloc(types.MinNotNullDatum(), false, true) + endPoint1 := r.alloc(types.Datum{}, true, false) endPoint1.value.SetInt64(0) - startPoint2 := &point{excl: true, start: true} + startPoint2 := r.alloc(types.Datum{}, true, true) startPoint2.value.SetInt64(0) - endPoint2 := &point{value: types.MaxValueDatum()} - return []*point{startPoint1, endPoint1, startPoint2, endPoint2} + endPoint2 := r.alloc(types.MaxValueDatum(), false, false) + return r.allocator.points4(startPoint1, endPoint1, startPoint2, endPoint2) } -func (*builder) buildFromIsFalse(_ *expression.ScalarFunction, isNot int) []*point { +func (r *builder) buildFromIsFalse(_ *expression.ScalarFunction, isNot int) []*point { if isNot == 1 { // NOT FALSE range is {[-inf, 0), (0, +inf], [null, null]} - startPoint1 := &point{start: true} - endPoint1 := &point{excl: true} + startPoint1 := r.alloc(types.Datum{}, false, true) + endPoint1 := r.alloc(types.Datum{}, true, false) endPoint1.value.SetInt64(0) - startPoint2 := &point{start: true, excl: true} + startPoint2 := r.alloc(types.Datum{}, true, true) startPoint2.value.SetInt64(0) - endPoint2 := &point{value: types.MaxValueDatum()} - return []*point{startPoint1, endPoint1, startPoint2, endPoint2} + endPoint2 := r.alloc(types.MaxValueDatum(), false, false) + return r.allocator.points4(startPoint1, endPoint1, startPoint2, endPoint2) } // FALSE range is {[0, 0]} - startPoint := &point{start: true} + startPoint := r.alloc(types.Datum{}, false, true) startPoint.value.SetInt64(0) - endPoint := &point{} + endPoint := r.alloc(types.Datum{}, false, false) endPoint.value.SetInt64(0) - return []*point{startPoint, endPoint} + return r.allocator.points2(startPoint, endPoint) } func (r *builder) buildFromIn( @@ -665,12 +661,12 @@ func (r *builder) buildFromIn( v, ok := e.(*expression.Constant) if !ok 
{ r.err = plannererrors.ErrUnsupportedType.GenWithStack("expr:%v is not constant", e.StringWithCtx(evalCtx, errors.RedactLogDisable)) - return getFullRange(), hasNull + return getFullRange(r.allocator), hasNull } dt, err := v.Eval(evalCtx, chunk.Row{}) if err != nil { r.err = plannererrors.ErrUnsupportedType.GenWithStack("expr:%v is not evaluated", e.StringWithCtx(evalCtx, errors.RedactLogDisable)) - return getFullRange(), hasNull + return getFullRange(r.allocator), hasNull } if dt.IsNull() { hasNull = true @@ -709,8 +705,8 @@ func (r *builder) buildFromIn( var startValue, endValue types.Datum dt.Copy(&startValue) dt.Copy(&endValue) - startPoint := &point{value: startValue, start: true} - endPoint := &point{value: endValue} + startPoint := r.alloc(startValue, false, true) + endPoint := r.alloc(endValue, false, false) rangePoints = append(rangePoints, startPoint, endPoint) } sorter := pointSorter{points: rangePoints, tc: tc, collator: collate.GetCollator(colCollate)} @@ -736,10 +732,10 @@ func (r *builder) buildFromIn( cutPrefixForPoints(rangePoints, prefixLen, ft) var err error if convertToSortKey { - rangePoints, err = pointsConvertToSortKey(r.sctx, rangePoints, newTp) + rangePoints, err = pointsConvertToSortKey(r, rangePoints, newTp) if err != nil { r.err = err - return getFullRange(), false + return getFullRange(r.allocator), false } } return rangePoints, hasNull @@ -753,29 +749,29 @@ func (r *builder) newBuildFromPatternLike( ) []*point { _, collation := expr.CharsetAndCollation() if !collate.CompatibleCollate(expr.GetArgs()[0].GetType(r.sctx.ExprCtx.GetEvalCtx()).GetCollate(), collation) { - return getFullRange() + return getFullRange(r.allocator) } pdt, err := expr.GetArgs()[1].(*expression.Constant).Eval(r.sctx.ExprCtx.GetEvalCtx(), chunk.Row{}) tpOfPattern := expr.GetArgs()[0].GetType(r.sctx.ExprCtx.GetEvalCtx()) if err != nil { r.err = errors.Trace(err) - return getFullRange() + return getFullRange(r.allocator) } pattern, err := pdt.ToString() if err != 
nil { r.err = errors.Trace(err) - return getFullRange() + return getFullRange(r.allocator) } // non-exceptional return case 1: empty pattern if pattern == "" { - startPoint := &point{value: types.NewStringDatum(""), start: true} - endPoint := &point{value: types.NewStringDatum("")} - res := []*point{startPoint, endPoint} + startPoint := r.alloc(types.NewStringDatum(""), false, true) + endPoint := r.alloc(types.NewStringDatum(""), false, false) + res := r.allocator.points2(startPoint, endPoint) if convertToSortKey { - res, err = pointsConvertToSortKey(r.sctx, res, newTp) + res, err = pointsConvertToSortKey(r, res, newTp) if err != nil { r.err = err - return getFullRange() + return getFullRange(r.allocator) } } return res @@ -784,7 +780,7 @@ func (r *builder) newBuildFromPatternLike( edt, err := expr.GetArgs()[2].(*expression.Constant).Eval(r.sctx.ExprCtx.GetEvalCtx(), chunk.Row{}) if err != nil { r.err = errors.Trace(err) - return getFullRange() + return getFullRange(r.allocator) } escape := byte(edt.GetInt64()) var exclude bool @@ -821,20 +817,20 @@ func (r *builder) newBuildFromPatternLike( } // non-exceptional return case 2: no characters before the wildcard if len(lowValue) == 0 { - return []*point{{value: types.MinNotNullDatum(), start: true}, {value: types.MaxValueDatum()}} + return r.allocator.points2(r.alloc(types.MinNotNullDatum(), false, true), r.alloc(types.MaxValueDatum(), false, false)) } // non-exceptional return case 3: pattern contains valid characters and doesn't contain the wildcard if isExactMatch { val := types.NewCollationStringDatum(string(lowValue), tpOfPattern.GetCollate()) - startPoint := &point{value: val, start: true} - endPoint := &point{value: val} - res := []*point{startPoint, endPoint} + startPoint := r.alloc(val, false, true) + endPoint := r.alloc(val, false, false) + res := r.allocator.points2(startPoint, endPoint) cutPrefixForPoints(res, prefixLen, tpOfPattern) if convertToSortKey { - res, err = pointsConvertToSortKey(r.sctx, res, 
newTp) + res, err = pointsConvertToSortKey(r, res, newTp) if err != nil { r.err = err - return getFullRange() + return getFullRange(r.allocator) } } return res @@ -847,12 +843,12 @@ func (r *builder) newBuildFromPatternLike( // a range for the wildcard. if !convertToSortKey && !collate.IsBinCollation(tpOfPattern.GetCollate()) { - return []*point{{value: types.MinNotNullDatum(), start: true}, {value: types.MaxValueDatum()}} + return r.allocator.points2(r.alloc(types.MinNotNullDatum(), false, true), r.alloc(types.MaxValueDatum(), false, false)) } // non-exceptional return case 4-2: build a range for the wildcard // the end_key is sortKey(start_value) + 1 - originalStartPoint := &point{start: true, excl: exclude} + originalStartPoint := r.alloc(types.Datum{}, exclude, true) originalStartPoint.value.SetBytesAsString(lowValue, tpOfPattern.GetCollate(), uint32(tpOfPattern.GetFlen())) cutPrefixForPoints([]*point{originalStartPoint}, prefixLen, tpOfPattern) @@ -863,18 +859,18 @@ func (r *builder) newBuildFromPatternLike( // column, the start key should be 'abd' instead of 'abc ', but the end key can be 'abc!'. ( ' ' is 32 and '!' is 33 // in ASCII) shouldTrimTrailingSpace := collate.IsPadSpaceCollation(collation) - startPoint, err := pointConvertToSortKey(r.sctx, originalStartPoint, newTp, shouldTrimTrailingSpace) + startPoint, err := pointConvertToSortKey(r, originalStartPoint, newTp, shouldTrimTrailingSpace) if err != nil { r.err = errors.Trace(err) - return getFullRange() + return getFullRange(r.allocator) } - sortKeyPointWithoutTrim, err := pointConvertToSortKey(r.sctx, originalStartPoint, newTp, false) + sortKeyPointWithoutTrim, err := pointConvertToSortKey(r, originalStartPoint, newTp, false) if err != nil { r.err = errors.Trace(err) - return getFullRange() + return getFullRange(r.allocator) } sortKeyWithoutTrim := append([]byte{}, sortKeyPointWithoutTrim.value.GetBytes()...) 
- endPoint := &point{value: types.MaxValueDatum(), excl: true} + endPoint := r.alloc(types.MaxValueDatum(), true, false) for i := len(sortKeyWithoutTrim) - 1; i >= 0; i-- { // Make the end point value more than the start point value, // and the length of the end point value is the same as the length of the start point value. @@ -889,7 +885,7 @@ func (r *builder) newBuildFromPatternLike( endPoint.value = types.MaxValueDatum() } } - return []*point{startPoint, endPoint} + return r.allocator.points2(startPoint, endPoint) } func (r *builder) buildFromNot( @@ -936,36 +932,36 @@ func (r *builder) buildFromNot( retRangePoints := make([]*point, 0, 2+len(rangePoints)) previousValue := types.Datum{} for i := 0; i < len(rangePoints); i += 2 { - retRangePoints = append(retRangePoints, &point{value: previousValue, start: true, excl: true}) - retRangePoints = append(retRangePoints, &point{value: rangePoints[i].value, excl: true}) + retRangePoints = append(retRangePoints, r.alloc(previousValue, true, true)) + retRangePoints = append(retRangePoints, r.alloc(rangePoints[i].value, true, false)) previousValue = rangePoints[i].value } // Append the interval (last element, max value]. - retRangePoints = append(retRangePoints, &point{value: previousValue, start: true, excl: true}) - retRangePoints = append(retRangePoints, &point{value: types.MaxValueDatum()}) + retRangePoints = append(retRangePoints, r.alloc(previousValue, true, true)) + retRangePoints = append(retRangePoints, r.alloc(types.MaxValueDatum(), false, false)) cutPrefixForPoints(retRangePoints, prefixLen, expr.GetArgs()[0].GetType(r.sctx.ExprCtx.GetEvalCtx())) if convertToSortKey { var err error - retRangePoints, err = pointsConvertToSortKey(r.sctx, retRangePoints, newTp) + retRangePoints, err = pointsConvertToSortKey(r, retRangePoints, newTp) if err != nil { r.err = err - return getFullRange() + return getFullRange(r.allocator) } } return retRangePoints case ast.Like: // Pattern not like is not supported. 
r.err = plannererrors.ErrUnsupportedType.GenWithStack("NOT LIKE is not supported.") - return getFullRange() + return getFullRange(r.allocator) case ast.IsNull: - startPoint := &point{value: types.MinNotNullDatum(), start: true} - endPoint := &point{value: types.MaxValueDatum()} - return []*point{startPoint, endPoint} + startPoint := r.alloc(types.MinNotNullDatum(), false, true) + endPoint := r.alloc(types.MaxValueDatum(), false, false) + return r.allocator.points2(startPoint, endPoint) } // TODO: currently we don't handle ast.LogicAnd, ast.LogicOr, ast.GT, ast.LT and so on. Most of those cases are eliminated // by PushDownNot but they may happen. For now, we return full range for those unhandled cases in order to keep correctness. // Later we need to cover those cases and set r.err when meeting some unexpected case. - return getFullRange() + return getFullRange(r.allocator) } func (r *builder) buildFromScalarFunc( @@ -1001,9 +997,9 @@ func (r *builder) buildFromScalarFunc( case ast.Like: return r.newBuildFromPatternLike(expr, newTp, prefixLen, convertToSortKey) case ast.IsNull: - startPoint := &point{start: true} - endPoint := &point{} - return []*point{startPoint, endPoint} + startPoint := r.alloc(types.Datum{}, false, true) + endPoint := r.alloc(types.Datum{}, false, false) + return r.allocator.points2(startPoint, endPoint) case ast.UnaryNot: return r.buildFromNot(expr.GetArgs()[0].(*expression.ScalarFunction), newTp, prefixLen, convertToSortKey) } diff --git a/pkg/util/ranger/ranger.go b/pkg/util/ranger/ranger.go index e18d3e0c4dd15..5ccb9c4c2eb02 100644 --- a/pkg/util/ranger/ranger.go +++ b/pkg/util/ranger/ranger.go @@ -38,31 +38,40 @@ import ( "github.com/pingcap/tidb/pkg/util/codec" "github.com/pingcap/tidb/pkg/util/collate" rangerctx "github.com/pingcap/tidb/pkg/util/ranger/context" + "github.com/pingcap/tidb/pkg/util/zeropool" ) -func validInterval(ec errctx.Context, loc *time.Location, low, high *point) (bool, error) { - l, err := 
codec.EncodeKey(loc, nil, low.value) +var keybufPool = zeropool.New(func() []byte { return make([]byte, 256) }) + +func validInterval(buf []byte, ec errctx.Context, loc *time.Location, low, high *point) ([]byte, bool, error) { + var err error + buf, err = codec.EncodeKey(loc, buf, low.value) err = ec.HandleError(err) if err != nil { - return false, errors.Trace(err) + return buf, false, errors.Trace(err) } if low.excl { - l = kv.Key(l).PrefixNext() + if !kv.Key(buf).PrefixNextInPlace() { + buf = append(buf, 0) + } } - r, err := codec.EncodeKey(loc, nil, high.value) + pos := len(buf) + buf, err = codec.EncodeKey(loc, buf, high.value) err = ec.HandleError(err) if err != nil { - return false, errors.Trace(err) + return buf, false, errors.Trace(err) } if !high.excl { - r = kv.Key(r).PrefixNext() + if !kv.Key(buf[pos:]).PrefixNextInPlace() { + buf = append(buf, 0) + } } - return bytes.Compare(l, r) < 0, nil + return buf, bytes.Compare(buf[:pos], buf[pos:]) < 0, nil } // convertPoints does some preprocessing on rangePoints to make them ready to build ranges. Preprocessing includes converting // points to the specified type, validating intervals and skipping impossible intervals. 
-func convertPoints(sctx *rangerctx.RangerContext, rangePoints []*point, newTp *types.FieldType, skipNull bool, tableRange bool) ([]*point, error) {
+func convertPoints(r *builder, rangePoints []*point, newTp *types.FieldType, skipNull bool, tableRange bool) ([]*point, error) {
 	i := 0
 	numPoints := len(rangePoints)
 	var minValueDatum, maxValueDatum types.Datum
@@ -77,8 +86,11 @@
 			maxValueDatum.SetInt64(math.MaxInt64)
 		}
 	}
+	buf := keybufPool.Get()
+	less := false
 	for j := 0; j < numPoints; j += 2 {
-		startPoint, err := convertPoint(sctx, rangePoints[j], newTp)
+		startPoint, err := convertPoint(r, rangePoints[j], newTp)
 		if err != nil {
+			keybufPool.Put(buf)
 			return nil, errors.Trace(err)
 		}
@@ -90,8 +102,9 @@
 				startPoint.value = minValueDatum
 			}
 		}
-		endPoint, err := convertPoint(sctx, rangePoints[j+1], newTp)
+		endPoint, err := convertPoint(r, rangePoints[j+1], newTp)
 		if err != nil {
+			keybufPool.Put(buf)
 			return nil, errors.Trace(err)
 		}
 		if tableRange {
@@ -102,8 +115,9 @@
 		if skipNull && endPoint.value.Kind() == types.KindNull {
 			continue
 		}
-		less, err := validInterval(sctx.ErrCtx, sctx.TypeCtx.Location(), startPoint, endPoint)
+		buf, less, err = validInterval(buf[:0], r.sctx.ErrCtx, r.sctx.TypeCtx.Location(), startPoint, endPoint)
 		if err != nil {
+			keybufPool.Put(buf)
 			return nil, errors.Trace(err)
 		}
 		if !less {
@@ -113,6 +127,7 @@
 		rangePoints[i] = startPoint
 		rangePoints[i+1] = endPoint
 		i += 2
 	}
+	keybufPool.Put(buf)
 	return rangePoints[:i], nil
 }
@@ -126,8 +141,8 @@ func estimateMemUsageForPoints2Ranges(rangePoints []*point) int64 {
 // Only one column is built there. If there're multiple columns, use appendPoints2Ranges.
 // rangeMaxSize is the max memory limit for ranges. O indicates no memory limit.
// If the second return value is true, it means that the estimated memory usage of ranges exceeds rangeMaxSize and it falls back to full range. -func points2Ranges(sctx *rangerctx.RangerContext, rangePoints []*point, newTp *types.FieldType, rangeMaxSize int64) (Ranges, bool, error) { - convertedPoints, err := convertPoints(sctx, rangePoints, newTp, mysql.HasNotNullFlag(newTp.GetFlag()), false) +func points2Ranges(r *builder, rangePoints []*point, newTp *types.FieldType, rangeMaxSize int64) (Ranges, bool, error) { + convertedPoints, err := convertPoints(r, rangePoints, newTp, mysql.HasNotNullFlag(newTp.GetFlag()), false) if err != nil { return nil, false, errors.Trace(err) } @@ -141,22 +155,11 @@ func points2Ranges(sctx *rangerctx.RangerContext, rangePoints []*point, newTp *t } return fullRange, true, nil } - ranges := make(Ranges, 0, len(convertedPoints)/2) - for i := 0; i < len(convertedPoints); i += 2 { - startPoint, endPoint := convertedPoints[i], convertedPoints[i+1] - ran := &Range{ - LowVal: []types.Datum{startPoint.value}, - LowExclude: startPoint.excl, - HighVal: []types.Datum{endPoint.value}, - HighExclude: endPoint.excl, - Collators: []collate.Collator{collate.GetCollator(newTp.GetCollate())}, - } - ranges = append(ranges, ran) - } - return ranges, false, nil + return r.allocator.rangesFromPoints(convertedPoints, collate.GetCollator(newTp.GetCollate())), false, nil } -func convertPoint(sctx *rangerctx.RangerContext, point *point, newTp *types.FieldType) (*point, error) { +func convertPoint(r *builder, point *point, newTp *types.FieldType) (*point, error) { + sctx := r.sctx switch point.value.Kind() { case types.KindMaxValue, types.KindMinNotNull: return point, nil @@ -204,7 +207,7 @@ func convertPoint(sctx *rangerctx.RangerContext, point *point, newTp *types.Fiel if err != nil { return point, errors.Trace(err) } - npoint := point.Clone(casted) + npoint := r.allocator.newPoint(casted, point.excl, point.start) if valCmpCasted == 0 { return npoint, nil } @@ 
-273,9 +276,9 @@ func estimateMemUsageForAppendPoints2Ranges(origin Ranges, rangePoints []*point) // rangeMaxSize is the max memory limit for ranges. O indicates no memory limit. // If the second return value is true, it means that the estimated memory usage of ranges after appending points exceeds // rangeMaxSize and the function rejects appending points to ranges. -func appendPoints2Ranges(sctx *rangerctx.RangerContext, origin Ranges, rangePoints []*point, +func appendPoints2Ranges(r *builder, origin Ranges, rangePoints []*point, newTp *types.FieldType, rangeMaxSize int64) (Ranges, bool, error) { - convertedPoints, err := convertPoints(sctx, rangePoints, newTp, false, false) + convertedPoints, err := convertPoints(r, rangePoints, newTp, false, false) if err != nil { return nil, false, errors.Trace(err) } @@ -283,24 +286,31 @@ func appendPoints2Ranges(sctx *rangerctx.RangerContext, origin Ranges, rangePoin if rangeMaxSize > 0 && estimateMemUsageForAppendPoints2Ranges(origin, convertedPoints) > rangeMaxSize { return origin, true, nil } - var newIndexRanges Ranges + size := 0 + for _, ran := range origin { + if ran.IsPoint(r.sctx) { + size += len(convertedPoints) / 2 + } else { + size++ + } + } + newIndexRanges := make(Ranges, 0, size) for i := range origin { oRange := origin[i] - if !oRange.IsPoint(sctx) { + if !oRange.IsPoint(r.sctx) { newIndexRanges = append(newIndexRanges, oRange) } else { - newRanges, err := appendPoints2IndexRange(oRange, convertedPoints, newTp) + newIndexRanges, err = appendPoints2IndexRange(newIndexRanges, oRange, convertedPoints, newTp) if err != nil { return nil, false, errors.Trace(err) } - newIndexRanges = append(newIndexRanges, newRanges...) 
} } return newIndexRanges, false, nil } -func appendPoints2IndexRange(origin *Range, rangePoints []*point, ft *types.FieldType) (Ranges, error) { - newRanges := make(Ranges, 0, len(rangePoints)/2) +func appendPoints2IndexRange(dst Ranges, origin *Range, rangePoints []*point, ft *types.FieldType) (Ranges, error) { + collator := collate.GetCollator(ft.GetCollate()) for i := 0; i < len(rangePoints); i += 2 { startPoint, endPoint := rangePoints[i], rangePoints[i+1] @@ -314,7 +324,7 @@ func appendPoints2IndexRange(origin *Range, rangePoints []*point, ft *types.Fiel collators := make([]collate.Collator, len(origin.Collators)+1) copy(collators, origin.Collators) - collators[len(origin.Collators)] = collate.GetCollator(ft.GetCollate()) + collators[len(origin.Collators)] = collator ir := &Range{ LowVal: lowVal, @@ -323,9 +333,9 @@ func appendPoints2IndexRange(origin *Range, rangePoints []*point, ft *types.Fiel HighExclude: endPoint.excl, Collators: collators, } - newRanges = append(newRanges, ir) + dst = append(dst, ir) } - return newRanges, nil + return dst, nil } // estimateMemUsageForAppendRanges2PointRanges estimates the memory usage of results of appending ranges to point ranges. @@ -386,37 +396,26 @@ func AppendRanges2PointRanges(pointRanges Ranges, ranges Ranges, rangeMaxSize in // It will remove the nil and convert MinNotNull and MaxValue to MinInt64 or MinUint64 and MaxInt64 or MaxUint64. // rangeMaxSize is the max memory limit for ranges. O indicates no memory limit. // If the second return value is true, it means that the estimated memory usage of ranges exceeds rangeMaxSize and it falls back to full range. 
-func points2TableRanges(sctx *rangerctx.RangerContext, rangePoints []*point, newTp *types.FieldType, rangeMaxSize int64) (Ranges, bool, error) { - convertedPoints, err := convertPoints(sctx, rangePoints, newTp, true, true) +func points2TableRanges(r *builder, rangePoints []*point, newTp *types.FieldType, rangeMaxSize int64) (Ranges, bool, error) { + convertedPoints, err := convertPoints(r, rangePoints, newTp, true, true) if err != nil { return nil, false, errors.Trace(err) } if rangeMaxSize > 0 && estimateMemUsageForPoints2Ranges(convertedPoints) > rangeMaxSize { return FullIntRange(mysql.HasUnsignedFlag(newTp.GetFlag())), true, nil } - ranges := make(Ranges, 0, len(convertedPoints)/2) - for i := 0; i < len(convertedPoints); i += 2 { - startPoint, endPoint := convertedPoints[i], convertedPoints[i+1] - ran := &Range{ - LowVal: []types.Datum{startPoint.value}, - LowExclude: startPoint.excl, - HighVal: []types.Datum{endPoint.value}, - HighExclude: endPoint.excl, - Collators: []collate.Collator{collate.GetCollator(newTp.GetCollate())}, - } - ranges = append(ranges, ran) - } - return ranges, false, nil + return r.allocator.rangesFromPoints(convertedPoints, collate.GetCollator(newTp.GetCollate())), false, nil } // buildColumnRange builds range from CNF conditions. // rangeMaxSize is the max memory limit for ranges. O indicates no memory limit. // The second return value is the conditions used to build ranges and the third return value is the remained conditions. 
-func buildColumnRange(accessConditions []expression.Expression, sctx *rangerctx.RangerContext, tp *types.FieldType, tableRange bool, +func buildColumnRange(accessConditions []expression.Expression, sctx *rangerctx.RangerContext, allocator *Allocator, tp *types.FieldType, tableRange bool, colLen int, rangeMaxSize int64) (Ranges, []expression.Expression, []expression.Expression, error) { - rb := builder{sctx: sctx} - newTp := newFieldType(tp) - rangePoints := getFullRange() + rb := builder{sctx: sctx, allocator: allocator} + tpVal := newFieldTypeValue(tp) + newTp := &tpVal + rangePoints := getFullRange(rb.allocator) for _, cond := range accessConditions { collator := collate.GetCollator(charset.CollationBin) rangePoints = rb.intersection(rangePoints, rb.build(cond, newTp, colLen, true), collator) @@ -431,9 +430,9 @@ func buildColumnRange(accessConditions []expression.Expression, sctx *rangerctx. ) newTp = convertStringFTToBinaryCollate(newTp) if tableRange { - ranges, rangeFallback, err = points2TableRanges(sctx, rangePoints, newTp, rangeMaxSize) + ranges, rangeFallback, err = points2TableRanges(&rb, rangePoints, newTp, rangeMaxSize) } else { - ranges, rangeFallback, err = points2Ranges(sctx, rangePoints, newTp, rangeMaxSize) + ranges, rangeFallback, err = points2Ranges(&rb, rangePoints, newTp, rangeMaxSize) } if err != nil { return nil, nil, nil, errors.Trace(err) @@ -459,7 +458,13 @@ func buildColumnRange(accessConditions []expression.Expression, sctx *rangerctx. // conditions by the second and third return values respectively. 
func BuildTableRange(accessConditions []expression.Expression, sctx *rangerctx.RangerContext, tp *types.FieldType, rangeMaxSize int64) (Ranges, []expression.Expression, []expression.Expression, error) { - return buildColumnRange(accessConditions, sctx, tp, true, types.UnspecifiedLength, rangeMaxSize) + return buildColumnRange(accessConditions, sctx, nil, tp, true, types.UnspecifiedLength, rangeMaxSize) +} + +// BuildTableRangeWithAllocator like BuildTableRange, but use the given allocator. +func BuildTableRangeWithAllocator(accessConditions []expression.Expression, sctx *rangerctx.RangerContext, allocator *Allocator, + tp *types.FieldType, rangeMaxSize int64) (Ranges, []expression.Expression, []expression.Expression, error) { + return buildColumnRange(accessConditions, sctx, allocator, tp, true, types.UnspecifiedLength, rangeMaxSize) } // BuildColumnRange builds range from access conditions for general columns. @@ -473,12 +478,12 @@ func BuildColumnRange(conds []expression.Expression, sctx *rangerctx.RangerConte if len(conds) == 0 { return FullRange(), nil, nil, nil } - return buildColumnRange(conds, sctx, tp, false, colLen, rangeMemQuota) + return buildColumnRange(conds, sctx, nil, tp, false, colLen, rangeMemQuota) } -func (d *rangeDetacher) buildRangeOnColsByCNFCond(newTp []*types.FieldType, eqAndInCount int, +func (d *rangeDetacher) buildRangeOnColsByCNFCond(newTp []types.FieldType, eqAndInCount int, accessConds []expression.Expression) (Ranges, []expression.Expression, []expression.Expression, error) { - rb := builder{sctx: d.sctx} + rb := builder{sctx: d.sctx, allocator: d.allocator} var ( ranges Ranges rangeFallback bool @@ -486,18 +491,18 @@ func (d *rangeDetacher) buildRangeOnColsByCNFCond(newTp []*types.FieldType, eqAn ) for i := range eqAndInCount { // Build ranges for equal or in access conditions. 
- point := rb.build(accessConds[i], newTp[i], d.lengths[i], d.convertToSortKey) + point := rb.build(accessConds[i], &newTp[i], d.lengths[i], d.convertToSortKey) if rb.err != nil { return nil, nil, nil, errors.Trace(rb.err) } - tmpNewTp := newTp[i] + tmpNewTp := &newTp[i] if d.convertToSortKey { tmpNewTp = convertStringFTToBinaryCollate(tmpNewTp) } if i == 0 { - ranges, rangeFallback, err = points2Ranges(d.sctx, point, tmpNewTp, d.rangeMaxSize) + ranges, rangeFallback, err = points2Ranges(&rb, point, tmpNewTp, d.rangeMaxSize) } else { - ranges, rangeFallback, err = appendPoints2Ranges(d.sctx, ranges, point, tmpNewTp, d.rangeMaxSize) + ranges, rangeFallback, err = appendPoints2Ranges(&rb, ranges, point, tmpNewTp, d.rangeMaxSize) } if err != nil { return nil, nil, nil, errors.Trace(err) @@ -507,14 +512,14 @@ func (d *rangeDetacher) buildRangeOnColsByCNFCond(newTp []*types.FieldType, eqAn return ranges, accessConds[:i], accessConds[i:], nil } } - rangePoints := getFullRange() + rangePoints := getFullRange(rb.allocator) // Build rangePoints for non-equal access conditions. 
for i := eqAndInCount; i < len(accessConds); i++ { collator := collate.GetCollator(newTp[eqAndInCount].GetCollate()) if d.convertToSortKey { collator = collate.GetCollator(charset.CollationBin) } - rangePoints = rb.intersection(rangePoints, rb.build(accessConds[i], newTp[eqAndInCount], d.lengths[eqAndInCount], d.convertToSortKey), collator) + rangePoints = rb.intersection(rangePoints, rb.build(accessConds[i], &newTp[eqAndInCount], d.lengths[eqAndInCount], d.convertToSortKey), collator) if rb.err != nil { return nil, nil, nil, errors.Trace(rb.err) } @@ -522,15 +527,15 @@ func (d *rangeDetacher) buildRangeOnColsByCNFCond(newTp []*types.FieldType, eqAn var tmpNewTp *types.FieldType if eqAndInCount == 0 || eqAndInCount < len(accessConds) { if d.convertToSortKey { - tmpNewTp = convertStringFTToBinaryCollate(newTp[eqAndInCount]) + tmpNewTp = convertStringFTToBinaryCollate(&newTp[eqAndInCount]) } else { - tmpNewTp = newTp[eqAndInCount] + tmpNewTp = &newTp[eqAndInCount] } } if eqAndInCount == 0 { - ranges, rangeFallback, err = points2Ranges(d.sctx, rangePoints, tmpNewTp, d.rangeMaxSize) + ranges, rangeFallback, err = points2Ranges(&rb, rangePoints, tmpNewTp, d.rangeMaxSize) } else if eqAndInCount < len(accessConds) { - ranges, rangeFallback, err = appendPoints2Ranges(d.sctx, ranges, rangePoints, tmpNewTp, d.rangeMaxSize) + ranges, rangeFallback, err = appendPoints2Ranges(&rb, ranges, rangePoints, tmpNewTp, d.rangeMaxSize) } if err != nil { return nil, nil, nil, errors.Trace(err) @@ -555,7 +560,7 @@ func convertStringFTToBinaryCollate(ft *types.FieldType) *types.FieldType { } // buildCNFIndexRange builds the range for index where the top layer is CNF. 
-func (d *rangeDetacher) buildCNFIndexRange(newTp []*types.FieldType, eqAndInCount int, +func (d *rangeDetacher) buildCNFIndexRange(newTp []types.FieldType, eqAndInCount int, accessConds []expression.Expression) (Ranges, []expression.Expression, []expression.Expression, error) { ranges, newAccessConds, remainedConds, err := d.buildRangeOnColsByCNFCond(newTp, eqAndInCount, accessConds) if err != nil { @@ -706,23 +711,16 @@ func ReachPrefixLen(v *types.Datum, length int, tp *types.FieldType) bool { // But we cannot use the FieldType of column directly. e.g. the column a is int32 and we have a > 1111111111111111111. // Obviously the constant is bigger than MaxInt32, so we will get overflow error if we use the FieldType of column a. // In util/ranger here, we usually use "newTp" to emphasize its difference from the original FieldType of the column. -func newFieldType(tp *types.FieldType) *types.FieldType { +func newFieldTypeValue(tp *types.FieldType) (newTp types.FieldType) { + newTp = *tp switch tp.GetType() { - // To avoid overflow error. case mysql.TypeTiny, mysql.TypeShort, mysql.TypeInt24, mysql.TypeLong, mysql.TypeLonglong: - newTp := types.NewFieldType(mysql.TypeLonglong) - newTp.SetFlag(tp.GetFlag()) - newTp.SetCharset(tp.GetCharset()) - return newTp - // To avoid data truncate error. 
+ newTp.SetType(mysql.TypeLonglong) case mysql.TypeFloat, mysql.TypeDouble, mysql.TypeBlob, mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob, mysql.TypeString, mysql.TypeVarchar, mysql.TypeVarString: - newTp := types.NewFieldTypeWithCollation(tp.GetType(), tp.GetCollate(), types.UnspecifiedLength) - newTp.SetCharset(tp.GetCharset()) - return newTp - default: - return tp + newTp.SetFlen(types.UnspecifiedLength) } + return } // points2EqOrInCond constructs a 'EQUAL' or 'IN' scalar function based on the diff --git a/pkg/util/ranger/ranger_test.go b/pkg/util/ranger/ranger_test.go index 46ae80fa73008..50676ccffc17a 100644 --- a/pkg/util/ranger/ranger_test.go +++ b/pkg/util/ranger/ranger_test.go @@ -274,8 +274,10 @@ func TestTableRange(t *testing.T) { } ctx := context.Background() + allocator := new(ranger.Allocator) for _, tt := range tests { t.Run(tt.exprStr, func(t *testing.T) { + allocator.Reset() sql := "select * from t where " + tt.exprStr sctx := testKit.Session() rctx := sctx.GetRangerCtx() @@ -301,7 +303,7 @@ func TestTableRange(t *testing.T) { conds, filter = ranger.DetachCondsForColumn(rctx, conds, col) require.Equal(t, tt.accessConds, expression.StringifyExpressionsWithCtx(ectx, conds)) require.Equal(t, tt.filterConds, expression.StringifyExpressionsWithCtx(ectx, filter)) - result, _, _, err := ranger.BuildTableRange(conds, rctx, col.RetType, 0) + result, _, _, err := ranger.BuildTableRangeWithAllocator(conds, rctx, allocator, col.RetType, 0) require.NoError(t, err) got := fmt.Sprintf("%v", result) require.Equal(t, tt.resultStr, got) @@ -474,8 +476,10 @@ create table t( } ctx := context.Background() + allocator := new(ranger.Allocator) for _, tt := range tests { t.Run(tt.exprStr, func(t *testing.T) { + allocator.Reset() sql := "select * from t where " + tt.exprStr sctx := testKit.Session() rctx := sctx.GetRangerCtx() @@ -498,7 +502,7 @@ create table t( } cols, lengths := expression.IndexInfo2PrefixCols(tbl.Columns, selection.Schema().Columns, 
tbl.Indices[tt.indexPos]) require.NotNil(t, cols) - res, err := ranger.DetachCondAndBuildRangeForIndex(rctx, conds, cols, lengths, 0) + res, err := ranger.DetachCondAndBuildRangeForIndexWithAllocator(rctx, allocator, conds, cols, lengths, 0) require.NoError(t, err) require.Equal(t, tt.accessConds, expression.StringifyExpressionsWithCtx(ectx, res.AccessConds)) require.Equal(t, tt.filterConds, expression.StringifyExpressionsWithCtx(ectx, res.RemainedConds)) @@ -998,8 +1002,10 @@ func TestIndexRangeForYear(t *testing.T) { } ctx := context.Background() + allocator := new(ranger.Allocator) for _, tt := range tests { t.Run(tt.exprStr, func(t *testing.T) { + allocator.Reset() sql := "select * from t where " + tt.exprStr sctx := testKit.Session() rctx := sctx.GetRangerCtx() @@ -1022,7 +1028,7 @@ func TestIndexRangeForYear(t *testing.T) { } cols, lengths := expression.IndexInfo2PrefixCols(tbl.Columns, selection.Schema().Columns, tbl.Indices[tt.indexPos]) require.NotNil(t, cols) - res, err := ranger.DetachCondAndBuildRangeForIndex(rctx, conds, cols, lengths, 0) + res, err := ranger.DetachCondAndBuildRangeForIndexWithAllocator(rctx, allocator, conds, cols, lengths, 0) require.NoError(t, err) require.Equal(t, tt.accessConds, expression.StringifyExpressionsWithCtx(ectx, res.AccessConds)) require.Equal(t, tt.filterConds, expression.StringifyExpressionsWithCtx(ectx, res.RemainedConds)) @@ -1069,8 +1075,10 @@ func TestPrefixIndexRangeScan(t *testing.T) { } ctx := context.Background() + allocator := new(ranger.Allocator) for _, tt := range tests { t.Run(tt.exprStr, func(t *testing.T) { + allocator.Reset() sql := "select * from t where " + tt.exprStr sctx := testKit.Session() rctx := sctx.GetRangerCtx() @@ -1093,7 +1101,7 @@ func TestPrefixIndexRangeScan(t *testing.T) { } cols, lengths := expression.IndexInfo2PrefixCols(tbl.Columns, selection.Schema().Columns, tbl.Indices[tt.indexPos]) require.NotNil(t, cols) - res, err := ranger.DetachCondAndBuildRangeForIndex(rctx, conds, cols, 
lengths, 0) + res, err := ranger.DetachCondAndBuildRangeForIndexWithAllocator(rctx, allocator, conds, cols, lengths, 0) require.NoError(t, err) require.Equal(t, tt.accessConds, expression.StringifyExpressionsWithCtx(ectx, res.AccessConds)) require.Equal(t, tt.filterConds, expression.StringifyExpressionsWithCtx(ectx, res.RemainedConds)) @@ -1419,8 +1427,10 @@ create table t( } ctx := context.Background() + allocator := new(ranger.Allocator) for _, tt := range tests { t.Run(tt.exprStr, func(t *testing.T) { + allocator.Reset() sql := "select * from t where " + tt.exprStr sctx := testKit.Session() ectx := sctx.GetExprCtx().GetEvalCtx() @@ -1442,7 +1452,7 @@ create table t( } cols, lengths := expression.IndexInfo2PrefixCols(tbl.Columns, selection.Schema().Columns, tbl.Indices[tt.indexPos]) require.NotNil(t, cols) - res, err := ranger.DetachCondAndBuildRangeForIndex(sctx.GetRangerCtx(), conds, cols, lengths, 0) + res, err := ranger.DetachCondAndBuildRangeForIndexWithAllocator(sctx.GetRangerCtx(), allocator, conds, cols, lengths, 0) require.NoError(t, err) require.Equal(t, tt.accessConds, expression.StringifyExpressionsWithCtx(ectx, res.AccessConds)) require.Equal(t, tt.filterConds, expression.StringifyExpressionsWithCtx(ectx, res.RemainedConds)) @@ -1908,6 +1918,7 @@ func TestRangeFallbackForDetachCondAndBuildRangeForIndex(t *testing.T) { tblInfo := tbl.Meta() sctx := tk.Session() rctx := sctx.GetRangerCtx() + allocator := new(ranger.Allocator) // test CNF condition sql := "select * from t1 where a in (10,20,30) and b in (40,50,60) and c >= 70 and c <= 80" @@ -1915,7 +1926,8 @@ func TestRangeFallbackForDetachCondAndBuildRangeForIndex(t *testing.T) { conds := selection.Conditions require.Equal(t, 4, len(conds)) cols, lengths := expression.IndexInfo2PrefixCols(tblInfo.Columns, selection.Schema().Columns, tblInfo.Indices[0]) - res, err := ranger.DetachCondAndBuildRangeForIndex(rctx, conds, cols, lengths, 0) + allocator.Reset() + res, err := 
ranger.DetachCondAndBuildRangeForIndexWithAllocator(rctx, allocator, conds, cols, lengths, 0) require.NoError(t, err) checkDetachRangeResult(t, res, "[in(test.t1.a, 10, 20, 30) in(test.t1.b, 40, 50, 60) ge(test.t1.c, 70) le(test.t1.c, 80)]", @@ -1923,7 +1935,8 @@ func TestRangeFallbackForDetachCondAndBuildRangeForIndex(t *testing.T) { "[[10 40 70,10 40 80] [10 50 70,10 50 80] [10 60 70,10 60 80] [20 40 70,20 40 80] [20 50 70,20 50 80] [20 60 70,20 60 80] [30 40 70,30 40 80] [30 50 70,30 50 80] [30 60 70,30 60 80]]") checkRangeFallbackAndReset(t, sctx, false) quota := res.Ranges.MemUsage() - 1 - res, err = ranger.DetachCondAndBuildRangeForIndex(rctx, conds, cols, lengths, quota) + allocator.Reset() + res, err = ranger.DetachCondAndBuildRangeForIndexWithAllocator(rctx, allocator, conds, cols, lengths, quota) require.NoError(t, err) checkDetachRangeResult(t, res, "[in(test.t1.a, 10, 20, 30) in(test.t1.b, 40, 50, 60)]", @@ -1931,7 +1944,8 @@ func TestRangeFallbackForDetachCondAndBuildRangeForIndex(t *testing.T) { "[[10 40,10 40] [10 50,10 50] [10 60,10 60] [20 40,20 40] [20 50,20 50] [20 60,20 60] [30 40,30 40] [30 50,30 50] [30 60,30 60]]") checkRangeFallbackAndReset(t, sctx, true) quota = res.Ranges.MemUsage() - 1 - res, err = ranger.DetachCondAndBuildRangeForIndex(rctx, conds, cols, lengths, quota) + allocator.Reset() + res, err = ranger.DetachCondAndBuildRangeForIndexWithAllocator(rctx, allocator, conds, cols, lengths, quota) require.NoError(t, err) checkDetachRangeResult(t, res, "[in(test.t1.a, 10, 20, 30)]", @@ -1939,7 +1953,8 @@ func TestRangeFallbackForDetachCondAndBuildRangeForIndex(t *testing.T) { "[[10,10] [20,20] [30,30]]") checkRangeFallbackAndReset(t, sctx, true) quota = res.Ranges.MemUsage() - 1 - res, err = ranger.DetachCondAndBuildRangeForIndex(rctx, conds, cols, lengths, quota) + allocator.Reset() + res, err = ranger.DetachCondAndBuildRangeForIndexWithAllocator(rctx, allocator, conds, cols, lengths, quota) require.NoError(t, err) 
checkDetachRangeResult(t, res, "[]", @@ -1953,7 +1968,8 @@ func TestRangeFallbackForDetachCondAndBuildRangeForIndex(t *testing.T) { conds = selection.Conditions require.Equal(t, 1, len(conds)) cols, lengths = expression.IndexInfo2PrefixCols(tblInfo.Columns, selection.Schema().Columns, tblInfo.Indices[0]) - res, err = ranger.DetachCondAndBuildRangeForIndex(rctx, conds, cols, lengths, 0) + allocator.Reset() + res, err = ranger.DetachCondAndBuildRangeForIndexWithAllocator(rctx, allocator, conds, cols, lengths, 0) require.NoError(t, err) checkDetachRangeResult(t, res, "[or(eq(test.t1.a, 10), or(eq(test.t1.a, 20), eq(test.t1.a, 30)))]", @@ -1961,7 +1977,8 @@ func TestRangeFallbackForDetachCondAndBuildRangeForIndex(t *testing.T) { "[[10,10] [20,20] [30,30]]") checkRangeFallbackAndReset(t, sctx, false) quota = res.Ranges.MemUsage() - 1 - res, err = ranger.DetachCondAndBuildRangeForIndex(rctx, conds, cols, lengths, quota) + allocator.Reset() + res, err = ranger.DetachCondAndBuildRangeForIndexWithAllocator(rctx, allocator, conds, cols, lengths, quota) require.NoError(t, err) checkDetachRangeResult(t, res, "[]", @@ -1974,7 +1991,8 @@ func TestRangeFallbackForDetachCondAndBuildRangeForIndex(t *testing.T) { conds = selection.Conditions require.Equal(t, 1, len(conds)) cols, lengths = expression.IndexInfo2PrefixCols(tblInfo.Columns, selection.Schema().Columns, tblInfo.Indices[0]) - res, err = ranger.DetachCondAndBuildRangeForIndex(rctx, conds, cols, lengths, 0) + allocator.Reset() + res, err = ranger.DetachCondAndBuildRangeForIndexWithAllocator(rctx, allocator, conds, cols, lengths, 0) require.NoError(t, err) checkDetachRangeResult(t, res, "[or(and(eq(test.t1.a, 10), eq(test.t1.b, 40)), or(and(eq(test.t1.a, 20), eq(test.t1.b, 50)), and(eq(test.t1.a, 30), eq(test.t1.b, 60))))]", @@ -1982,7 +2000,8 @@ func TestRangeFallbackForDetachCondAndBuildRangeForIndex(t *testing.T) { "[[10 40,10 40] [20 50,20 50] [30 60,30 60]]") checkRangeFallbackAndReset(t, sctx, false) quota = 
res.Ranges.MemUsage() - 1 - res, err = ranger.DetachCondAndBuildRangeForIndex(rctx, conds, cols, lengths, quota) + allocator.Reset() + res, err = ranger.DetachCondAndBuildRangeForIndexWithAllocator(rctx, allocator, conds, cols, lengths, quota) require.NoError(t, err) checkDetachRangeResult(t, res, "[]", @@ -1996,7 +2015,8 @@ func TestRangeFallbackForDetachCondAndBuildRangeForIndex(t *testing.T) { conds = selection.Conditions require.Equal(t, 2, len(conds)) cols, lengths = expression.IndexInfo2PrefixCols(tblInfo.Columns, selection.Schema().Columns, tblInfo.Indices[0]) - res, err = ranger.DetachCondAndBuildRangeForIndex(rctx, conds, cols, lengths, 0) + allocator.Reset() + res, err = ranger.DetachCondAndBuildRangeForIndexWithAllocator(rctx, allocator, conds, cols, lengths, 0) require.NoError(t, err) checkDetachRangeResult(t, res, "[or(and(eq(test.t1.a, 10), eq(test.t1.b, 20)), and(eq(test.t1.a, 30), eq(test.t1.b, 40))) eq(test.t1.c, 50)]", @@ -2004,7 +2024,8 @@ func TestRangeFallbackForDetachCondAndBuildRangeForIndex(t *testing.T) { "[[10 20 50,10 20 50] [30 40 50,30 40 50]]") checkRangeFallbackAndReset(t, sctx, false) quota = res.Ranges.MemUsage() - 1 - res, err = ranger.DetachCondAndBuildRangeForIndex(rctx, conds, cols, lengths, quota) + allocator.Reset() + res, err = ranger.DetachCondAndBuildRangeForIndexWithAllocator(rctx, allocator, conds, cols, lengths, quota) require.NoError(t, err) checkDetachRangeResult(t, res, "[or(and(eq(test.t1.a, 10), eq(test.t1.b, 20)), and(eq(test.t1.a, 30), eq(test.t1.b, 40)))]", @@ -2012,7 +2033,8 @@ func TestRangeFallbackForDetachCondAndBuildRangeForIndex(t *testing.T) { "[[10 20,10 20] [30 40,30 40]]") checkRangeFallbackAndReset(t, sctx, true) quota = res.Ranges.MemUsage() - 1 - res, err = ranger.DetachCondAndBuildRangeForIndex(rctx, conds, cols, lengths, quota) + allocator.Reset() + res, err = ranger.DetachCondAndBuildRangeForIndexWithAllocator(rctx, allocator, conds, cols, lengths, quota) require.NoError(t, err) 
checkDetachRangeResult(t, res, "[or(eq(test.t1.a, 10), eq(test.t1.a, 30))]", @@ -2020,7 +2042,8 @@ func TestRangeFallbackForDetachCondAndBuildRangeForIndex(t *testing.T) { "[[10,10] [30,30]]") checkRangeFallbackAndReset(t, sctx, true) quota = res.Ranges.MemUsage() - 1 - res, err = ranger.DetachCondAndBuildRangeForIndex(rctx, conds, cols, lengths, quota) + allocator.Reset() + res, err = ranger.DetachCondAndBuildRangeForIndexWithAllocator(rctx, allocator, conds, cols, lengths, quota) require.NoError(t, err) checkDetachRangeResult(t, res, "[]", @@ -2042,7 +2065,8 @@ func TestRangeFallbackForDetachCondAndBuildRangeForIndex(t *testing.T) { conds = selection.Conditions require.Equal(t, 4, len(conds)) cols, lengths = expression.IndexInfo2PrefixCols(tblInfo.Columns, selection.Schema().Columns, tblInfo.Indices[0]) - res, err = ranger.DetachCondAndBuildRangeForIndex(rctx, conds, cols, lengths, 0) + allocator.Reset() + res, err = ranger.DetachCondAndBuildRangeForIndexWithAllocator(rctx, allocator, conds, cols, lengths, 0) require.NoError(t, err) checkDetachRangeResult(t, res, "[in(test.t2.a, aaa, bbb, ccc) in(test.t2.b, ddd, eee, fff) ge(test.t2.c, ggg) le(test.t2.c, iii)]", @@ -2050,7 +2074,8 @@ func TestRangeFallbackForDetachCondAndBuildRangeForIndex(t *testing.T) { "[[\"aa\" \"dd\" \"gg\",\"aa\" \"dd\" \"ii\"] [\"aa\" \"ee\" \"gg\",\"aa\" \"ee\" \"ii\"] [\"aa\" \"ff\" \"gg\",\"aa\" \"ff\" \"ii\"] [\"bb\" \"dd\" \"gg\",\"bb\" \"dd\" \"ii\"] [\"bb\" \"ee\" \"gg\",\"bb\" \"ee\" \"ii\"] [\"bb\" \"ff\" \"gg\",\"bb\" \"ff\" \"ii\"] [\"cc\" \"dd\" \"gg\",\"cc\" \"dd\" \"ii\"] [\"cc\" \"ee\" \"gg\",\"cc\" \"ee\" \"ii\"] [\"cc\" \"ff\" \"gg\",\"cc\" \"ff\" \"ii\"]]") checkRangeFallbackAndReset(t, sctx, false) quota = res.Ranges.MemUsage() - 1 - res, err = ranger.DetachCondAndBuildRangeForIndex(rctx, conds, cols, lengths, quota) + allocator.Reset() + res, err = ranger.DetachCondAndBuildRangeForIndexWithAllocator(rctx, allocator, conds, cols, lengths, quota) require.NoError(t, err) 
checkDetachRangeResult(t, res, "[in(test.t2.a, aaa, bbb, ccc) in(test.t2.b, ddd, eee, fff)]", @@ -2058,7 +2083,8 @@ func TestRangeFallbackForDetachCondAndBuildRangeForIndex(t *testing.T) { "[[\"aa\" \"dd\",\"aa\" \"dd\"] [\"aa\" \"ee\",\"aa\" \"ee\"] [\"aa\" \"ff\",\"aa\" \"ff\"] [\"bb\" \"dd\",\"bb\" \"dd\"] [\"bb\" \"ee\",\"bb\" \"ee\"] [\"bb\" \"ff\",\"bb\" \"ff\"] [\"cc\" \"dd\",\"cc\" \"dd\"] [\"cc\" \"ee\",\"cc\" \"ee\"] [\"cc\" \"ff\",\"cc\" \"ff\"]]") checkRangeFallbackAndReset(t, sctx, true) quota = res.Ranges.MemUsage() - 1 - res, err = ranger.DetachCondAndBuildRangeForIndex(rctx, conds, cols, lengths, quota) + allocator.Reset() + res, err = ranger.DetachCondAndBuildRangeForIndexWithAllocator(rctx, allocator, conds, cols, lengths, quota) require.NoError(t, err) checkDetachRangeResult(t, res, "[in(test.t2.a, aaa, bbb, ccc)]", @@ -2066,7 +2092,8 @@ func TestRangeFallbackForDetachCondAndBuildRangeForIndex(t *testing.T) { "[[\"aa\",\"aa\"] [\"bb\",\"bb\"] [\"cc\",\"cc\"]]") checkRangeFallbackAndReset(t, sctx, true) quota = res.Ranges.MemUsage() - 1 - res, err = ranger.DetachCondAndBuildRangeForIndex(rctx, conds, cols, lengths, quota) + allocator.Reset() + res, err = ranger.DetachCondAndBuildRangeForIndexWithAllocator(rctx, allocator, conds, cols, lengths, quota) require.NoError(t, err) checkDetachRangeResult(t, res, "[]", @@ -2080,7 +2107,8 @@ func TestRangeFallbackForDetachCondAndBuildRangeForIndex(t *testing.T) { conds = selection.Conditions require.Equal(t, 1, len(conds)) cols, lengths = expression.IndexInfo2PrefixCols(tblInfo.Columns, selection.Schema().Columns, tblInfo.Indices[0]) - res, err = ranger.DetachCondAndBuildRangeForIndex(rctx, conds, cols, lengths, 0) + allocator.Reset() + res, err = ranger.DetachCondAndBuildRangeForIndexWithAllocator(rctx, allocator, conds, cols, lengths, 0) require.NoError(t, err) checkDetachRangeResult(t, res, "[or(eq(test.t2.a, aaa), or(eq(test.t2.a, bbb), eq(test.t2.a, ccc)))]", @@ -2088,7 +2116,8 @@ func 
TestRangeFallbackForDetachCondAndBuildRangeForIndex(t *testing.T) { "[[\"aa\",\"aa\"] [\"bb\",\"bb\"] [\"cc\",\"cc\"]]") checkRangeFallbackAndReset(t, sctx, false) quota = res.Ranges.MemUsage() - 1 - res, err = ranger.DetachCondAndBuildRangeForIndex(rctx, conds, cols, lengths, quota) + allocator.Reset() + res, err = ranger.DetachCondAndBuildRangeForIndexWithAllocator(rctx, allocator, conds, cols, lengths, quota) require.NoError(t, err) checkDetachRangeResult(t, res, "[]", @@ -2101,7 +2130,8 @@ func TestRangeFallbackForDetachCondAndBuildRangeForIndex(t *testing.T) { conds = selection.Conditions require.Equal(t, 1, len(conds)) cols, lengths = expression.IndexInfo2PrefixCols(tblInfo.Columns, selection.Schema().Columns, tblInfo.Indices[0]) - res, err = ranger.DetachCondAndBuildRangeForIndex(rctx, conds, cols, lengths, 0) + allocator.Reset() + res, err = ranger.DetachCondAndBuildRangeForIndexWithAllocator(rctx, allocator, conds, cols, lengths, 0) require.NoError(t, err) checkDetachRangeResult(t, res, "[or(and(eq(test.t2.a, aaa), eq(test.t2.b, ddd)), or(and(eq(test.t2.a, bbb), eq(test.t2.b, eee)), and(eq(test.t2.a, ccc), eq(test.t2.b, fff))))]", @@ -2109,7 +2139,8 @@ func TestRangeFallbackForDetachCondAndBuildRangeForIndex(t *testing.T) { "[[\"aa\" \"dd\",\"aa\" \"dd\"] [\"bb\" \"ee\",\"bb\" \"ee\"] [\"cc\" \"ff\",\"cc\" \"ff\"]]") checkRangeFallbackAndReset(t, sctx, false) quota = res.Ranges.MemUsage() - 1 - res, err = ranger.DetachCondAndBuildRangeForIndex(rctx, conds, cols, lengths, quota) + allocator.Reset() + res, err = ranger.DetachCondAndBuildRangeForIndexWithAllocator(rctx, allocator, conds, cols, lengths, quota) require.NoError(t, err) checkDetachRangeResult(t, res, "[]", @@ -2123,7 +2154,8 @@ func TestRangeFallbackForDetachCondAndBuildRangeForIndex(t *testing.T) { conds = selection.Conditions require.Equal(t, 2, len(conds)) cols, lengths = expression.IndexInfo2PrefixCols(tblInfo.Columns, selection.Schema().Columns, tblInfo.Indices[0]) - res, err = 
ranger.DetachCondAndBuildRangeForIndex(rctx, conds, cols, lengths, 0) + allocator.Reset() + res, err = ranger.DetachCondAndBuildRangeForIndexWithAllocator(rctx, allocator, conds, cols, lengths, 0) require.NoError(t, err) checkDetachRangeResult(t, res, "[or(eq(test.t2.a, aaa), eq(test.t2.a, ccc))]", @@ -2131,7 +2163,8 @@ func TestRangeFallbackForDetachCondAndBuildRangeForIndex(t *testing.T) { "[[\"aa\",\"aa\"] [\"cc\",\"cc\"]]") checkRangeFallbackAndReset(t, sctx, false) quota = res.Ranges.MemUsage() - 1 - res, err = ranger.DetachCondAndBuildRangeForIndex(rctx, conds, cols, lengths, quota) + allocator.Reset() + res, err = ranger.DetachCondAndBuildRangeForIndexWithAllocator(rctx, allocator, conds, cols, lengths, quota) require.NoError(t, err) checkDetachRangeResult(t, res, "[]", @@ -2152,6 +2185,7 @@ func TestRangeFallbackForBuildTableRange(t *testing.T) { sctx := tk.Session() rctx := sctx.GetRangerCtx() ectx := sctx.GetExprCtx().GetEvalCtx() + allocator := new(ranger.Allocator) sql := "select * from t where a in (10,20,30,40,50)" selection := getSelectionFromQuery(t, sctx, sql) conds := selection.Conditions @@ -2161,14 +2195,16 @@ func TestRangeFallbackForBuildTableRange(t *testing.T) { conds, filters = ranger.DetachCondsForColumn(rctx, conds, col) require.Equal(t, 1, len(conds)) require.Equal(t, 0, len(filters)) - ranges, access, remained, err := ranger.BuildTableRange(conds, rctx, col.RetType, 0) + allocator.Reset() + ranges, access, remained, err := ranger.BuildTableRangeWithAllocator(conds, rctx, allocator, col.RetType, 0) require.NoError(t, err) require.Equal(t, "[[10,10] [20,20] [30,30] [40,40] [50,50]]", fmt.Sprintf("%v", ranges)) require.Equal(t, "[in(test.t.a, 10, 20, 30, 40, 50)]", expression.StringifyExpressionsWithCtx(ectx, access)) require.Equal(t, "[]", expression.StringifyExpressionsWithCtx(ectx, remained)) checkRangeFallbackAndReset(t, sctx, false) quota := ranges.MemUsage() - 1 - ranges, access, remained, err = ranger.BuildTableRange(conds, rctx, 
col.RetType, quota) + allocator.Reset() + ranges, access, remained, err = ranger.BuildTableRangeWithAllocator(conds, rctx, allocator, col.RetType, quota) require.NoError(t, err) require.Equal(t, "[[-inf,+inf]]", fmt.Sprintf("%v", ranges)) require.Equal(t, "[]", expression.StringifyExpressionsWithCtx(ectx, access)) @@ -2325,7 +2361,9 @@ create table t( collate.SetNewCollationEnabledForTest(true) defer func() { collate.SetNewCollationEnabledForTest(false) }() ctx := context.Background() + allocator := new(ranger.Allocator) for _, tt := range tests { + allocator.Reset() sql := "select * from t where " + tt.exprStr sctx := tk.Session() ectx := sctx.GetExprCtx().GetEvalCtx() @@ -2347,7 +2385,7 @@ create table t( } cols, lengths := expression.IndexInfo2PrefixCols(tbl.Columns, selection.Schema().Columns, tbl.Indices[tt.indexPos]) require.NotNil(t, cols) - res, err := ranger.DetachCondAndBuildRangeForIndex(sctx.GetRangerCtx(), conds, cols, lengths, 0) + res, err := ranger.DetachCondAndBuildRangeForIndexWithAllocator(sctx.GetRangerCtx(), allocator, conds, cols, lengths, 0) require.NoError(t, err) require.Equal(t, tt.accessConds, expression.StringifyExpressionsWithCtx(ectx, res.AccessConds), fmt.Sprintf("wrong access conditions for expr: %s", tt.exprStr)) require.Equal(t, tt.filterConds, expression.StringifyExpressionsWithCtx(ectx, res.RemainedConds), fmt.Sprintf("wrong filter conditions for expr: %s", tt.exprStr)) @@ -2488,8 +2526,10 @@ func TestMinAccessCondsForDNFCond(t *testing.T) { } ctx := context.Background() + allocator := new(ranger.Allocator) for _, tt := range tests { t.Run(tt.exprStr, func(t *testing.T) { + allocator.Reset() sql := "select * from t where " + tt.exprStr sctx := testKit.Session() ectx := sctx.GetExprCtx().GetEvalCtx() @@ -2511,7 +2551,7 @@ func TestMinAccessCondsForDNFCond(t *testing.T) { } cols, lengths := expression.IndexInfo2PrefixCols(tbl.Columns, selection.Schema().Columns, tbl.Indices[tt.indexPos]) require.NotNil(t, cols) - res, err := 
ranger.DetachCondAndBuildRangeForIndex(sctx.GetRangerCtx(), conds, cols, lengths, 0) + res, err := ranger.DetachCondAndBuildRangeForIndexWithAllocator(sctx.GetRangerCtx(), allocator, conds, cols, lengths, 0) require.NoError(t, err) require.Equal(t, tt.accessConds, expression.StringifyExpressionsWithCtx(ectx, res.AccessConds)) require.Equal(t, tt.minAccessCondsForDNFCond, res.MinAccessCondsForDNFCond) diff --git a/pkg/util/ranger/types.go b/pkg/util/ranger/types.go index a97c2956f0908..a0a2bd5b8dc90 100644 --- a/pkg/util/ranger/types.go +++ b/pkg/util/ranger/types.go @@ -502,7 +502,7 @@ func (ran *Range) IntersectRange(tc types.Context, otherRange *Range) (*Range, e lowVsHigh, err := compareLexicographically(tc, ran.LowVal, otherRange.HighVal, ran.Collators, ran.LowExclude, otherRange.HighExclude, true, false) if err != nil { - return &Range{}, err + return nil, err } if lowVsHigh == 1 { return nil, nil @@ -511,7 +511,7 @@ func (ran *Range) IntersectRange(tc types.Context, otherRange *Range) (*Range, e lowVsHigh, err = compareLexicographically(tc, otherRange.LowVal, ran.HighVal, ran.Collators, otherRange.LowExclude, ran.HighExclude, true, false) if err != nil { - return &Range{}, err + return nil, err } if lowVsHigh == 1 { return nil, nil @@ -520,7 +520,7 @@ func (ran *Range) IntersectRange(tc types.Context, otherRange *Range) (*Range, e lowVsLow, err := compareLexicographically(tc, ran.LowVal, otherRange.LowVal, ran.Collators, ran.LowExclude, otherRange.LowExclude, true, true) if err != nil { - return &Range{}, err + return nil, err } if lowVsLow == -1 { result.LowVal = otherRange.LowVal @@ -533,7 +533,7 @@ func (ran *Range) IntersectRange(tc types.Context, otherRange *Range) (*Range, e highVsHigh, err := compareLexicographically(tc, ran.HighVal, otherRange.HighVal, ran.Collators, ran.HighExclude, otherRange.HighExclude, false, false) if err != nil { - return &Range{}, err + return nil, err } if highVsHigh == 1 { result.HighVal = otherRange.HighVal @@ -565,3 
+565,97 @@ func (rs Ranges) IntersectRanges(tc types.Context, otherRanges Ranges) Ranges { } return result } + +// AllocatorProvider defines the interface for getting an Allocator. +type AllocatorProvider interface { + GetRangerAllocator() *Allocator +} + +type baseRangeData struct { + lval [1]types.Datum + hval [1]types.Datum + collator [1]collate.Collator + Range +} + +// Allocator is used to allocate points and ranges. It reuses the underlying slices to reduce allocations. +type Allocator struct { + points []point + slices [][4]*point + ranges []baseRangeData + sindex int + rindex int +} + +// Reset resets the underlying data slices for reusing. +func (a *Allocator) Reset() { + a.points = a.points[:0] + a.sindex = 0 + a.rindex = 0 +} + +func (a *Allocator) newPoint(value types.Datum, excl bool, start bool) *point { + if a == nil { + return &point{value: value, excl: excl, start: start} + } + a.points = append(a.points, point{value: value, excl: excl, start: start}) + return &a.points[len(a.points)-1] +} + +func (a *Allocator) points2(p1, p2 *point) []*point { + if a == nil { + return []*point{p1, p2} + } + if a.sindex >= len(a.slices) { + a.slices = append(a.slices, [4]*point{}) + } + points := append(a.slices[a.sindex][:0], p1, p2) + a.sindex++ + return points +} + +func (a *Allocator) points4(p1, p2, p3, p4 *point) []*point { + if a == nil { + return []*point{p1, p2, p3, p4} + } + if a.sindex >= len(a.slices) { + a.slices = append(a.slices, [4]*point{}) + } + points := append(a.slices[a.sindex][:0], p1, p2, p3, p4) + a.sindex++ + return points +} + +func (a *Allocator) newRange(p1, p2 *point, collator collate.Collator) *Range { + if a == nil { + return &Range{ + LowVal: []types.Datum{p1.value}, + LowExclude: p1.excl, + HighVal: []types.Datum{p2.value}, + HighExclude: p2.excl, + Collators: []collate.Collator{collator}, + } + } + if a.rindex >= len(a.ranges) { + a.ranges = append(a.ranges, baseRangeData{}) + } + data := &a.ranges[a.rindex] + data.lval[0] = 
p1.value + data.Range.LowVal = data.lval[:] + data.Range.LowExclude = p1.excl + data.hval[0] = p2.value + data.Range.HighVal = data.hval[:] + data.Range.HighExclude = p2.excl + data.collator[0] = collator + data.Range.Collators = data.collator[:] + a.rindex++ + return &data.Range +} + +func (a *Allocator) rangesFromPoints(points []*point, collator collate.Collator) Ranges { + ranges := make(Ranges, 0, len(points)/2) + for i := 0; i < len(points); i += 2 { + ranges = append(ranges, a.newRange(points[i], points[i+1], collator)) + } + return ranges +} From 880415e698ed9ea8465687c1ededdc75210c5eac Mon Sep 17 00:00:00 2001 From: zyguan Date: Wed, 18 Dec 2024 02:50:42 +0000 Subject: [PATCH 2/2] fix a leak issue Signed-off-by: zyguan --- pkg/util/ranger/ranger.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/util/ranger/ranger.go b/pkg/util/ranger/ranger.go index 5ccb9c4c2eb02..7b42c65d0c9dd 100644 --- a/pkg/util/ranger/ranger.go +++ b/pkg/util/ranger/ranger.go @@ -91,6 +91,7 @@ func convertPoints(r *builder, rangePoints []*point, newTp *types.FieldType, ski for j := 0; j < numPoints; j += 2 { startPoint, err := convertPoint(r, rangePoints[j], newTp) if err != nil { + keybufPool.Put(buf) return nil, errors.Trace(err) } if tableRange {