From ea134cd62527a66588d4af6be7c9eca9cd759144 Mon Sep 17 00:00:00 2001
From: crazycs
Date: Mon, 29 Jun 2020 14:04:46 +0800
Subject: [PATCH 1/2] cherry pick #18144 to release-4.0

Signed-off-by: ti-srebot
---
 ddl/ddl_api.go            |  2 +-
 ddl/split_region.go       | 37 +++++++++++++++++++++++++------------
 executor/executor_test.go | 17 +++++++++++++++++
 3 files changed, 43 insertions(+), 13 deletions(-)

diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go
index 69655c969a0bf..55e4381fb8d22 100644
--- a/ddl/ddl_api.go
+++ b/ddl/ddl_api.go
@@ -1616,7 +1616,7 @@ func (d *ddl) preSplitAndScatter(ctx sessionctx.Context, tbInfo *model.TableInfo
         scatterRegion = variable.TiDBOptOn(val)
     }
     if pi != nil {
-        preSplit = func() { splitPartitionTableRegion(ctx, sp, pi, scatterRegion) }
+        preSplit = func() { splitPartitionTableRegion(ctx, sp, tbInfo, pi, scatterRegion) }
     } else {
         preSplit = func() { splitTableRegion(ctx, sp, tbInfo, scatterRegion) }
     }
diff --git a/ddl/split_region.go b/ddl/split_region.go
index 466aa3c834763..eeedc09892520 100644
--- a/ddl/split_region.go
+++ b/ddl/split_region.go
@@ -24,13 +24,19 @@ import (
     "go.uber.org/zap"
 )
 
-func splitPartitionTableRegion(ctx sessionctx.Context, store kv.SplittableStore, pi *model.PartitionInfo, scatter bool) {
+func splitPartitionTableRegion(ctx sessionctx.Context, store kv.SplittableStore, tbInfo *model.TableInfo, pi *model.PartitionInfo, scatter bool) {
     // Max partition count is 4096, should we sample and just choose some of the partition to split?
     regionIDs := make([]uint64, 0, len(pi.Definitions))
     ctxWithTimeout, cancel := context.WithTimeout(context.Background(), ctx.GetSessionVars().GetSplitRegionTimeout())
     defer cancel()
-    for _, def := range pi.Definitions {
-        regionIDs = append(regionIDs, splitRecordRegion(ctxWithTimeout, store, def.ID, scatter))
+    if tbInfo.ShardRowIDBits > 0 && tbInfo.PreSplitRegions > 0 {
+        for _, def := range pi.Definitions {
+            regionIDs = append(regionIDs, preSplitPhysicalTableByShardRowID(ctxWithTimeout, store, tbInfo, def.ID, scatter)...)
+        }
+    } else {
+        for _, def := range pi.Definitions {
+            regionIDs = append(regionIDs, splitRecordRegion(ctxWithTimeout, store, def.ID, scatter))
+        }
     }
     if scatter {
         waitScatterRegionFinish(ctxWithTimeout, store, regionIDs...)
@@ -40,17 +46,18 @@ func splitPartitionTableRegion(ctx sessionctx.Context, store kv.SplittableStore,
 func splitTableRegion(ctx sessionctx.Context, store kv.SplittableStore, tbInfo *model.TableInfo, scatter bool) {
     ctxWithTimeout, cancel := context.WithTimeout(context.Background(), ctx.GetSessionVars().GetSplitRegionTimeout())
     defer cancel()
+    var regionIDs []uint64
     if tbInfo.ShardRowIDBits > 0 && tbInfo.PreSplitRegions > 0 {
-        splitPreSplitedTable(ctxWithTimeout, store, tbInfo, scatter)
+        regionIDs = preSplitPhysicalTableByShardRowID(ctxWithTimeout, store, tbInfo, tbInfo.ID, scatter)
     } else {
-        regionID := splitRecordRegion(ctxWithTimeout, store, tbInfo.ID, scatter)
-        if scatter {
-            waitScatterRegionFinish(ctxWithTimeout, store, regionID)
-        }
+        regionIDs = append(regionIDs, splitRecordRegion(ctxWithTimeout, store, tbInfo.ID, scatter))
+    }
+    if scatter {
+        waitScatterRegionFinish(ctxWithTimeout, store, regionIDs...)
     }
 }
 
-func splitPreSplitedTable(ctx context.Context, store kv.SplittableStore, tbInfo *model.TableInfo, scatter bool) {
+func preSplitPhysicalTableByShardRowID(ctx context.Context, store kv.SplittableStore, tbInfo *model.TableInfo, physicalID int64, scatter bool) []uint64 {
     // Example:
     // ShardRowIDBits = 4
     // PreSplitRegions = 2
@@ -78,10 +85,18 @@ func splitPreSplitedTable(ctx context.Context, store kv.SplittableStore, tbInfo
     step := int64(1 << (tbInfo.ShardRowIDBits - tbInfo.PreSplitRegions))
     max := int64(1 << tbInfo.ShardRowIDBits)
     splitTableKeys := make([][]byte, 0, 1<<(tbInfo.PreSplitRegions))
+<<<<<<< HEAD
     for p := int64(step); p < max; p += step {
         recordID := p << (64 - tbInfo.ShardRowIDBits - 1)
         recordPrefix := tablecodec.GenTableRecordPrefix(tbInfo.ID)
         key := tablecodec.EncodeRecordKey(recordPrefix, recordID)
+=======
+    splitTableKeys = append(splitTableKeys, tablecodec.GenTablePrefix(physicalID))
+    for p := step; p < max; p += step {
+        recordID := p << (64 - tbInfo.ShardRowIDBits - 1)
+        recordPrefix := tablecodec.GenTableRecordPrefix(physicalID)
+        key := tablecodec.EncodeRecordKey(recordPrefix, kv.IntHandle(recordID))
+>>>>>>> 33f4b82... ddl: fix issue `pre_split_regions` table option doesn't work in the partition table. #18144
         splitTableKeys = append(splitTableKeys, key)
     }
     var err error
@@ -91,9 +106,7 @@ func splitPreSplitedTable(ctx context.Context, store kv.SplittableStore, tbInfo
         zap.Stringer("table", tbInfo.Name), zap.Int("successful region count", len(regionIDs)), zap.Error(err))
     }
     regionIDs = append(regionIDs, splitIndexRegion(store, tbInfo, scatter)...)
-    if scatter {
-        waitScatterRegionFinish(ctx, store, regionIDs...)
-    }
+    return regionIDs
 }
 
 func splitRecordRegion(ctx context.Context, store kv.SplittableStore, tableID int64, scatter bool) uint64 {
diff --git a/executor/executor_test.go b/executor/executor_test.go
index fe35c199211ec..f57e2676d0db3 100644
--- a/executor/executor_test.go
+++ b/executor/executor_test.go
@@ -4402,6 +4402,23 @@ func (s *testSplitTable) TestShowTableRegion(c *C) {
     c.Assert(rows[1][1], Equals, fmt.Sprintf("t_%d_r_2305843009213693952", tbl.Meta().ID))
     c.Assert(rows[2][1], Equals, fmt.Sprintf("t_%d_r_4611686018427387904", tbl.Meta().ID))
     c.Assert(rows[3][1], Equals, fmt.Sprintf("t_%d_r_6917529027641081856", tbl.Meta().ID))
+
+    // Test pre-splitting table regions for a partitioned table at create-table time.
+    tk.MustExec("drop table if exists pt_pre")
+    tk.MustExec("create table pt_pre (a int, b int) shard_row_id_bits = 2 pre_split_regions=2 partition by hash(a) partitions 3;")
+    re = tk.MustQuery("show table pt_pre regions")
+    rows = re.Rows()
+    // Table pt_pre should have 12 regions now: 4 regions for each of its 3 partitions.
+    c.Assert(len(rows), Equals, 12)
+    tbl = testGetTableByName(c, tk.Se, "test", "pt_pre")
+    pi := tbl.Meta().GetPartitionInfo().Definitions
+    c.Assert(len(pi), Equals, 3)
+    for i, p := range pi {
+        c.Assert(rows[1+4*i][1], Equals, fmt.Sprintf("t_%d_r_2305843009213693952", p.ID))
+        c.Assert(rows[2+4*i][1], Equals, fmt.Sprintf("t_%d_r_4611686018427387904", p.ID))
+        c.Assert(rows[3+4*i][1], Equals, fmt.Sprintf("t_%d_r_6917529027641081856", p.ID))
+    }
+
     defer atomic.StoreUint32(&ddl.EnableSplitTableRegion, 0)
     // Test split partition table.
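For reference, the split points that `preSplitPhysicalTableByShardRowID` generates come purely from `ShardRowIDBits` and `PreSplitRegions`. The standalone sketch below reproduces only that arithmetic, with no TiDB packages and no claim to be the patched code itself; the real function additionally turns each boundary into a record key via `tablecodec` and asks the splittable store to split (and optionally scatter) on it.

```go
package main

import "fmt"

// preSplitRowIDs mirrors the boundary arithmetic in preSplitPhysicalTableByShardRowID:
// step = 1 << (ShardRowIDBits - PreSplitRegions), and every multiple of step below
// 1 << ShardRowIDBits is shifted into the high bits of the signed 64-bit row ID.
func preSplitRowIDs(shardRowIDBits, preSplitRegions uint64) []int64 {
	step := int64(1) << (shardRowIDBits - preSplitRegions)
	max := int64(1) << shardRowIDBits
	ids := make([]int64, 0, 1<<preSplitRegions)
	for p := step; p < max; p += step {
		// Matches `recordID := p << (64 - tbInfo.ShardRowIDBits - 1)` in the patch.
		ids = append(ids, p<<(64-shardRowIDBits-1))
	}
	return ids
}

func main() {
	// The example from the comment in split_region.go: ShardRowIDBits = 4 and
	// PreSplitRegions = 2 give three boundaries, i.e. four regions per physical table.
	fmt.Println(preSplitRowIDs(4, 2))
}
```

With these inputs the boundaries are 2305843009213693952, 4611686018427387904, and 6917529027641081856, which is exactly the set of start keys the new test asserts for every partition.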
From bd13e4666176be695f4b860b8373cea1e24d6783 Mon Sep 17 00:00:00 2001
From: crazycs520
Date: Wed, 29 Jul 2020 10:25:13 +0800
Subject: [PATCH 2/2] fix conflict

Signed-off-by: crazycs520
---
 ddl/split_region.go | 11 ++---------
 1 file changed, 2 insertions(+), 9 deletions(-)

diff --git a/ddl/split_region.go b/ddl/split_region.go
index eeedc09892520..ce58f91523511 100644
--- a/ddl/split_region.go
+++ b/ddl/split_region.go
@@ -85,18 +85,11 @@ func preSplitPhysicalTableByShardRowID(ctx context.Context, store kv.SplittableS
     step := int64(1 << (tbInfo.ShardRowIDBits - tbInfo.PreSplitRegions))
     max := int64(1 << tbInfo.ShardRowIDBits)
     splitTableKeys := make([][]byte, 0, 1<<(tbInfo.PreSplitRegions))
-<<<<<<< HEAD
-    for p := int64(step); p < max; p += step {
-        recordID := p << (64 - tbInfo.ShardRowIDBits - 1)
-        recordPrefix := tablecodec.GenTableRecordPrefix(tbInfo.ID)
-        key := tablecodec.EncodeRecordKey(recordPrefix, recordID)
-=======
     splitTableKeys = append(splitTableKeys, tablecodec.GenTablePrefix(physicalID))
-    for p := step; p < max; p += step {
+    for p := int64(step); p < max; p += step {
         recordID := p << (64 - tbInfo.ShardRowIDBits - 1)
         recordPrefix := tablecodec.GenTableRecordPrefix(physicalID)
-        key := tablecodec.EncodeRecordKey(recordPrefix, kv.IntHandle(recordID))
->>>>>>> 33f4b82... ddl: fix issue `pre_split_regions` table option doesn't work in the partition table. #18144
+        key := tablecodec.EncodeRecordKey(recordPrefix, recordID)
         splitTableKeys = append(splitTableKeys, key)
     }
     var err error
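As a quick sanity check of the resolved loop against the new assertions in executor_test.go: with `shard_row_id_bits = 2` and `pre_split_regions = 2`, every partition gets one split key at its table prefix (the `GenTablePrefix(physicalID)` key added by the patch) plus the same three row-ID boundaries, so a 3-partition table shows 12 regions. The sketch below only formats the expected start keys; the partition IDs are placeholders, since the real IDs are allocated by the DDL job and read via `tbl.Meta().GetPartitionInfo()`.

```go
package main

import "fmt"

func main() {
	// Placeholder physical IDs for the three hash partitions of pt_pre.
	partitionIDs := []int64{101, 102, 103}
	const shardRowIDBits, preSplitRegions = 2, 2

	step := int64(1) << (shardRowIDBits - preSplitRegions) // 1
	upper := int64(1) << shardRowIDBits                    // 4
	for _, pid := range partitionIDs {
		// Region split at the partition's table prefix, added as the first split key.
		fmt.Printf("t_%d_\n", pid)
		for p := step; p < upper; p += step {
			// Start keys asserted by the test: t_<pid>_r_<p << 61>.
			fmt.Printf("t_%d_r_%d\n", pid, p<<(64-shardRowIDBits-1))
		}
	}
}
```

The 12 printed keys correspond to the 12 rows the test expects from `show table pt_pre regions`, with the three `t_<pid>_r_...` keys per partition matching the asserted values.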