diff --git a/server/schedulers/hot_region.go b/server/schedulers/hot_region.go
index 5142160b8b8..b6ef116351e 100644
--- a/server/schedulers/hot_region.go
+++ b/server/schedulers/hot_region.go
@@ -1493,11 +1493,11 @@ func toResourceType(rwTy statistics.RWType, opTy opType) resourceType {
 
 func stringToDim(name string) int {
 	switch name {
-	case BytePriority:
+	case statistics.BytePriority:
 		return statistics.ByteDim
-	case KeyPriority:
+	case statistics.KeyPriority:
 		return statistics.KeyDim
-	case QueryPriority:
+	case statistics.QueryPriority:
 		return statistics.QueryDim
 	}
 	return statistics.ByteDim
@@ -1506,11 +1506,11 @@ func stringToDim(name string) int {
 func dimToString(dim int) string {
 	switch dim {
 	case statistics.ByteDim:
-		return BytePriority
+		return statistics.BytePriority
 	case statistics.KeyDim:
-		return KeyPriority
+		return statistics.KeyPriority
 	case statistics.QueryDim:
-		return QueryPriority
+		return statistics.QueryPriority
 	default:
 		return ""
 	}
diff --git a/server/schedulers/hot_region_config.go b/server/schedulers/hot_region_config.go
index ba9adb4a9a9..9df9b1a25fa 100644
--- a/server/schedulers/hot_region_config.go
+++ b/server/schedulers/hot_region_config.go
@@ -36,12 +36,6 @@ import (
 )
 
 const (
-	// BytePriority indicates hot-region-scheduler prefer byte dim
-	BytePriority = "byte"
-	// KeyPriority indicates hot-region-scheduler prefer key dim
-	KeyPriority = "key"
-	// QueryPriority indicates hot-region-scheduler prefer query dim
-	QueryPriority = "query"
 
 	// Scheduling has a bigger impact on TiFlash, so it needs to be corrected in configuration items
 	// In the default config, the TiKV difference is 1.05*1.05-1 = 0.1025, and the TiFlash difference is 1.15*1.15-1 = 0.3225
@@ -49,16 +43,16 @@
 )
 
 var defaultPrioritiesConfig = prioritiesConfig{
-	read:        []string{QueryPriority, BytePriority},
-	writeLeader: []string{KeyPriority, BytePriority},
-	writePeer:   []string{BytePriority, KeyPriority},
+	read:        []string{statistics.QueryPriority, statistics.BytePriority},
+	writeLeader: []string{statistics.KeyPriority, statistics.BytePriority},
+	writePeer:   []string{statistics.BytePriority, statistics.KeyPriority},
 }
 
 // because tikv below 5.2.0 does not report query information, we will use byte and key as the scheduling dimensions
 var compatiblePrioritiesConfig = prioritiesConfig{
-	read:        []string{BytePriority, KeyPriority},
-	writeLeader: []string{KeyPriority, BytePriority},
-	writePeer:   []string{BytePriority, KeyPriority},
+	read:        []string{statistics.BytePriority, statistics.KeyPriority},
+	writeLeader: []string{statistics.KeyPriority, statistics.BytePriority},
+	writePeer:   []string{statistics.BytePriority, statistics.KeyPriority},
 }
 
 // params about hot region.
@@ -344,7 +338,7 @@ func (conf *hotRegionSchedulerConfig) handleGetConfig(w http.ResponseWriter, r *
 func isPriorityValid(priorities []string) (map[string]bool, error) {
 	priorityMap := map[string]bool{}
 	for _, p := range priorities {
-		if p != BytePriority && p != KeyPriority && p != QueryPriority {
+		if p != statistics.BytePriority && p != statistics.KeyPriority && p != statistics.QueryPriority {
 			return nil, errs.ErrSchedulerConfig.FastGenByArgs("invalid scheduling dimensions")
 		}
 		priorityMap[p] = true
@@ -367,8 +361,8 @@ func (conf *hotRegionSchedulerConfig) valid() error {
 	}
 	if pm, err := isPriorityValid(conf.WritePeerPriorities); err != nil {
 		return err
-	} else if pm[QueryPriority] {
-		return errs.ErrSchedulerConfig.FastGenByArgs("qps is not allowed to be set in priorities for write-peer-priorities")
+	} else if pm[statistics.QueryPriority] {
+		return errs.ErrSchedulerConfig.FastGenByArgs("query is not allowed to be set in priorities for write-peer-priorities")
 	}
 
 	if conf.RankFormulaVersion != "" && conf.RankFormulaVersion != "v1" && conf.RankFormulaVersion != "v2" {
@@ -478,7 +472,7 @@ func getWritePeerPriorities(c *prioritiesConfig) []string {
 // because tikv below 5.2.0 does not report query information, we will use byte and key as the scheduling dimensions
 func adjustPrioritiesConfig(querySupport bool, origins []string, getPriorities func(*prioritiesConfig) []string) []string {
 	withQuery := slice.AnyOf(origins, func(i int) bool {
-		return origins[i] == QueryPriority
+		return origins[i] == statistics.QueryPriority
 	})
 	compatibles := getPriorities(&compatiblePrioritiesConfig)
 	if !querySupport && withQuery {
@@ -487,7 +481,7 @@ func adjustPrioritiesConfig(querySupport bool, origins []string, getPriorities f
 
 	defaults := getPriorities(&defaultPrioritiesConfig)
 	isLegal := slice.AllOf(origins, func(i int) bool {
-		return origins[i] == BytePriority || origins[i] == KeyPriority || origins[i] == QueryPriority
+		return origins[i] == statistics.BytePriority || origins[i] == statistics.KeyPriority || origins[i] == statistics.QueryPriority
 	})
 	if len(defaults) == len(origins) && isLegal && origins[0] != origins[1] {
 		return origins
diff --git a/server/schedulers/hot_region_test.go b/server/schedulers/hot_region_test.go
index 5f84f2db0cd..239c2388bb2 100644
--- a/server/schedulers/hot_region_test.go
+++ b/server/schedulers/hot_region_test.go
@@ -559,7 +559,7 @@ func TestHotWriteRegionScheduleWithQuery(t *testing.T) {
 	re.NoError(err)
 	hb.(*hotScheduler).conf.SetSrcToleranceRatio(1)
 	hb.(*hotScheduler).conf.SetDstToleranceRatio(1)
-	hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{QueryPriority, BytePriority}
+	hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{statistics.QueryPriority, statistics.BytePriority}
 
 	tc := mockcluster.NewCluster(ctx, opt)
 	tc.SetHotRegionCacheHitsThreshold(0)
@@ -968,7 +968,7 @@ func TestHotReadRegionScheduleByteRateOnly(t *testing.T) {
 	scheduler, err := schedule.CreateScheduler(statistics.Read.String(), schedule.NewOperatorController(ctx, nil, nil), storage.NewStorageWithMemoryBackend(), nil)
 	re.NoError(err)
 	hb := scheduler.(*hotScheduler)
-	hb.conf.ReadPriorities = []string{BytePriority, KeyPriority}
+	hb.conf.ReadPriorities = []string{statistics.BytePriority, statistics.KeyPriority}
 	tc.SetHotRegionCacheHitsThreshold(0)
 
 	// Add stores 1, 2, 3, 4, 5 with region counts 3, 2, 2, 2, 0.
@@ -1122,7 +1122,7 @@ func TestHotReadRegionScheduleWithKeyRate(t *testing.T) {
 	re.NoError(err)
 	hb.(*hotScheduler).conf.SetSrcToleranceRatio(1)
 	hb.(*hotScheduler).conf.SetDstToleranceRatio(1)
-	hb.(*hotScheduler).conf.ReadPriorities = []string{BytePriority, KeyPriority}
+	hb.(*hotScheduler).conf.ReadPriorities = []string{statistics.BytePriority, statistics.KeyPriority}
 
 	tc := mockcluster.NewCluster(ctx, opt)
 	tc.SetHotRegionCacheHitsThreshold(0)
@@ -1180,7 +1180,7 @@ func TestHotReadRegionScheduleWithPendingInfluence(t *testing.T) {
 	hb.(*hotScheduler).conf.GreatDecRatio = 0.99
 	hb.(*hotScheduler).conf.MinorDecRatio = 1
 	hb.(*hotScheduler).conf.DstToleranceRatio = 1
-	hb.(*hotScheduler).conf.ReadPriorities = []string{BytePriority, KeyPriority}
+	hb.(*hotScheduler).conf.ReadPriorities = []string{statistics.BytePriority, statistics.KeyPriority}
 	old := pendingAmpFactor
 	pendingAmpFactor = 0.0
 	defer func() {
@@ -1299,7 +1299,7 @@ func TestHotReadWithEvictLeaderScheduler(t *testing.T) {
 	hb.(*hotScheduler).conf.SetSrcToleranceRatio(1)
 	hb.(*hotScheduler).conf.SetDstToleranceRatio(1)
 	hb.(*hotScheduler).conf.SetStrictPickingStore(false)
-	hb.(*hotScheduler).conf.ReadPriorities = []string{BytePriority, KeyPriority}
+	hb.(*hotScheduler).conf.ReadPriorities = []string{statistics.BytePriority, statistics.KeyPriority}
 	tc := mockcluster.NewCluster(ctx, opt)
 	tc.SetHotRegionCacheHitsThreshold(0)
 	tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0))
@@ -1854,7 +1854,7 @@ func TestHotReadPeerSchedule(t *testing.T) {
 	sche, err := schedule.CreateScheduler(statistics.Read.String(), schedule.NewOperatorController(ctx, tc, nil), storage.NewStorageWithMemoryBackend(), schedule.ConfigJSONDecoder([]byte("null")))
 	re.NoError(err)
 	hb := sche.(*hotScheduler)
-	hb.conf.ReadPriorities = []string{BytePriority, KeyPriority}
+	hb.conf.ReadPriorities = []string{statistics.BytePriority, statistics.KeyPriority}
 
 	tc.UpdateStorageReadStats(1, 20*units.MiB, 20*units.MiB)
 	tc.UpdateStorageReadStats(2, 19*units.MiB, 19*units.MiB)
@@ -1903,12 +1903,12 @@ func TestHotScheduleWithPriority(t *testing.T) {
 		{1, []uint64{1, 2, 3}, 2 * units.MiB, 1 * units.MiB, 0},
 		{6, []uint64{4, 2, 3}, 1 * units.MiB, 2 * units.MiB, 0},
 	})
-	hb.(*hotScheduler).conf.WritePeerPriorities = []string{BytePriority, KeyPriority}
+	hb.(*hotScheduler).conf.WritePeerPriorities = []string{statistics.BytePriority, statistics.KeyPriority}
 	ops, _ := hb.Schedule(tc, false)
 	re.Len(ops, 1)
 	testutil.CheckTransferPeer(re, ops[0], operator.OpHotRegion, 1, 5)
 	clearPendingInfluence(hb.(*hotScheduler))
-	hb.(*hotScheduler).conf.WritePeerPriorities = []string{KeyPriority, BytePriority}
+	hb.(*hotScheduler).conf.WritePeerPriorities = []string{statistics.KeyPriority, statistics.BytePriority}
 	ops, _ = hb.Schedule(tc, false)
 	re.Len(ops, 1)
 	testutil.CheckTransferPeer(re, ops[0], operator.OpHotRegion, 4, 5)
@@ -1925,12 +1925,12 @@ func TestHotScheduleWithPriority(t *testing.T) {
 	addRegionInfo(tc, statistics.Read, []testRegionInfo{
 		{1, []uint64{1, 2, 3}, 2 * units.MiB, 2 * units.MiB, 0},
 	})
-	hb.(*hotScheduler).conf.ReadPriorities = []string{BytePriority, KeyPriority}
+	hb.(*hotScheduler).conf.ReadPriorities = []string{statistics.BytePriority, statistics.KeyPriority}
 	ops, _ = hb.Schedule(tc, false)
 	re.Len(ops, 1)
 	testutil.CheckTransferLeader(re, ops[0], operator.OpHotRegion, 1, 2)
 	clearPendingInfluence(hb.(*hotScheduler))
-	hb.(*hotScheduler).conf.ReadPriorities = []string{KeyPriority, BytePriority}
+	hb.(*hotScheduler).conf.ReadPriorities = []string{statistics.KeyPriority, statistics.BytePriority}
 	ops, _ = hb.Schedule(tc, false)
 	re.Len(ops, 1)
 	testutil.CheckTransferLeader(re, ops[0], operator.OpHotRegion, 1, 3)
@@ -1944,7 +1944,7 @@ func TestHotScheduleWithPriority(t *testing.T) {
 	tc.UpdateStorageWrittenStats(3, 6*units.MiB*statistics.StoreHeartBeatReportInterval, 6*units.MiB*statistics.StoreHeartBeatReportInterval)
 	tc.UpdateStorageWrittenStats(4, 6*units.MiB*statistics.StoreHeartBeatReportInterval, 6*units.MiB*statistics.StoreHeartBeatReportInterval)
 	tc.UpdateStorageWrittenStats(5, 1*units.MiB*statistics.StoreHeartBeatReportInterval, 1*units.MiB*statistics.StoreHeartBeatReportInterval)
-	hb.(*hotScheduler).conf.WritePeerPriorities = []string{BytePriority, KeyPriority}
+	hb.(*hotScheduler).conf.WritePeerPriorities = []string{statistics.BytePriority, statistics.KeyPriority}
 	hb.(*hotScheduler).conf.StrictPickingStore = true
 	ops, _ = hb.Schedule(tc, false)
 	re.Empty(ops)
@@ -1959,7 +1959,7 @@ func TestHotScheduleWithPriority(t *testing.T) {
 	tc.UpdateStorageWrittenStats(3, 6*units.MiB*statistics.StoreHeartBeatReportInterval, 6*units.MiB*statistics.StoreHeartBeatReportInterval)
 	tc.UpdateStorageWrittenStats(4, 1*units.MiB*statistics.StoreHeartBeatReportInterval, 10*units.MiB*statistics.StoreHeartBeatReportInterval)
 	tc.UpdateStorageWrittenStats(5, 1*units.MiB*statistics.StoreHeartBeatReportInterval, 1*units.MiB*statistics.StoreHeartBeatReportInterval)
-	hb.(*hotScheduler).conf.WritePeerPriorities = []string{KeyPriority, BytePriority}
+	hb.(*hotScheduler).conf.WritePeerPriorities = []string{statistics.KeyPriority, statistics.BytePriority}
 	hb.(*hotScheduler).conf.StrictPickingStore = true
 	ops, _ = hb.Schedule(tc, false)
 	re.Empty(ops)
@@ -1999,7 +1999,7 @@ func TestHotScheduleWithStddev(t *testing.T) {
 	addRegionInfo(tc, statistics.Write, []testRegionInfo{
 		{6, []uint64{3, 4, 2}, 0.1 * units.MiB, 0.1 * units.MiB, 0},
 	})
-	hb.(*hotScheduler).conf.WritePeerPriorities = []string{BytePriority, KeyPriority}
+	hb.(*hotScheduler).conf.WritePeerPriorities = []string{statistics.BytePriority, statistics.KeyPriority}
 	stddevThreshold = 0.1
 	ops, _ := hb.Schedule(tc, false)
 	re.Empty(ops)
@@ -2018,7 +2018,7 @@ func TestHotScheduleWithStddev(t *testing.T) {
 	addRegionInfo(tc, statistics.Write, []testRegionInfo{
 		{6, []uint64{3, 4, 2}, 0.1 * units.MiB, 0.1 * units.MiB, 0},
 	})
-	hb.(*hotScheduler).conf.WritePeerPriorities = []string{BytePriority, KeyPriority}
+	hb.(*hotScheduler).conf.WritePeerPriorities = []string{statistics.BytePriority, statistics.KeyPriority}
 	stddevThreshold = 0.1
 	ops, _ = hb.Schedule(tc, false)
 	re.Empty(ops)
@@ -2062,11 +2062,11 @@ func TestHotWriteLeaderScheduleWithPriority(t *testing.T) {
 	defer func() {
 		schedulePeerPr, pendingAmpFactor = old1, old2
 	}()
-	hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{KeyPriority, BytePriority}
+	hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{statistics.KeyPriority, statistics.BytePriority}
 	ops, _ := hb.Schedule(tc, false)
 	re.Len(ops, 1)
 	testutil.CheckTransferLeader(re, ops[0], operator.OpHotRegion, 1, 2)
-	hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{BytePriority, KeyPriority}
+	hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{statistics.BytePriority, statistics.KeyPriority}
 	ops, _ = hb.Schedule(tc, false)
 	re.Len(ops, 1)
 	testutil.CheckTransferLeader(re, ops[0], operator.OpHotRegion, 1, 3)
@@ -2089,8 +2089,8 @@ func TestCompatibility(t *testing.T) {
 	})
 	// config error value
 	hb.(*hotScheduler).conf.ReadPriorities = []string{"error"}
-	hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{"error", BytePriority}
-	hb.(*hotScheduler).conf.WritePeerPriorities = []string{QueryPriority, BytePriority, KeyPriority}
+	hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{"error", statistics.BytePriority}
+	hb.(*hotScheduler).conf.WritePeerPriorities = []string{statistics.QueryPriority, statistics.BytePriority, statistics.KeyPriority}
 	checkPriority(re, hb.(*hotScheduler), tc, [3][2]int{
 		{statistics.QueryDim, statistics.ByteDim},
 		{statistics.KeyDim, statistics.ByteDim},
@@ -2104,18 +2104,18 @@ func TestCompatibility(t *testing.T) {
 		{statistics.ByteDim, statistics.KeyDim},
 	})
 	// config byte and key
-	hb.(*hotScheduler).conf.ReadPriorities = []string{KeyPriority, BytePriority}
-	hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{BytePriority, KeyPriority}
-	hb.(*hotScheduler).conf.WritePeerPriorities = []string{KeyPriority, BytePriority}
+	hb.(*hotScheduler).conf.ReadPriorities = []string{statistics.KeyPriority, statistics.BytePriority}
+	hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{statistics.BytePriority, statistics.KeyPriority}
+	hb.(*hotScheduler).conf.WritePeerPriorities = []string{statistics.KeyPriority, statistics.BytePriority}
 	checkPriority(re, hb.(*hotScheduler), tc, [3][2]int{
 		{statistics.KeyDim, statistics.ByteDim},
 		{statistics.ByteDim, statistics.KeyDim},
 		{statistics.KeyDim, statistics.ByteDim},
 	})
 	// config query in low version
-	hb.(*hotScheduler).conf.ReadPriorities = []string{QueryPriority, BytePriority}
-	hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{QueryPriority, BytePriority}
-	hb.(*hotScheduler).conf.WritePeerPriorities = []string{QueryPriority, BytePriority}
+	hb.(*hotScheduler).conf.ReadPriorities = []string{statistics.QueryPriority, statistics.BytePriority}
+	hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{statistics.QueryPriority, statistics.BytePriority}
+	hb.(*hotScheduler).conf.WritePeerPriorities = []string{statistics.QueryPriority, statistics.BytePriority}
 	checkPriority(re, hb.(*hotScheduler), tc, [3][2]int{
 		{statistics.ByteDim, statistics.KeyDim},
 		{statistics.KeyDim, statistics.ByteDim},
@@ -2124,7 +2124,7 @@ func TestCompatibility(t *testing.T) {
 	// config error value
 	hb.(*hotScheduler).conf.ReadPriorities = []string{"error", "error"}
 	hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{}
-	hb.(*hotScheduler).conf.WritePeerPriorities = []string{QueryPriority, BytePriority, KeyPriority}
+	hb.(*hotScheduler).conf.WritePeerPriorities = []string{statistics.QueryPriority, statistics.BytePriority, statistics.KeyPriority}
 	checkPriority(re, hb.(*hotScheduler), tc, [3][2]int{
 		{statistics.ByteDim, statistics.KeyDim},
 		{statistics.KeyDim, statistics.ByteDim},
@@ -2229,7 +2229,7 @@ func TestConfigValidation(t *testing.T) {
 	err = hc.valid()
 	re.Error(err)
 
-	// qps is not allowed to be set in priorities for write-peer-priorities
+	// query is not allowed to be set in priorities for write-peer-priorities
 	hc = initHotRegionScheduleConfig()
 	hc.WritePeerPriorities = []string{"query", "byte"}
 	err = hc.valid()
diff --git a/server/schedulers/hot_region_v2.go b/server/schedulers/hot_region_v2.go
index 591d616af3c..2de36b8bea8 100644
--- a/server/schedulers/hot_region_v2.go
+++ b/server/schedulers/hot_region_v2.go
@@ -19,6 +19,8 @@ package schedulers
 import (
 	"fmt"
 	"math"
+
+	"github.com/tikv/pd/server/statistics"
 )
 
 const (
@@ -98,11 +100,11 @@ func (bs *balanceSolver) filterUniformStoreV2() (string, bool) {
 	}
 	if isUniformFirstPriority && (bs.cur.progressiveRank == -2 || bs.cur.progressiveRank == -3) {
 		// If first priority dim is enough uniform, -2 is unnecessary and maybe lead to worse balance for second priority dim
-		return dimToString(bs.firstPriority), true
+		return statistics.DimToString(bs.firstPriority), true
 	}
 	if isUniformSecondPriority && bs.cur.progressiveRank == -1 {
 		// If second priority dim is enough uniform, -1 is unnecessary and maybe lead to worse balance for first priority dim
-		return dimToString(bs.secondPriority), true
+		return statistics.DimToString(bs.secondPriority), true
 	}
 	return "", false
 }
@@ -360,11 +362,11 @@ func (bs *balanceSolver) rankToDimStringV2() string {
 	case -4:
 		return "all"
 	case -3:
-		return dimToString(bs.firstPriority)
+		return statistics.DimToString(bs.firstPriority)
 	case -2:
-		return dimToString(bs.firstPriority) + "-only"
+		return statistics.DimToString(bs.firstPriority) + "-only"
 	case -1:
-		return dimToString(bs.secondPriority)
+		return statistics.DimToString(bs.secondPriority)
 	default:
 		return "none"
 	}
diff --git a/server/schedulers/hot_region_v2_test.go b/server/schedulers/hot_region_v2_test.go
index 43594025ac5..811e179bf10 100644
--- a/server/schedulers/hot_region_v2_test.go
+++ b/server/schedulers/hot_region_v2_test.go
@@ -51,7 +51,7 @@ func TestHotWriteRegionScheduleWithRevertRegionsDimSecond(t *testing.T) {
 	tc.AddRegionStore(3, 20)
 	tc.AddRegionStore(4, 20)
 	tc.AddRegionStore(5, 20)
-	hb.conf.WritePeerPriorities = []string{BytePriority, KeyPriority}
+	hb.conf.WritePeerPriorities = []string{statistics.BytePriority, statistics.KeyPriority}
 
 	tc.UpdateStorageWrittenStats(1, 15*units.MiB*statistics.StoreHeartBeatReportInterval, 15*units.MiB*statistics.StoreHeartBeatReportInterval)
 	tc.UpdateStorageWrittenStats(2, 16*units.MiB*statistics.StoreHeartBeatReportInterval, 20*units.MiB*statistics.StoreHeartBeatReportInterval)
@@ -114,7 +114,7 @@ func TestHotWriteRegionScheduleWithRevertRegionsDimFirst(t *testing.T) {
 	tc.AddRegionStore(3, 20)
 	tc.AddRegionStore(4, 20)
 	tc.AddRegionStore(5, 20)
-	hb.conf.WritePeerPriorities = []string{BytePriority, KeyPriority}
+	hb.conf.WritePeerPriorities = []string{statistics.BytePriority, statistics.KeyPriority}
 
 	tc.UpdateStorageWrittenStats(1, 15*units.MiB*statistics.StoreHeartBeatReportInterval, 15*units.MiB*statistics.StoreHeartBeatReportInterval)
 	tc.UpdateStorageWrittenStats(2, 20*units.MiB*statistics.StoreHeartBeatReportInterval, 14*units.MiB*statistics.StoreHeartBeatReportInterval)
@@ -168,7 +168,7 @@ func TestHotWriteRegionScheduleWithRevertRegionsDimFirstOnly(t *testing.T) {
 	tc.AddRegionStore(3, 20)
 	tc.AddRegionStore(4, 20)
 	tc.AddRegionStore(5, 20)
-	hb.conf.WritePeerPriorities = []string{BytePriority, KeyPriority}
+	hb.conf.WritePeerPriorities = []string{statistics.BytePriority, statistics.KeyPriority}
 
 	tc.UpdateStorageWrittenStats(1, 15*units.MiB*statistics.StoreHeartBeatReportInterval, 15*units.MiB*statistics.StoreHeartBeatReportInterval)
 	tc.UpdateStorageWrittenStats(2, 20*units.MiB*statistics.StoreHeartBeatReportInterval, 14*units.MiB*statistics.StoreHeartBeatReportInterval)
@@ -231,7 +231,7 @@ func TestHotReadRegionScheduleWithRevertRegionsDimSecond(t *testing.T) {
 	tc.AddRegionStore(3, 20)
 	tc.AddRegionStore(4, 20)
 	tc.AddRegionStore(5, 20)
-	hb.conf.ReadPriorities = []string{BytePriority, KeyPriority}
+	hb.conf.ReadPriorities = []string{statistics.BytePriority, statistics.KeyPriority}
 
 	tc.UpdateStorageReadStats(1, 15*units.MiB*statistics.StoreHeartBeatReportInterval, 15*units.MiB*statistics.StoreHeartBeatReportInterval)
 	tc.UpdateStorageReadStats(2, 16*units.MiB*statistics.StoreHeartBeatReportInterval, 20*units.MiB*statistics.StoreHeartBeatReportInterval)
@@ -284,7 +284,7 @@ func TestSkipUniformStore(t *testing.T) {
 	hb.(*hotScheduler).conf.SetSrcToleranceRatio(1)
 	hb.(*hotScheduler).conf.SetDstToleranceRatio(1)
 	hb.(*hotScheduler).conf.SetRankFormulaVersion("v2")
-	hb.(*hotScheduler).conf.ReadPriorities = []string{BytePriority, KeyPriority}
+	hb.(*hotScheduler).conf.ReadPriorities = []string{statistics.BytePriority, statistics.KeyPriority}
 	tc := mockcluster.NewCluster(ctx, opt)
 	tc.SetHotRegionCacheHitsThreshold(0)
 	tc.AddRegionStore(1, 20)
diff --git a/server/statistics/collector.go b/server/statistics/collector.go
index e904be43bed..73fff9b4c84 100644
--- a/server/statistics/collector.go
+++ b/server/statistics/collector.go
@@ -62,7 +62,7 @@ func (c tikvCollector) GetLoads(storeLoads, peerLoadSum []float64, rwTy RWType,
 		// Use sum of hot peers to estimate leader-only byte rate.
 		// For Write requests, Write{Bytes, Keys} is applied to all Peers at the same time,
 		// while the Leader and Follower are under different loads (usually the Leader consumes more CPU).
-		// Write{QPS} does not require such processing.
+		// Write{Query} does not require such processing.
 		loads[ByteDim] = peerLoadSum[ByteDim]
 		loads[KeyDim] = peerLoadSum[KeyDim]
 		loads[QueryDim] = storeLoads[StoreWriteQuery]
diff --git a/server/statistics/hot_peer.go b/server/statistics/hot_peer.go
index 85bdd7da403..6a028c0758f 100644
--- a/server/statistics/hot_peer.go
+++ b/server/statistics/hot_peer.go
@@ -24,14 +24,6 @@ import (
 	"go.uber.org/zap"
 )
 
-// Indicator dims.
-const (
-	ByteDim int = iota
-	KeyDim
-	QueryDim
-	DimLen
-)
-
 type dimStat struct {
 	typ     RegionStatKind
 	rolling *movingaverage.TimeMedian // it's used to statistic hot degree and average speed.
diff --git a/server/statistics/kind.go b/server/statistics/kind.go
index d84eaf4e3f3..f9cd78a7626 100644
--- a/server/statistics/kind.go
+++ b/server/statistics/kind.go
@@ -18,6 +18,50 @@ import (
 	"github.com/tikv/pd/server/core"
 )
 
+const (
+	// BytePriority indicates hot-region-scheduler prefer byte dim
+	BytePriority = "byte"
+	// KeyPriority indicates hot-region-scheduler prefer key dim
+	KeyPriority = "key"
+	// QueryPriority indicates hot-region-scheduler prefer query dim
+	QueryPriority = "query"
+)
+
+// Indicator dims.
+const (
+	ByteDim int = iota
+	KeyDim
+	QueryDim
+	DimLen
+)
+
+// StringToDim returns the dim according to its name.
+func StringToDim(name string) int {
+	switch name {
+	case BytePriority:
+		return ByteDim
+	case KeyPriority:
+		return KeyDim
+	case QueryPriority:
+		return QueryDim
+	}
+	return ByteDim
+}
+
+// DimToString returns the name of the given dim.
+func DimToString(dim int) string {
+	switch dim {
+	case ByteDim:
+		return BytePriority
+	case KeyDim:
+		return KeyPriority
+	case QueryDim:
+		return QueryPriority
+	default:
+		return ""
+	}
+}
+
 // RegionStatKind represents the statistics type of region.
 type RegionStatKind int
 
@@ -159,10 +203,9 @@ func (rw RWType) Inverse() RWType {
 	switch rw {
 	case Write:
 		return Read
-	case Read:
+	default: // Case Read
 		return Write
 	}
-	return Read
 }
 
 // GetLoadRatesFromPeer gets the load rates of the read or write type from PeerInfo.
diff --git a/tools/pd-ctl/pdctl/command/scheduler.go b/tools/pd-ctl/pdctl/command/scheduler.go
index f0f8eb62065..f720273983e 100644
--- a/tools/pd-ctl/pdctl/command/scheduler.go
+++ b/tools/pd-ctl/pdctl/command/scheduler.go
@@ -26,7 +26,7 @@ import (
 
 	"github.com/pingcap/errors"
 	"github.com/spf13/cobra"
-	"github.com/tikv/pd/server/schedulers"
+	"github.com/tikv/pd/server/statistics"
 )
 
 var (
@@ -684,15 +684,15 @@ func postSchedulerConfigCommandFunc(cmd *cobra.Command, schedulerName string, ar
 		priorities := make([]string, 0)
 		prioritiesMap := make(map[string]struct{})
 		for _, priority := range strings.Split(value, ",") {
-			if priority != schedulers.BytePriority && priority != schedulers.KeyPriority && priority != schedulers.QueryPriority {
+			if priority != statistics.BytePriority && priority != statistics.KeyPriority && priority != statistics.QueryPriority {
 				cmd.Println(fmt.Sprintf("priority should be one of [%s, %s, %s]",
-					schedulers.BytePriority,
-					schedulers.QueryPriority,
-					schedulers.KeyPriority))
+					statistics.BytePriority,
+					statistics.QueryPriority,
+					statistics.KeyPriority))
 				return
 			}
-			if priority == schedulers.QueryPriority && key == "write-peer-priorities" {
-				cmd.Println("qps is not allowed to be set in priorities for write-peer-priorities")
+			if priority == statistics.QueryPriority && key == "write-peer-priorities" {
+				cmd.Println("query is not allowed to be set in priorities for write-peer-priorities")
 				return
 			}
 			priorities = append(priorities, priority)
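Not part of the patch: a minimal sketch of how code outside the schedulers package can consume the relocated helpers after this change. It relies only on the `BytePriority`/`KeyPriority`/`QueryPriority` constants and the `StringToDim`/`DimToString` functions introduced in the `kind.go` hunk above; the standalone `main` wrapper is purely illustrative.

```go
package main

import (
	"fmt"

	"github.com/tikv/pd/server/statistics"
)

func main() {
	// Priority names and dim indexes both come from the statistics package now.
	for _, p := range []string{statistics.QueryPriority, statistics.BytePriority, statistics.KeyPriority} {
		dim := statistics.StringToDim(p) // "query" -> QueryDim, "byte" -> ByteDim, "key" -> KeyDim
		fmt.Printf("%s -> dim %d -> %s\n", p, dim, statistics.DimToString(dim))
	}
	// Unrecognized names fall back to ByteDim, mirroring StringToDim's default return.
	fmt.Println(statistics.StringToDim("unknown") == statistics.ByteDim) // true
}
```

Keeping the name-to-dim mapping next to the dim constants in `statistics` is what allows both `schedulers` and `pd-ctl` to drop their own copies of the priority strings, as the hunks above show.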