Skip to content

Commit 368f7c7

Browse files
HunDunDM authored and ti-chi-bot committed
This is an automated cherry-pick of tikv#5920
ref tikv#4570, close tikv#5909 Signed-off-by: ti-chi-bot <ti-community-prow-bot@tidb.io>
1 parent bb3953a commit 368f7c7

File tree

2 files changed

+131
-0
lines changed

2 files changed

+131
-0
lines changed

server/schedule/region_scatterer.go

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -310,6 +310,7 @@ func (r *RegionScatterer) scatterRegion(region *core.RegionInfo, group string) *
310310
// it is considered that the selected peer select itself.
311311
// This origin peer re-selects.
312312
if _, ok := peers[newPeer.GetStoreId()]; !ok || peer.GetStoreId() == newPeer.GetStoreId() {
313+
selectedStores[peer.GetStoreId()] = struct{}{}
313314
break
314315
}
315316
}

server/schedule/region_scatterer_test.go

Lines changed: 130 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -492,9 +492,102 @@ func (s *testScatterRegionSuite) TestRegionFromDifferentGroups(c *C) {
492492
check(scatterer.ordinaryEngine.selectedPeer)
493493
}
494494

495+
<<<<<<< HEAD
495496
// TestSelectedStores tests if the peer count has changed due to the picking strategy.
496497
// Ref https://github.com/tikv/pd/issues/4565
497498
func (s *testScatterRegionSuite) TestSelectedStores(c *C) {
499+
=======
500+
func TestRegionHasLearner(t *testing.T) {
501+
re := require.New(t)
502+
ctx, cancel := context.WithCancel(context.Background())
503+
defer cancel()
504+
opt := config.NewTestOptions()
505+
tc := mockcluster.NewCluster(ctx, opt)
506+
stream := hbstream.NewTestHeartbeatStreams(ctx, tc.ID, tc, false)
507+
oc := NewOperatorController(ctx, tc, stream)
508+
// Add 8 stores.
509+
voterCount := uint64(6)
510+
storeCount := uint64(8)
511+
for i := uint64(1); i <= voterCount; i++ {
512+
tc.AddLabelsStore(i, 0, map[string]string{"zone": "z1"})
513+
}
514+
for i := voterCount + 1; i <= 8; i++ {
515+
tc.AddLabelsStore(i, 0, map[string]string{"zone": "z2"})
516+
}
517+
tc.RuleManager.SetRule(&placement.Rule{
518+
GroupID: "pd",
519+
ID: "default",
520+
Role: placement.Voter,
521+
Count: 3,
522+
LabelConstraints: []placement.LabelConstraint{
523+
{
524+
Key: "zone",
525+
Op: placement.In,
526+
Values: []string{"z1"},
527+
},
528+
},
529+
})
530+
tc.RuleManager.SetRule(&placement.Rule{
531+
GroupID: "pd",
532+
ID: "learner",
533+
Role: placement.Learner,
534+
Count: 1,
535+
LabelConstraints: []placement.LabelConstraint{
536+
{
537+
Key: "zone",
538+
Op: placement.In,
539+
Values: []string{"z2"},
540+
},
541+
},
542+
})
543+
scatterer := NewRegionScatterer(ctx, tc, oc)
544+
regionCount := 50
545+
for i := 1; i <= regionCount; i++ {
546+
_, err := scatterer.Scatter(tc.AddRegionWithLearner(uint64(i), uint64(1), []uint64{uint64(2), uint64(3)}, []uint64{7}), "group")
547+
re.NoError(err)
548+
}
549+
check := func(ss *selectedStores) {
550+
max := uint64(0)
551+
min := uint64(math.MaxUint64)
552+
for i := uint64(1); i <= max; i++ {
553+
count := ss.TotalCountByStore(i)
554+
if count > max {
555+
max = count
556+
}
557+
if count < min {
558+
min = count
559+
}
560+
}
561+
re.LessOrEqual(max-min, uint64(2))
562+
}
563+
check(scatterer.ordinaryEngine.selectedPeer)
564+
checkLeader := func(ss *selectedStores) {
565+
max := uint64(0)
566+
min := uint64(math.MaxUint64)
567+
for i := uint64(1); i <= voterCount; i++ {
568+
count := ss.TotalCountByStore(i)
569+
if count > max {
570+
max = count
571+
}
572+
if count < min {
573+
min = count
574+
}
575+
}
576+
re.LessOrEqual(max-2, uint64(regionCount)/voterCount)
577+
re.LessOrEqual(min-1, uint64(regionCount)/voterCount)
578+
for i := voterCount + 1; i <= storeCount; i++ {
579+
count := ss.TotalCountByStore(i)
580+
re.LessOrEqual(count, uint64(0))
581+
}
582+
}
583+
checkLeader(scatterer.ordinaryEngine.selectedLeader)
584+
}
585+
586+
// TestSelectedStoresTooFewPeers tests if the peer count has changed due to the picking strategy.
587+
// Ref https://github.com/tikv/pd/issues/4565
588+
func TestSelectedStoresTooFewPeers(t *testing.T) {
589+
re := require.New(t)
590+
>>>>>>> f5b5391c0 (region_scatterer: fix the bug that could generate schedule with too many peers (#5920))
498591
ctx, cancel := context.WithCancel(context.Background())
499592
defer cancel()
500593
opt := config.NewTestOptions()
@@ -526,6 +619,43 @@ func (s *testScatterRegionSuite) TestSelectedStores(c *C) {
526619
}
527620
}
528621

622+
// TestSelectedStoresTooManyPeers tests if the peer count has changed due to the picking strategy.
623+
// Ref https://github.com/tikv/pd/issues/5909
624+
func TestSelectedStoresTooManyPeers(t *testing.T) {
625+
re := require.New(t)
626+
ctx, cancel := context.WithCancel(context.Background())
627+
defer cancel()
628+
opt := config.NewTestOptions()
629+
tc := mockcluster.NewCluster(ctx, opt)
630+
stream := hbstream.NewTestHeartbeatStreams(ctx, tc.ID, tc, false)
631+
oc := NewOperatorController(ctx, tc, stream)
632+
// Add 4 stores.
633+
for i := uint64(1); i <= 5; i++ {
634+
tc.AddRegionStore(i, 0)
635+
// prevent store from being disconnected
636+
tc.SetStoreLastHeartbeatInterval(i, -10*time.Minute)
637+
}
638+
group := "group"
639+
scatterer := NewRegionScatterer(ctx, tc, oc)
640+
// priority 4 > 1 > 5 > 2 == 3
641+
for i := 0; i < 1200; i++ {
642+
scatterer.ordinaryEngine.selectedPeer.Put(2, group)
643+
scatterer.ordinaryEngine.selectedPeer.Put(3, group)
644+
}
645+
for i := 0; i < 800; i++ {
646+
scatterer.ordinaryEngine.selectedPeer.Put(5, group)
647+
}
648+
for i := 0; i < 400; i++ {
649+
scatterer.ordinaryEngine.selectedPeer.Put(1, group)
650+
}
651+
// test region with peer 1 2 3
652+
for i := uint64(1); i < 20; i++ {
653+
region := tc.AddLeaderRegion(i+200, i%3+1, (i+1)%3+1, (i+2)%3+1)
654+
op := scatterer.scatterRegion(region, group)
655+
re.False(isPeerCountChanged(op))
656+
}
657+
}
658+
529659
func isPeerCountChanged(op *operator.Operator) bool {
530660
if op == nil {
531661
return false

0 commit comments

Comments (0)