Skip to content

Commit d2c5888

Browse files
HunDunDM authored and ti-chi-bot committed
This is an automated cherry-pick of tikv#5920
ref tikv#4570, close tikv#5909 Signed-off-by: ti-chi-bot <ti-community-prow-bot@tidb.io>
1 parent 1ced7dd commit d2c5888

File tree

2 files changed

+131
-0
lines changed

2 files changed

+131
-0
lines changed

server/schedule/region_scatterer.go

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -309,6 +309,7 @@ func (r *RegionScatterer) scatterRegion(region *core.RegionInfo, group string) *
309309
// it is considered that the selected peer select itself.
310310
// This origin peer re-selects.
311311
if _, ok := peers[newPeer.GetStoreId()]; !ok || peer.GetStoreId() == newPeer.GetStoreId() {
312+
selectedStores[peer.GetStoreId()] = struct{}{}
312313
break
313314
}
314315
}

server/schedule/region_scatterer_test.go

Lines changed: 130 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -476,9 +476,102 @@ func (s *testScatterRegionSuite) TestRegionFromDifferentGroups(c *C) {
476476
check(scatterer.ordinaryEngine.selectedPeer)
477477
}
478478

479+
<<<<<<< HEAD
479480
// TestSelectedStores tests if the peer count has changed due to the picking strategy.
480481
// Ref https://github.com/tikv/pd/issues/4565
481482
func (s *testScatterRegionSuite) TestSelectedStores(c *C) {
483+
=======
484+
func TestRegionHasLearner(t *testing.T) {
485+
re := require.New(t)
486+
ctx, cancel := context.WithCancel(context.Background())
487+
defer cancel()
488+
opt := config.NewTestOptions()
489+
tc := mockcluster.NewCluster(ctx, opt)
490+
stream := hbstream.NewTestHeartbeatStreams(ctx, tc.ID, tc, false)
491+
oc := NewOperatorController(ctx, tc, stream)
492+
// Add 8 stores.
493+
voterCount := uint64(6)
494+
storeCount := uint64(8)
495+
for i := uint64(1); i <= voterCount; i++ {
496+
tc.AddLabelsStore(i, 0, map[string]string{"zone": "z1"})
497+
}
498+
for i := voterCount + 1; i <= 8; i++ {
499+
tc.AddLabelsStore(i, 0, map[string]string{"zone": "z2"})
500+
}
501+
tc.RuleManager.SetRule(&placement.Rule{
502+
GroupID: "pd",
503+
ID: "default",
504+
Role: placement.Voter,
505+
Count: 3,
506+
LabelConstraints: []placement.LabelConstraint{
507+
{
508+
Key: "zone",
509+
Op: placement.In,
510+
Values: []string{"z1"},
511+
},
512+
},
513+
})
514+
tc.RuleManager.SetRule(&placement.Rule{
515+
GroupID: "pd",
516+
ID: "learner",
517+
Role: placement.Learner,
518+
Count: 1,
519+
LabelConstraints: []placement.LabelConstraint{
520+
{
521+
Key: "zone",
522+
Op: placement.In,
523+
Values: []string{"z2"},
524+
},
525+
},
526+
})
527+
scatterer := NewRegionScatterer(ctx, tc, oc)
528+
regionCount := 50
529+
for i := 1; i <= regionCount; i++ {
530+
_, err := scatterer.Scatter(tc.AddRegionWithLearner(uint64(i), uint64(1), []uint64{uint64(2), uint64(3)}, []uint64{7}), "group")
531+
re.NoError(err)
532+
}
533+
check := func(ss *selectedStores) {
534+
max := uint64(0)
535+
min := uint64(math.MaxUint64)
536+
for i := uint64(1); i <= max; i++ {
537+
count := ss.TotalCountByStore(i)
538+
if count > max {
539+
max = count
540+
}
541+
if count < min {
542+
min = count
543+
}
544+
}
545+
re.LessOrEqual(max-min, uint64(2))
546+
}
547+
check(scatterer.ordinaryEngine.selectedPeer)
548+
checkLeader := func(ss *selectedStores) {
549+
max := uint64(0)
550+
min := uint64(math.MaxUint64)
551+
for i := uint64(1); i <= voterCount; i++ {
552+
count := ss.TotalCountByStore(i)
553+
if count > max {
554+
max = count
555+
}
556+
if count < min {
557+
min = count
558+
}
559+
}
560+
re.LessOrEqual(max-2, uint64(regionCount)/voterCount)
561+
re.LessOrEqual(min-1, uint64(regionCount)/voterCount)
562+
for i := voterCount + 1; i <= storeCount; i++ {
563+
count := ss.TotalCountByStore(i)
564+
re.LessOrEqual(count, uint64(0))
565+
}
566+
}
567+
checkLeader(scatterer.ordinaryEngine.selectedLeader)
568+
}
569+
570+
// TestSelectedStoresTooFewPeers tests if the peer count has changed due to the picking strategy.
571+
// Ref https://github.com/tikv/pd/issues/4565
572+
func TestSelectedStoresTooFewPeers(t *testing.T) {
573+
re := require.New(t)
574+
>>>>>>> f5b5391c0 (region_scatterer: fix the bug that could generate schedule with too many peers (#5920))
482575
ctx, cancel := context.WithCancel(context.Background())
483576
defer cancel()
484577
opt := config.NewTestOptions()
@@ -510,6 +603,43 @@ func (s *testScatterRegionSuite) TestSelectedStores(c *C) {
510603
}
511604
}
512605

606+
// TestSelectedStoresTooManyPeers tests if the peer count has changed due to the picking strategy.
607+
// Ref https://github.com/tikv/pd/issues/5909
608+
func TestSelectedStoresTooManyPeers(t *testing.T) {
609+
re := require.New(t)
610+
ctx, cancel := context.WithCancel(context.Background())
611+
defer cancel()
612+
opt := config.NewTestOptions()
613+
tc := mockcluster.NewCluster(ctx, opt)
614+
stream := hbstream.NewTestHeartbeatStreams(ctx, tc.ID, tc, false)
615+
oc := NewOperatorController(ctx, tc, stream)
616+
// Add 5 stores.
617+
for i := uint64(1); i <= 5; i++ {
618+
tc.AddRegionStore(i, 0)
619+
// prevent store from being disconnected
620+
tc.SetStoreLastHeartbeatInterval(i, -10*time.Minute)
621+
}
622+
group := "group"
623+
scatterer := NewRegionScatterer(ctx, tc, oc)
624+
// priority 4 > 1 > 5 > 2 == 3
625+
for i := 0; i < 1200; i++ {
626+
scatterer.ordinaryEngine.selectedPeer.Put(2, group)
627+
scatterer.ordinaryEngine.selectedPeer.Put(3, group)
628+
}
629+
for i := 0; i < 800; i++ {
630+
scatterer.ordinaryEngine.selectedPeer.Put(5, group)
631+
}
632+
for i := 0; i < 400; i++ {
633+
scatterer.ordinaryEngine.selectedPeer.Put(1, group)
634+
}
635+
// test region with peer 1 2 3
636+
for i := uint64(1); i < 20; i++ {
637+
region := tc.AddLeaderRegion(i+200, i%3+1, (i+1)%3+1, (i+2)%3+1)
638+
op := scatterer.scatterRegion(region, group)
639+
re.False(isPeerCountChanged(op))
640+
}
641+
}
642+
513643
func isPeerCountChanged(op *operator.Operator) bool {
514644
if op == nil {
515645
return false

0 commit comments

Comments (0)