Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 4 additions & 4 deletions server/cluster/cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -680,7 +680,7 @@ func (c *RaftCluster) processRegionHeartbeat(region *core.RegionInfo) error {
}

if c.regionStats != nil {
c.regionStats.Observe(region, c.takeRegionStoresLocked(region))
c.regionStats.Observe(region, c.getRegionStoresLocked(region))
}

for _, writeItem := range writeItems {
Expand Down Expand Up @@ -1339,14 +1339,14 @@ func (c *RaftCluster) updateRegionsLabelLevelStats(regions []*core.RegionInfo) {
c.Lock()
defer c.Unlock()
for _, region := range regions {
c.labelLevelStats.Observe(region, c.takeRegionStoresLocked(region), c.opt.GetLocationLabels())
c.labelLevelStats.Observe(region, c.getRegionStoresLocked(region), c.opt.GetLocationLabels())
}
}

func (c *RaftCluster) takeRegionStoresLocked(region *core.RegionInfo) []*core.StoreInfo {
func (c *RaftCluster) getRegionStoresLocked(region *core.RegionInfo) []*core.StoreInfo {
stores := make([]*core.StoreInfo, 0, len(region.GetPeers()))
for _, p := range region.GetPeers() {
if store := c.core.TakeStore(p.StoreId); store != nil {
if store := c.core.GetStore(p.StoreId); store != nil {
stores = append(stores, store)
}
}
Expand Down
7 changes: 0 additions & 7 deletions server/core/basic_cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -282,13 +282,6 @@ func (bc *BasicCluster) DeleteStore(store *StoreInfo) {
bc.Stores.DeleteStore(store)
}

// TakeStore returns the point of the origin StoreInfo with the specified storeID.
// Unlike a copying accessor, the returned pointer aliases the stored value, so
// callers must not mutate it without holding the cluster lock themselves.
func (bc *BasicCluster) TakeStore(storeID uint64) *StoreInfo {
	// Read lock only: the lookup does not modify the stores map.
	bc.RLock()
	defer bc.RUnlock()
	return bc.Stores.TakeStore(storeID)
}

// PreCheckPutRegion checks if the region is valid to put.
func (bc *BasicCluster) PreCheckPutRegion(region *RegionInfo) (*RegionInfo, error) {
bc.RLock()
Expand Down
9 changes: 0 additions & 9 deletions server/core/store.go
Original file line number Diff line number Diff line change
Expand Up @@ -515,15 +515,6 @@ func (s *StoresInfo) GetStore(storeID uint64) *StoreInfo {
return store
}

// TakeStore returns the point of the origin StoreInfo with the specified storeID.
// It returns nil when no store with that ID exists.
func (s *StoresInfo) TakeStore(storeID uint64) *StoreInfo {
	// Indexing a map with a missing key yields the zero value, which for a
	// pointer element type is nil — exactly the not-found result we want.
	return s.stores[storeID]
}

// SetStore sets a StoreInfo with storeID.
func (s *StoresInfo) SetStore(store *StoreInfo) {
s.stores[store.GetID()] = store
Expand Down
4 changes: 2 additions & 2 deletions server/schedule/checker/rule_checker_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -341,7 +341,7 @@ func (s *testRuleCheckerSuite) TestIssue2419(c *C) {
// Ref https://github.com/tikv/pd/issues/3521
// The problem is when offline a store, we may add learner multiple times if
// the operator is timeout.
func (s *testRuleCheckerSuite) TestIssue3521_PriorityFixOrphanPeer(c *C) {
func (s *testRuleCheckerSuite) TestPriorityFixOrphanPeer(c *C) {
s.cluster.AddLabelsStore(1, 1, map[string]string{"host": "host1"})
s.cluster.AddLabelsStore(2, 1, map[string]string{"host": "host1"})
s.cluster.AddLabelsStore(3, 1, map[string]string{"host": "host2"})
Expand Down Expand Up @@ -392,7 +392,7 @@ func (s *testRuleCheckerSuite) TestIssue3293(c *C) {
},
})
c.Assert(err, IsNil)
s.cluster.DeleteStore(s.cluster.TakeStore(5))
s.cluster.DeleteStore(s.cluster.GetStore(5))
err = s.ruleManager.SetRule(&placement.Rule{
GroupID: "TiDB_DDL_51",
ID: "default",
Expand Down
94 changes: 32 additions & 62 deletions server/schedule/operator_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -641,85 +641,29 @@ func (oc *OperatorController) SendScheduleCommand(region *core.RegionInfo, step
// The newly added peer is pending.
return
}
cmd = &pdpb.RegionHeartbeatResponse{
ChangePeer: &pdpb.ChangePeer{
ChangeType: eraftpb.ConfChangeType_AddNode,
Peer: &metapb.Peer{
Id: st.PeerID,
StoreId: st.ToStore,
Role: metapb.PeerRole_Voter,
},
},
}
cmd = addNode(st.PeerID, st.ToStore)
case operator.AddLightPeer:
if region.GetStorePeer(st.ToStore) != nil {
// The newly added peer is pending.
return
}
cmd = &pdpb.RegionHeartbeatResponse{
ChangePeer: &pdpb.ChangePeer{
ChangeType: eraftpb.ConfChangeType_AddNode,
Peer: &metapb.Peer{
Id: st.PeerID,
StoreId: st.ToStore,
Role: metapb.PeerRole_Voter,
},
},
}
cmd = addNode(st.PeerID, st.ToStore)
case operator.AddLearner:
if region.GetStorePeer(st.ToStore) != nil {
// The newly added peer is pending.
return
}
cmd = &pdpb.RegionHeartbeatResponse{
ChangePeer: &pdpb.ChangePeer{
ChangeType: eraftpb.ConfChangeType_AddLearnerNode,
Peer: &metapb.Peer{
Id: st.PeerID,
StoreId: st.ToStore,
Role: metapb.PeerRole_Learner,
},
},
}
cmd = addLearnerNode(st.PeerID, st.ToStore)
case operator.AddLightLearner:
if region.GetStorePeer(st.ToStore) != nil {
// The newly added peer is pending.
return
}
cmd = &pdpb.RegionHeartbeatResponse{
ChangePeer: &pdpb.ChangePeer{
ChangeType: eraftpb.ConfChangeType_AddLearnerNode,
Peer: &metapb.Peer{
Id: st.PeerID,
StoreId: st.ToStore,
Role: metapb.PeerRole_Learner,
},
},
}
cmd = addLearnerNode(st.PeerID, st.ToStore)
case operator.PromoteLearner:
cmd = &pdpb.RegionHeartbeatResponse{
ChangePeer: &pdpb.ChangePeer{
// reuse AddNode type
ChangeType: eraftpb.ConfChangeType_AddNode,
Peer: &metapb.Peer{
Id: st.PeerID,
StoreId: st.ToStore,
Role: metapb.PeerRole_Voter,
},
},
}
cmd = addNode(st.PeerID, st.ToStore)
case operator.DemoteFollower:
cmd = &pdpb.RegionHeartbeatResponse{
ChangePeer: &pdpb.ChangePeer{
// reuse AddLearnerNode type
ChangeType: eraftpb.ConfChangeType_AddLearnerNode,
Peer: &metapb.Peer{
Id: st.PeerID,
StoreId: st.ToStore,
Role: metapb.PeerRole_Learner,
},
},
}
cmd = addLearnerNode(st.PeerID, st.ToStore)
case operator.RemovePeer:
cmd = &pdpb.RegionHeartbeatResponse{
ChangePeer: &pdpb.ChangePeer{
Expand Down Expand Up @@ -758,6 +702,32 @@ func (oc *OperatorController) SendScheduleCommand(region *core.RegionInfo, step
oc.hbStreams.SendMsg(region, cmd)
}

// addNode builds a heartbeat response instructing the region to add a voter
// peer with the given peer ID on the given store.
func addNode(id, storeID uint64) *pdpb.RegionHeartbeatResponse {
	voter := &metapb.Peer{
		Id:      id,
		StoreId: storeID,
		Role:    metapb.PeerRole_Voter,
	}
	change := &pdpb.ChangePeer{
		ChangeType: eraftpb.ConfChangeType_AddNode,
		Peer:       voter,
	}
	return &pdpb.RegionHeartbeatResponse{ChangePeer: change}
}

// addLearnerNode builds a heartbeat response instructing the region to add a
// learner peer with the given peer ID on the given store.
func addLearnerNode(id, storeID uint64) *pdpb.RegionHeartbeatResponse {
	learner := &metapb.Peer{
		Id:      id,
		StoreId: storeID,
		Role:    metapb.PeerRole_Learner,
	}
	change := &pdpb.ChangePeer{
		ChangeType: eraftpb.ConfChangeType_AddLearnerNode,
		Peer:       learner,
	}
	return &pdpb.RegionHeartbeatResponse{ChangePeer: change}
}

func (oc *OperatorController) pushHistory(op *operator.Operator) {
oc.Lock()
defer oc.Unlock()
Expand Down
1 change: 1 addition & 0 deletions server/statistics/hot_peer_cache_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -277,6 +277,7 @@ func (t *testHotPeerCache) TestThresholdWithUpdateHotPeerStat(c *C) {
t.testMetrics(c, 17., byteRate, expectThreshold)
t.testMetrics(c, 1., byteRate, expectThreshold)
}

func (t *testHotPeerCache) testMetrics(c *C, interval, byteRate, expectThreshold float64) {
cache := NewHotStoresStats(ReadFlow)
minThresholds := minHotThresholds[cache.kind]
Expand Down