Skip to content

Commit 9b040a3

Browse files
committed
server: rename raft cluster start log prefix
Signed-off-by: bufferflies <1045931706@qq.com>
1 parent 3e30a4e commit 9b040a3

File tree

2 files changed

+53
-53
lines changed

2 files changed

+53
-53
lines changed

server/cluster/cluster.go

Lines changed: 32 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -344,24 +344,24 @@ func (c *RaftCluster) Start(s Server, bootstrap bool) (err error) {
344344
return nil
345345
}
346346
c.isKeyspaceGroupEnabled = s.IsKeyspaceGroupEnabled()
347-
log.Info("[leader-ready] start to initialize cluster")
347+
log.Info("[raft-cluster-start] start to initialize cluster")
348348
initClusterStart := time.Now()
349349
err = c.InitCluster(s.GetAllocator(), s.GetPersistOptions(), s.GetHBStreams(), s.GetKeyspaceGroupManager())
350350
if err != nil {
351-
log.Warn("[leader-ready] failed to initialize cluster", errs.ZapError(err), zap.Duration("cost", time.Since(initClusterStart)))
351+
log.Warn("[raft-cluster-start] failed to initialize cluster", errs.ZapError(err), zap.Duration("cost", time.Since(initClusterStart)))
352352
return err
353353
}
354354
initClusterDuration := time.Since(initClusterStart)
355-
log.Info("[leader-ready] initialize cluster completed", zap.Duration("cost", initClusterDuration))
355+
log.Info("[raft-cluster-start] initialize cluster completed", zap.Duration("cost", initClusterDuration))
356356
// We should not manage tso service when bootstrap try to start raft cluster.
357357
// It only is controlled by leader election.
358358
// Ref: https://github.com/tikv/pd/issues/8836
359359
if !bootstrap {
360-
log.Info("[leader-ready] start to check TSO service")
360+
log.Info("[raft-cluster-start] start to check TSO service")
361361
checkTSOStart := time.Now()
362362
c.checkTSOService()
363363
checkTSODuration := time.Since(checkTSOStart)
364-
log.Info("[leader-ready] check TSO service completed", zap.Duration("cost", checkTSODuration))
364+
log.Info("[raft-cluster-start] check TSO service completed", zap.Duration("cost", checkTSODuration))
365365
}
366366
defer func() {
367367
if !bootstrap && err != nil {
@@ -376,39 +376,39 @@ func (c *RaftCluster) Start(s Server, bootstrap bool) (err error) {
376376
}
377377
failpoint.Return(err)
378378
})
379-
log.Info("[leader-ready] start to load cluster info")
379+
log.Info("[raft-cluster-start] start to load cluster info")
380380
loadClusterInfoStart := time.Now()
381381
cluster, err := c.LoadClusterInfo()
382382
if err != nil {
383-
log.Warn("[leader-ready] failed to load cluster info", errs.ZapError(err), zap.Duration("cost", time.Since(loadClusterInfoStart)))
383+
log.Warn("[raft-cluster-start] failed to load cluster info", errs.ZapError(err), zap.Duration("cost", time.Since(loadClusterInfoStart)))
384384
return err
385385
}
386386
if cluster == nil {
387387
loadClusterInfoDuration := time.Since(loadClusterInfoStart)
388-
log.Warn("[leader-ready] cluster is not bootstrapped", zap.Duration("cost", loadClusterInfoDuration))
388+
log.Warn("[raft-cluster-start] cluster is not bootstrapped", zap.Duration("cost", loadClusterInfoDuration))
389389
return nil
390390
}
391391
if c.opt.IsPlacementRulesEnabled() {
392392
ruleInitStart := time.Now()
393393
err := c.ruleManager.Initialize(c.opt.GetMaxReplicas(), c.opt.GetLocationLabels(), c.opt.GetIsolationLevel(), false)
394394
if err != nil {
395-
log.Warn("[leader-ready] failed to initialize placement rules", errs.ZapError(err), zap.Duration("cost", time.Since(ruleInitStart)))
395+
log.Warn("[raft-cluster-start] failed to initialize placement rules", errs.ZapError(err), zap.Duration("cost", time.Since(ruleInitStart)))
396396
return err
397397
}
398-
log.Info("[leader-ready] initialize placement rules completed", zap.Duration("cost", time.Since(ruleInitStart)))
398+
log.Info("[raft-cluster-start] initialize placement rules completed", zap.Duration("cost", time.Since(ruleInitStart)))
399399
}
400400
loadClusterInfoDuration := time.Since(loadClusterInfoStart)
401-
log.Info("[leader-ready] load cluster info completed", zap.Duration("cost", loadClusterInfoDuration))
401+
log.Info("[raft-cluster-start] load cluster info completed", zap.Duration("cost", loadClusterInfoDuration))
402402

403-
log.Info("[leader-ready] creating region labeler")
403+
log.Info("[raft-cluster-start] creating region labeler")
404404
labelerStart := time.Now()
405405
c.regionLabeler, err = labeler.NewRegionLabeler(c.ctx, c.storage, regionLabelGCInterval)
406406
labelerDuration := time.Since(labelerStart)
407407
if err != nil {
408-
log.Warn("[leader-ready] region labeler creation failed", zap.Error(err), zap.Duration("cost", labelerDuration))
408+
log.Warn("[raft-cluster-start] region labeler creation failed", zap.Error(err), zap.Duration("cost", labelerDuration))
409409
return err
410410
}
411-
log.Info("[leader-ready] region labeler created", zap.Duration("cost", labelerDuration))
411+
log.Info("[raft-cluster-start] region labeler created", zap.Duration("cost", labelerDuration))
412412

413413
// create affinity manager with region labeler for key range validation and rebuild
414414
c.affinityManager, err = affinity.NewManager(c.ctx, c.storage, c, c.GetOpts(), c.regionLabeler)
@@ -417,52 +417,52 @@ func (c *RaftCluster) Start(s Server, bootstrap bool) (err error) {
417417
}
418418

419419
if !c.IsServiceIndependent(constant.SchedulingServiceName) {
420-
log.Info("[leader-ready] start to observe slow store status")
420+
log.Info("[raft-cluster-start] start to observe slow store status")
421421
observeSlowStoreStart := time.Now()
422422
for _, store := range c.GetStores() {
423423
storeID := store.GetID()
424424
c.slowStat.ObserveSlowStoreStatus(storeID, store.IsSlow())
425425
}
426426
observeSlowStoreDuration := time.Since(observeSlowStoreStart)
427-
log.Info("[leader-ready] observe slow store status completed", zap.Duration("cost", observeSlowStoreDuration))
427+
log.Info("[raft-cluster-start] observe slow store status completed", zap.Duration("cost", observeSlowStoreDuration))
428428
}
429-
log.Info("[leader-ready] start to create replication mode manager")
429+
log.Info("[raft-cluster-start] start to create replication mode manager")
430430
replicationModeStart := time.Now()
431431
c.replicationMode, err = replication.NewReplicationModeManager(s.GetConfig().ReplicationMode, c.storage, cluster, s)
432432
if err != nil {
433-
log.Warn("[leader-ready] failed to create replication mode manager", errs.ZapError(err), zap.Duration("cost", time.Since(replicationModeStart)))
433+
log.Warn("[raft-cluster-start] failed to create replication mode manager", errs.ZapError(err), zap.Duration("cost", time.Since(replicationModeStart)))
434434
return err
435435
}
436436
replicationModeDuration := time.Since(replicationModeStart)
437-
log.Info("[leader-ready] create replication mode manager completed", zap.Duration("cost", replicationModeDuration))
438-
log.Info("[leader-ready] start to load external timestamp")
437+
log.Info("[raft-cluster-start] create replication mode manager completed", zap.Duration("cost", replicationModeDuration))
438+
log.Info("[raft-cluster-start] start to load external timestamp")
439439
loadExternalTSStart := time.Now()
440440
c.loadExternalTS()
441-
log.Info("[leader-ready] load external timestamp completed", zap.Duration("cost", time.Since(loadExternalTSStart)))
442-
log.Info("[leader-ready] start to load min resolved ts")
441+
log.Info("[raft-cluster-start] load external timestamp completed", zap.Duration("cost", time.Since(loadExternalTSStart)))
442+
log.Info("[raft-cluster-start] start to load min resolved ts")
443443
loadMinResolvedTSStart := time.Now()
444444
c.loadMinResolvedTS()
445-
log.Info("[leader-ready] load min resolved ts completed", zap.Duration("cost", time.Since(loadMinResolvedTSStart)))
445+
log.Info("[raft-cluster-start] load min resolved ts completed", zap.Duration("cost", time.Since(loadMinResolvedTSStart)))
446446

447447
if c.isKeyspaceGroupEnabled {
448448
// bootstrap keyspace group manager after starting other parts successfully.
449449
// This order avoids a stuck goroutine in keyspaceGroupManager when it fails to create raftcluster.
450-
log.Info("[leader-ready] start to bootstrap keyspace group manager")
450+
log.Info("[raft-cluster-start] start to bootstrap keyspace group manager")
451451
bootstrapKeyspaceStart := time.Now()
452452
err = c.keyspaceGroupManager.Bootstrap(c.ctx)
453453
if err != nil {
454-
log.Warn("[leader-ready] failed to bootstrap keyspace group manager", errs.ZapError(err), zap.Duration("cost", time.Since(bootstrapKeyspaceStart)))
454+
log.Warn("[raft-cluster-start] failed to bootstrap keyspace group manager", errs.ZapError(err), zap.Duration("cost", time.Since(bootstrapKeyspaceStart)))
455455
return err
456456
}
457457
bootstrapKeyspaceDuration := time.Since(bootstrapKeyspaceStart)
458-
log.Info("[leader-ready] bootstrap keyspace group manager completed", zap.Duration("cost", bootstrapKeyspaceDuration))
458+
log.Info("[raft-cluster-start] bootstrap keyspace group manager completed", zap.Duration("cost", bootstrapKeyspaceDuration))
459459
}
460-
log.Info("[leader-ready] start to check scheduling service")
460+
log.Info("[raft-cluster-start] start to check scheduling service")
461461
checkSchedulingStart := time.Now()
462462
c.checkSchedulingService()
463463
checkSchedulingDuration := time.Since(checkSchedulingStart)
464-
log.Info("[leader-ready] check scheduling service completed", zap.Duration("cost", checkSchedulingDuration))
465-
log.Info("[leader-ready] start to start background jobs")
464+
log.Info("[raft-cluster-start] check scheduling service completed", zap.Duration("cost", checkSchedulingDuration))
465+
log.Info("[raft-cluster-start] start to start background jobs")
466466
backgroundJobsStart := time.Now()
467467
c.wg.Add(11)
468468
go c.runServiceCheckJob()
@@ -477,17 +477,17 @@ func (c *RaftCluster) Start(s Server, bootstrap bool) (err error) {
477477
go c.startProgressGC()
478478
go c.runStorageSizeCollector(s.GetMeteringWriter(), c.regionLabeler, s.GetKeyspaceManager())
479479
backgroundJobsDuration := time.Since(backgroundJobsStart)
480-
log.Info("[leader-ready] start background jobs completed", zap.Duration("cost", backgroundJobsDuration))
480+
log.Info("[raft-cluster-start] start background jobs completed", zap.Duration("cost", backgroundJobsDuration))
481481

482-
log.Info("[leader-ready] start to start runners")
482+
log.Info("[raft-cluster-start] start to start runners")
483483
runnersStart := time.Now()
484484
c.running = true
485485
c.heartbeatRunner.Start(c.ctx)
486486
c.miscRunner.Start(c.ctx)
487487
c.logRunner.Start(c.ctx)
488488
c.syncRegionRunner.Start(c.ctx)
489489
runnersDuration := time.Since(runnersStart)
490-
log.Info("[leader-ready] start runners completed", zap.Duration("cost", runnersDuration))
490+
log.Info("[raft-cluster-start] start runners completed", zap.Duration("cost", runnersDuration))
491491
return nil
492492
}
493493

server/server.go

Lines changed: 21 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -1750,7 +1750,7 @@ func (s *Server) campaignLeader() {
17501750
}
17511751
// Start timing from when leader is successfully elected
17521752
leaderReadyStart := time.Now()
1753-
log.Info("[leader-ready] leader election succeeded, start leader ready process")
1753+
log.Info("[raft-cluster-start] leader election succeeded, start leader ready process")
17541754

17551755
// Start keepalive the leadership and enable TSO service.
17561756
// TSO service is strictly enabled/disabled by PD leader lease for 2 reasons:
@@ -1764,76 +1764,76 @@ func (s *Server) campaignLeader() {
17641764
})
17651765

17661766
// maintain the PD leadership, after this, TSO can be service.
1767-
log.Info("[leader-ready] start to keep leader lease")
1767+
log.Info("[raft-cluster-start] start to keep leader lease")
17681768
keepLeaderStart := time.Now()
17691769
s.member.GetLeadership().Keep(ctx)
17701770
keepLeaderDuration := time.Since(keepLeaderStart)
1771-
log.Info("[leader-ready] keep leader lease completed", zap.Duration("cost", keepLeaderDuration))
1771+
log.Info("[raft-cluster-start] keep leader lease completed", zap.Duration("cost", keepLeaderDuration))
17721772
log.Info("campaign PD leader ok", zap.String("campaign-leader-name", s.Name()))
17731773

17741774
reloadConfigStart := time.Now()
17751775
if err := s.reloadConfigFromKV(); err != nil {
1776-
log.Warn("[leader-ready] failed to reload configuration", errs.ZapError(err), zap.Duration("cost", time.Since(reloadConfigStart)))
1776+
log.Warn("[raft-cluster-start] failed to reload configuration", errs.ZapError(err), zap.Duration("cost", time.Since(reloadConfigStart)))
17771777
return
17781778
}
17791779
reloadConfigDuration := time.Since(reloadConfigStart)
1780-
log.Info("[leader-ready] reload config from KV completed", zap.Duration("cost", reloadConfigDuration))
1780+
log.Info("[raft-cluster-start] reload config from KV completed", zap.Duration("cost", reloadConfigDuration))
17811781

17821782
loadTTLStart := time.Now()
17831783
if err := s.persistOptions.LoadTTLFromEtcd(s.ctx, s.client); err != nil {
1784-
log.Warn("[leader-ready] failed to load persistOptions from etcd", errs.ZapError(err), zap.Duration("cost", time.Since(loadTTLStart)))
1784+
log.Warn("[raft-cluster-start] failed to load persistOptions from etcd", errs.ZapError(err), zap.Duration("cost", time.Since(loadTTLStart)))
17851785
return
17861786
}
17871787
loadTTLDuration := time.Since(loadTTLStart)
1788-
log.Info("[leader-ready] load persist options from etcd completed", zap.Duration("cost", loadTTLDuration))
1788+
log.Info("[raft-cluster-start] load persist options from etcd completed", zap.Duration("cost", loadTTLDuration))
17891789

17901790
encryptionStart := time.Now()
17911791
if err := s.encryptionKeyManager.SetLeadership(s.member.GetLeadership()); err != nil {
1792-
log.Warn("[leader-ready] failed to initialize encryption", errs.ZapError(err), zap.Duration("cost", time.Since(encryptionStart)))
1792+
log.Warn("[raft-cluster-start] failed to initialize encryption", errs.ZapError(err), zap.Duration("cost", time.Since(encryptionStart)))
17931793
return
17941794
}
17951795
encryptionDuration := time.Since(encryptionStart)
1796-
log.Info("[leader-ready] initialize encryption completed", zap.Duration("cost", encryptionDuration))
1796+
log.Info("[raft-cluster-start] initialize encryption completed", zap.Duration("cost", encryptionDuration))
17971797

17981798
callbacksStart := time.Now()
1799-
log.Info("[leader-ready] triggering the leader callback functions")
1799+
log.Info("[raft-cluster-start] triggering the leader callback functions")
18001800
for _, cb := range s.leaderCallbacks {
18011801
if err := cb(ctx); err != nil {
1802-
log.Warn("[leader-ready] failed to execute leader callback function", errs.ZapError(err), zap.Duration("cost", time.Since(callbacksStart)))
1802+
log.Warn("[raft-cluster-start] failed to execute leader callback function", errs.ZapError(err), zap.Duration("cost", time.Since(callbacksStart)))
18031803
return
18041804
}
18051805
}
18061806
callbacksDuration := time.Since(callbacksStart)
1807-
log.Info("[leader-ready] trigger leader callback functions completed", zap.Duration("cost", callbacksDuration))
1807+
log.Info("[raft-cluster-start] trigger leader callback functions completed", zap.Duration("cost", callbacksDuration))
18081808

18091809
// Try to create raft cluster.
18101810
createRaftClusterStart := time.Now()
18111811
if err := s.createRaftCluster(); err != nil {
1812-
log.Warn("[leader-ready] failed to create raft cluster", errs.ZapError(err), zap.Duration("cost", time.Since(createRaftClusterStart)))
1812+
log.Warn("[raft-cluster-start] failed to create raft cluster", errs.ZapError(err), zap.Duration("cost", time.Since(createRaftClusterStart)))
18131813
return
18141814
}
18151815
createRaftClusterDuration := time.Since(createRaftClusterStart)
1816-
log.Info("[leader-ready] create raft cluster completed", zap.Duration("cost", createRaftClusterDuration))
1816+
log.Info("[raft-cluster-start] create raft cluster completed", zap.Duration("cost", createRaftClusterDuration))
18171817
defer s.stopRaftCluster()
18181818
failpoint.Inject("rebaseErr", func() {
18191819
failpoint.Return()
18201820
})
18211821
rebaseStart := time.Now()
18221822
if err := s.idAllocator.Rebase(); err != nil {
1823-
log.Warn("[leader-ready] failed to sync id from etcd", errs.ZapError(err), zap.Duration("cost", time.Since(rebaseStart)))
1823+
log.Warn("[raft-cluster-start] failed to sync id from etcd", errs.ZapError(err), zap.Duration("cost", time.Since(rebaseStart)))
18241824
return
18251825
}
18261826
rebaseDuration := time.Since(rebaseStart)
1827-
log.Info("[leader-ready] sync id from etcd completed", zap.Duration("cost", rebaseDuration))
1827+
log.Info("[raft-cluster-start] sync id from etcd completed", zap.Duration("cost", rebaseDuration))
18281828
// PromoteSelf to accept the remaining service, such as GetStore, GetRegion.
1829-
log.Info("[leader-ready] start to promote leader")
1829+
log.Info("[raft-cluster-start] start to promote leader")
18301830
enableLeaderStart := time.Now()
18311831
s.member.PromoteSelf()
18321832
enableLeaderDuration := time.Since(enableLeaderStart)
18331833
member.ServiceMemberGauge.WithLabelValues(PD).Set(1)
1834-
log.Info("[leader-ready] promote leader completed", zap.Duration("cost", enableLeaderDuration))
1834+
log.Info("[raft-cluster-start] promote leader completed", zap.Duration("cost", enableLeaderDuration))
18351835
totalDuration := time.Since(leaderReadyStart)
1836-
log.Info("[leader-ready] PD leader is ready to serve", zap.String("leader-name", s.Name()), zap.Duration("total-cost", totalDuration))
1836+
log.Info("[raft-cluster-start] PD leader is ready to serve", zap.String("leader-name", s.Name()), zap.Duration("total-cost", totalDuration))
18371837
defer resetLeaderOnce.Do(func() {
18381838
// as soon as cancel the leadership keepalive, then other member have chance
18391839
// to be new leader.
@@ -1842,11 +1842,11 @@ func (s *Server) campaignLeader() {
18421842
member.ServiceMemberGauge.WithLabelValues(PD).Set(0)
18431843
})
18441844

1845-
log.Info("[leader-ready] start to check PD version with cluster version")
1845+
log.Info("[raft-cluster-start] start to check PD version with cluster version")
18461846
versionCheckStart := time.Now()
18471847
CheckPDVersionWithClusterVersion(s.persistOptions)
18481848
versionCheckDuration := time.Since(versionCheckStart)
1849-
log.Info("[leader-ready] check PD version with cluster version completed", zap.Duration("cost", versionCheckDuration))
1849+
log.Info("[raft-cluster-start] check PD version with cluster version completed", zap.Duration("cost", versionCheckDuration))
18501850

18511851
leaderTicker := time.NewTicker(mcs.LeaderTickInterval)
18521852
defer leaderTicker.Stop()

0 commit comments

Comments
 (0)