@@ -41,21 +41,21 @@ import (
4141// TSO Stream Builder Factory
4242
4343type tsoStreamBuilderFactory interface {
44- makeBuilder (cc * grpc.ClientConn ) tsoStreamBuilder
44+ makeBuilder (cc * grpc.ClientConn , keyspaceName string ) tsoStreamBuilder
4545}
4646
4747// PDStreamBuilderFactory is a factory for building TSO streams to the PD cluster.
4848type PDStreamBuilderFactory struct {}
4949
50- func (* PDStreamBuilderFactory ) makeBuilder (cc * grpc.ClientConn ) tsoStreamBuilder {
51- return & pdStreamBuilder {client : pdpb .NewPDClient (cc ), serverURL : cc .Target ()}
50+ func (* PDStreamBuilderFactory ) makeBuilder (cc * grpc.ClientConn , keyspaceName string ) tsoStreamBuilder {
51+ return & pdStreamBuilder {client : pdpb .NewPDClient (cc ), serverURL : cc .Target (), keyspaceName : keyspaceName }
5252}
5353
5454// MSStreamBuilderFactory is a factory for building TSO streams to the microservice cluster.
5555type MSStreamBuilderFactory struct {}
5656
57- func (* MSStreamBuilderFactory ) makeBuilder (cc * grpc.ClientConn ) tsoStreamBuilder {
58- return & msStreamBuilder {client : tsopb .NewTSOClient (cc ), serverURL : cc .Target ()}
57+ func (* MSStreamBuilderFactory ) makeBuilder (cc * grpc.ClientConn , keyspaceName string ) tsoStreamBuilder {
58+ return & msStreamBuilder {client : tsopb .NewTSOClient (cc ), serverURL : cc .Target (), keyspaceName : keyspaceName }
5959}
6060
6161// TSO Stream Builder
@@ -65,8 +65,9 @@ type tsoStreamBuilder interface {
6565}
6666
6767type pdStreamBuilder struct {
68- serverURL string
69- client pdpb.PDClient
68+ serverURL string
69+ keyspaceName string
70+ client pdpb.PDClient
7071}
7172
7273func (b * pdStreamBuilder ) build (ctx context.Context , cancel context.CancelFunc , timeout time.Duration ) (* tsoStream , error ) {
@@ -76,14 +77,15 @@ func (b *pdStreamBuilder) build(ctx context.Context, cancel context.CancelFunc,
7677 stream , err := b .client .Tso (ctx )
7778 done <- struct {}{}
7879 if err == nil {
79- return newTSOStream (ctx , b .serverURL , pdTSOStreamAdapter {stream }), nil
80+ return newTSOStream (ctx , b .serverURL , pdTSOStreamAdapter {stream }, b . keyspaceName ), nil
8081 }
8182 return nil , err
8283}
8384
8485type msStreamBuilder struct {
85- serverURL string
86- client tsopb.TSOClient
86+ serverURL string
87+ client tsopb.TSOClient
88+ keyspaceName string
8789}
8890
8991func (b * msStreamBuilder ) build (
@@ -95,7 +97,7 @@ func (b *msStreamBuilder) build(
9597 stream , err := b .client .Tso (ctx )
9698 done <- struct {}{}
9799 if err == nil {
98- return newTSOStream (ctx , b .serverURL , tsoTSOStreamAdapter {stream }), nil
100+ return newTSOStream (ctx , b .serverURL , tsoTSOStreamAdapter {stream }, b . keyspaceName ), nil
99101 }
100102 return nil , err
101103}
@@ -220,6 +222,8 @@ type tsoStream struct {
220222
221223 ongoingRequestCountGauge prometheus.Gauge
222224 ongoingRequests atomic.Int32
225+
226+ keyspaceName string
223227}
224228
225229const (
@@ -235,7 +239,7 @@ const (
235239 maxPendingRequestsInTSOStream = 64
236240)
237241
238- func newTSOStream (ctx context.Context , serverURL string , stream grpcTSOStreamAdapter ) * tsoStream {
242+ func newTSOStream (ctx context.Context , serverURL string , stream grpcTSOStreamAdapter , keyspaceName string ) * tsoStream {
239243 streamID := fmt .Sprintf ("%s-%d" , serverURL , streamIDAlloc .Add (1 ))
240244 // To make error handling in `tsoDispatcher` work, the internal `cancel` and external `cancel` is better to be
241245 // distinguished.
@@ -250,6 +254,7 @@ func newTSOStream(ctx context.Context, serverURL string, stream grpcTSOStreamAda
250254 cancel : cancel ,
251255
252256 ongoingRequestCountGauge : metrics .OngoingRequestCountGauge .WithLabelValues (streamID ),
257+ keyspaceName : keyspaceName ,
253258 }
254259 s .wg .Add (1 )
255260 go s .recvLoop (ctx )
@@ -390,6 +395,13 @@ func (s *tsoStream) recvLoop(ctx context.Context) {
390395 // Update the metrics in seconds.
391396 metrics .EstimateTSOLatencyGauge .WithLabelValues (s .streamID ).Set (micros * 1e-6 )
392397 }
398+ successObserver := sync .OnceValue (func () prometheus.Observer {
399+ return metrics .RequestDuration .WithLabelValues ("tso" , s .keyspaceName )
400+ })
401+
402+ failedObserver := sync .OnceValue (func () prometheus.Observer {
403+ return metrics .RequestDuration .WithLabelValues ("tso-failed" , s .keyspaceName )
404+ })
393405
394406recvLoop:
395407 for {
@@ -401,7 +413,6 @@ recvLoop:
401413 }
402414
403415 res , err := s .stream .Recv ()
404-
405416 // Try to load the corresponding `batchedRequests`. If `Recv` is successful, there must be a request pending
406417 // in the queue.
407418 select {
@@ -419,7 +430,7 @@ recvLoop:
419430 // Note that it's also possible that the stream is broken due to network without being requested. In this
420431 // case, `Recv` may return an error while no request is pending.
421432 if hasReq {
422- metrics . RequestFailedDurationTSO .Observe (latencySeconds )
433+ failedObserver () .Observe (latencySeconds )
423434 }
424435 if err == io .EOF {
425436 finishWithErr = errors .WithStack (errs .ErrClientTSOStreamClosed )
@@ -431,8 +442,7 @@ recvLoop:
431442 finishWithErr = errors .New ("tsoStream timing order broken" )
432443 break recvLoop
433444 }
434-
435- metrics .RequestDurationTSO .Observe (latencySeconds )
445+ successObserver ().Observe (latencySeconds )
436446 metrics .TSOBatchSize .Observe (float64 (res .count ))
437447 updateEstimatedLatency (currentReq .startTime , latency )
438448
0 commit comments