/** *************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
* *************************************************************************************
*/
package coupledL2.tl2chi
import chisel3._
import chisel3.util._
import utility._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.tilelink.TLMessages._
import freechips.rocketchip.tilelink.TLPermissions._
import org.chipsalliance.cde.config.Parameters
import coupledL2._
import coupledL2.prefetch.{PrefetchTrain, PfSource}
import coupledL2.tl2chi.CHICohStates._
import coupledL2.MetaData._
class MainPipe(implicit p: Parameters) extends TL2CHIL2Module with HasCHIOpcodes with HasPerfEvents {
val io = IO(new Bundle() {
/* receive task from arbiter at stage 2 */
val taskFromArb_s2 = Flipped(ValidIO(new TaskBundle()))
/* status from arbiter at stage 1 */
val taskInfo_s1 = Flipped(ValidIO(new TaskBundle()))
/* handle set conflict in req arb */
val fromReqArb = Input(new Bundle() {
val status_s1 = new PipeEntranceStatus
})
/* block B and C at Entrance */
val toReqArb = Output(new BlockInfo())
/* block A at Entrance */
val toReqBuf = Output(Vec(2, Bool()))
/* handle capacity conflict of GrantBuffer */
val status_vec_toD = Vec(3, ValidIO(new PipeStatus))
/* handle capacity conflict of TX channels */
val status_vec_toTX = Vec(3, ValidIO(new PipeStatusWithCHI))
/* get dir result at stage 3 */
val dirResp_s3 = Input(new DirResult())
val replResp = Flipped(ValidIO(new ReplacerResult()))
/* send task to MSHRCtl at stage 3 */
val toMSHRCtl = new Bundle() {
val mshr_alloc_s3 = ValidIO(new MSHRRequest())
}
val fromMSHRCtl = new Bundle() {
val mshr_alloc_ptr = Input(UInt(mshrBits.W))
}
/* read C-channel Release Data and write into DS */
val bufResp = Input(new PipeBufferResp)
/* get ReleaseBuffer and RefillBuffer read result */
val refillBufResp_s3 = Flipped(ValidIO(new DSBlock))
val releaseBufResp_s3 = Flipped(ValidIO(new DSBlock))
/* read or write data storage */
val toDS = new Bundle() {
val en_s3 = Output(Bool())
val req_s3 = ValidIO(new DSRequest)
val rdata_s5 = Input(new DSBlock)
val wdata_s3 = Output(new DSBlock)
val error_s5 = Input(Bool())
}
/* send Grant via SourceD channel */
val toSourceD = DecoupledIO(new TaskWithData())
/* send req/Comp/CompData via TXREQ/TXRSP/TXDAT channel */
val toTXREQ = DecoupledIO(new CHIREQ())
val toTXRSP = DecoupledIO(new TaskBundle())
val toTXDAT = DecoupledIO(new TaskWithData())
/* write dir, including reset dir */
val metaWReq = ValidIO(new MetaWrite)
val tagWReq = ValidIO(new TagWrite)
/* read DS and write data into ReleaseBuf when the task needs to replace */
val releaseBufWrite = ValidIO(new MSHRBufWrite())
/* nested writeback */
val nestedwb = Output(new NestedWriteback())
val nestedwbData = Output(new DSBlock())
/* l2 refill hint */
val l1Hint = DecoupledIO(new L2ToL1Hint())
// val grantBufferHint = Flipped(ValidIO(new L2ToL1Hint()))
// val globalCounter = Input(UInt((log2Ceil(mshrsAll) + 1).W))
/* send prefetchTrain to Prefetch to trigger a prefetch req */
val prefetchTrain = prefetchOpt.map(_ => DecoupledIO(new PrefetchTrain))
/* top-down monitor */
// TODO
/* ECC error */
val error = ValidIO(new L2CacheErrorInfo)
/* l2 flush (CMO All) */
val cmoAllBlock = Option.when(cacheParams.enableL2Flush) (Input(Bool()))
val cmoLineDone = Option.when(cacheParams.enableL2Flush) (Output(Bool()))
})
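// Pipeline overview (summarizing the stage logic below):
//   s1: task info from the request arbiter, used for hint generation and entrance blocking
//   s2: the granted task enters the pipe (taskFromArb_s2)
//   s3: the directory result arrives (dirResp_s3); decide MSHR allocation, DS access,
//       directory write, and which output channel (SourceD / TXREQ / TXRSP / TXDAT) fires
//   s4/s5: carry tasks whose channel could not fire earlier; DS read data returns at s5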
require(chiOpt.isDefined)
val resetFinish = RegInit(false.B)
val resetIdx = RegInit((cacheParams.sets - 1).U)
/* block reqs when reset */
when (!resetFinish) {
resetIdx := resetIdx - 1.U
}
when (resetIdx === 0.U) {
resetFinish := true.B
}
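// While !resetFinish, the directory-write path below (metaWReq_s3) writes an empty
// MetaEntry to every way of set resetIdx, one set per cycle, so the whole directory
// is invalidated before any request is serviced.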
val txreq_s3, txreq_s4, txreq_s5 = WireInit(0.U.asTypeOf(io.toTXREQ.cloneType))
val txrsp_s3, txrsp_s4, txrsp_s5 = Wire(io.toTXRSP.cloneType)
val txdat_s3, txdat_s4, txdat_s5 = Wire(io.toTXDAT.cloneType)
val d_s3, d_s4, d_s5 = Wire(io.toSourceD.cloneType)
/* ======== Stage 2 ======== */
val task_s2 = io.taskFromArb_s2
/* ======== Stage 3 ======== */
val task_s3 = RegInit(0.U.asTypeOf(Valid(new TaskBundle)))
task_s3.valid := task_s2.valid
when (task_s2.valid) {
task_s3.bits := task_s2.bits
}
/* ======== Stage 3 Request Decoding ======== */
val dirResult_s3 = io.dirResp_s3
val meta_s3 = dirResult_s3.meta
val req_s3 = task_s3.bits
val cmoHitInvalid = io.cmoAllBlock.getOrElse(false.B) && (meta_s3.state === INVALID)
val mshr_req_s3 = req_s3.mshrTask
val sink_req_s3 = !mshr_req_s3
val sinkA_req_s3 = !mshr_req_s3 && req_s3.fromA
val sinkB_req_s3 = !mshr_req_s3 && req_s3.fromB
val sinkC_req_s3 = !mshr_req_s3 && req_s3.fromC
val req_acquire_s3 = sinkA_req_s3 && (req_s3.opcode === AcquireBlock || req_s3.opcode === AcquirePerm)
val req_acquireBlock_s3 = sinkA_req_s3 && req_s3.opcode === AcquireBlock
val req_prefetch_s3 = sinkA_req_s3 && req_s3.opcode === Hint
val req_get_s3 = sinkA_req_s3 && req_s3.opcode === Get
val req_cbo_clean_s3 = sinkA_req_s3 && req_s3.opcode === CBOClean
val req_cbo_flush_s3 = sinkA_req_s3 && req_s3.opcode === CBOFlush && !cmoHitInvalid
val req_cbo_inval_s3 = sinkA_req_s3 && req_s3.opcode === CBOInval
val mshr_grant_s3 = mshr_req_s3 && req_s3.fromA && (req_s3.opcode === Grant || req_s3.opcode === GrantData)
val mshr_grantdata_s3 = mshr_req_s3 && req_s3.fromA && req_s3.opcode === GrantData
val mshr_accessackdata_s3 = mshr_req_s3 && req_s3.fromA && req_s3.opcode === AccessAckData
val mshr_hintack_s3 = mshr_req_s3 && req_s3.fromA && req_s3.opcode === HintAck
val mshr_cmoresp_s3 = mshr_req_s3 && req_s3.fromA && req_s3.opcode === CBOAck
val mshr_snpResp_s3 = mshr_req_s3 && req_s3.toTXRSP && req_s3.chiOpcode.get === SnpResp
val mshr_snpRespFwded_s3 = mshr_req_s3 && req_s3.toTXRSP && req_s3.chiOpcode.get === SnpRespFwded
val mshr_snpRespData_s3 = mshr_req_s3 && req_s3.toTXDAT && req_s3.chiOpcode.get === SnpRespData
val mshr_snpRespDataPtl_s3 = mshr_req_s3 && req_s3.toTXDAT && req_s3.chiOpcode.get === SnpRespDataPtl
val mshr_snpRespDataFwded_s3 = mshr_req_s3 && req_s3.toTXDAT && req_s3.chiOpcode.get === SnpRespDataFwded
val mshr_snpRespX_s3 = mshr_snpResp_s3 || mshr_snpRespFwded_s3
val mshr_snpRespDataX_s3 = mshr_snpRespData_s3 || mshr_snpRespDataPtl_s3 || mshr_snpRespDataFwded_s3
val mshr_dct_s3 = mshr_req_s3 && req_s3.toTXDAT && req_s3.chiOpcode.get === CompData
val mshr_writeCleanFull_s3 = mshr_req_s3 && req_s3.toTXREQ && req_s3.chiOpcode.get === WriteCleanFull
val mshr_writeBackFull_s3 = mshr_req_s3 && req_s3.toTXREQ && req_s3.chiOpcode.get === WriteBackFull
val mshr_writeEvictFull_s3 = mshr_req_s3 && req_s3.toTXREQ && req_s3.chiOpcode.get === WriteEvictFull
val mshr_writeEvictOrEvict_s3 = mshr_req_s3 && req_s3.toTXREQ &&
afterIssueEbOrElse(req_s3.chiOpcode.get === WriteEvictOrEvict, false.B)
val mshr_evict_s3 = mshr_req_s3 && req_s3.toTXREQ && req_s3.chiOpcode.get === Evict
val mshr_cbWrData_s3 = mshr_req_s3 && req_s3.toTXDAT && req_s3.chiOpcode.get === CopyBackWrData
val meta_has_clients_s3 = meta_s3.clients.orR
val req_needT_s3 = needT(req_s3.opcode, req_s3.param)
val cmo_cbo_retention_s3 = req_cbo_clean_s3 || req_cbo_flush_s3
val cmo_cbo_s3 = req_cbo_clean_s3 || req_cbo_flush_s3 || req_cbo_inval_s3
val cache_alias = req_acquire_s3 && dirResult_s3.hit && meta_s3.clients(0) &&
meta_s3.alias.getOrElse(0.U) =/= req_s3.alias.getOrElse(0.U)
// *NOTICE: 'nestable_*' must not be used in A Channel related logics.
val nestable_dirResult_s3 = Wire(chiselTypeOf(dirResult_s3))
val nestable_meta_s3 = nestable_dirResult_s3.meta
val nestable_meta_has_clients_s3 = nestable_dirResult_s3.meta.clients.orR
nestable_dirResult_s3 := dirResult_s3
when (req_s3.snpHitRelease) {
// Meta states from MSHRs are taken as the directory result here.
// Therefore the lookup is treated as a hit whenever a release is nested,
// even though the directory itself always misses on the release that follows a cache replacement.
nestable_dirResult_s3.hit := req_s3.snpHitReleaseMeta.state =/= INVALID
nestable_dirResult_s3.meta := req_s3.snpHitReleaseMeta
nestable_dirResult_s3.set := req_s3.set
nestable_dirResult_s3.tag := req_s3.tag
}
val tagError_s3 = io.dirResp_s3.error || meta_s3.tagErr
val dataError_s3 = meta_s3.dataErr
val l2TagError_s3 = io.dirResp_s3.error
val l2Error_s3 = io.dirResp_s3.error || mshr_req_s3 && req_s3.dataCheckErr.getOrElse(false.B)
val mshr_refill_s3 = mshr_accessackdata_s3 || mshr_hintack_s3 || mshr_grant_s3 // needs refill to L2 DS
val replResp_valid_s3 = io.replResp.valid
val replResp_valid_s4 = RegNext(io.replResp.valid, init = false.B)
val replResp_valid_hold = replResp_valid_s3 || replResp_valid_s4
val retry = replResp_valid_hold && io.replResp.bits.retry
val need_repl = replResp_valid_hold && io.replResp.bits.meta.state =/= INVALID && req_s3.replTask
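// replResp carries the way chosen by the replacer for an MSHR refill:
//   retry     - no way could be allocated this time, so the refill task must be replayed
//   need_repl - the victim way still holds a valid line whose data must be read out for release
// The valid is stretched across s3/s4 (replResp_valid_hold) so that a response arriving
// one cycle before the refill task reaches s3 is still observed.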
/* ======== Interact with MSHR ======== */
// *NOTICE: A Channel requests should be blocked by RequestBuffer when MSHR nestable,
// 'nestable_*' must not be used here.
val acquire_on_miss_s3 = req_acquire_s3 || req_prefetch_s3 || req_get_s3
val acquire_on_hit_s3 = meta_s3.state === BRANCH && req_needT_s3 && !req_prefetch_s3
val need_acquire_s3_a = req_s3.fromA && (Mux(
dirResult_s3.hit,
acquire_on_hit_s3,
acquire_on_miss_s3
) || cmo_cbo_s3)
val need_probe_s3_a = dirResult_s3.hit && meta_has_clients_s3 && (
req_get_s3 && (meta_s3.state === TRUNK) ||
req_cbo_clean_s3 && (meta_s3.state === TRUNK) ||
req_cbo_flush_s3 ||
req_cbo_inval_s3
)
val need_release_s3_a = dirResult_s3.hit && (
req_cbo_clean_s3 && (!need_probe_s3_a && meta_s3.dirty) ||
req_cbo_flush_s3 && (isValid(meta_s3.state)) ||
req_cbo_inval_s3 && (isValid(meta_s3.state))
)
val need_cmoresp_s3_a = cmo_cbo_s3
val need_compack_s3_a = !cmo_cbo_s3
val need_mshr_s3_a = need_acquire_s3_a || need_probe_s3_a || cache_alias
/**
* 1. For SnpOnce/SnpOnceFwd, SnpQuery, and SnpStash, only the latest copy of the cacheline is needed without changing
* the state of the cacheline at the snoopee. Therefore L2 should only send pProbe toT (to get the latest copy)
* when the state in L2 is TRUNK
* 2. For SnpClean/SnpCleanFwd, SnpShared/SnpSharedFwd, SnpNotSharedDirty/SnpNotSharedDirtyFwd, and SnpCleanShared,
* the snooped cacheline should be degraded into BRANCH state because there is no SharedDirty state or Owner
* state (of MOESI) in CoupledL2. Therefore L2 should only send pProbe toB to degrade upper clients when the
* state in L2 is TRUNK
* 3. For SnpUnique/SnpUniqueFwd/SnpUniqueStash, SnpCleanInvalid, SnpMakeInvalid/SnpMakeInvalidStash, the snooped
* cacheline should be degraded into INVALID state. Therefore L2 should only send pProbe toN to degrade upper
* clients when the state in L2 is TRUNK or BRANCH with clients.orR = 1
* 4. When tagErr (NDERR) is set, never forward data; the snoopee should invalidate its cache state
*
*/
// whether L2 should do forwarding or not
val expectFwd = isSnpXFwd(req_s3.chiOpcode.get)
val canFwd = nestable_dirResult_s3.hit && !(nestable_dirResult_s3.meta.tagErr || nestable_dirResult_s3.error)
val doFwd = expectFwd && canFwd
val need_pprobe_s3_b_snpStable = req_s3.fromB && (
isSnpOnceX(req_s3.chiOpcode.get) || isSnpQuery(req_s3.chiOpcode.get) || isSnpStashX(req_s3.chiOpcode.get)
) && dirResult_s3.hit && meta_s3.state === TRUNK && meta_has_clients_s3
val need_pprobe_s3_b_snpToB = req_s3.fromB && (
isSnpToB(req_s3.chiOpcode.get) ||
req_s3.chiOpcode.get === SnpCleanShared
) && dirResult_s3.hit && meta_s3.state === TRUNK && meta_has_clients_s3
val need_pprobe_s3_b_snpToN = req_s3.fromB && (
isSnpUniqueX(req_s3.chiOpcode.get) ||
req_s3.chiOpcode.get === SnpCleanInvalid ||
isSnpMakeInvalidX(req_s3.chiOpcode.get)
) && dirResult_s3.hit && meta_has_clients_s3
val need_pprobe_s3_b_snpNDERR = req_s3.fromB && tagError_s3 && dirResult_s3.hit
val need_pprobe_s3_b = need_pprobe_s3_b_snpStable || need_pprobe_s3_b_snpToB || need_pprobe_s3_b_snpToN || need_pprobe_s3_b_snpNDERR
val need_dct_s3_b = doFwd // DCT
val need_mshr_s3_b = need_pprobe_s3_b || need_dct_s3_b
val need_mshr_s3 = need_mshr_s3_a || need_mshr_s3_b
/* Signals to MSHR Ctl */
val alloc_state = WireInit(0.U.asTypeOf(new FSMState()))
alloc_state.elements.foreach(_._2 := true.B)
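// FSMState fields are "false-as-pending": default every s_*/w_* bit to true (nothing
// left to do), then clear the bits for the sub-actions this request actually needs
// (see the alloc_state assignments near the end of this module).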
io.toMSHRCtl.mshr_alloc_s3.valid := task_s3.valid && !mshr_req_s3 && need_mshr_s3
io.toMSHRCtl.mshr_alloc_s3.bits.dirResult := nestable_dirResult_s3
io.toMSHRCtl.mshr_alloc_s3.bits.state := alloc_state
io.toMSHRCtl.mshr_alloc_s3.bits.task match { case task =>
task := req_s3
task.bufIdx := 0.U(bufIdxBits.W)
task.mshrTask := false.B
task.aliasTask.foreach(_ := cache_alias)
task.wayMask := 0.U(cacheParams.ways.W)
// TODO
}
/* ======== Resps to SinkA/B/C Reqs ======== */
val sink_resp_s3 = WireInit(0.U.asTypeOf(Valid(new TaskBundle)))
val sink_resp_s3_a_promoteT = dirResult_s3.hit && isT(meta_s3.state)
// whether L2 should respond data to HN or not
val retToSrc = req_s3.retToSrc.getOrElse(false.B)
val neverRespData = isSnpMakeInvalidX(req_s3.chiOpcode.get) ||
isSnpStashX(req_s3.chiOpcode.get) ||
isSnpQuery(req_s3.chiOpcode.get) ||
req_s3.chiOpcode.get === SnpOnceFwd ||
req_s3.chiOpcode.get === SnpUniqueFwd
val shouldRespData_dirty = nestable_dirResult_s3.hit &&
(nestable_meta_s3.state === TIP || nestable_meta_s3.state === TRUNK) && nestable_meta_s3.dirty
// For SnpOnce, always respond with data under UC when L1 is BRANCH
val shouldRespData_once = nestable_dirResult_s3.hit &&
nestable_meta_s3.state === TIP && !nestable_meta_s3.dirty &&
req_s3.chiOpcode.get === SnpOnce
// For forwarding snoops, if the RetToSrc value is 1, must return a copy if the cache line is Dirty or Clean.
val shouldRespData_retToSrc_fwd = nestable_dirResult_s3.hit && retToSrc && isSnpXFwd(req_s3.chiOpcode.get)
// For non-forwarding snoops, if the RetToSrc value is 1, must return a copy if the cache line is Shared Clean and
// the snoopee retains a copy of the cache line.
val shouldRespData_retToSrc_nonFwd = nestable_dirResult_s3.hit && retToSrc && nestable_meta_s3.state === BRANCH && (
req_s3.chiOpcode.get === SnpOnce ||
req_s3.chiOpcode.get === SnpUnique ||
isSnpToBNonFwd(req_s3.chiOpcode.get)
)
val shouldRespData = shouldRespData_dirty || shouldRespData_once || shouldRespData_retToSrc_fwd || shouldRespData_retToSrc_nonFwd
val doRespData = shouldRespData && !neverRespData
dontTouch(doRespData)
dontTouch(shouldRespData)
dontTouch(neverRespData)
// On a directory hit under a non-invalidating snoop nesting WriteCleanFull,
// excluding SnpStashX and SnpQuery:
// 1. SnpCleanShared[1-sink_resp] : UD -> UC_PD, UC -> UC, SC -> SC
// 2. SnpOnce*[2-sink_resp]       : UD -> SC_PD, UC -> SC, SC -> SC
// 3. SnpToB                      : UD -> SC_PD, UC -> SC, SC -> SC
//
// *NOTE[1-sink_resp]:
// UD -> SC transitions are not used by WriteCleanFull without a nesting snoop;
// only a UD -> UC update can be observed in the directory in that case.
// Therefore there is no need to observe cache state from nested WriteCleanFull MSHRs,
// only to extract PassDirty from them.
//
// *NOTE[2-sink_resp]:
// UD -> UC transitions are not allowed on SnpOnce*, while UD -> UD and UC -> UC are.
// On SnpOnce*, UD/UC turn into SC under a nested WriteClean, on which the directory must hit;
// otherwise the cache state is fast-forwarded to I by default.
// The directory may miss after multiple snoops nesting a WriteClean, which indicates a lost UD.
//
// *NOTE[tagErr/NDERR]:
// ALL -> I, the snoopee invalidates its local copy.
//
// Resp[2:0] = {PassDirty, CacheState[1:0]}
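// Encoding example (a sketch in terms of CHICohStates.setPD, used below):
// setPD(SC, true.B) produces SC_PD, i.e. the snoopee retains the line as Shared
// Clean while passing the dirty copy back with the response.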
val respCacheState = WireInit(I)
val respPassDirty = doRespData && nestable_dirResult_s3.hit && isT(nestable_meta_s3.state) && nestable_meta_s3.dirty &&
(req_s3.chiOpcode.get =/= SnpOnce || req_s3.snpHitRelease) &&
!(isSnpStashX(req_s3.chiOpcode.get) || isSnpQuery(req_s3.chiOpcode.get))
when (nestable_dirResult_s3.hit && !tagError_s3) {
when (isSnpToB(req_s3.chiOpcode.get)) {
respCacheState := Mux(req_s3.snpHitReleaseToInval, I, SC)
}
when (isSnpOnceX(req_s3.chiOpcode.get) || isSnpStashX(req_s3.chiOpcode.get) || isSnpQuery(req_s3.chiOpcode.get)) {
/**
* NOTICE: On Stash and Query,
* the cache state must remain unchanged across nested copy-back writes.
*/
respCacheState := Mux(
nestable_meta_s3.state === BRANCH,
SC,
Mux(nestable_meta_s3.dirty, UD, UC)
)
}
when (isSnpOnceX(req_s3.chiOpcode.get)) {
// On SnpOnce/SnpOnceFwd nesting WriteCleanFull, turn UD to SC
when (req_s3.snpHitReleaseToClean && nestable_meta_s3.dirty) {
respCacheState := SC
}
// On SnpOnce/SnpOnceFwd nesting WriteBack*/WriteEvict*, turn UD to I
when (req_s3.snpHitReleaseToInval) {
respCacheState := I
}
}
when (req_s3.chiOpcode.get === SnpCleanShared) {
respCacheState := Mux(isT(nestable_meta_s3.state), UC, SC)
}
}
// FwdState[2: 0] = {PassDirty, CacheState[1: 0]}
val fwdCacheState = WireInit(I)
val fwdPassDirty = WireInit(false.B)
when (nestable_dirResult_s3.hit) {
when (isSnpToBFwd(req_s3.chiOpcode.get)) {
fwdCacheState := Mux(req_s3.snpHitReleaseToInval, I, SC)
}
when (req_s3.chiOpcode.get === SnpUniqueFwd) {
when (nestable_meta_s3.state === TIP && nestable_meta_s3.dirty) {
fwdCacheState := UD
fwdPassDirty := true.B
}.otherwise {
fwdCacheState := UC
}
}
}
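// fwdState describes the cache state granted to the original requester by the
// forwarded CompData (DCT); only SnpUniqueFwd may pass dirty ownership
// (fwdPassDirty) straight to the requester.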
val sink_resp_s3_b_meta = MetaEntry()
val sink_resp_s3_b_metaWen = Wire(Bool())
sink_resp_s3.valid := task_s3.valid && !mshr_req_s3 && !need_mshr_s3
sink_resp_s3.bits := task_s3.bits
sink_resp_s3.bits.mshrId := (1 << (mshrBits-1)).U + sink_resp_s3.bits.sourceId
when (req_s3.fromA) {
sink_resp_s3.bits.opcode := odOpGen(req_s3.opcode)
sink_resp_s3.bits.param := Mux(
req_acquire_s3,
Mux(req_s3.param === NtoB && !sink_resp_s3_a_promoteT, toB, toT),
0.U // reserved
)
}.elsewhen (req_s3.fromB) {
sink_resp_s3.bits.opcode := 0.U
sink_resp_s3.bits.param := 0.U
sink_resp_s3.bits.tgtID.foreach(_ := task_s3.bits.srcID.get)
sink_resp_s3.bits.srcID.foreach(_ := task_s3.bits.tgtID.get) // TODO: srcID should be fixed. FIX THIS!!!
sink_resp_s3.bits.txnID.foreach(_ := task_s3.bits.txnID.get)
sink_resp_s3.bits.dbID.foreach(_ := 0.U)
sink_resp_s3.bits.pCrdType.foreach(_ := 0.U) // TODO
sink_resp_s3.bits.chiOpcode.foreach(_ := MuxLookup(
Cat(doFwd, doRespData),
SnpResp
)(Seq(
Cat(false.B, false.B) -> SnpResp,
Cat(true.B, false.B) -> SnpRespFwded,
Cat(false.B, true.B) -> SnpRespData, // ignore SnpRespDataPtl for now
Cat(true.B, true.B) -> SnpRespDataFwded
)))
sink_resp_s3.bits.resp.foreach(_ := setPD(respCacheState, respPassDirty && doRespData))
sink_resp_s3.bits.fwdState.foreach(_ := setPD(fwdCacheState, fwdPassDirty))
sink_resp_s3.bits.txChannel := Cat(doRespData, !doRespData, false.B) // TODO: parameterize this
sink_resp_s3.bits.size := log2Ceil(blockBytes).U
sink_resp_s3.bits.meta := sink_resp_s3_b_meta
sink_resp_s3.bits.metaWen := sink_resp_s3_b_metaWen
}.otherwise { // req_s3.fromC
sink_resp_s3.bits.opcode := ReleaseAck
sink_resp_s3.bits.param := 0.U // param of ReleaseAck must be 0
}
val source_req_s3 = Wire(new TaskBundle)
source_req_s3 := Mux(sink_resp_s3.valid, sink_resp_s3.bits, req_s3)
source_req_s3.isKeyword.foreach(_ := req_s3.isKeyword.getOrElse(false.B))
/* ======== Interact with DS ======== */
val data_s3 = Mux(io.releaseBufResp_s3.valid, io.releaseBufResp_s3.bits.data, io.refillBufResp_s3.bits.data)
val c_releaseData_s3 = io.bufResp.data.asUInt
val hasData_s3_tl = source_req_s3.opcode(0) // whether to respond data to TileLink-side
val hasData_s3_chi = source_req_s3.toTXDAT // whether to respond data to CHI-side
val hasData_s3 = hasData_s3_tl || hasData_s3_chi
val need_data_a = dirResult_s3.hit && (req_get_s3 || req_acquireBlock_s3)
val need_data_b = sinkB_req_s3 && (doRespData || doFwd || nestable_dirResult_s3.hit && nestable_meta_s3.state === TRUNK)
val need_data_mshr_repl = mshr_refill_s3 && need_repl && !retry
val need_data_cmo = cmo_cbo_s3 && nestable_dirResult_s3.hit && nestable_meta_s3.dirty
val ren = need_data_a || need_data_b || need_data_mshr_repl || need_data_cmo
val wen_c = sinkC_req_s3 && isParamFromT(req_s3.param) && req_s3.opcode(0) && dirResult_s3.hit
val wen_mshr = req_s3.dsWen && (
mshr_snpRespX_s3 || mshr_snpRespDataX_s3 ||
mshr_writeCleanFull_s3 || mshr_writeBackFull_s3 ||
mshr_writeEvictFull_s3 || mshr_writeEvictOrEvict_s3 || mshr_evict_s3 ||
mshr_refill_s3 && !need_repl && !retry
)
val wen = wen_c || wen_mshr
// This is to let io.toDS.req_s3.valid hold for 2 cycles (see DataStorage for details)
val task_s3_valid_hold2 = RegInit(0.U(2.W))
when(task_s2.valid) {
task_s3_valid_hold2 := "b11".U
}.otherwise {
task_s3_valid_hold2 := task_s3_valid_hold2 >> 1.U
}
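// Loading "b11" and shifting right by one each idle cycle keeps req_s3.valid
// asserted for exactly two consecutive cycles per task, matching DataStorage's
// two-cycle request protocol mentioned above.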
io.toDS.en_s3 := task_s3.valid && (ren || wen)
io.toDS.req_s3.valid := task_s3_valid_hold2(0) && (ren || wen)
io.toDS.req_s3.bits.way := Mux(
mshr_refill_s3 && req_s3.replTask,
io.replResp.bits.way,
Mux(mshr_req_s3, req_s3.way, dirResult_s3.way)
)
io.toDS.req_s3.bits.set := Mux(mshr_req_s3, req_s3.set, dirResult_s3.set)
io.toDS.req_s3.bits.wen := wen
io.toDS.wdata_s3.data := Mux(
!mshr_req_s3,
c_releaseData_s3,
Mux(
req_s3.useProbeData,
io.releaseBufResp_s3.bits.data,
io.refillBufResp_s3.bits.data
)
)
/* ======== Read DS and store data in Buffer ======== */
// A: need_write_releaseBuf indicates that DS should be read and the data will be written into ReleaseBuffer
// need_write_releaseBuf is assigned true when:
// inner clients' data is needed, but whether the client will ack with data is uncertain, so DS data is also needed as a fallback
val need_write_releaseBuf = need_probe_s3_a ||
cache_alias ||
need_data_b && need_mshr_s3_b ||
need_data_mshr_repl ||
need_data_cmo
// B: need_write_refillBuf indicates that DS should be read and the data will be written into RefillBuffer
// when L1 AcquireBlock but L2 AcquirePerm to L3, we need to prepare data for L1
// but this no longer happens, because we always send AcquireBlock downward for an L1 AcquireBlock
val need_write_refillBuf = false.B
/* ======== Write Directory ======== */
// B, C: Requests from Channel B (RXSNP) and Channel C would only downgrade permission,
// so there is no need to use 'nestable_*'.
val metaW_valid_s3_a = sinkA_req_s3 && !need_mshr_s3_a && !req_get_s3 && !req_prefetch_s3 && !cmo_cbo_s3 // get & prefetch that hit will not write meta
// Also write directory on:
// 1. SnpOnce nesting WriteCleanFull under UD (SnpOnceFwd always needs MSHR) for UD -> SC
val metaW_valid_s3_b = sinkB_req_s3 && !need_mshr_s3_b && dirResult_s3.hit &&
(!isSnpOnce(req_s3.chiOpcode.get) || (req_s3.snpHitReleaseToClean && req_s3.snpHitReleaseMeta.dirty)) &&
!isSnpStashX(req_s3.chiOpcode.get) && !isSnpQuery(req_s3.chiOpcode.get) && (
meta_s3.state === TIP || meta_s3.state === BRANCH && isSnpToN(req_s3.chiOpcode.get)
)
val metaW_valid_s3_c = sinkC_req_s3 && dirResult_s3.hit
val metaW_valid_s3_mshr = mshr_req_s3 && req_s3.metaWen && !(mshr_refill_s3 && retry)
val metaW_valid_s3_cmo = req_cbo_inval_s3 && dirResult_s3.hit
require(clientBits == 1)
val metaW_s3_a_alias = Mux(
req_get_s3 || req_prefetch_s3,
meta_s3.alias.getOrElse(0.U),
req_s3.alias.getOrElse(0.U)
)
val metaW_s3_a = MetaEntry(
dirty = meta_s3.dirty,
state = Mux(req_needT_s3 || sink_resp_s3_a_promoteT, TRUNK, meta_s3.state),
clients = Fill(clientBits, Mux(l2TagError_s3, false.B, true.B)),
alias = Some(metaW_s3_a_alias),
accessed = true.B,
tagErr = meta_s3.tagErr,
dataErr = meta_s3.dataErr
)
val metaW_s3_b = Mux(isSnpToN(req_s3.chiOpcode.get), MetaEntry(),
MetaEntry(
dirty = false.B,
state = Mux(req_s3.chiOpcode.get === SnpCleanShared, meta_s3.state, BRANCH),
clients = meta_s3.clients,
alias = meta_s3.alias,
accessed = meta_s3.accessed,
tagErr = meta_s3.tagErr,
dataErr = meta_s3.dataErr
)
)
val metaW_s3_c = MetaEntry(
dirty = meta_s3.dirty || wen_c,
state = Mux(isParamFromT(req_s3.param), TIP, meta_s3.state),
clients = Fill(clientBits, !isToN(req_s3.param)),
alias = meta_s3.alias,
accessed = meta_s3.accessed,
tagErr = Mux(wen_c, req_s3.denied, meta_s3.tagErr),
dataErr = Mux(wen_c, req_s3.corrupt, meta_s3.dataErr) // update error when write DS
)
// use merge_meta if mergeA
val metaW_s3_mshr = WireInit(Mux(req_s3.mergeA, req_s3.aMergeTask.meta, req_s3.meta))
metaW_s3_mshr.tagErr := req_s3.denied
metaW_s3_mshr.dataErr := req_s3.corrupt
val metaW_s3_cmo = MetaEntry() // invalidate the block
val metaW_way = Mux(
mshr_refill_s3 && req_s3.replTask,
io.replResp.bits.way, // grant always use replResp way
Mux(mshr_req_s3, req_s3.way, dirResult_s3.way)
)
// dir write signals in s3
val metaWReq_s3 = Wire(Valid(new MetaWrite()))
val tagWReq_s3 = Wire(Valid(new TagWrite()))
metaWReq_s3.valid := !resetFinish || task_s3.valid && (
metaW_valid_s3_a || metaW_valid_s3_b || metaW_valid_s3_c || metaW_valid_s3_mshr || metaW_valid_s3_cmo
)
metaWReq_s3.bits.set := Mux(resetFinish, req_s3.set, resetIdx)
metaWReq_s3.bits.wayOH := Mux(resetFinish, UIntToOH(metaW_way), Fill(cacheParams.ways, true.B))
metaWReq_s3.bits.wmeta := Mux(
resetFinish,
ParallelPriorityMux(
Seq(metaW_valid_s3_a, metaW_valid_s3_b, metaW_valid_s3_c, metaW_valid_s3_mshr, metaW_valid_s3_cmo),
Seq(metaW_s3_a, metaW_s3_b, metaW_s3_c, metaW_s3_mshr, metaW_s3_cmo)
),
MetaEntry()
)
tagWReq_s3.valid := task_s3.valid && req_s3.tagWen && mshr_refill_s3 && !retry
tagWReq_s3.bits.set := req_s3.set
tagWReq_s3.bits.way := Mux(mshr_refill_s3 && req_s3.replTask, io.replResp.bits.way, req_s3.way)
tagWReq_s3.bits.wtag := req_s3.tag
sink_resp_s3_b_metaWen := metaW_valid_s3_b
sink_resp_s3_b_meta := metaW_s3_b
/* ======== Interact with Channels (SourceD/TXREQ/TXRSP/TXDAT) ======== */
val chnl_fire_s3 = d_s3.fire || txreq_s3.fire || txrsp_s3.fire || txdat_s3.fire
val req_drop_s3 = !need_write_releaseBuf && (
!mshr_req_s3 && need_mshr_s3 || chnl_fire_s3
) || mshr_refill_s3 && retry
val data_unready_s3 = hasData_s3 && !mshr_req_s3
val data_unready_s3_tl = hasData_s3_tl && !mshr_req_s3
/**
* The combinational logic path of
* Directory metaAll
* -> Directory response
* -> MainPipe judging whether to respond data
* is too long. Therefore the sinkB response may be latched to s4 for better timing.
*/
val d_s3_latch = true
val txdat_s3_latch = true
val isD_s3 = Mux(
mshr_req_s3,
mshr_cmoresp_s3 && !io.cmoAllBlock.getOrElse(false.B) || mshr_refill_s3 && !retry,
req_s3.fromC || req_s3.fromA && !need_mshr_s3_a && !data_unready_s3_tl && req_s3.opcode =/= Hint && !io.cmoAllBlock.getOrElse(false.B)
)
val isD_s3_ready = Mux(
mshr_req_s3,
mshr_cmoresp_s3 && !io.cmoAllBlock.getOrElse(false.B) || mshr_refill_s3 && !retry,
req_s3.fromC || req_s3.fromA && !need_mshr_s3_a && !data_unready_s3_tl && req_s3.opcode =/= Hint && !d_s3_latch.B
)
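// With d_s3_latch/txdat_s3_latch set to true, sink (non-MSHR) responses never fire
// at s3: the *_ready variants above gate them off, so they are latched and issued
// from s4 instead, cutting the long dirResp -> response combinational path
// described in the comment above.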
val isTXRSP_s3 = Mux(
mshr_req_s3,
mshr_snpRespX_s3,
req_s3.fromB && !need_mshr_s3 && !hasData_s3
)
val isTXDAT_s3 = Mux(
mshr_req_s3,
mshr_snpRespDataX_s3 || mshr_cbWrData_s3 || mshr_dct_s3,
req_s3.fromB && !need_mshr_s3 &&
(doRespData && (!data_unready_s3 || req_s3.snpHitRelease && req_s3.snpHitReleaseWithData))
)
val isTXDAT_s3_ready = Mux(
mshr_req_s3,
mshr_snpRespDataX_s3 || mshr_cbWrData_s3 || mshr_dct_s3,
req_s3.fromB && !need_mshr_s3 && !txdat_s3_latch.B &&
(doRespData && (!data_unready_s3 || req_s3.snpHitRelease && req_s3.snpHitReleaseWithData))
)
val isTXREQ_s3 = mshr_req_s3 && (mshr_writeBackFull_s3 || mshr_writeCleanFull_s3 ||
mshr_writeEvictFull_s3 || mshr_writeEvictOrEvict_s3 || mshr_evict_s3)
txreq_s3.valid := task_s3.valid && isTXREQ_s3
txrsp_s3.valid := task_s3.valid && isTXRSP_s3
txdat_s3.valid := task_s3.valid && isTXDAT_s3_ready
d_s3.valid := task_s3.valid && isD_s3_ready
txreq_s3.bits := source_req_s3.toCHIREQBundle()
txrsp_s3.bits := source_req_s3
txdat_s3.bits.task := source_req_s3
txdat_s3.bits.data.data := data_s3
d_s3.bits.task := source_req_s3
d_s3.bits.data.data := data_s3
when (task_s3.valid) {
OneHot.checkOneHot(Seq(isTXREQ_s3, isTXRSP_s3, isTXDAT_s3, isD_s3))
}
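// Sanity check: a valid s3 task targets at most one of the four output channels.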
/* ======== nested writeback ======== */
io.nestedwb.set := req_s3.set
io.nestedwb.tag := req_s3.tag
// This serves as VALID signal
// c_set_dirty is true iff Release has Data
io.nestedwb.c_set_dirty := task_s3.valid && task_s3.bits.fromC && task_s3.bits.opcode === ReleaseData && task_s3.bits.param === TtoN
io.nestedwb.c_set_tip := task_s3.valid && task_s3.bits.fromC && task_s3.bits.opcode === Release && task_s3.bits.param === TtoN
/**
* Snoop nesting happens when:
* 1. snoop nests a copy-back request
* 2. snoop nests a Read/MakeUnique request
*
* *NOTICE: Never allow 'b_inv_dirty' on SnpStash*, SnpQuery and other future snoops that would
* leave cache line state untouched.
* Never allow 'b_inv_dirty' on SnpOnce* nesting WriteCleanFull, which would end with SC.
*/
io.nestedwb.b_inv_dirty := task_s3.valid && task_s3.bits.fromB && source_req_s3.snpHitReleaseToInval &&
!(isSnpStashX(req_s3.chiOpcode.get) || isSnpQuery(req_s3.chiOpcode.get))
io.nestedwb.b_toB.foreach(_ :=
task_s3.valid && task_s3.bits.fromB && source_req_s3.metaWen && source_req_s3.meta.state === BRANCH
)
io.nestedwb.b_toN.foreach(_ :=
task_s3.valid && task_s3.bits.fromB && source_req_s3.metaWen && source_req_s3.meta.state === INVALID
)
io.nestedwb.b_toClean.foreach(_ :=
task_s3.valid && task_s3.bits.fromB && source_req_s3.metaWen && !source_req_s3.meta.dirty
)
io.nestedwbData := c_releaseData_s3.asTypeOf(new DSBlock)
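// nestedwb broadcasts the line (set/tag) and the state change of the task in s3 so
// that MSHRs working on the same address can update their view of the nested
// writeback (e.g. a Release setting dirty, or a snoop invalidating the line).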
// TODO: add nested writeback from Snoop
/* ======== prefetch ======== */
io.prefetchTrain.foreach {
train =>
// train on request(with needHint flag) miss or hit on prefetched block
// trigger train also in a_merge here
train.valid := task_s3.valid && ((req_acquire_s3 || req_get_s3) && req_s3.needHint.getOrElse(false.B) &&
(!dirResult_s3.hit || meta_s3.prefetch.get) || req_s3.mergeA)
train.bits.tag := req_s3.tag
train.bits.set := req_s3.set
train.bits.needT := Mux(
req_s3.mergeA,
needT(req_s3.aMergeTask.opcode, req_s3.aMergeTask.param),
req_needT_s3
)
train.bits.source := Mux(req_s3.mergeA, req_s3.aMergeTask.sourceId, req_s3.sourceId)
train.bits.vaddr.foreach(_ := Mux(req_s3.mergeA, req_s3.aMergeTask.vaddr.getOrElse(0.U), req_s3.vaddr.getOrElse(0.U)))
train.bits.hit := Mux(req_s3.mergeA, true.B, dirResult_s3.hit)
train.bits.prefetched := Mux(req_s3.mergeA, true.B, meta_s3.prefetch.getOrElse(false.B))
train.bits.pfsource := meta_s3.prefetchSrc.getOrElse(PfSource.NoWhere.id.U) // TODO
train.bits.reqsource := req_s3.reqSource
}
/* ======== Stage 4 ======== */
val task_s4 = RegInit(0.U.asTypeOf(Valid(new TaskBundle())))
val taskWDir_s4 = RegInit(0.U.asTypeOf(Valid(new TaskBundle())))
val metaWReq_s4 = RegInit(0.U.asTypeOf(Valid(new MetaWrite())))
val tagWReq_s4 = RegInit(0.U.asTypeOf(Valid(new TagWrite())))
val data_unready_s4 = RegInit(false.B)
val data_s4 = Reg(UInt((blockBytes * 8).W))
val ren_s4 = RegInit(false.B)
val need_write_releaseBuf_s4 = RegInit(false.B)
val isD_s4, isTXREQ_s4, isTXRSP_s4, isTXDAT_s4 = RegInit(false.B)
val tagError_s4 = RegInit(false.B)
val dataError_s4 = RegInit(false.B)
val l2Error_s4 = RegInit(false.B)
val pendingTXDAT_s4 = task_s4.bits.fromB && !task_s4.bits.mshrTask && task_s4.bits.toTXDAT
val pendingD_s4 = task_s4.bits.fromA && !task_s4.bits.mshrTask && (
task_s4.bits.opcode === GrantData || task_s4.bits.opcode === AccessAckData
)
task_s4.valid := task_s3.valid && !req_drop_s3
when (task_s3.valid && !req_drop_s3) {
task_s4.bits := source_req_s3
when (!task_s3.bits.mshrTask && need_mshr_s3) {
task_s4.bits.mshrId := io.fromMSHRCtl.mshr_alloc_ptr
}
data_unready_s4 := data_unready_s3
data_s4 := data_s3
ren_s4 := ren
need_write_releaseBuf_s4 := need_write_releaseBuf
isD_s4 := isD_s3
isTXREQ_s4 := isTXREQ_s3
isTXRSP_s4 := isTXRSP_s3
isTXDAT_s4 := isTXDAT_s3
tagError_s4 := tagError_s3
dataError_s4 := dataError_s3
l2Error_s4 := l2Error_s3
}
taskWDir_s4.valid := task_s3.valid && (metaWReq_s3.valid || tagWReq_s3.valid)
when (task_s3.valid || !resetFinish) {
taskWDir_s4.bits := source_req_s3
metaWReq_s4 := metaWReq_s3
tagWReq_s4 := tagWReq_s3
}
io.metaWReq.valid := metaWReq_s4.valid && (taskWDir_s4.valid || RegNext(!resetFinish))
io.metaWReq.bits := metaWReq_s4.bits
io.tagWReq.valid := tagWReq_s4.valid && (taskWDir_s4.valid || RegNext(!resetFinish))
io.tagWReq.bits := tagWReq_s4.bits
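// Directory writes computed at s3 are registered and issued one cycle later via
// metaWReq_s4 / tagWReq_s4; during reset the same path carries the per-set
// invalidation writes (RegNext(!resetFinish) keeps them valid).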
// for reqs that CANNOT be responded to in MainPipe but need to write releaseBuf/refillBuf,
// we cannot drop them at s3; we must let them go on to s4/s5
val chnl_fire_s4 = d_s4.fire || txreq_s4.fire || txrsp_s4.fire || txdat_s4.fire
val req_drop_s4 = !need_write_releaseBuf_s4 && chnl_fire_s4
val chnl_valid_s4 = task_s4.valid && !RegNext(chnl_fire_s3, false.B)
d_s4.valid := chnl_valid_s4 && isD_s4
txreq_s4.valid := chnl_valid_s4 && isTXREQ_s4
txrsp_s4.valid := chnl_valid_s4 && isTXRSP_s4
txdat_s4.valid := chnl_valid_s4 && isTXDAT_s4
d_s4.bits.task := task_s4.bits
d_s4.bits.data.data := data_s4
txreq_s4.bits := task_s4.bits.toCHIREQBundle()
txrsp_s4.bits := task_s4.bits
txdat_s4.bits.task := task_s4.bits
txdat_s4.bits.data.data := data_s4
/* ======== Stage 5 ======== */
val task_s5 = RegInit(0.U.asTypeOf(Valid(new TaskBundle())))
val ren_s5 = RegInit(false.B)
val data_s5 = Reg(UInt((blockBytes * 8).W))
val need_write_releaseBuf_s5 = RegInit(false.B)
val isD_s5, isTXREQ_s5, isTXRSP_s5, isTXDAT_s5 = RegInit(false.B)
val tagError_s5 = RegInit(false.B)
val dataMetaError_s5 = RegInit(false.B)
val l2TagError_s5 = RegInit(false.B)
task_s5.valid := task_s4.valid && !req_drop_s4
when (task_s4.valid && !req_drop_s4) {
task_s5.bits := task_s4.bits
ren_s5 := ren_s4
data_s5 := data_s4
need_write_releaseBuf_s5 := need_write_releaseBuf_s4
isD_s5 := isD_s4 || pendingD_s4
isTXREQ_s5 := isTXREQ_s4
isTXRSP_s5 := isTXRSP_s4
isTXDAT_s5 := isTXDAT_s4 || pendingTXDAT_s4
tagError_s5 := tagError_s4
dataMetaError_s5 := dataError_s4
l2TagError_s5 := l2Error_s4
}
val rdata_s5 = io.toDS.rdata_s5.data
val dataError_s5 = io.toDS.error_s5 || dataMetaError_s5
val l2Error_s5 = l2TagError_s5 || io.toDS.error_s5
val out_data_s5 = Mux(task_s5.bits.mshrTask || task_s5.bits.snpHitReleaseWithData, data_s5, rdata_s5)
val chnl_fire_s5 = d_s5.fire || txreq_s5.fire || txrsp_s5.fire || txdat_s5.fire
// TODO: check this
val customL1Hint = Module(new CustomL1Hint)
customL1Hint.io.s1 := io.taskInfo_s1
// customL1Hint.io.s2 := task_s2
customL1Hint.io.s3.task := task_s3
// overwrite opcode: if sinkReq can respond, use sink_resp_s3.bits.opcode = Grant/GrantData
customL1Hint.io.s3.task.bits.opcode := Mux(sink_resp_s3.valid, sink_resp_s3.bits.opcode, task_s3.bits.opcode)
// customL1Hint.io.s3.d := d_s3.valid
customL1Hint.io.s3.need_mshr := need_mshr_s3
// customL1Hint.io.s4.task := task_s4
// customL1Hint.io.s4.d := d_s4.valid
// customL1Hint.io.s4.need_write_releaseBuf := need_write_releaseBuf_s4
// customL1Hint.io.s5.task := task_s5
// customL1Hint.io.s5.d := d_s5.valid
// customL1Hint.io.globalCounter := io.globalCounter
// customL1Hint.io.grantBufferHint <> io.grantBufferHint
customL1Hint.io.l1Hint <> io.l1Hint
io.releaseBufWrite.valid := task_s5.valid && need_write_releaseBuf_s5
io.releaseBufWrite.bits.id := task_s5.bits.mshrId
io.releaseBufWrite.bits.data.data := rdata_s5
io.releaseBufWrite.bits.beatMask := Fill(beatSize, true.B)
val chnl_valid_s5 = task_s5.valid && !RegNext(chnl_fire_s4, false.B) && !RegNextN(chnl_fire_s3, 2, Some(false.B))
d_s5.valid := chnl_valid_s5 && isD_s5
txreq_s5.valid := chnl_valid_s5 && isTXREQ_s5
txrsp_s5.valid := chnl_valid_s5 && isTXRSP_s5
txdat_s5.valid := chnl_valid_s5 && isTXDAT_s5
d_s5.bits.task := task_s5.bits
d_s5.bits.task.denied := Mux(task_s5.bits.mshrTask || task_s5.bits.snpHitReleaseWithData, task_s5.bits.denied, tagError_s5)
d_s5.bits.task.corrupt := Mux(task_s5.bits.mshrTask || task_s5.bits.snpHitReleaseWithData, task_s5.bits.corrupt, dataError_s5)
d_s5.bits.data.data := out_data_s5
txreq_s5.bits := task_s5.bits.toCHIREQBundle()
txrsp_s5.bits := task_s5.bits
txrsp_s5.bits.denied := tagError_s5
txdat_s5.bits.task := task_s5.bits
txdat_s5.bits.task.denied := tagError_s5
txdat_s5.bits.task.corrupt := task_s5.bits.corrupt || dataError_s5
txdat_s5.bits.data.data := out_data_s5
/* ======== BlockInfo ======== */
// if s2/s3 might write Dir, we must block s1 sink entrance
// TODO:[Check] it seems that s3 Dir write will naturally block all s1 by dirRead.ready
// (an even stronger blocking than set blocking)
// so we might not need s3 blocking here
def s23Block(chn: Char, s: TaskBundle): Bool = {
val s1 = io.fromReqArb.status_s1
val s1_set = chn match {
case 'a' => s1.a_set
case 'b' => s1.b_set
case 'c' => s1.c_set
case 'g' => s1.g_set
}
s.set === s1_set && !(s.mshrTask && !s.metaWen) // if guaranteed not to write meta, no blocking needed
}
def bBlock(s: TaskBundle, tag: Boolean = false): Bool = {
val s1 = io.fromReqArb.status_s1
// tag true: compare tag + set
s.set === s1.b_set && (if(tag) s.tag === s1.b_tag else true.B)
}
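// Blocking granularity: s23Block compares sets only (conservative), while bBlock
// can also compare tags; s4/s5 tasks (and the s4 directory-write shadow) use
// tag matching since their target line is already fixed, so only same-line
// snoops need to be blocked.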
io.toReqBuf(0) := task_s2.valid && s23Block('a', task_s2.bits)
io.toReqBuf(1) := task_s3.valid && s23Block('a', task_s3.bits)
io.toReqArb.blockC_s1 := task_s2.valid && s23Block('c', task_s2.bits) ||
task_s3.valid && s23Block('c', task_s3.bits) && metaWReq_s3.valid
io.toReqArb.blockB_s1 :=
task_s2.valid && bBlock(task_s2.bits) ||
task_s3.valid && bBlock(task_s3.bits) ||
task_s4.valid && bBlock(task_s4.bits, tag = true) ||
taskWDir_s4.valid && bBlock(taskWDir_s4.bits, tag = true) ||
task_s5.valid && bBlock(task_s5.bits, tag = true)
io.toReqArb.blockA_s1 := false.B
io.toReqArb.blockG_s1 := task_s2.valid && s23Block('g', task_s2.bits) ||
task_s3.valid && s23Block('g', task_s3.bits) && metaWReq_s3.valid
/* ======== Pipeline Status ======== */
require(io.status_vec_toD.size == 3)
io.status_vec_toD(0).valid := task_s3.valid && Mux(
mshr_req_s3,
mshr_refill_s3 && !retry,
true.B
// TODO:
// to consider the grantBuffer capacity conflict alone, only "req_s3.fromC || req_s3.fromA && !need_mshr_s3" is needed,
// but to consider mshrFull, reqs from all channels are needed,
// so this may be overly conservative for the grantBuf capacity conflict
)
io.status_vec_toD(0).bits.channel := task_s3.bits.channel
io.status_vec_toD(1).valid := task_s4.valid && (isD_s4 || pendingD_s4)
io.status_vec_toD(1).bits.channel := task_s4.bits.channel
io.status_vec_toD(2).valid := d_s5.valid
io.status_vec_toD(2).bits.channel := task_s5.bits.channel
// capacity control of TX channels
val tx_task_s3 = Wire(Valid(new TaskBundle))
tx_task_s3.valid := task_s3.valid // TODO: review this
tx_task_s3.bits := source_req_s3
val tasks = Seq(tx_task_s3, task_s4, task_s5)
io.status_vec_toTX.zip(tasks).foreach { case (status, task) =>
status.valid := task.valid
status.bits.channel := task.bits.channel
// To optimize timing, we restrict the blocking condition of TXRSP and TXDAT.
// This may be inaccurate, but it works.
status.bits.txChannel := task.bits.txChannel
status.bits.mshrTask := task.bits.mshrTask
}
/* ======== Other Signals Assignment ======== */
// Initial state assignment
// ! Caution: s_ and w_ are false-as-valid
when (req_s3.fromA) {
alloc_state.s_refill := cmo_cbo_s3
alloc_state.w_replResp := cmo_cbo_s3 || dirResult_s3.hit
// need Acquire downwards
when (need_acquire_s3_a) {
alloc_state.s_acquire := false.B
alloc_state.s_rcompack.get := !need_compack_s3_a
alloc_state.w_grantfirst := false.B
alloc_state.w_grantlast := false.B
alloc_state.w_grant := false.B
}
// need Probe for alias
// need Probe when Get hits on a TRUNK block
when (cache_alias || need_probe_s3_a) {
alloc_state.s_rprobe := false.B
alloc_state.w_rprobeackfirst := false.B
alloc_state.w_rprobeacklast := false.B
}
// need Release dirty block downwards by CMO
when (need_release_s3_a) {
alloc_state.s_release := false.B
alloc_state.w_releaseack := false.B
}
// need CMOAck
when (need_cmoresp_s3_a) {
alloc_state.s_cmoresp := false.B
}
}
when (req_s3.fromB) {
alloc_state.s_probeack := false.B
// need pprobe
when (need_pprobe_s3_b) {
alloc_state.s_pprobe := false.B
alloc_state.w_pprobeackfirst := false.B
alloc_state.w_pprobeacklast := false.B
}
// need forwarding response
when (need_dct_s3_b) {
alloc_state.s_dct.get := false.B
}
}
val d = Seq(d_s5, d_s4, d_s3)
val txreq = Seq(txreq_s5, txreq_s4, txreq_s3)
val txrsp = Seq(txrsp_s5, txrsp_s4, txrsp_s3)
val txdat = Seq(txdat_s5, txdat_s4, txdat_s3)
// DO NOT use TLArbiter because TLArbiter will send continuous beats for the same source
arb(d, io.toSourceD, Some("toSourceD"))
arb(txreq, io.toTXREQ, Some("toTXREQ"))
arb(txrsp, io.toTXRSP, Some("toTXRSP"))
arb(txdat, io.toTXDAT, Some("toTXDAT"))
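// arb (from utility) instantiates a priority arbiter per output; listing s5 first
// gives the oldest stage priority, so responses leave the pipe in age order.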
io.error.valid := task_s5.valid
io.error.bits.valid := l2Error_s5 // if not enableECC, should be false