# Copyright (C) 2020,2021 Luke Kenneth Casson Leighton <lkcl@lkcl.net>
# Copyright (C) 2020 Cole Poirier
# Copyright (C) 2020,2021 Cesar Strauss
# Copyright (C) 2021 Tobias Platen
#
# Original dcache.vhdl Copyright of its authors and licensed
# by IBM under CC-BY 4.0
# https://github.com/antonblanchard/microwatt
#
# Conversion to nmigen funded by NLnet and NGI POINTER under EU Grants
# 871528 and 957073, under the LGPL-v3+ License
"""DCache

based on Anton Blanchard microwatt dcache.vhdl

note that the microwatt dcache wishbone interface expects "stall".
for simplicity at the moment this is hard-coded to cyc & ~ack.
see WB4 spec, p84, section 5.2.1

IMPORTANT: for store, the data is sampled the cycle AFTER the "valid"
is raised.

* https://libre-soc.org/3d_gpu/architecture/set_associative_cache.jpg
* https://bugs.libre-soc.org/show_bug.cgi?id=469
* https://libre-soc.org/irclog-microwatt/%23microwatt.2021-12-07.log.html
  (discussion about brams for ECP5)
"""
from nmutil.gtkw import write_gtkw

import sys
sys.setrecursionlimit(1000000)

from enum import Enum, unique

from nmigen import (Module, Signal, Elaboratable, Cat, Repl, Array, Const,
                    Record)
from nmutil.util import Display
from nmigen.lib.coding import Decoder

from copy import deepcopy
from random import randint, seed

from nmigen_soc.wishbone.bus import Interface

from nmigen.cli import main
from nmutil.iocontrol import RecordObject
from nmigen.utils import log2_int
from soc.experiment.mem_types import (LoadStore1ToDCacheType,
                                      DCacheToLoadStore1Type,
                                      MMUToDCacheType,
                                      DCacheToMMUType)
from soc.experiment.wb_types import (WB_ADDR_BITS, WB_DATA_BITS, WB_SEL_BITS,
                                     WBAddrType, WBDataType, WBSelType,
                                     WBMasterOut, WBSlaveOut,
                                     WBMasterOutVector, WBSlaveOutVector,
                                     WBIOMasterOut, WBIOSlaveOut)

from soc.experiment.cache_ram import CacheRam
from soc.experiment.plru import PLRU, PLRUs
#from nmutil.plru import PLRU, PLRUs

from soc.bus.sram import SRAM
from nmigen import Memory
from nmigen.cli import rtlil

# NOTE: to use cxxsim, export NMIGEN_SIM_MODE=cxxsim from the shell
# Also, check out the cxxsim nmigen branch, and latest yosys from git
from nmutil.sim_tmp_alternative import Simulator

from nmutil.util import wrap

LOG_LENGTH = 0    # Non-zero to enable log data collection


def ispow2(x):
    return (1<<log2_int(x, False)) == x
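# quick check: ispow2(64) -> True; ispow2(48) -> False (48 is not a power of 2)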
class DCacheConfig:
    def __init__(self, LINE_SIZE      = 64,  # Line size in bytes
                       NUM_LINES      = 64,  # Number of lines in a set
                       NUM_WAYS       = 2,   # Number of ways
                       TLB_SET_SIZE   = 64,  # L1 DTLB entries per set
                       TLB_NUM_WAYS   = 2,   # L1 DTLB number of ways
                       TLB_LG_PGSZ    = 12): # L1 DTLB log_2(page_size)
        self.LINE_SIZE    = LINE_SIZE
        self.NUM_LINES    = NUM_LINES
        self.NUM_WAYS     = NUM_WAYS
        self.TLB_SET_SIZE = TLB_SET_SIZE
        self.TLB_NUM_WAYS = TLB_NUM_WAYS
        self.TLB_LG_PGSZ  = TLB_LG_PGSZ
        # BRAM organisation: We never access more than
        # WB_DATA_BITS at a time so to save
        # resources we make the array only that wide, and
        # use consecutive indices to make a cache "line"
        #
        # ROW_SIZE is the width in bytes of the BRAM
        # (based on WB, so 64-bits)
        self.ROW_SIZE = WB_DATA_BITS // 8

        # ROW_PER_LINE is the number of rows (wishbone
        # transactions) in a line
        self.ROW_PER_LINE = self.LINE_SIZE // self.ROW_SIZE

        # BRAM_ROWS is the number of rows in BRAM needed
        # to represent the full dcache
        self.BRAM_ROWS = self.NUM_LINES * self.ROW_PER_LINE

        print ("ROW_SIZE", self.ROW_SIZE)
        print ("ROW_PER_LINE", self.ROW_PER_LINE)
        print ("BRAM_ROWS", self.BRAM_ROWS)
        print ("NUM_WAYS", self.NUM_WAYS)

        # Bit field counts in the address

        # REAL_ADDR_BITS is the number of real address bits that we store
        self.REAL_ADDR_BITS = 56

        # ROW_BITS is the number of bits to select a row
        self.ROW_BITS = log2_int(self.BRAM_ROWS)

        # ROW_LINE_BITS is the number of bits to select
        # a row within a line
        self.ROW_LINE_BITS = log2_int(self.ROW_PER_LINE)

        # LINE_OFF_BITS is the number of bits for
        # the offset in a cache line
        self.LINE_OFF_BITS = log2_int(self.LINE_SIZE)

        # ROW_OFF_BITS is the number of bits for
        # the offset in a row
        self.ROW_OFF_BITS = log2_int(self.ROW_SIZE)

        # INDEX_BITS is the number of bits to
        # select a cache line
        self.INDEX_BITS = log2_int(self.NUM_LINES)

        # SET_SIZE_BITS is the log base 2 of the set size
        self.SET_SIZE_BITS = self.LINE_OFF_BITS + self.INDEX_BITS

        # TAG_BITS is the number of bits of
        # the tag part of the address
        self.TAG_BITS = self.REAL_ADDR_BITS - self.SET_SIZE_BITS

        # TAG_WIDTH is the width in bits of each way of the tag RAM
        self.TAG_WIDTH = self.TAG_BITS + 7 - ((self.TAG_BITS + 7) % 8)
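        # (the expression rounds TAG_BITS up to the next multiple of 8,
        # e.g. 44 -> 48; presumably so each way's tag slot occupies a
        # whole number of bytes in the tag RAM)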
        # WAY_BITS is the number of bits to select a way
        self.WAY_BITS = log2_int(self.NUM_WAYS)
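        # Worked example with the defaults above (LINE_SIZE=64, NUM_LINES=64,
        # NUM_WAYS=2, WB_DATA_BITS=64):
        #   ROW_SIZE=8    ROW_PER_LINE=8    BRAM_ROWS=512
        #   ROW_BITS=9    ROW_LINE_BITS=3   LINE_OFF_BITS=6   ROW_OFF_BITS=3
        #   INDEX_BITS=6  SET_SIZE_BITS=12  TAG_BITS=44       TAG_WIDTH=48
        #   WAY_BITS=1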
        # Example of layout for 32 lines of 64 bytes:
        print (f"""\
  ..  -----------------------|          REAL_ADDR_BITS ({self.REAL_ADDR_BITS})
  ..                  |--------------|  SET_SIZE_BITS ({self.SET_SIZE_BITS})
  ..  tag             |index|  line  |
  ..                  |     |---|    |  ROW_LINE_BITS ({self.ROW_LINE_BITS})
  ..                  |     |--- - --|  LINE_OFF_BITS ({self.LINE_OFF_BITS})
  ..                  |         |- --|  ROW_OFF_BITS  ({self.ROW_OFF_BITS})
  ..                  |----- ---|    |  ROW_BITS      ({self.ROW_BITS})
  ..                  |-----|        |  INDEX_BITS    ({self.INDEX_BITS})
  ..  --------|                      |  TAG_BITS      ({self.TAG_BITS})
""")

        print ("Dcache TAG %d IDX %d ROW_BITS %d ROFF %d LOFF %d RLB %d" % \
                (self.TAG_BITS, self.INDEX_BITS, self.ROW_BITS,
                 self.ROW_OFF_BITS, self.LINE_OFF_BITS, self.ROW_LINE_BITS))
        print ("index @: %d-%d" % (self.LINE_OFF_BITS, self.SET_SIZE_BITS))
        print ("row @: %d-%d" % (self.ROW_OFF_BITS, self.SET_SIZE_BITS))
        print ("tag @: %d-%d width %d" % (self.SET_SIZE_BITS,
                self.REAL_ADDR_BITS, self.TAG_WIDTH))

        self.TAG_RAM_WIDTH = self.TAG_WIDTH * self.NUM_WAYS

        print ("TAG_RAM_WIDTH", self.TAG_RAM_WIDTH)
        print ("    TAG_WIDTH", self.TAG_WIDTH)
        print ("     NUM_WAYS", self.NUM_WAYS)
        print ("    NUM_LINES", self.NUM_LINES)
        self.TLB_SET_BITS     = log2_int(self.TLB_SET_SIZE)
        self.TLB_WAY_BITS     = log2_int(self.TLB_NUM_WAYS)
        self.TLB_EA_TAG_BITS  = 64 - (self.TLB_LG_PGSZ + self.TLB_SET_BITS)
        self.TLB_TAG_WAY_BITS = self.TLB_NUM_WAYS * self.TLB_EA_TAG_BITS
        self.TLB_PTE_BITS     = 64
        self.TLB_PTE_WAY_BITS = self.TLB_NUM_WAYS * self.TLB_PTE_BITS
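        # with the defaults (TLB_SET_SIZE=64, TLB_NUM_WAYS=2, TLB_LG_PGSZ=12):
        #   TLB_SET_BITS=6  TLB_WAY_BITS=1  TLB_EA_TAG_BITS=64-(12+6)=46
        #   TLB_TAG_WAY_BITS=2*46=92  TLB_PTE_WAY_BITS=2*64=128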
        assert (self.LINE_SIZE % self.ROW_SIZE) == 0, \
            "LINE_SIZE not multiple of ROW_SIZE"
        assert ispow2(self.LINE_SIZE), "LINE_SIZE not power of 2"
        assert ispow2(self.NUM_LINES), "NUM_LINES not power of 2"
        assert ispow2(self.ROW_PER_LINE), "ROW_PER_LINE not power of 2"
        assert self.ROW_BITS == \
            (self.INDEX_BITS + self.ROW_LINE_BITS), \
            "geometry bits don't add up"
        assert (self.LINE_OFF_BITS == \
            self.ROW_OFF_BITS + self.ROW_LINE_BITS), \
            "geometry bits don't add up"
        assert self.REAL_ADDR_BITS == \
            (self.TAG_BITS + self.INDEX_BITS + self.LINE_OFF_BITS), \
            "geometry bits don't add up"
        assert self.REAL_ADDR_BITS == \
            (self.TAG_BITS + self.ROW_BITS + self.ROW_OFF_BITS), \
            "geometry bits don't add up"
        assert 64 == WB_DATA_BITS, \
            "Can't yet handle wb width that isn't 64-bits"
        assert self.SET_SIZE_BITS <= self.TLB_LG_PGSZ, \
            "Set indexed by virtual address"
    def CacheTagArray(self):
        return Array(Signal(self.TAG_RAM_WIDTH, name="tag%d" % x) \
                     for x in range(self.NUM_LINES))

    def CacheValidsArray(self):
        return Array(Signal(self.NUM_WAYS, name="tag_valids%d" % x)
                     for x in range(self.NUM_LINES))

    def RowPerLineValidArray(self):
        return Array(Signal(name="rows_valid%d" % x) \
                     for x in range(self.ROW_PER_LINE))

    def TLBHit(self, name):
        return Record([('valid', 1),
                       ('way', self.TLB_WAY_BITS)], name=name)

    def TLBTagEAArray(self):
        return Array(Signal(self.TLB_EA_TAG_BITS, name="tlbtagea%d" % x) \
                     for x in range (self.TLB_NUM_WAYS))

    def TLBRecord(self, name):
        tlb_layout = [('valid', self.TLB_NUM_WAYS),
                      ('tag', self.TLB_TAG_WAY_BITS),
                      ('pte', self.TLB_PTE_WAY_BITS)
                     ]
        return Record(tlb_layout, name=name)

    def TLBValidArray(self):
        return Array(Signal(self.TLB_NUM_WAYS, name="tlb_valid%d" % x)
                     for x in range(self.TLB_SET_SIZE))

    def HitWaySet(self):
        return Array(Signal(self.WAY_BITS, name="hitway_%d" % x) \
                     for x in range(self.TLB_NUM_WAYS))
    # Cache RAM interface
    def CacheRamOut(self):
        return Array(Signal(WB_DATA_BITS, name="cache_out%d" % x) \
                     for x in range(self.NUM_WAYS))

    # PLRU output interface
    def PLRUOut(self):
        return Array(Signal(self.WAY_BITS, name="plru_out%d" % x) \
                     for x in range(self.NUM_LINES))
    # TLB PLRU output interface
    def TLBPLRUOut(self):
        return Array(Signal(self.TLB_WAY_BITS, name="tlbplru_out%d" % x) \
                     for x in range(self.TLB_SET_SIZE))

    # Helper functions to decode incoming requests
    #
    # Return the cache line index (tag index) for an address
    def get_index(self, addr):
        return addr[self.LINE_OFF_BITS:self.SET_SIZE_BITS]

    # Return the cache row index (data memory) for an address
    def get_row(self, addr):
        return addr[self.ROW_OFF_BITS:self.SET_SIZE_BITS]

    # Return the index of a row within a line
    def get_row_of_line(self, row):
        return row[:self.ROW_BITS][:self.ROW_LINE_BITS]

    # Returns whether this is the last row of a line
    def is_last_row_addr(self, addr, last):
        return addr[self.ROW_OFF_BITS:self.LINE_OFF_BITS] == last

    # Returns whether this is the last row of a line
    def is_last_row(self, row, last):
        return self.get_row_of_line(row) == last

    # Return the next row in the current cache line. We use a
    # dedicated function in order to limit the size of the
    # generated adder to be only the bits within a cache line
    # (3 bits with default settings)
    def next_row(self, row):
        row_v = row[0:self.ROW_LINE_BITS] + 1
        return Cat(row_v[:self.ROW_LINE_BITS], row[self.ROW_LINE_BITS:])
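    # e.g. with ROW_LINE_BITS=3, next_row(0b010_111) adds 1 to the low 3 bits
    # only: 0b111+1 truncates to 0b000, the upper bits stay -> 0b010_000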
    # Get the tag value from the address
    def get_tag(self, addr):
        return addr[self.SET_SIZE_BITS:self.REAL_ADDR_BITS]
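    # with the default geometry a 56-bit real address thus splits as
    # row=addr[3:12], index=addr[6:12], tag=addr[12:56]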
    # Read a tag from a tag memory row
    def read_tag(self, way, tagset):
        return tagset.word_select(way, self.TAG_WIDTH)[:self.TAG_BITS]
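    # (a tag RAM row packs NUM_WAYS tags side-by-side, one per TAG_WIDTH-bit
    # slot; the final slice drops the byte-rounding padding above TAG_BITS)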
    # Read a TLB tag from a TLB tag memory row
    def read_tlb_tag(self, way, tags):
        return tags.word_select(way, self.TLB_EA_TAG_BITS)

    # Write a TLB tag to a TLB tag memory row
    def write_tlb_tag(self, way, tags, tag):
        return self.read_tlb_tag(way, tags).eq(tag)

    # Read a PTE from a TLB PTE memory row
    def read_tlb_pte(self, way, ptes):
        return ptes.word_select(way, self.TLB_PTE_BITS)

    def write_tlb_pte(self, way, ptes, newpte):
        return self.read_tlb_pte(way, ptes).eq(newpte)


# Record for storing permission, attribute, etc. bits from a PTE
class PermAttr(RecordObject):
    def __init__(self, name=None):
        super().__init__(name=name)
        self.reference = Signal()
        self.changed   = Signal()
        self.nocache   = Signal()
        self.priv      = Signal()
        self.rd_perm   = Signal()
        self.wr_perm   = Signal()


def extract_perm_attr(pte):
    pa = PermAttr()
    # (permission fields are decoded inline in tlb_search below)
    return pa


# Type of operation on a "valid" input
@unique
class Op(Enum):
    OP_NONE       = 0
    OP_BAD        = 1 # NC cache hit, TLB miss, prot/RC failure
    OP_STCX_FAIL  = 2 # conditional store w/o reservation
    OP_LOAD_HIT   = 3 # Cache hit on load
    OP_LOAD_MISS  = 4 # Load missing cache
    OP_LOAD_NC    = 5 # Non-cachable load
    OP_STORE_HIT  = 6 # Store hitting cache
    OP_STORE_MISS = 7 # Store missing cache
# Cache state machine
@unique
class State(Enum):
    IDLE             = 0 # Normal load hit processing
    RELOAD_WAIT_ACK  = 1 # Cache reload wait ack
    STORE_WAIT_ACK   = 2 # Store wait ack
    NC_LOAD_WAIT_ACK = 3 # Non-cachable load wait ack
# In order to make timing, we use the BRAMs with
# an output buffer, which means that the BRAM
# output is delayed by an extra cycle.
#
# Thus, the dcache has a 2-stage internal pipeline
# for cache hits with no stalls.
#
# All other operations are handled via stalling
# in the first stage.
#
# The second stage can thus complete a hit at the same
# time as the first stage emits a stall for a complex op.
# Stage 0 register, basically contains just the latched request

class RegStage0(RecordObject):
    def __init__(self, name=None):
        super().__init__(name=name)
        self.req     = LoadStore1ToDCacheType(name="lsmem")
        self.tlbie   = Signal() # indicates a tlbie request (from MMU)
        self.doall   = Signal() # with tlbie, indicates flush whole TLB
        self.tlbld   = Signal() # indicates a TLB load request (from MMU)
        self.mmu_req = Signal() # indicates source of request
        self.d_valid = Signal() # indicates req.data is valid now
class MemAccessRequest(RecordObject):
    def __init__(self, cfg, name=None):
        super().__init__(name=name)
        self.op        = Signal(Op)
        self.valid     = Signal()
        self.dcbz      = Signal()
        self.real_addr = Signal(cfg.REAL_ADDR_BITS)
        self.data      = Signal(64)
        self.byte_sel  = Signal(8)
        self.hit_way   = Signal(cfg.WAY_BITS)
        self.same_tag  = Signal()
        self.mmu_req   = Signal()
# First stage register, contains state for stage 1 of load hits
# and for the state machine used by all other operations
class RegStage1(RecordObject):
    def __init__(self, cfg, name=None):
        super().__init__(name=name)
        # Info about the request
        self.full             = Signal() # have uncompleted request
        self.mmu_req          = Signal() # request is from MMU
        self.req              = MemAccessRequest(cfg, name="reqmem")

        # Cache hit state
        self.hit_way          = Signal(cfg.WAY_BITS)
        self.hit_load_valid   = Signal()
        self.hit_index        = Signal(cfg.INDEX_BITS)
        self.cache_hit        = Signal()

        # TLB hit state
        self.tlb_hit          = cfg.TLBHit("tlb_hit")
        self.tlb_hit_index    = Signal(cfg.TLB_SET_BITS)

        # 2-stage data buffer for data forwarded from writes to reads
        self.forward_data1    = Signal(64)
        self.forward_data2    = Signal(64)
        self.forward_sel1     = Signal(8)
        self.forward_valid1   = Signal()
        self.forward_way1     = Signal(cfg.WAY_BITS)
        self.forward_row1     = Signal(cfg.ROW_BITS)
        self.use_forward1     = Signal()
        self.forward_sel      = Signal(8)

        # Cache miss state (reload state machine)
        self.state            = Signal(State)
        self.dcbz             = Signal()
        self.write_bram       = Signal()
        self.write_tag        = Signal()
        self.slow_valid       = Signal()
        self.wb               = WBMasterOut("wb")
        self.reload_tag       = Signal(cfg.TAG_BITS)
        self.store_way        = Signal(cfg.WAY_BITS)
        self.store_row        = Signal(cfg.ROW_BITS)
        self.store_index      = Signal(cfg.INDEX_BITS)
        self.end_row_ix       = Signal(cfg.ROW_LINE_BITS)
        self.rows_valid       = cfg.RowPerLineValidArray()
        self.acks_pending     = Signal(3)
        self.inc_acks         = Signal()
        self.dec_acks         = Signal()

        # Signals to complete (possibly with error)
        self.ls_valid         = Signal()
        self.ls_error         = Signal()
        self.mmu_done         = Signal()
        self.mmu_error        = Signal()
        self.cache_paradox    = Signal()

        # Signal to complete a failed stcx.
        self.stcx_fail        = Signal()
# Reservation information
class Reservation(RecordObject):
    def __init__(self, cfg, name=None):
        super().__init__(name=name)
        self.valid = Signal()
        self.addr  = Signal(64-cfg.LINE_OFF_BITS)
class DTLBUpdate(Elaboratable):
    def __init__(self, cfg):
        self.cfg           = cfg
        self.tlbie         = Signal()
        self.tlbwe         = Signal()
        self.doall         = Signal()
        self.tlb_hit       = cfg.TLBHit("tlb_hit")
        self.tlb_req_index = Signal(cfg.TLB_SET_BITS)

        self.repl_way      = Signal(cfg.TLB_WAY_BITS)
        self.eatag         = Signal(cfg.TLB_EA_TAG_BITS)
        self.pte_data      = Signal(cfg.TLB_PTE_BITS)

        # read from dtlb array
        self.tlb_read       = Signal()
        self.tlb_read_index = Signal(cfg.TLB_SET_BITS)
        self.tlb_way        = cfg.TLBRecord("o_tlb_way")
    def elaborate(self, platform):
        m = Module()
        comb, sync = m.d.comb, m.d.sync
        cfg = self.cfg

        # there are 3 parts to this:
        # QTY TLB_NUM_WAYs TAGs - of width (say) 46 bits of Effective Address
        # QTY TLB_NUM_WAYs PTEs - of width (say) 64 bits
        # "Valid" bits, one per "way", of QTY TLB_NUM_WAYs.  these cannot
        # be a Memory because they can all be cleared (tlbie, doall), i mean,
        # we _could_, in theory, by overriding the Reset Signal of the Memory,

        dtlb_valid = cfg.TLBValidArray()
        tlb_req_index = self.tlb_req_index

        print ("TLB_TAG_WAY_BITS", cfg.TLB_TAG_WAY_BITS)
        print ("     TLB_EA_TAG_BITS", cfg.TLB_EA_TAG_BITS)
        print ("        TLB_NUM_WAYS", cfg.TLB_NUM_WAYS)
        print ("TLB_PTE_WAY_BITS", cfg.TLB_PTE_WAY_BITS)
        print ("    TLB_PTE_BITS", cfg.TLB_PTE_BITS)
        print ("    TLB_NUM_WAYS", cfg.TLB_NUM_WAYS)
        # TAG and PTE Memory SRAMs. transparent, write-enables are TLB_NUM_WAYS
        tagway = Memory(depth=cfg.TLB_SET_SIZE, width=cfg.TLB_TAG_WAY_BITS,
                        attrs={'syn_ramstyle': "block_ram"})
        m.submodules.rd_tagway = rd_tagway = tagway.read_port()
        m.submodules.wr_tagway = wr_tagway = tagway.write_port(
                                    granularity=cfg.TLB_EA_TAG_BITS)

        pteway = Memory(depth=cfg.TLB_SET_SIZE, width=cfg.TLB_PTE_WAY_BITS,
                        attrs={'syn_ramstyle': "block_ram"})
        m.submodules.rd_pteway = rd_pteway = pteway.read_port()
        m.submodules.wr_pteway = wr_pteway = pteway.write_port(
                                    granularity=cfg.TLB_PTE_BITS)

        # commented out for now, can be put in if Memory.reset can be
        # used for tlbie&doall to reset the entire Memory to zero in 1 cycle
        #validm = Memory(depth=TLB_SET_SIZE, width=TLB_NUM_WAYS)
        #m.submodules.rd_valid = rd_valid = validm.read_port()
        #m.submodules.wr_valid = wr_valid = validm.write_port(

        # connect up read and write addresses to Valid/PTE/TAG SRAMs
        m.d.comb += rd_pteway.addr.eq(self.tlb_read_index)
        m.d.comb += rd_tagway.addr.eq(self.tlb_read_index)
        #m.d.comb += rd_valid.addr.eq(self.tlb_read_index)
        m.d.comb += wr_tagway.addr.eq(tlb_req_index)
        m.d.comb += wr_pteway.addr.eq(tlb_req_index)
        #m.d.comb += wr_valid.addr.eq(tlb_req_index)

        updated   = Signal()
        v_updated = Signal()
        tb_out = Signal(cfg.TLB_TAG_WAY_BITS) # tlb_way_tags_t
        db_out = Signal(cfg.TLB_NUM_WAYS)     # tlb_way_valids_t
        pb_out = Signal(cfg.TLB_PTE_WAY_BITS) # tlb_way_ptes_t
        dv = Signal(cfg.TLB_NUM_WAYS)         # tlb_way_valids_t

        comb += dv.eq(dtlb_valid[tlb_req_index])
        comb += db_out.eq(dv)
        with m.If(self.tlbie & self.doall):
            # clear all valid bits at once
            # XXX hmmm, validm _could_ use Memory reset here...
            for i in range(cfg.TLB_SET_SIZE):
                sync += dtlb_valid[i].eq(0)
        with m.Elif(self.tlbie):
            # invalidate just the hit_way
            with m.If(self.tlb_hit.valid):
                comb += db_out.bit_select(self.tlb_hit.way, 1).eq(0)
                comb += v_updated.eq(1)
        with m.Elif(self.tlbwe):
            # write to the requested tag and PTE
            comb += cfg.write_tlb_tag(self.repl_way, tb_out, self.eatag)
            comb += cfg.write_tlb_pte(self.repl_way, pb_out, self.pte_data)
            # set valid bit
            comb += db_out.bit_select(self.repl_way, 1).eq(1)

            comb += updated.eq(1)
            comb += v_updated.eq(1)

        # above, sometimes valid is requested to be updated but data not
        # therefore split them out, here.  note the granularity thing matches
        # with the shift-up of the eatag/pte_data into the correct TLB way.
        # thus it is not necessary to write the entire lot, just the portion
        # being altered: hence writing the *old* copy of the row is not needed
        with m.If(updated): # PTE and TAG to be written
            comb += wr_pteway.data.eq(pb_out)
            comb += wr_pteway.en.eq(1<<self.repl_way)
            comb += wr_tagway.data.eq(tb_out)
            comb += wr_tagway.en.eq(1<<self.repl_way)
        with m.If(v_updated): # Valid to be written
            sync += dtlb_valid[tlb_req_index].eq(db_out)
            #comb += wr_valid.data.eq(db_out)
            #comb += wr_valid.en.eq(1<<self.repl_way)

        # select one TLB way, use a register here
        r_delay = Signal()
        sync += r_delay.eq(self.tlb_read)
        # first deal with the valids, which are not in a Memory.
        # tlb way valid is output on a 1 clock delay with sync,
        # but have to explicitly deal with "forwarding" here
        with m.If(self.tlb_read):
            with m.If(v_updated): # write *and* read in same cycle: forward
                sync += self.tlb_way.valid.eq(db_out)
            with m.Else():
                sync += self.tlb_way.valid.eq(dtlb_valid[self.tlb_read_index])
        # now deal with the Memory-read case. the output must remain
        # valid (stable) even when a read-request is not made, but stable
        # on a one-clock delay, hence the register
        r_tlb_way = cfg.TLBRecord("r_tlb_way")
        with m.If(r_delay):
            # on one clock delay, capture the contents of the read port(s)
            comb += self.tlb_way.tag.eq(rd_tagway.data)
            comb += self.tlb_way.pte.eq(rd_pteway.data)
            sync += r_tlb_way.tag.eq(rd_tagway.data)
            sync += r_tlb_way.pte.eq(rd_pteway.data)
        with m.Else():
            # ... so that the register can output it when no read is requested
            # it's rather overkill but better to be safe than sorry
            comb += self.tlb_way.tag.eq(r_tlb_way.tag)
            comb += self.tlb_way.pte.eq(r_tlb_way.pte)
            #comb += self.tlb_way.eq(r_tlb_way)

        return m
class DCachePendingHit(Elaboratable):

    def __init__(self, cfg, tlb_way,
                      cache_i_validdx, cache_tag_set,
                      req_addr):
        self.go          = Signal()
        self.virt_mode   = Signal()
        self.is_hit      = Signal()
        self.tlb_hit     = cfg.TLBHit("tlb_hit")
        self.hit_way     = Signal(cfg.WAY_BITS)
        self.rel_match   = Signal()
        self.req_index   = Signal(cfg.INDEX_BITS)
        self.reload_tag  = Signal(cfg.TAG_BITS)

        self.tlb_way = tlb_way
        self.cache_i_validdx = cache_i_validdx
        self.cache_tag_set = cache_tag_set
        self.req_addr = req_addr
        self.cfg = cfg
    def elaborate(self, platform):
        m = Module()
        comb = m.d.comb
        cfg = self.cfg

        go = self.go
        virt_mode = self.virt_mode
        is_hit = self.is_hit
        tlb_way = self.tlb_way
        cache_i_validdx = self.cache_i_validdx
        cache_tag_set = self.cache_tag_set
        req_addr = self.req_addr
        tlb_hit = self.tlb_hit
        hit_way = self.hit_way
        rel_match = self.rel_match
        req_index = self.req_index
        reload_tag = self.reload_tag

        hit_set     = Array(Signal(name="hit_set_%d" % i) \
                            for i in range(cfg.TLB_NUM_WAYS))
        rel_matches = Array(Signal(name="rel_matches_%d" % i) \
                            for i in range(cfg.TLB_NUM_WAYS))
        hit_way_set = cfg.HitWaySet()
        # Test if pending request is a hit on any way
        # In order to make timing in virtual mode,
        # when we are using the TLB, we compare each
        # way with each of the real addresses from each way of
        # the TLB, and then decide later which match to use.

        with m.If(virt_mode):
            for j in range(cfg.TLB_NUM_WAYS): # tlb_num_way_t
                s_tag       = Signal(cfg.TAG_BITS, name="s_tag%d" % j)
                s_hit       = Signal(name="s_hit%d" % j)
                s_pte       = Signal(cfg.TLB_PTE_BITS, name="s_pte%d" % j)
                s_ra        = Signal(cfg.REAL_ADDR_BITS, name="s_ra%d" % j)
                # read the PTE, calc the Real Address, get the tag
                comb += s_pte.eq(cfg.read_tlb_pte(j, tlb_way.pte))
                comb += s_ra.eq(Cat(req_addr[0:cfg.TLB_LG_PGSZ],
                                    s_pte[cfg.TLB_LG_PGSZ:cfg.REAL_ADDR_BITS]))
                comb += s_tag.eq(cfg.get_tag(s_ra))
                # for each way check the tag against the cache tag set
                for i in range(cfg.NUM_WAYS): # way_t
                    is_tag_hit = Signal(name="is_tag_hit_%d_%d" % (j, i))
                    comb += is_tag_hit.eq(go & cache_i_validdx[i] &
                                  (cfg.read_tag(i, cache_tag_set) == s_tag)
                                  & (tlb_way.valid[j]))
                    with m.If(is_tag_hit):
                        comb += hit_way_set[j].eq(i)
                        comb += s_hit.eq(1)
                comb += hit_set[j].eq(s_hit)
                comb += rel_matches[j].eq(s_tag == reload_tag)
            with m.If(tlb_hit.valid):
                comb += is_hit.eq(hit_set[tlb_hit.way])
                comb += hit_way.eq(hit_way_set[tlb_hit.way])
                comb += rel_match.eq(rel_matches[tlb_hit.way])
        with m.Else():
            s_tag       = Signal(cfg.TAG_BITS)
            comb += s_tag.eq(cfg.get_tag(req_addr))
            for i in range(cfg.NUM_WAYS): # way_t
                is_tag_hit = Signal(name="is_tag_hit_%d" % i)
                comb += is_tag_hit.eq(go & cache_i_validdx[i] &
                          (cfg.read_tag(i, cache_tag_set) == s_tag))
                with m.If(is_tag_hit):
                    comb += hit_way.eq(i)
                    comb += is_hit.eq(1)
            with m.If(s_tag == reload_tag):
                comb += rel_match.eq(1)

        return m
class DCache(Elaboratable, DCacheConfig):
    """Set associative dcache write-through

    TODO (in no specific order):
    * See list in icache.vhdl
    * Complete load misses on the cycle when WB data comes instead of
      at the end of line (this requires dealing with requests coming in
      while not idle...)
    """
    def __init__(self, pspec=None):
        self.d_in      = LoadStore1ToDCacheType("d_in")
        self.d_out     = DCacheToLoadStore1Type("d_out")

        self.m_in      = MMUToDCacheType("m_in")
        self.m_out     = DCacheToMMUType("m_out")

        self.stall_out = Signal()
        self.any_stall_out = Signal()
        self.dreq_when_stall = Signal()
        self.mreq_when_stall = Signal()

        # standard naming (wired to non-standard for compatibility)
        # note: data_width/granularity/features below are assumptions
        # matching the 64-bit dat, 8-bit sel and stall usage in this file
        self.bus = Interface(addr_width=32,
                             data_width=64,
                             granularity=8,
                             features={'stall'},
                             name="dcache")

        self.log_out   = Signal(20)

        # test if microwatt compatibility is to be enabled
        self.microwatt_compat = (hasattr(pspec, "microwatt_compat") and
                                 (pspec.microwatt_compat == True))

        if self.microwatt_compat:
            # reduce way sizes and num lines
            super().__init__(NUM_LINES=4,
                             TLB_SET_SIZE=4) # XXX needs device-tree entry
        else:
            super().__init__()
    def stage_0(self, m, r0, r1, r0_full):
        """Latch the request in r0.req as long as we're not stalling
        """
        comb = m.d.comb
        sync = m.d.sync
        d_in, d_out, m_in = self.d_in, self.d_out, self.m_in

        r = RegStage0("stage0")

        # TODO, this goes in unit tests and formal proofs
        with m.If(d_in.valid & m_in.valid):
            sync += Display("request collision loadstore vs MMU")

        with m.If(m_in.valid):
            comb += r.req.valid.eq(1)
            comb += r.req.load.eq(~(m_in.tlbie | m_in.tlbld))# no invalidate
            comb += r.req.dcbz.eq(0)
            comb += r.req.nc.eq(0)
            comb += r.req.reserve.eq(0)
            comb += r.req.virt_mode.eq(0)
            comb += r.req.priv_mode.eq(1)
            comb += r.req.addr.eq(m_in.addr)
            comb += r.req.data.eq(m_in.pte)
            comb += r.req.byte_sel.eq(~0) # Const -1 sets all to 0b111....
            comb += r.tlbie.eq(m_in.tlbie)
            comb += r.doall.eq(m_in.doall)
            comb += r.tlbld.eq(m_in.tlbld)
            comb += r.mmu_req.eq(1)
            comb += r.d_valid.eq(1)
            m.d.sync += Display("    DCACHE req mmu addr %x pte %x ld %d",
                                 m_in.addr, m_in.pte, r.req.load)
        with m.Else():
            comb += r.req.eq(d_in)
            comb += r.req.data.eq(0)
            comb += r.tlbie.eq(0)
            comb += r.doall.eq(0)
            comb += r.tlbld.eq(0)
            comb += r.mmu_req.eq(0)
            comb += r.d_valid.eq(0)

        sync += r0_full.eq(0)
        with m.If((~r1.full & ~d_in.hold) | ~r0_full):
            sync += r0.eq(r)
            sync += r0_full.eq(r.req.valid)
        with m.Elif(~r0.d_valid):
            # Sample data the cycle after a request comes in from loadstore1.
            # If another request has come in already then the data will get
            # put directly into req.data below.
            sync += r0.req.data.eq(d_in.data)
            sync += r0.d_valid.eq(1)
        with m.If(d_in.valid):
            m.d.sync += Display("   DCACHE req cache "
                                "virt %d addr %x data %x ld %d",
                                r.req.virt_mode, r.req.addr,
                                r.req.data, r.req.load)
    def tlb_read(self, m, r0_stall, tlb_way):
        """TLB
        Operates in the second cycle on the request latched in r0.req.
        TLB updates write the entry at the end of the second cycle.
        """
        comb = m.d.comb
        sync = m.d.sync
        m_in, d_in = self.m_in, self.d_in

        addrbits = Signal(self.TLB_SET_BITS)

        amin = self.TLB_LG_PGSZ
        amax = self.TLB_LG_PGSZ + self.TLB_SET_BITS

        with m.If(m_in.valid):
            comb += addrbits.eq(m_in.addr[amin : amax])
        with m.Else():
            comb += addrbits.eq(d_in.addr[amin : amax])

        # If we have any op and the previous op isn't finished,
        # then keep the same output for next cycle.
        d = self.dtlb_update
        comb += d.tlb_read_index.eq(addrbits)
        comb += d.tlb_read.eq(~r0_stall)
        comb += tlb_way.eq(d.tlb_way)
    def maybe_tlb_plrus(self, m, r1, tlb_plru_victim, tlb_req_index):
        """Generate TLB PLRUs
        """
        comb = m.d.comb
        sync = m.d.sync

        if self.TLB_NUM_WAYS == 0:
            return

        # suite of PLRUs with a selection and output mechanism
        tlb_plrus = PLRUs(self.TLB_SET_SIZE, self.TLB_WAY_BITS)
        m.submodules.tlb_plrus = tlb_plrus
        comb += tlb_plrus.way.eq(r1.tlb_hit.way)
        comb += tlb_plrus.valid.eq(r1.tlb_hit.valid)
        comb += tlb_plrus.index.eq(r1.tlb_hit_index)
        comb += tlb_plrus.isel.eq(tlb_req_index) # select victim
        comb += tlb_plru_victim.eq(tlb_plrus.o_index) # selected victim
    def tlb_search(self, m, tlb_req_index, r0, r0_valid,
                   tlb_way,
                   pte, tlb_hit, valid_ra, perm_attr, ra):

        comb = m.d.comb

        hitway = Signal(self.TLB_WAY_BITS)
        hit    = Signal()
        eatag  = Signal(self.TLB_EA_TAG_BITS)

        self.TLB_LG_END = self.TLB_LG_PGSZ + self.TLB_SET_BITS
        r0_req_addr = r0.req.addr[self.TLB_LG_PGSZ : self.TLB_LG_END]
        comb += tlb_req_index.eq(r0_req_addr)
        comb += eatag.eq(r0.req.addr[self.TLB_LG_END : 64 ])

        for i in range(self.TLB_NUM_WAYS):
            is_tag_hit = Signal(name="is_tag_hit%d" % i)
            tlb_tag = Signal(self.TLB_EA_TAG_BITS, name="tlb_tag%d" % i)
            comb += tlb_tag.eq(self.read_tlb_tag(i, tlb_way.tag))
            comb += is_tag_hit.eq((tlb_way.valid[i]) & (tlb_tag == eatag))
            with m.If(is_tag_hit):
                comb += hitway.eq(i)
                comb += hit.eq(1)

        comb += tlb_hit.valid.eq(hit & r0_valid)
        comb += tlb_hit.way.eq(hitway)

        with m.If(tlb_hit.valid):
            comb += pte.eq(self.read_tlb_pte(hitway, tlb_way.pte))
        comb += valid_ra.eq(tlb_hit.valid | ~r0.req.virt_mode)

        with m.If(r0.req.virt_mode):
            comb += ra.eq(Cat(Const(0, self.ROW_OFF_BITS),
                              r0.req.addr[self.ROW_OFF_BITS:self.TLB_LG_PGSZ],
                              pte[self.TLB_LG_PGSZ:self.REAL_ADDR_BITS]))
            comb += perm_attr.reference.eq(pte[8])
            comb += perm_attr.changed.eq(pte[7])
            comb += perm_attr.nocache.eq(pte[5])
            comb += perm_attr.priv.eq(pte[3])
            comb += perm_attr.rd_perm.eq(pte[2])
            comb += perm_attr.wr_perm.eq(pte[1])
        with m.Else():
            comb += ra.eq(Cat(Const(0, self.ROW_OFF_BITS),
                           r0.req.addr[self.ROW_OFF_BITS:self.REAL_ADDR_BITS]))
            comb += perm_attr.reference.eq(1)
            comb += perm_attr.changed.eq(1)
            comb += perm_attr.nocache.eq(0)
            comb += perm_attr.priv.eq(1)
            comb += perm_attr.rd_perm.eq(1)
            comb += perm_attr.wr_perm.eq(1)

        with m.If(valid_ra):
            m.d.sync += Display("DCACHE virt mode %d hit %d ra %x pte %x",
                                r0.req.virt_mode, tlb_hit.valid, ra, pte)
            m.d.sync += Display("       perm ref=%d", perm_attr.reference)
            m.d.sync += Display("       perm chg=%d", perm_attr.changed)
            m.d.sync += Display("       perm noc=%d", perm_attr.nocache)
            m.d.sync += Display("       perm prv=%d", perm_attr.priv)
            m.d.sync += Display("       perm rdp=%d", perm_attr.rd_perm)
            m.d.sync += Display("       perm wrp=%d", perm_attr.wr_perm)
    def tlb_update(self, m, r0_valid, r0, tlb_req_index,
                   tlb_hit, tlb_plru_victim):

        comb = m.d.comb
        sync = m.d.sync

        tlbie = Signal()
        tlbwe = Signal()

        comb += tlbie.eq(r0_valid & r0.tlbie)
        comb += tlbwe.eq(r0_valid & r0.tlbld)

        d = self.dtlb_update

        comb += d.tlbie.eq(tlbie)
        comb += d.tlbwe.eq(tlbwe)
        comb += d.doall.eq(r0.doall)
        comb += d.tlb_hit.eq(tlb_hit)
        comb += d.tlb_req_index.eq(tlb_req_index)

        with m.If(tlb_hit.valid):
            comb += d.repl_way.eq(tlb_hit.way)
        with m.Else():
            comb += d.repl_way.eq(tlb_plru_victim)
        comb += d.eatag.eq(r0.req.addr[self.TLB_LG_PGSZ + self.TLB_SET_BITS:64])
        comb += d.pte_data.eq(r0.req.data)
    def maybe_plrus(self, m, r1, plru_victim):
        """Generate PLRUs
        """
        comb = m.d.comb
        sync = m.d.sync

        if self.NUM_WAYS == 0:
            return

        # suite of PLRUs with a selection and output mechanism
        m.submodules.plrus = plrus = PLRUs(self.NUM_LINES, self.WAY_BITS)
        comb += plrus.way.eq(r1.hit_way)
        comb += plrus.valid.eq(r1.cache_hit)
        comb += plrus.index.eq(r1.hit_index)
        comb += plrus.isel.eq(r1.store_index) # select victim
        comb += plru_victim.eq(plrus.o_index) # selected victim
    def cache_tag_read(self, m, r0_stall, req_index, cache_tag_set):
        """Cache tag RAM read port
        """
        comb = m.d.comb
        sync = m.d.sync
        m_in, d_in = self.m_in, self.d_in

        # synchronous tag read-port: NOT TRANSPARENT (cannot pass through
        # write-to-a-read at the same time), seems to pass tests ok
        m.submodules.rd_tag = rd_tag = self.tagmem.read_port(transparent=False)

        index = Signal(self.INDEX_BITS)

        with m.If(r0_stall):
            comb += index.eq(req_index)
        with m.Elif(m_in.valid):
            comb += index.eq(self.get_index(m_in.addr))
        with m.Else():
            comb += index.eq(self.get_index(d_in.addr))
        comb += rd_tag.addr.eq(index)
        comb += cache_tag_set.eq(rd_tag.data) # read-port is a 1-clock delay
    def dcache_request(self, m, r0, ra, req_index, req_row, req_tag,
                       r0_valid, r1, cache_valids, replace_way,
                       use_forward1_next, use_forward2_next,
                       req_hit_way, plru_victim, rc_ok, perm_attr,
                       valid_ra, perm_ok, access_ok, req_op, req_go,
                       tlb_hit, tlb_way, cache_tag_set,
                       cancel_store, req_same_tag, r0_stall, early_req_row):
        """Cache request parsing and hit detection
        """
        comb = m.d.comb
        m_in, d_in = self.m_in, self.d_in

        is_hit      = Signal()
        hit_way     = Signal(self.WAY_BITS)
        op          = Signal(Op)
        opsel       = Signal(3)
        go          = Signal()
        nc          = Signal()
        cache_i_validdx = Signal(self.NUM_WAYS)

        # Extract line, row and tag from request
        comb += req_index.eq(self.get_index(r0.req.addr))
        comb += req_row.eq(self.get_row(r0.req.addr))
        comb += req_tag.eq(self.get_tag(ra))

        if False: # display on comb is a bit... busy.
            comb += Display("dcache_req addr:%x ra: %x idx: %x tag: %x row: %x",
                            r0.req.addr, ra, req_index, req_tag, req_row)

        comb += go.eq(r0_valid & ~(r0.tlbie | r0.tlbld) & ~r1.ls_error)
        comb += cache_i_validdx.eq(cache_valids[req_index])

        m.submodules.dcache_pend = dc = DCachePendingHit(self, tlb_way,
                                            cache_i_validdx, cache_tag_set,
                                            r0.req.addr)
        comb += dc.tlb_hit.eq(tlb_hit)
        comb += dc.reload_tag.eq(r1.reload_tag)
        comb += dc.virt_mode.eq(r0.req.virt_mode)
        comb += dc.go.eq(go)
        comb += dc.req_index.eq(req_index)

        comb += is_hit.eq(dc.is_hit)
        comb += hit_way.eq(dc.hit_way)
        comb += req_same_tag.eq(dc.rel_match)

        # See if the request matches the line currently being reloaded
        with m.If((r1.state == State.RELOAD_WAIT_ACK) &
                  (req_index == r1.store_index) & req_same_tag):
            # For a store, consider this a hit even if the row isn't
            # valid since it will be by the time we perform the store.
            # For a load, check the appropriate row valid bit.
            rrow = Signal(self.ROW_LINE_BITS)
            comb += rrow.eq(req_row)
            valid = r1.rows_valid[rrow]
            comb += is_hit.eq((~r0.req.load) | valid)
            comb += hit_way.eq(replace_way)

        # Whether to use forwarded data for a load or not
        with m.If((self.get_row(r1.req.real_addr) == req_row) &
                  (r1.req.hit_way == hit_way)):
            # Only need to consider r1.write_bram here, since if we
            # are writing refill data here, then we don't have a
            # cache hit this cycle on the line being refilled.
            # (There is the possibility that the load following the
            # load miss that started the refill could be to the old
            # contents of the victim line, since it is a couple of
            # cycles after the refill starts before we see the updated
            # cache tag. In that case we don't use the bypass.)
            comb += use_forward1_next.eq(r1.write_bram)
        with m.If((r1.forward_row1 == req_row) & (r1.forward_way1 == hit_way)):
            comb += use_forward2_next.eq(r1.forward_valid1)

        # The way that matched on a hit
        comb += req_hit_way.eq(hit_way)

        # The way to replace on a miss
        with m.If(r1.write_tag):
            comb += replace_way.eq(plru_victim)
        with m.Else():
            comb += replace_way.eq(r1.store_way)

        # work out whether we have permission for this access
        # NB we don't yet implement AMR, thus no KUAP
        comb += rc_ok.eq(perm_attr.reference
                         & (r0.req.load | perm_attr.changed))
        comb += perm_ok.eq((r0.req.priv_mode | (~perm_attr.priv)) &
                           (perm_attr.wr_perm |
                            (r0.req.load & perm_attr.rd_perm)))
        comb += access_ok.eq(valid_ra & perm_ok & rc_ok)

        # Combine the request and cache hit status to decide what
        # operation needs to be done
        comb += nc.eq(r0.req.nc | perm_attr.nocache)
        comb += op.eq(Op.OP_NONE)

        with m.If(go):
            with m.If(~access_ok):
                m.d.sync += Display("DCACHE access fail valid_ra=%d p=%d rc=%d",
                                    valid_ra, perm_ok, rc_ok)
                comb += op.eq(Op.OP_BAD)
            with m.Elif(cancel_store):
                m.d.sync += Display("DCACHE cancel store")
                comb += op.eq(Op.OP_STCX_FAIL)
            with m.Else():
                m.d.sync += Display("DCACHE valid_ra=%d nc=%d ld=%d",
                                    valid_ra, nc, r0.req.load)
                comb += opsel.eq(Cat(is_hit, nc, r0.req.load))
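                # opsel bit order (Cat is LSB-first): bit0=is_hit, bit1=nc,
                # bit2=load; e.g. 0b101 is a cacheable load that hit, and
                # 0b001 is a store (not load, not nc) that hit the cache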
                with m.Switch(opsel):
                    with m.Case(0b101): comb += op.eq(Op.OP_LOAD_HIT)
                    with m.Case(0b100): comb += op.eq(Op.OP_LOAD_MISS)
                    with m.Case(0b110): comb += op.eq(Op.OP_LOAD_NC)
                    with m.Case(0b001): comb += op.eq(Op.OP_STORE_HIT)
                    with m.Case(0b000): comb += op.eq(Op.OP_STORE_MISS)
                    with m.Case(0b010): comb += op.eq(Op.OP_STORE_MISS)
                    with m.Case(0b011): comb += op.eq(Op.OP_BAD)
                    with m.Case(0b111): comb += op.eq(Op.OP_BAD)
        comb += req_op.eq(op)
        comb += req_go.eq(go)

        # Version of the row number that is valid one cycle earlier
        # in the cases where we need to read the cache data BRAM.
        # If we're stalling then we need to keep reading the last
        # row requested.
        with m.If(~r0_stall):
            with m.If(m_in.valid):
                comb += early_req_row.eq(self.get_row(m_in.addr))
            with m.Else():
                comb += early_req_row.eq(self.get_row(d_in.addr))
        with m.Else():
            comb += early_req_row.eq(req_row)
    def reservation_comb(self, m, cancel_store, set_rsrv, clear_rsrv,
                         r0_valid, r0, reservation):
        """Handle load-with-reservation and store-conditional instructions
        """
        comb = m.d.comb

        with m.If(r0_valid & r0.req.reserve):
            # XXX generate alignment interrupt if address
            # is not aligned XXX or if r0.req.nc = '1'
            with m.If(r0.req.load):
                comb += set_rsrv.eq(r0.req.atomic_last) # load with reservation
            with m.Else():
                comb += clear_rsrv.eq(r0.req.atomic_last) # store conditional
                with m.If((~reservation.valid) |
                          (r0.req.addr[self.LINE_OFF_BITS:64] !=
                           reservation.addr)):
                    comb += cancel_store.eq(1)
    def reservation_reg(self, m, r0_valid, access_ok, set_rsrv, clear_rsrv,
                        reservation, r0):

        comb = m.d.comb
        sync = m.d.sync

        with m.If(r0_valid & access_ok):
            with m.If(clear_rsrv):
                sync += reservation.valid.eq(0)
            with m.Elif(set_rsrv):
                sync += reservation.valid.eq(1)
                sync += reservation.addr.eq(r0.req.addr[self.LINE_OFF_BITS:64])
    def writeback_control(self, m, r1, cache_out_row):
        """Return data for loads & completion control logic
        """
        comb = m.d.comb
        sync = m.d.sync
        d_out, m_out = self.d_out, self.m_out

        data_out = Signal(64)
        data_fwd = Signal(64)

        # Use the bypass if we are reading the row that was
        # written 1 or 2 cycles ago, including for the
        # slow_valid = 1 case (i.e. completing a load
        # miss or a non-cacheable load).
        with m.If(r1.use_forward1):
            comb += data_fwd.eq(r1.forward_data1)
        with m.Else():
            comb += data_fwd.eq(r1.forward_data2)

        comb += data_out.eq(cache_out_row)

        for i in range(8):
            with m.If(r1.forward_sel[i]):
                dsel = data_fwd.word_select(i, 8)
                comb += data_out.word_select(i, 8).eq(dsel)

        # DCache output to LoadStore
        comb += d_out.valid.eq(r1.ls_valid)
        comb += d_out.data.eq(data_out)
        comb += d_out.store_done.eq(~r1.stcx_fail)
        comb += d_out.error.eq(r1.ls_error)
        comb += d_out.cache_paradox.eq(r1.cache_paradox)

        # Outputs to MMU
        comb += m_out.done.eq(r1.mmu_done)
        comb += m_out.err.eq(r1.mmu_error)
        comb += m_out.data.eq(data_out)

        # We have a valid load or store hit or we just completed
        # a slow op such as a load miss, a NC load or a store
        #
        # Note: the load hit is delayed by one cycle. However it
        # can still not collide with r.slow_valid (well unless I
        # miscalculated) because slow_valid can only be set on a
        # subsequent request and not on its first cycle (the state
        # machine must have advanced), which makes slow_valid
        # at least 2 cycles from the previous hit_load_valid.

        # Sanity: Only one of these must be set in any given cycle

        if False: # TODO: need Display to get this to work
            assert (r1.slow_valid & r1.stcx_fail) != 1, \
                "unexpected slow_valid collision with stcx_fail"

            assert ((r1.slow_valid | r1.stcx_fail) | r1.hit_load_valid) != 1, \
                "unexpected hit_load_delayed collision with slow_valid"

        with m.If(~r1.mmu_req):
            # Request came from loadstore1...
            # Load hit case is the standard path
            with m.If(r1.hit_load_valid):
                sync += Display("completing load hit data=%x", data_out)

            # error cases complete without stalling
            with m.If(r1.ls_error):
                with m.If(r1.dcbz):
                    sync += Display("completing dcbz with error")
                with m.Else():
                    sync += Display("completing ld/st with error")

            # Slow ops (load miss, NC, stores)
            with m.If(r1.slow_valid):
                sync += Display("completing store or load miss adr=%x data=%x",
                                r1.req.real_addr, data_out)

        with m.Else():
            # Request came from MMU
            with m.If(r1.hit_load_valid):
                sync += Display("completing load hit to MMU, data=%x",
                                m_out.data)

            # error cases complete without stalling
            with m.If(r1.mmu_error):
                sync += Display("completing MMU ld with error")

            # Slow ops (i.e. load miss)
            with m.If(r1.slow_valid):
                sync += Display("completing MMU load miss, adr=%x data=%x",
                                r1.req.real_addr, m_out.data)
    def rams(self, m, r1, early_req_row, cache_out_row, replace_way):
        """RAM
        Generate a cache RAM for each way. This handles the normal
        reads, writes from reloads and the special store-hit update
        path as well.

        Note: the BRAMs have an extra read buffer, meaning the output
        is pipelined an extra cycle. This differs from the
        icache. The writeback logic needs to take that into
        account by using 1-cycle delayed signals for load hits.
        """
        comb = m.d.comb
        bus = self.bus

        # a Binary-to-Unary one-hots here.  replace-way one-hot is gated
        # (enabled) by bus.ack, not-write-bram, and state RELOAD_WAIT_ACK
        m.submodules.rams_replace_way_e = rwe = Decoder(self.NUM_WAYS)
        comb += rwe.n.eq(~((r1.state == State.RELOAD_WAIT_ACK) & bus.ack &
                           ~r1.write_bram))
        comb += rwe.i.eq(replace_way)

        m.submodules.rams_hit_way_e = hwe = Decoder(self.NUM_WAYS)
        comb += hwe.i.eq(r1.hit_way)

        # this one is gated with write_bram, and replace_way_e can never be
        # set at the same time.  that means that do_write can OR the outputs
        m.submodules.rams_hit_req_way_e = hre = Decoder(self.NUM_WAYS)
        comb += hre.n.eq(~r1.write_bram) # Decoder.n is inverted
        comb += hre.i.eq(r1.req.hit_way)

        # common read and write control signals, shared across all ways
        do_read = Signal()
        wr_addr = Signal(self.ROW_BITS)
        wr_data = Signal(WB_DATA_BITS)
        wr_sel  = Signal(self.ROW_SIZE)
        rd_addr = Signal(self.ROW_BITS)

        comb += do_read.eq(1) # always enable
        comb += rd_addr.eq(early_req_row)

        # Write mux:
        #
        # Defaults to wishbone read responses (cache refill)
        #
        # For timing, the mux on wr_data/sel/addr is not
        # dependent on anything other than the current state.

        with m.If(r1.write_bram):
            # Write store data to BRAM.  This happens one
            # cycle after the store is in r0.
            comb += wr_data.eq(r1.req.data)
            comb += wr_sel.eq(r1.req.byte_sel)
            comb += wr_addr.eq(self.get_row(r1.req.real_addr))
        with m.Else():
            # Otherwise, we might be doing a reload or a DCBZ
            with m.If(r1.dcbz):
                comb += wr_data.eq(0)
            with m.Else():
                comb += wr_data.eq(bus.dat_r)
            comb += wr_addr.eq(r1.store_row)
            comb += wr_sel.eq(~0) # all 1s

        for i in range(self.NUM_WAYS):
            do_write = Signal(name="do_wr%d" % i)
            wr_sel_m = Signal(self.ROW_SIZE, name="wr_sel_m_%d" % i)
            d_out    = Signal(WB_DATA_BITS, name="dout_%d" % i) # cache_row_t

            way = CacheRam(self.ROW_BITS, WB_DATA_BITS, ADD_BUF=True, ram_num=i)
            m.submodules["cacheram_%d" % i] = way

            comb += way.rd_en.eq(do_read)
            comb += way.rd_addr.eq(rd_addr)
            comb += d_out.eq(way.rd_data_o)
            comb += way.wr_sel.eq(wr_sel_m)
            comb += way.wr_addr.eq(wr_addr)
            comb += way.wr_data.eq(wr_data)

            # Cache hit reads
            with m.If(hwe.o[i]):
                comb += cache_out_row.eq(d_out)

            # these are mutually-exclusive via their Decoder-enablers
            # (note: Decoder-enable is inverted)
            comb += do_write.eq(hre.o[i] | rwe.o[i])

            # Mask write selects with do_write since BRAM
            # doesn't have a global write-enable
            with m.If(do_write):
                comb += wr_sel_m.eq(wr_sel)
    # Cache hit synchronous machine for the easy case.
    # This handles load hits.
    # It also handles error cases (TLB miss, cache paradox)
    def dcache_fast_hit(self, m, req_op, r0_valid, r0, r1,
                        req_hit_way, req_index, req_tag, access_ok,
                        tlb_hit, tlb_req_index):
        comb = m.d.comb
        sync = m.d.sync

        with m.If(req_op != Op.OP_NONE):
            sync += Display("op:%d addr:%x nc: %d idx: %x tag: %x way: %x",
                            req_op, r0.req.addr, r0.req.nc,
                            req_index, req_tag, req_hit_way)

        with m.If(r0_valid):
            sync += r1.mmu_req.eq(r0.mmu_req)

            # Fast path for load/store hits.
            # Set signals for the writeback controls.
            sync += r1.hit_way.eq(req_hit_way)
            sync += r1.hit_index.eq(req_index)

            sync += r1.hit_load_valid.eq(req_op == Op.OP_LOAD_HIT)
            sync += r1.cache_hit.eq((req_op == Op.OP_LOAD_HIT) |
                                    (req_op == Op.OP_STORE_HIT))

            with m.If(req_op == Op.OP_BAD):
                sync += Display("Signalling ld/st error "
                                "ls_error=%i mmu_error=%i cache_paradox=%i",
                                ~r0.mmu_req, r0.mmu_req, access_ok)
                sync += r1.ls_error.eq(~r0.mmu_req)
                sync += r1.mmu_error.eq(r0.mmu_req)
                sync += r1.cache_paradox.eq(access_ok)
            with m.Else():
                sync += r1.ls_error.eq(0)
                sync += r1.mmu_error.eq(0)
                sync += r1.cache_paradox.eq(0)

            sync += r1.stcx_fail.eq(req_op == Op.OP_STCX_FAIL)

            # Record TLB hit information for updating TLB PLRU
            sync += r1.tlb_hit.eq(tlb_hit)
            sync += r1.tlb_hit_index.eq(tlb_req_index)
    # Memory accesses are handled by this state machine:
    #
    #   * Cache load miss/reload (in conjunction with "rams")
    #   * Load hits for non-cachable forms
    #   * Stores (the collision case is handled in "rams")
    #
    # All wishbone requests generation is done here.
    # This machine operates at stage 1.
    def dcache_slow(self, m, r1, use_forward1_next, use_forward2_next,
                    r0, replace_way,
                    req_hit_way, req_same_tag,
                    r0_valid, req_op, cache_valids, req_go, ra):

        comb = m.d.comb
        sync = m.d.sync
        d_in = self.d_in
        bus = self.bus

        m.submodules.wr_tag = wr_tag = self.tagmem.write_port(
                                            granularity=self.TAG_WIDTH)

        req = MemAccessRequest(self, "mreq_ds")

        r1_next_cycle = Signal()
        req_row = Signal(self.ROW_BITS)
        req_idx = Signal(self.INDEX_BITS)
        req_tag = Signal(self.TAG_BITS)
        comb += req_idx.eq(self.get_index(req.real_addr))
        comb += req_row.eq(self.get_row(req.real_addr))
        comb += req_tag.eq(self.get_tag(req.real_addr))

        sync += r1.use_forward1.eq(use_forward1_next)
        sync += r1.forward_sel.eq(0)

        with m.If(use_forward1_next):
            sync += r1.forward_sel.eq(r1.req.byte_sel)
        with m.Elif(use_forward2_next):
            sync += r1.forward_sel.eq(r1.forward_sel1)

        sync += r1.forward_data2.eq(r1.forward_data1)
        with m.If(r1.write_bram):
            sync += r1.forward_data1.eq(r1.req.data)
            sync += r1.forward_sel1.eq(r1.req.byte_sel)
            sync += r1.forward_way1.eq(r1.req.hit_way)
            sync += r1.forward_row1.eq(self.get_row(r1.req.real_addr))
            sync += r1.forward_valid1.eq(1)
        with m.Else():
            with m.If(r1.dcbz):
                sync += r1.forward_data1.eq(0)
            with m.Else():
                sync += r1.forward_data1.eq(bus.dat_r)
            sync += r1.forward_sel1.eq(~0) # all 1s
            sync += r1.forward_way1.eq(replace_way)
            sync += r1.forward_row1.eq(r1.store_row)
            sync += r1.forward_valid1.eq(0)
        # One cycle pulses reset
        sync += r1.slow_valid.eq(0)
        sync += r1.write_bram.eq(0)
        sync += r1.inc_acks.eq(0)
        sync += r1.dec_acks.eq(0)

        sync += r1.ls_valid.eq(0)
        # complete tlbies and TLB loads in the third cycle
        sync += r1.mmu_done.eq(r0_valid & (r0.tlbie | r0.tlbld))

        with m.If((req_op == Op.OP_LOAD_HIT) | (req_op == Op.OP_STCX_FAIL)):
            with m.If(r0.mmu_req):
                sync += r1.mmu_done.eq(1)
            with m.Else():
                sync += r1.ls_valid.eq(1)

        with m.If(r1.write_tag):
            # Store new tag in selected way
            replace_way_onehot = Signal(self.NUM_WAYS)
            comb += replace_way_onehot.eq(1<<replace_way)
            ct = Signal(self.TAG_RAM_WIDTH)
            comb += ct.eq(r1.reload_tag << (replace_way*self.TAG_WIDTH))
            comb += wr_tag.en.eq(replace_way_onehot)
            comb += wr_tag.addr.eq(r1.store_index)
            comb += wr_tag.data.eq(ct)

            sync += r1.store_way.eq(replace_way)
            sync += r1.write_tag.eq(0)

        # Take request from r1.req if there is one there,
        # else from req_op, ra, etc.
        with m.If(r1.full):
            comb += req.eq(r1.req)
        with m.Else():
            comb += req.op.eq(req_op)
            comb += req.valid.eq(req_go)
            comb += req.mmu_req.eq(r0.mmu_req)
            comb += req.dcbz.eq(r0.req.dcbz)
            comb += req.real_addr.eq(ra)

            with m.If(r0.req.dcbz):
                # force data to 0 for dcbz
                comb += req.data.eq(0)
            with m.Elif(r0.d_valid):
                comb += req.data.eq(r0.req.data)
            with m.Else():
                comb += req.data.eq(d_in.data)

            # Select all bytes for dcbz
            # and for cacheable loads
            with m.If(r0.req.dcbz | (r0.req.load & ~r0.req.nc)):
                comb += req.byte_sel.eq(~0) # all 1s
            with m.Else():
                comb += req.byte_sel.eq(r0.req.byte_sel)
            comb += req.hit_way.eq(req_hit_way)
            comb += req.same_tag.eq(req_same_tag)

        # Store the incoming request from r0,
        # if it is a slow request
        # Note that r1.full = 1 implies req_op = OP_NONE
        with m.If((req_op == Op.OP_LOAD_MISS)
                  | (req_op == Op.OP_LOAD_NC)
                  | (req_op == Op.OP_STORE_MISS)
                  | (req_op == Op.OP_STORE_HIT)):
            sync += r1.req.eq(req)
            sync += r1.full.eq(1)
            # do not let r1.state RELOAD_WAIT_ACK or STORE_WAIT_ACK
            # destroy r1.req by overwriting r1.full back to zero
            comb += r1_next_cycle.eq(1)

        # Main state machine
        with m.Switch(r1.state):

            with m.Case(State.IDLE):
                sync += r1.wb.adr.eq(req.real_addr[self.ROW_OFF_BITS:])
                sync += r1.wb.sel.eq(req.byte_sel)
                sync += r1.wb.dat.eq(req.data)
                sync += r1.dcbz.eq(req.dcbz)

                # Keep track of our index and way
                # for subsequent stores.
                sync += r1.store_index.eq(req_idx)
                sync += r1.store_row.eq(req_row)
                sync += r1.end_row_ix.eq(self.get_row_of_line(req_row)-1)
                sync += r1.reload_tag.eq(req_tag)
                sync += r1.req.same_tag.eq(1)

                with m.If(req.op == Op.OP_STORE_HIT):
                    sync += r1.store_way.eq(req.hit_way)

                #with m.If(r1.dec_acks):
                #    sync += r1.acks_pending.eq(r1.acks_pending - 1)

                # Reset per-row valid bits,
                # ready for handling OP_LOAD_MISS
                for i in range(self.ROW_PER_LINE):
                    sync += r1.rows_valid[i].eq(0)

                with m.If(req_op != Op.OP_NONE):
                    sync += Display("cache op %d", req.op)

                with m.Switch(req.op):
                    with m.Case(Op.OP_LOAD_HIT):
                        # stay in IDLE state
                        pass

                    with m.Case(Op.OP_LOAD_MISS):
                        sync += Display("cache miss real addr: %x " \
                                        "idx: %x tag: %x",
                                        req.real_addr, req_row, req_tag)

                        # Start the wishbone cycle
                        sync += r1.wb.we.eq(0)
                        sync += r1.wb.cyc.eq(1)
                        sync += r1.wb.stb.eq(1)

                        # Track that we had one request sent
                        sync += r1.state.eq(State.RELOAD_WAIT_ACK)
                        sync += r1.write_tag.eq(1)

                    with m.Case(Op.OP_LOAD_NC):
                        sync += r1.wb.cyc.eq(1)
                        sync += r1.wb.stb.eq(1)
                        sync += r1.wb.we.eq(0)
                        sync += r1.state.eq(State.NC_LOAD_WAIT_ACK)

                    with m.Case(Op.OP_STORE_HIT, Op.OP_STORE_MISS):
                        with m.If(~req.dcbz):
                            sync += r1.state.eq(State.STORE_WAIT_ACK)
                            sync += r1.acks_pending.eq(1)
                            sync += r1.full.eq(0)
                            comb += r1_next_cycle.eq(0)
                            sync += r1.slow_valid.eq(1)

                            with m.If(req.mmu_req):
                                sync += r1.mmu_done.eq(1)
                            with m.Else():
                                sync += r1.ls_valid.eq(1)

                            with m.If(req.op == Op.OP_STORE_HIT):
                                sync += r1.write_bram.eq(1)
                        with m.Else():
                            # dcbz is handled much like a load miss except
                            # that we are writing to memory instead of reading
                            sync += r1.state.eq(State.RELOAD_WAIT_ACK)

                            with m.If(req.op == Op.OP_STORE_MISS):
                                sync += r1.write_tag.eq(1)

                        sync += r1.wb.we.eq(1)
                        sync += r1.wb.cyc.eq(1)
                        sync += r1.wb.stb.eq(1)

                    # OP_NONE and OP_BAD do nothing
                    # OP_BAD & OP_STCX_FAIL were
                    # handled above already
                    with m.Case(Op.OP_NONE):
                        pass
                    with m.Case(Op.OP_BAD):
                        pass
                    with m.Case(Op.OP_STCX_FAIL):
                        pass

            with m.Case(State.RELOAD_WAIT_ACK):
                ld_stbs_done = Signal()
                # Requests are all sent if stb is 0
                comb += ld_stbs_done.eq(~r1.wb.stb)

                # If we are still sending requests, was one accepted?
                with m.If((~bus.stall) & r1.wb.stb):
                    # That was the last word?  We are done sending.
                    # Clear stb and set ld_stbs_done so we can handle an
                    # eventual last ack on the same cycle.
                    # sigh - reconstruct wb adr with 3 extra 0s at front
                    wb_adr = Cat(Const(0, self.ROW_OFF_BITS), r1.wb.adr)
                    with m.If(self.is_last_row_addr(wb_adr, r1.end_row_ix)):
                        sync += r1.wb.stb.eq(0)
                        comb += ld_stbs_done.eq(1)

                    # Calculate the next row address in the current cache line
                    rlen = self.LINE_OFF_BITS-self.ROW_OFF_BITS
                    row = Signal(rlen)
                    comb += row.eq(r1.wb.adr)
                    sync += r1.wb.adr[:rlen].eq(row+1)

                # Incoming acks processing
                sync += r1.forward_valid1.eq(bus.ack)
                with m.If(bus.ack):
                    srow = Signal(self.ROW_LINE_BITS)
                    comb += srow.eq(r1.store_row)
                    sync += r1.rows_valid[srow].eq(1)

                    # If this is the data we were looking for,
                    # we can complete the request next cycle.
                    # Compare the whole address in case the
                    # request in r1.req is not the one that
                    # started this refill.
                    rowmatch = Signal()
                    lastrow = Signal()
                    comb += rowmatch.eq(r1.store_row ==
                                        self.get_row(r1.req.real_addr))
                    comb += lastrow.eq(self.is_last_row(r1.store_row,
                                                        r1.end_row_ix))
                    with m.If(r1.full & r1.req.same_tag &
                              ((r1.dcbz & req.dcbz) |
                               (r1.req.op == Op.OP_LOAD_MISS)) & rowmatch):
                        sync += r1.full.eq(r1_next_cycle)
                        sync += r1.slow_valid.eq(1)
                        with m.If(r1.mmu_req):
                            sync += r1.mmu_done.eq(1)
                        with m.Else():
                            sync += r1.ls_valid.eq(1)
                        sync += r1.forward_sel.eq(~0) # all 1s
                        sync += r1.use_forward1.eq(1)

                    # Check for completion
                    with m.If(ld_stbs_done & lastrow):
                        # Complete wishbone cycle
                        sync += r1.wb.cyc.eq(0)

                        # Cache line is now valid
                        cv = Signal(self.NUM_WAYS)
                        comb += cv.eq(cache_valids[r1.store_index])
                        comb += cv.bit_select(r1.store_way, 1).eq(1)
                        sync += cache_valids[r1.store_index].eq(cv)

                        sync += r1.state.eq(State.IDLE)
                        sync += Display("cache valid set %x "
                                        "idx %d way %d",
                                        cv, r1.store_index, r1.store_way)

                    # Increment store row counter
                    sync += r1.store_row.eq(self.next_row(r1.store_row))

            with m.Case(State.STORE_WAIT_ACK):
                st_stbs_done = Signal()
                adjust_acks = Signal(3)

                comb += st_stbs_done.eq(~r1.wb.stb)

                with m.If(r1.inc_acks != r1.dec_acks):
                    with m.If(r1.inc_acks):
                        comb += adjust_acks.eq(r1.acks_pending + 1)
                    with m.Else():
                        comb += adjust_acks.eq(r1.acks_pending - 1)
                with m.Else():
                    comb += adjust_acks.eq(r1.acks_pending)

                sync += r1.acks_pending.eq(adjust_acks)
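                # acks_pending thus tracks stores issued minus acks received;
                # inc_acks/dec_acks are one-cycle pulses, reconciled here into
                # a single up/down adjustment per cycle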
                # Clear stb when slave accepted request
                with m.If(~bus.stall):
                    # See if there is another store waiting
                    # to be done which is in the same real page.
                    # (this is when same_tag is true)
                    with m.If(req.valid):
                        _ra = req.real_addr[self.ROW_OFF_BITS:
                                            self.SET_SIZE_BITS]
                        alen = self.SET_SIZE_BITS-self.ROW_OFF_BITS
                        sync += r1.wb.adr[0:alen].eq(_ra)
                        sync += r1.wb.dat.eq(req.data)
                        sync += r1.wb.sel.eq(req.byte_sel)

                    with m.If((adjust_acks < 7) & req.same_tag &
                              ((req.op == Op.OP_STORE_MISS) |
                               (req.op == Op.OP_STORE_HIT))):
                        sync += r1.wb.stb.eq(1)
                        comb += st_stbs_done.eq(0)
                        sync += r1.store_way.eq(req.hit_way)
                        sync += r1.store_row.eq(self.get_row(req.real_addr))

                        with m.If(req.op == Op.OP_STORE_HIT):
                            sync += r1.write_bram.eq(1)
                        sync += r1.full.eq(r1_next_cycle)
                        sync += r1.slow_valid.eq(1)

                        # Store requests never come from the MMU
                        sync += r1.ls_valid.eq(1)
                        comb += st_stbs_done.eq(0)
                        sync += r1.inc_acks.eq(1)
                    with m.Else():
                        sync += r1.wb.stb.eq(0)
                        comb += st_stbs_done.eq(1)

                # Got ack ? See if complete.
                with m.If(bus.ack):
                    sync += Display("got ack %d %d stbs %d adjust_acks %d",
                                    bus.ack, bus.ack, st_stbs_done, adjust_acks)
                    with m.If(st_stbs_done & (adjust_acks == 1)):
                        sync += r1.state.eq(State.IDLE)
                        sync += r1.wb.cyc.eq(0)
                        sync += r1.wb.stb.eq(0)
                    sync += r1.dec_acks.eq(1)

            with m.Case(State.NC_LOAD_WAIT_ACK):
                # Clear stb when slave accepted request
                with m.If(~bus.stall):
                    sync += r1.wb.stb.eq(0)

                # Got ack ? complete.
                with m.If(bus.ack):
                    sync += r1.state.eq(State.IDLE)
                    sync += r1.full.eq(r1_next_cycle)
                    sync += r1.slow_valid.eq(1)

                    with m.If(r1.mmu_req):
                        sync += r1.mmu_done.eq(1)
                    with m.Else():
                        sync += r1.ls_valid.eq(1)

                    sync += r1.forward_sel.eq(~0) # all 1s
                    sync += r1.use_forward1.eq(1)
                    sync += r1.wb.cyc.eq(0)
                    sync += r1.wb.stb.eq(0)
    def dcache_log(self, m, r1, valid_ra, tlb_hit, stall_out):

        sync = m.d.sync
        d_out, bus, log_out = self.d_out, self.bus, self.log_out

        sync += log_out.eq(Cat(r1.state[:3], valid_ra, tlb_hit.way[:3],
                               stall_out, req_op[:3], d_out.valid, d_out.error,
                               r1.wb.cyc, r1.wb.stb, bus.ack, bus.stall))
    def elaborate(self, platform):
        m = Module()
        comb, sync = m.d.comb, m.d.sync
        m_in, d_in = self.m_in, self.d_in

        # Storage. Hopefully "cache_rows" is a BRAM, the rest is LUTs
        cache_valids  = self.CacheValidsArray()
        cache_tag_set = Signal(self.TAG_RAM_WIDTH)

        self.tagmem = Memory(depth=self.NUM_LINES, width=self.TAG_RAM_WIDTH,
                             attrs={'syn_ramstyle': "block_ram"})

        """note: these are passed to nmigen.hdl.Memory as "attributes".
           don't know how, just that they are.
        """
        # TODO attribute ram_style of
        # dtlb_tags : signal is "distributed";
        # TODO attribute ram_style of
        # dtlb_ptes : signal is "distributed";

        r0      = RegStage0("r0")
        r0_full = Signal()

        r1 = RegStage1(self, "r1")

        reservation = Reservation(self, "rsrv")

        # Async signals on incoming request
        req_index    = Signal(self.INDEX_BITS)
        req_row      = Signal(self.ROW_BITS)
        req_hit_way  = Signal(self.WAY_BITS)
        req_tag      = Signal(self.TAG_BITS)
        req_op       = Signal(Op)
        req_data     = Signal(64)
        req_same_tag = Signal()
        req_go       = Signal()

        early_req_row = Signal(self.ROW_BITS)

        cancel_store = Signal()
        set_rsrv     = Signal()
        clear_rsrv   = Signal()

        r0_valid = Signal()
        r0_stall = Signal()

        use_forward1_next = Signal()
        use_forward2_next = Signal()

        cache_out_row = Signal(WB_DATA_BITS)

        plru_victim = Signal(self.WAY_BITS)
        replace_way = Signal(self.WAY_BITS)

        # Wishbone read/write/cache write formatting signals

        tlb_way       = self.TLBRecord("tlb_way")
        tlb_req_index = Signal(self.TLB_SET_BITS)
        tlb_hit       = self.TLBHit("tlb_hit")
        pte           = Signal(self.TLB_PTE_BITS)
        ra            = Signal(self.REAL_ADDR_BITS)
        valid_ra      = Signal()
        perm_attr     = PermAttr("dc_perms")
        rc_ok         = Signal()
        perm_ok       = Signal()
        access_ok     = Signal()

        tlb_plru_victim = Signal(self.TLB_WAY_BITS)
1827 comb
+= self
.m_out
.stall
.eq(0)
1829 # Hold off the request in r0 when r1 has an uncompleted request
1830 comb
+= r0_stall
.eq(r0_full
& (r1
.full | d_in
.hold
))
1831 comb
+= r0_valid
.eq(r0_full
& ~r1
.full
& ~d_in
.hold
)
1832 comb
+= self
.stall_out
.eq(r0_stall
)
1833 # debugging: detect if any stall ever requested, which is fine,
1834 # but if a request comes in when stall requested, that's bad.
1835 with m
.If(r0_stall
):
1836 sync
+= self
.any_stall_out
.eq(1)
1837 with m
.If(d_in
.valid
):
1838 sync
+= self
.dreq_when_stall
.eq(1)
1839 with m
.If(m_in
.valid
):
1840 sync
+= self
.mreq_when_stall
.eq(1)
1842 # deal with litex not doing wishbone pipeline mode
1843 # XXX in wrong way. FIFOs are needed in the SRAM test
1844 # so that stb/ack match up. same thing done in icache.py
1845 if not self
.microwatt_compat
:
1846 comb
+= self
.bus
.stall
.eq(self
.bus
.cyc
& ~self
.bus
.ack
)
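        # (this is the hard-coded stall noted in the module docstring:
        # stall = cyc & ~ack; see WB4 spec, p84, section 5.2.1)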
        # Wire up wishbone request latch out of stage 1
        comb += self.bus.we.eq(r1.wb.we)
        comb += self.bus.adr.eq(r1.wb.adr)
        comb += self.bus.sel.eq(r1.wb.sel)
        comb += self.bus.stb.eq(r1.wb.stb)
        comb += self.bus.dat_w.eq(r1.wb.dat)
        comb += self.bus.cyc.eq(r1.wb.cyc)

        # create submodule TLBUpdate
        m.submodules.dtlb_update = self.dtlb_update = DTLBUpdate(self)

        # call sub-functions putting everything together, using shared
        # signals established above
        self.stage_0(m, r0, r1, r0_full)
        self.tlb_read(m, r0_stall, tlb_way)
        self.tlb_search(m, tlb_req_index, r0, r0_valid,
                        tlb_way,
                        pte, tlb_hit, valid_ra, perm_attr, ra)
        self.tlb_update(m, r0_valid, r0, tlb_req_index,
                        tlb_hit, tlb_plru_victim)
        self.maybe_plrus(m, r1, plru_victim)
        self.maybe_tlb_plrus(m, r1, tlb_plru_victim, tlb_req_index)
        self.cache_tag_read(m, r0_stall, req_index, cache_tag_set)
        self.dcache_request(m, r0, ra, req_index, req_row, req_tag,
                            r0_valid, r1, cache_valids, replace_way,
                            use_forward1_next, use_forward2_next,
                            req_hit_way, plru_victim, rc_ok, perm_attr,
                            valid_ra, perm_ok, access_ok, req_op, req_go,
                            tlb_hit, tlb_way, cache_tag_set,
                            cancel_store, req_same_tag, r0_stall,
                            early_req_row)
        self.reservation_comb(m, cancel_store, set_rsrv, clear_rsrv,
                              r0_valid, r0, reservation)
        self.reservation_reg(m, r0_valid, access_ok, set_rsrv, clear_rsrv,
                             reservation, r0)
        self.writeback_control(m, r1, cache_out_row)
        self.rams(m, r1, early_req_row, cache_out_row, replace_way)
        self.dcache_fast_hit(m, req_op, r0_valid, r0, r1,
                             req_hit_way, req_index, req_tag, access_ok,
                             tlb_hit, tlb_req_index)
        self.dcache_slow(m, r1, use_forward1_next, use_forward2_next,
                         r0, replace_way,
                         req_hit_way, req_same_tag,
                         r0_valid, req_op, cache_valids, req_go, ra)
        #self.dcache_log(m, r1, valid_ra, tlb_hit, stall_out)

        return m


if __name__ == '__main__':
    dut = DCache()
    vl = rtlil.convert(dut, ports=[])
    with open("test_dcache.il", "w") as f:
        f.write(vl)