1 """ICache
2
3 based on Anton Blanchard microwatt icache.vhdl
4
5 Set associative icache
6
7 TODO (in no specific order):
8 * Add debug interface to inspect cache content
9 * Add snoop/invalidate path
10 * Add multi-hit error detection
11 * Pipelined bus interface (wb or axi)
12 * Maybe add parity? There's a few bits free in each BRAM row on Xilinx
13 * Add optimization: service hits on partially loaded lines
14 * Add optimization: (maybe) interrupt reload on flush/redirect
15 * Check if playing with the geometry of the cache tags allows for more
16 efficient use of distributed RAM and less logic/muxes. Currently we
17 write TAG_BITS width which may not match full ram blocks and might
18 cause muxes to be inferred for "partial writes".
19 * Check if making the read size of PLRU a ROM helps utilization
20
21 Links:
22
23 * https://bugs.libre-soc.org/show_bug.cgi?id=485
24 * https://libre-soc.org/irclog-microwatt/%23microwatt.2021-12-07.log.html
25 (discussion about brams for ECP5)
26
27 """
28
29 from enum import (Enum, unique)
30 from nmigen import (Module, Signal, Elaboratable, Cat, Array, Const, Repl,
31 Record)
32 from nmigen.cli import main, rtlil
33 from nmutil.iocontrol import RecordObject
34 from nmigen.utils import log2_int
35 from nmigen.lib.coding import Decoder
36 from nmutil.util import Display
37
38 #from nmutil.plru import PLRU
39 from soc.experiment.plru import PLRU, PLRUs
40 from soc.experiment.cache_ram import CacheRam
41
42 from soc.experiment.mem_types import (Fetch1ToICacheType,
43 ICacheToDecode1Type,
44 MMUToICacheType)
45
46 from soc.experiment.wb_types import (WB_ADDR_BITS, WB_DATA_BITS,
47 WB_SEL_BITS, WBAddrType, WBDataType,
48 WBSelType, WBMasterOut, WBSlaveOut,
49 )
50
51 from nmigen_soc.wishbone.bus import Interface
52 from soc.minerva.units.fetch import FetchUnitInterface
53
54
55 # for test
56 from soc.bus.sram import SRAM
57 from nmigen import Memory
58 from nmutil.util import wrap
60
61 # NOTE: to use cxxsim, export NMIGEN_SIM_MODE=cxxsim from the shell
62 # Also, check out the cxxsim nmigen branch, and latest yosys from git
63 from nmutil.sim_tmp_alternative import Simulator, Settle
64
65
66 SIM = 0
67 LINE_SIZE = 64
68 # BRAM organisation: We never access more than wishbone_data_bits
69 # at a time so to save resources we make the array only that wide,
70 # and use consecutive indices to make a cache "line"
71 #
72 # ROW_SIZE is the width in bytes of the BRAM (based on WB, so 64-bits)
73 ROW_SIZE = WB_DATA_BITS // 8
74 # Number of lines in a set
75 NUM_LINES = 64
76 # Number of ways
77 NUM_WAYS = 2
78 # L1 ITLB number of entries (direct mapped)
79 TLB_SIZE = 64
80 # L1 ITLB log_2(page_size)
81 TLB_LG_PGSZ = 12
82 # Number of real address bits that we store
83 REAL_ADDR_BITS = 56
84 # Non-zero to enable log data collection
85 LOG_LENGTH = 0
86
87 ROW_SIZE_BITS = ROW_SIZE * 8
88 # ROW_PER_LINE is the number of row (wishbone) transactions in a line
89 ROW_PER_LINE = LINE_SIZE // ROW_SIZE
90 # BRAM_ROWS is the number of rows in BRAM needed to represent the full icache
91 BRAM_ROWS = NUM_LINES * ROW_PER_LINE
92 # INSN_PER_ROW is the number of 32bit instructions per BRAM row
93 INSN_PER_ROW = ROW_SIZE_BITS // 32
94
95 # Bit fields counts in the address
96 #
97 # INSN_BITS is the number of bits to select an instruction in a row
98 INSN_BITS = log2_int(INSN_PER_ROW)
99 # ROW_BITS is the number of bits to select a row
100 ROW_BITS = log2_int(BRAM_ROWS)
101 # ROW_LINE_BITS is the number of bits to select a row within a line
102 ROW_LINE_BITS = log2_int(ROW_PER_LINE)
103 # LINE_OFF_BITS is the number of bits for the offset in a cache line
104 LINE_OFF_BITS = log2_int(LINE_SIZE)
105 # ROW_OFF_BITS is the number of bits for the offset in a row
106 ROW_OFF_BITS = log2_int(ROW_SIZE)
107 # INDEX_BITS is the number of bits to select a cache line
108 INDEX_BITS = log2_int(NUM_LINES)
109 # SET_SIZE_BITS is the log base 2 of the set size
110 SET_SIZE_BITS = LINE_OFF_BITS + INDEX_BITS
111 # TAG_BITS is the number of bits of the tag part of the address
112 TAG_BITS = REAL_ADDR_BITS - SET_SIZE_BITS
113 # TAG_WIDTH is the width in bits of each way of the tag RAM
114 TAG_WIDTH = TAG_BITS + 7 - ((TAG_BITS + 7) % 8)
115
116 # WAY_BITS is the number of bits to select a way
117 WAY_BITS = log2_int(NUM_WAYS)
118 TAG_RAM_WIDTH = TAG_BITS * NUM_WAYS
119
120 # L1 ITLB
121 TLB_BITS = log2_int(TLB_SIZE)
122 TLB_EA_TAG_BITS = 64 - (TLB_LG_PGSZ + TLB_BITS)
123 TLB_PTE_BITS = 64
124
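# Worked example of the derived values above for the default configuration
# (WB_DATA_BITS=64 so ROW_SIZE=8; LINE_SIZE=64, NUM_LINES=64, NUM_WAYS=2,
#  REAL_ADDR_BITS=56, TLB_SIZE=64, TLB_LG_PGSZ=12):
#
#   ROW_SIZE_BITS = 64    ROW_PER_LINE  = 8     BRAM_ROWS       = 512
#   INSN_PER_ROW  = 2     INSN_BITS     = 1     ROW_BITS        = 9
#   ROW_LINE_BITS = 3     LINE_OFF_BITS = 6     ROW_OFF_BITS    = 3
#   INDEX_BITS    = 6     SET_SIZE_BITS = 12    TAG_BITS        = 44
#   TAG_WIDTH     = 48    WAY_BITS      = 1     TAG_RAM_WIDTH   = 88
#   TLB_BITS      = 6     TLB_EA_TAG_BITS = 46  TLB_PTE_BITS    = 64
#
# The prints below report the same values at import time.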
125 print("BRAM_ROWS =", BRAM_ROWS)
126 print("INDEX_BITS =", INDEX_BITS)
127 print("INSN_BITS =", INSN_BITS)
128 print("INSN_PER_ROW =", INSN_PER_ROW)
129 print("LINE_SIZE =", LINE_SIZE)
130 print("LINE_OFF_BITS =", LINE_OFF_BITS)
131 print("LOG_LENGTH =", LOG_LENGTH)
132 print("NUM_LINES =", NUM_LINES)
133 print("NUM_WAYS =", NUM_WAYS)
134 print("REAL_ADDR_BITS =", REAL_ADDR_BITS)
135 print("ROW_BITS =", ROW_BITS)
136 print("ROW_OFF_BITS =", ROW_OFF_BITS)
137 print("ROW_LINE_BITS =", ROW_LINE_BITS)
138 print("ROW_PER_LINE =", ROW_PER_LINE)
139 print("ROW_SIZE =", ROW_SIZE)
140 print("ROW_SIZE_BITS =", ROW_SIZE_BITS)
141 print("SET_SIZE_BITS =", SET_SIZE_BITS)
142 print("SIM =", SIM)
143 print("TAG_BITS =", TAG_BITS)
144 print("TAG_RAM_WIDTH =", TAG_RAM_WIDTH)
145 print("TAG_BITS =", TAG_BITS)
146 print("TLB_BITS =", TLB_BITS)
147 print("TLB_EA_TAG_BITS =", TLB_EA_TAG_BITS)
148 print("TLB_LG_PGSZ =", TLB_LG_PGSZ)
149 print("TLB_PTE_BITS =", TLB_PTE_BITS)
150 print("TLB_SIZE =", TLB_SIZE)
151 print("WAY_BITS =", WAY_BITS)
152
153 # from microwatt/utils.vhdl
154 def ispow2(n):
155 return n != 0 and (n & (n - 1)) == 0
156
157 assert LINE_SIZE % ROW_SIZE == 0
158 assert ispow2(LINE_SIZE), "LINE_SIZE not power of 2"
159 assert ispow2(NUM_LINES), "NUM_LINES not power of 2"
160 assert ispow2(ROW_PER_LINE), "ROW_PER_LINE not power of 2"
161 assert ispow2(INSN_PER_ROW), "INSN_PER_ROW not power of 2"
162 assert (ROW_BITS == (INDEX_BITS + ROW_LINE_BITS)), \
163 "geometry bits don't add up"
164 assert (LINE_OFF_BITS == (ROW_OFF_BITS + ROW_LINE_BITS)), \
165 "geometry bits don't add up"
166 assert (REAL_ADDR_BITS == (TAG_BITS + INDEX_BITS + LINE_OFF_BITS)), \
167 "geometry bits don't add up"
168 assert (REAL_ADDR_BITS == (TAG_BITS + ROW_BITS + ROW_OFF_BITS)), \
169 "geometry bits don't add up"
170
171 # Example of layout for 32 lines of 64 bytes:
172 #
173 # .. tag |index| line |
174 # .. | row | |
175 # .. | | | |00| zero (2)
176 # .. | | |-| | INSN_BITS (1)
177 # .. | |---| | ROW_LINE_BITS (3)
178 # .. | |--- - --| LINE_OFF_BITS (6)
179 # .. | |- --| ROW_OFF_BITS (3)
180 # .. |----- ---| | ROW_BITS (8)
181 # .. |-----| | INDEX_BITS (5)
182 # .. --------| | TAG_BITS (53)
183
184 # The cache data BRAM organized as described above for each way
185 #subtype cache_row_t is std_ulogic_vector(ROW_SIZE_BITS-1 downto 0);
186 #
187 # The cache tags LUTRAM has a row per set. Vivado is a pain and will
188 # not handle a clean (commented) definition of the cache tags as a 3d
189 # memory. For now, work around it by putting all of a set's tags into a single TAG_RAM_WIDTH-wide word.
190 def CacheTagArray():
191 return Array(Signal(TAG_RAM_WIDTH, name="tag%d" % x) \
192 for x in range(NUM_LINES))
193
194 def CacheValidsArray():
195 return Array(Signal(NUM_WAYS, name="tag_valids%d" % x) \
196 for x in range(NUM_LINES))
197
198 def RowPerLineValidArray():
199 return Array(Signal(name="rows_valid_%d" %x) \
200 for x in range(ROW_PER_LINE))
201
202
203 # TODO to be passed to nmigen as ram attributes
204 # attribute ram_style : string;
205 # attribute ram_style of cache_tags : signal is "distributed";
206
207 def TLBValidArray():
208 return Array(Signal(name="tlb_valid%d" % x)
209 for x in range(TLB_SIZE))
210
211 def TLBRecord(name):
212 tlb_layout = [ ('tag', TLB_EA_TAG_BITS),
213 ('pte', TLB_PTE_BITS)
214 ]
215 return Record(tlb_layout, name=name)
216
217 def TLBArray():
218 return Array(TLBRecord("tlb%d" % x) for x in range(TLB_SIZE))
219
220 # PLRU output interface
221 def PLRUOut():
222 return Array(Signal(WAY_BITS, name="plru_out_%d" %x) \
223 for x in range(NUM_LINES))
224
225 # Return the cache line index (tag index) for an address
226 def get_index(addr):
227 return addr[LINE_OFF_BITS:SET_SIZE_BITS]
228
229 # Return the cache row index (data memory) for an address
230 def get_row(addr):
231 return addr[ROW_OFF_BITS:SET_SIZE_BITS]
232
233 # Return the index of a row within a line
234 def get_row_of_line(row):
235 return row[:ROW_BITS][:ROW_LINE_BITS]
236
237 # Returns whether the address is in the last row of a line
238 def is_last_row_addr(addr, last):
239 return addr[ROW_OFF_BITS:LINE_OFF_BITS] == last
240
241 # Returns whether this is the last row of a line
242 def is_last_row(row, last):
243 return get_row_of_line(row) == last
244
245 # Return the next row in the current cache line. We use a dedicated
246 # function in order to limit the size of the generated adder to be
247 # only the bits within a cache line (3 bits with default settings)
248 def next_row(row):
249 row_v = row[0:ROW_LINE_BITS] + 1
250 return Cat(row_v[:ROW_LINE_BITS], row[ROW_LINE_BITS:])
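# e.g. with ROW_LINE_BITS == 3, row 0b000010_111 (the last row of its
# line) advances to 0b000010_000: only the low 3 bits are incremented
# and allowed to wrap, the upper (index) bits pass through untouched.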
251
252 # Read the instruction word for the given address
253 # in the current cache row
254 def read_insn_word(addr, data):
255 word = addr[2:INSN_BITS+2]
256 return data.word_select(word, 32)
257
258 # Get the tag value from the address
259 def get_tag(addr):
260 return addr[SET_SIZE_BITS:REAL_ADDR_BITS]
261
262 # Read a tag from a tag memory row
263 def read_tag(way, tagset):
264 return tagset.word_select(way, TAG_BITS)
265
266 # Write a tag to tag memory row
267 def write_tag(way, tagset, tag):
268 return read_tag(way, tagset).eq(tag)
269
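
# A pure-Python model of the same address decomposition, handy in unit
# tests or for working out which line an address lands in (illustrative
# sketch only -- the hardware uses the Signal-slice helpers above):
def decode_real_addr(addr):
    index = (addr >> LINE_OFF_BITS) & ((1 << INDEX_BITS) - 1)
    row   = (addr >> ROW_OFF_BITS)  & ((1 << ROW_BITS)   - 1)
    tag   = (addr >> SET_SIZE_BITS) & ((1 << TAG_BITS)   - 1)
    return tag, index, row
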
270 # Simple hash for direct-mapped TLB index
271 def hash_ea(addr):
272 hsh = (addr[TLB_LG_PGSZ:TLB_LG_PGSZ + TLB_BITS] ^
273 addr[TLB_LG_PGSZ + TLB_BITS:TLB_LG_PGSZ + 2 * TLB_BITS ] ^
274 addr[TLB_LG_PGSZ + 2 * TLB_BITS:TLB_LG_PGSZ + 3 * TLB_BITS])
275 return hsh
276
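# Equivalent pure-Python model of hash_ea(), useful for predicting which
# direct-mapped TLB entry an effective address maps to in a test
# (illustrative sketch only -- not referenced by the hardware):
def hash_ea_model(addr):
    mask = (1 << TLB_BITS) - 1
    return ((addr >> TLB_LG_PGSZ) ^
            (addr >> (TLB_LG_PGSZ + TLB_BITS)) ^
            (addr >> (TLB_LG_PGSZ + 2 * TLB_BITS))) & mask
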
277
278 # Cache reload state machine
279 @unique
280 class State(Enum):
281 IDLE = 0
282 CLR_TAG = 1
283 WAIT_ACK = 2
284
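# Summary of the reload flow implemented by the icache_miss_* methods:
#   IDLE:     wait for a miss; latch index/row/tag, set up the first
#             wishbone read of the line, then move to CLR_TAG
#   CLR_TAG:  claim the victim way (clear its valid bit, write its tag)
#             and move to WAIT_ACK
#   WAIT_ACK: step the wishbone address through the remaining rows and
#             mark each row valid as its ack arrives; once the last row
#             is acked, mark the whole line valid and return to IDLE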
285
286 class RegInternal(RecordObject):
287 def __init__(self):
288 super().__init__()
289 # Cache hit state (Latches for 1 cycle BRAM access)
290 self.hit_way = Signal(WAY_BITS)
291 self.hit_nia = Signal(64)
292 self.hit_smark = Signal()
293 self.hit_valid = Signal()
294
295 # Cache miss state (reload state machine)
296 self.state = Signal(State, reset=State.IDLE)
297 self.wb = WBMasterOut("wb")
298 self.req_adr = Signal(64)
299 self.store_way = Signal(WAY_BITS)
300 self.store_index = Signal(INDEX_BITS)
301 self.store_row = Signal(ROW_BITS)
302 self.store_tag = Signal(TAG_BITS)
303 self.store_valid = Signal()
304 self.end_row_ix = Signal(ROW_LINE_BITS)
305 self.rows_valid = RowPerLineValidArray()
306
307 # TLB miss state
308 self.fetch_failed = Signal()
309
310
311 class ICache(FetchUnitInterface, Elaboratable):
312 """64 bit direct mapped icache. All instructions are 4B aligned."""
313 def __init__(self, pspec):
314 FetchUnitInterface.__init__(self, pspec)
315 self.i_in = Fetch1ToICacheType(name="i_in")
316 self.i_out = ICacheToDecode1Type(name="i_out")
317
318 self.m_in = MMUToICacheType(name="m_in")
319
320 self.stall_in = Signal()
321 self.stall_out = Signal()
322 self.flush_in = Signal()
323 self.inval_in = Signal()
324
325 # standard naming (wired to non-standard for compatibility)
326 self.bus = Interface(addr_width=32,
327 data_width=64,
328 granularity=8,
329 features={'stall'},
330 alignment=0,
331 name="icache_wb")
332
333 self.log_out = Signal(54)
334
335 # use FetchUnitInterface, helps keep some unit tests running
336 self.use_fetch_iface = False
337
338 def use_fetch_interface(self):
339 self.use_fetch_iface = True
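
# Example of hooking the cache up via FetchUnitInterface (a sketch;
# a_pc_i/a_i_valid/f_instr_o/f_busy_o come from FetchUnitInterface and
# are only wired up once use_fetch_interface() has been called;
# fetch_pc and fetch_valid are hypothetical driver signals):
#
#     icache = ICache(pspec)
#     icache.use_fetch_interface()
#     m.submodules.icache = icache
#     m.d.comb += icache.a_pc_i.eq(fetch_pc)
#     m.d.comb += icache.a_i_valid.eq(fetch_valid)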
340
341 # Generate a cache RAM for each way
342 def rams(self, m, r, cache_out_row, use_previous,
343 replace_way, req_row):
344
345 comb = m.d.comb
346 sync = m.d.sync
347
348 bus, stall_in = self.bus, self.stall_in
349
350 # read condition (for every cache ram)
351 do_read = Signal()
352 comb += do_read.eq(~(stall_in | use_previous))
353
354 rd_addr = Signal(ROW_BITS)
355 wr_addr = Signal(ROW_BITS)
356 comb += rd_addr.eq(req_row)
357 comb += wr_addr.eq(r.store_row)
358
359 # binary-to-unary converters: replace-way enabled by bus.ack,
360 # hit-way left permanently enabled
361 m.submodules.replace_way_e = re = Decoder(NUM_WAYS)
362 m.submodules.hit_way_e = he = Decoder(NUM_WAYS)
363 comb += re.i.eq(replace_way)
364 comb += re.n.eq(~bus.ack)
365 comb += he.i.eq(r.hit_way)
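# e.g. with NUM_WAYS == 2: replace_way == 1 while bus.ack is high gives
# re.o == 0b10, so only way 1's write select fires in the loop below;
# he.o is simply the one-hot encoding of the way that hit.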
366
367 for i in range(NUM_WAYS):
368 do_write = Signal(name="do_wr_%d" % i)
369 d_out = Signal(ROW_SIZE_BITS, name="d_out_%d" % i)
370 wr_sel = Signal(ROW_SIZE, name="wr_sel_%d" % i)
371
372 way = CacheRam(ROW_BITS, ROW_SIZE_BITS, TRACE=True, ram_num=i)
373 m.submodules["cacheram_%d" % i] = way
374
375 comb += way.rd_en.eq(do_read)
376 comb += way.rd_addr.eq(rd_addr)
377 comb += d_out.eq(way.rd_data_o)
378 comb += way.wr_sel.eq(wr_sel)
379 comb += way.wr_addr.eq(wr_addr)
380 comb += way.wr_data.eq(bus.dat_r)
381
382 comb += do_write.eq(re.o[i])
383
384 with m.If(do_write):
385 sync += Display("cache write adr: %x data: %lx",
386 wr_addr, way.wr_data)
387
388 with m.If(he.o[i]):
389 comb += cache_out_row.eq(d_out)
390 with m.If(do_read):
391 sync += Display("cache read adr: %x data: %x",
392 req_row, d_out)
393
394 comb += wr_sel.eq(Repl(do_write, ROW_SIZE))
395
396 # Generate PLRUs
397 def maybe_plrus(self, m, r, plru_victim):
398 comb = m.d.comb
399
400 if NUM_WAYS == 0:
401 return
402
403
404 m.submodules.plrus = plru = PLRUs(NUM_LINES, WAY_BITS)
405 comb += plru.way.eq(r.hit_way)
406 comb += plru.valid.eq(r.hit_valid)
407 comb += plru.index.eq(get_index(r.hit_nia))
408 comb += plru.isel.eq(r.store_index) # select victim
409 comb += plru_victim.eq(plru.o_index) # selected victim
410
411 # TLB hit detection and real address generation
412 def itlb_lookup(self, m, tlb_req_index, itlb, itlb_valid,
413 real_addr, ra_valid, eaa_priv,
414 priv_fault, access_ok):
415
416 comb = m.d.comb
417
418 i_in = self.i_in
419
420 # use an *asynchronous* Memory read port here (combinatorial)
421 m.submodules.rd_tlb = rd_tlb = self.tlbmem.read_port(domain="comb")
422 tlb = TLBRecord("tlb_rdport")
423 pte, ttag = tlb.pte, tlb.tag
424
425 comb += tlb_req_index.eq(hash_ea(i_in.nia))
426 comb += rd_tlb.addr.eq(tlb_req_index)
427 comb += tlb.eq(rd_tlb.data)
428
429 with m.If(i_in.virt_mode):
430 comb += real_addr.eq(Cat(i_in.nia[:TLB_LG_PGSZ],
431 pte[TLB_LG_PGSZ:REAL_ADDR_BITS]))
432
433 with m.If(ttag == i_in.nia[TLB_LG_PGSZ + TLB_BITS:64]):
434 comb += ra_valid.eq(itlb_valid[tlb_req_index])
435
436 comb += eaa_priv.eq(pte[3])
437
438 with m.Else():
439 comb += real_addr.eq(i_in.nia[:REAL_ADDR_BITS])
440 comb += ra_valid.eq(1)
441 comb += eaa_priv.eq(1)
442
443 # No IAMR, so no KUEP support for now
444 comb += priv_fault.eq(eaa_priv & ~i_in.priv_mode)
445 comb += access_ok.eq(ra_valid & ~priv_fault)
446
447 # iTLB update
448 def itlb_update(self, m, itlb, itlb_valid):
449 comb = m.d.comb
450 sync = m.d.sync
451
452 m_in = self.m_in
453
454 wr_index = Signal(TLB_SIZE)
455 comb += wr_index.eq(hash_ea(m_in.addr))
456
457 m.submodules.wr_tlb = wr_tlb = self.tlbmem.write_port()
458
459 with m.If(m_in.tlbie & m_in.doall):
460 # Clear all valid bits
461 for i in range(TLB_SIZE):
462 sync += itlb_valid[i].eq(0)
463
464 with m.Elif(m_in.tlbie):
465 # Clear entry regardless of hit or miss
466 sync += itlb_valid[wr_index].eq(0)
467
468 with m.Elif(m_in.tlbld):
469 tlb = TLBRecord("tlb_wrport")
470 comb += tlb.tag.eq(m_in.addr[TLB_LG_PGSZ + TLB_BITS:64])
471 comb += tlb.pte.eq(m_in.pte)
472 comb += wr_tlb.en.eq(1)
473 comb += wr_tlb.addr.eq(wr_index)
474 comb += wr_tlb.data.eq(tlb)
475 sync += itlb_valid[wr_index].eq(1)
476
477 # Cache hit detection, output to fetch2 and other misc logic
478 def icache_comb(self, m, use_previous, r, req_index, req_row,
479 req_hit_way, req_tag, real_addr, req_laddr,
480 cache_tags, cache_valids, access_ok,
481 req_is_hit, req_is_miss, replace_way,
482 plru_victim, cache_out_row):
483
484 comb = m.d.comb
485
486 i_in, i_out, bus = self.i_in, self.i_out, self.bus
487 flush_in, stall_out = self.flush_in, self.stall_out
488
489 is_hit = Signal()
490 hit_way = Signal(WAY_BITS)
491
492 # i_in.sequential means that i_in.nia this cycle is 4 more than
493 # last cycle. If we read more than 32 bits at a time, had a
494 # cache hit last cycle, and we don't want the first 32-bit chunk
495 # then we can keep the data we read last cycle and just use that.
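# (e.g. with INSN_PER_ROW == 2: a sequential fetch that lands on the
# second 32-bit word of the same 64-bit row re-uses last cycle's BRAM
# output instead of issuing a new read)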
496 with m.If(i_in.nia[2:INSN_BITS+2] != 0):
497 comb += use_previous.eq(i_in.sequential & r.hit_valid)
498
499 # Extract line, row and tag from request
500 comb += req_index.eq(get_index(i_in.nia))
501 comb += req_row.eq(get_row(i_in.nia))
502 comb += req_tag.eq(get_tag(real_addr))
503
504 # Calculate address of beginning of cache row, will be
505 # used for cache miss processing if needed
506 comb += req_laddr.eq(Cat(
507 Const(0, ROW_OFF_BITS),
508 real_addr[ROW_OFF_BITS:REAL_ADDR_BITS],
509 ))
510
511 # Test if pending request is a hit on any way
512 hitcond = Signal()
513 comb += hitcond.eq((r.state == State.WAIT_ACK)
514 & (req_index == r.store_index)
515 & r.rows_valid[req_row % ROW_PER_LINE]
516 )
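# (hitcond lets a request hit on the line currently being refilled,
# provided the row it needs has already arrived -- see the rows_valid
# updates in the WAIT_ACK state below)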
517 # i_in.req asserts Decoder active
518 cvb = Signal(NUM_WAYS)
519 ctag = Signal(TAG_RAM_WIDTH)
520 comb += ctag.eq(cache_tags[req_index])
521 comb += cvb.eq(cache_valids[req_index])
522 m.submodules.store_way_e = se = Decoder(NUM_WAYS)
523 comb += se.i.eq(r.store_way)
524 comb += se.n.eq(~i_in.req)
525 for i in range(NUM_WAYS):
526 tagi = Signal(TAG_BITS, name="tag_i%d" % i)
527 hit_test = Signal(name="hit_test%d" % i)
528 is_tag_hit = Signal(name="is_tag_hit_%d" % i)
529 comb += tagi.eq(read_tag(i, ctag))
530 comb += hit_test.eq(se.o[i])
531 comb += is_tag_hit.eq((cvb[i] | (hitcond & hit_test)) &
532 (tagi == req_tag))
533 with m.If(is_tag_hit):
534 comb += hit_way.eq(i)
535 comb += is_hit.eq(1)
536
537 # Generate the "hit" and "miss" signals
538 # for the synchronous blocks
539 with m.If(i_in.req & access_ok & ~flush_in):
540 comb += req_is_hit.eq(is_hit)
541 comb += req_is_miss.eq(~is_hit)
542
543 comb += req_hit_way.eq(hit_way)
544
545 # The way to replace on a miss
546 with m.If(r.state == State.CLR_TAG):
547 comb += replace_way.eq(plru_victim)
548 with m.Else():
549 comb += replace_way.eq(r.store_way)
550
551 # Output instruction from current cache row
552 #
553 # Note: This is a mild violation of our design principle of
554 # having pipeline stages output from a clean latch. In this
555 # case we output the result of a mux. The alternative would
557 be to output an entire row, which I prefer not to do just yet
557 # as it would force fetch2 to know about some of the cache
558 # geometry information.
559 comb += i_out.insn.eq(read_insn_word(r.hit_nia, cache_out_row))
560 comb += i_out.valid.eq(r.hit_valid)
561 comb += i_out.nia.eq(r.hit_nia)
562 comb += i_out.stop_mark.eq(r.hit_smark)
563 comb += i_out.fetch_failed.eq(r.fetch_failed)
564
565 # Stall fetch1 if we have a miss on cache or TLB
566 # or a protection fault
567 comb += stall_out.eq(~(is_hit & access_ok))
568
569 # Wishbone requests output (from the cache miss reload machine)
570 comb += bus.we.eq(r.wb.we)
571 comb += bus.adr.eq(r.wb.adr)
572 comb += bus.sel.eq(r.wb.sel)
573 comb += bus.stb.eq(r.wb.stb)
574 comb += bus.dat_w.eq(r.wb.dat)
575 comb += bus.cyc.eq(r.wb.cyc)
576
577 # Cache hit synchronous machine
578 def icache_hit(self, m, use_previous, r, req_is_hit, req_hit_way,
579 req_index, req_tag, real_addr):
580 sync = m.d.sync
581
582 i_in, stall_in = self.i_in, self.stall_in
583 flush_in = self.flush_in
584
585 # keep outputs to fetch2 unchanged on a stall
586 # except that flush or reset sets valid to 0
587 # If use_previous, keep the same data as last
588 # cycle and use the second half
589 with m.If(stall_in | use_previous):
590 with m.If(flush_in):
591 sync += r.hit_valid.eq(0)
592 with m.Else():
593 # On a hit, latch the request for the next cycle,
594 # when the BRAM data will be available on the
595 # cache_out output of the corresponding way
596 sync += r.hit_valid.eq(req_is_hit)
597
598 with m.If(req_is_hit):
599 sync += r.hit_way.eq(req_hit_way)
600 sync += Display("cache hit nia:%x IR:%x SM:%x idx:%x tag:%x "
601 "way:%x RA:%x", i_in.nia, i_in.virt_mode,
602 i_in.stop_mark, req_index, req_tag,
603 req_hit_way, real_addr)
604
605 with m.If(~stall_in):
606 # Send stop marks and NIA down regardless of validity
607 sync += r.hit_smark.eq(i_in.stop_mark)
608 sync += r.hit_nia.eq(i_in.nia)
609
610 def icache_miss_idle(self, m, r, req_is_miss, req_laddr,
611 req_index, req_tag, replace_way, real_addr):
612 comb = m.d.comb
613 sync = m.d.sync
614
615 i_in = self.i_in
616
617 # Reset per-row valid flags, only used in WAIT_ACK
618 for i in range(ROW_PER_LINE):
619 sync += r.rows_valid[i].eq(0)
620
621 # We need to read a cache line
622 with m.If(req_is_miss):
623 sync += Display(
624 "cache miss nia:%x IR:%x SM:%x idx:%x "
625 " way:%x tag:%x RA:%x", i_in.nia,
626 i_in.virt_mode, i_in.stop_mark, req_index,
627 replace_way, req_tag, real_addr)
628
629 # Keep track of our index and way for subsequent stores
630 st_row = Signal(ROW_BITS)
631 comb += st_row.eq(get_row(req_laddr))
632 sync += r.store_index.eq(req_index)
633 sync += r.store_row.eq(st_row)
634 sync += r.store_tag.eq(req_tag)
635 sync += r.store_valid.eq(1)
636 sync += r.end_row_ix.eq(get_row_of_line(st_row) - 1)
637
638 # Prep for first wishbone read. We calculate the address
639 # of the start of the cache line and start the WB cycle.
640 sync += r.req_adr.eq(req_laddr)
641 sync += r.wb.cyc.eq(1)
642 sync += r.wb.stb.eq(1)
643
644 # Track that we had one request sent
645 sync += r.state.eq(State.CLR_TAG)
646
647 def icache_miss_clr_tag(self, m, r, replace_way,
648 req_index,
649 cache_tags, cache_valids):
650 comb = m.d.comb
651 sync = m.d.sync
652
653 # Get victim way from plru
654 sync += r.store_way.eq(replace_way)
655
656 # Force misses on that way while reloading that line
657 cv = Signal(INDEX_BITS)
658 comb += cv.eq(cache_valids[req_index])
659 comb += cv.bit_select(replace_way, 1).eq(0)
660 sync += cache_valids[req_index].eq(cv)
661
662 for i in range(NUM_WAYS):
663 with m.If(i == replace_way):
664 tagset = Signal(TAG_RAM_WIDTH)
665 comb += tagset.eq(cache_tags[r.store_index])
666 comb += write_tag(i, tagset, r.store_tag)
667 sync += cache_tags[r.store_index].eq(tagset)
668
669 sync += r.state.eq(State.WAIT_ACK)
670
671 def icache_miss_wait_ack(self, m, r, replace_way, inval_in,
672 cache_valids, stbs_done):
673 comb = m.d.comb
674 sync = m.d.sync
675
676 bus = self.bus
677
678 # Requests are all sent if stb is 0
679 stbs_zero = Signal()
680 comb += stbs_zero.eq(r.wb.stb == 0)
681 comb += stbs_done.eq(stbs_zero)
682
683 # If we are still sending requests, was one accepted?
684 with m.If(~bus.stall & ~stbs_zero):
685 # That was the last word? We are done sending.
686 # Clear stb and set stbs_done so we can handle
687 # an eventual last ack on the same cycle.
688 with m.If(is_last_row_addr(r.req_adr, r.end_row_ix)):
689 sync += Display("IS_LAST_ROW_ADDR r.wb.addr:%x "
690 "r.end_row_ix:%x r.wb.stb:%x stbs_zero:%x "
691 "stbs_done:%x", r.wb.adr, r.end_row_ix,
692 r.wb.stb, stbs_zero, stbs_done)
693 sync += r.wb.stb.eq(0)
694 comb += stbs_done.eq(1)
695
696 # Calculate the next row address
697 rarange = Signal(LINE_OFF_BITS - ROW_OFF_BITS)
698 comb += rarange.eq(r.req_adr[ROW_OFF_BITS:LINE_OFF_BITS] + 1)
699 sync += r.req_adr[ROW_OFF_BITS:LINE_OFF_BITS].eq(rarange)
700 sync += Display("RARANGE r.req_adr:%x rarange:%x "
701 "stbs_zero:%x stbs_done:%x",
702 r.req_adr, rarange, stbs_zero, stbs_done)
703
704 # Incoming acks processing
705 with m.If(bus.ack):
706 sync += Display("WB_IN_ACK data:%x stbs_zero:%x "
707 "stbs_done:%x",
708 bus.dat_r, stbs_zero, stbs_done)
709
710 sync += r.rows_valid[r.store_row % ROW_PER_LINE].eq(1)
711
712 # Check for completion
713 with m.If(stbs_done & is_last_row(r.store_row, r.end_row_ix)):
714 # Complete wishbone cycle
715 sync += r.wb.cyc.eq(0)
716 # be nice, clear addr
717 sync += r.req_adr.eq(0)
718
719 # Cache line is now valid
720 cv = Signal(INDEX_BITS)
721 comb += cv.eq(cache_valids[r.store_index])
722 comb += cv.bit_select(replace_way, 1).eq(
723 r.store_valid & ~inval_in)
724 sync += cache_valids[r.store_index].eq(cv)
725
726 sync += r.state.eq(State.IDLE)
727
728 # move on to next request in row
729 # Increment store row counter
730 sync += r.store_row.eq(next_row(r.store_row))
731
732 # Cache miss/reload synchronous machine
733 def icache_miss(self, m, r, req_is_miss,
734 req_index, req_laddr, req_tag, replace_way,
735 cache_tags, cache_valids, access_ok, real_addr):
736 comb = m.d.comb
737 sync = m.d.sync
738
739 i_in, bus, m_in = self.i_in, self.bus, self.m_in
740 stall_in, flush_in = self.stall_in, self.flush_in
741 inval_in = self.inval_in
742
743 stbs_done = Signal()
744
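# reloads always fetch whole 64-bit rows, so sel is driven all-ones and
# the 3 byte-offset bits of the row address are dropped to form the
# word address put on the (64-bit, byte-granular) wishbone bus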
745 comb += r.wb.sel.eq(-1)
746 comb += r.wb.adr.eq(r.req_adr[3:])
747
748 # Process cache invalidations
749 with m.If(inval_in):
750 for i in range(NUM_LINES):
751 sync += cache_valids[i].eq(0)
752 sync += r.store_valid.eq(0)
753
754 # Main state machine
755 with m.Switch(r.state):
756
757 with m.Case(State.IDLE):
758 self.icache_miss_idle(m, r, req_is_miss, req_laddr,
759 req_index, req_tag, replace_way,
760 real_addr)
761
762 with m.Case(State.CLR_TAG, State.WAIT_ACK):
763 with m.If(r.state == State.CLR_TAG):
764 self.icache_miss_clr_tag(m, r, replace_way,
765 req_index,
766 cache_tags, cache_valids)
767
768 self.icache_miss_wait_ack(m, r, replace_way, inval_in,
769 cache_valids, stbs_done)
770
771 # TLB miss and protection fault processing
772 with m.If(flush_in | m_in.tlbld):
773 sync += r.fetch_failed.eq(0)
774 with m.Elif(i_in.req & ~access_ok & ~stall_in):
775 sync += r.fetch_failed.eq(1)
776
777 # icache_log: if LOG_LENGTH > 0 generate
778 def icache_log(self, m, req_hit_way, ra_valid, access_ok,
779 req_is_miss, req_is_hit, lway, wstate, r):
780 comb = m.d.comb
781 sync = m.d.sync
782
783 bus, i_out = self.bus, self.i_out
784 log_out, stall_out = self.log_out, self.stall_out
785
786 # Output data to logger
787 for i in range(LOG_LENGTH):
788 log_data = Signal(54)
789 lway = Signal(WAY_BITS)
790 wstate = Signal()
791
792 sync += lway.eq(req_hit_way)
793 sync += wstate.eq(0)
794
795 with m.If(r.state != State.IDLE):
796 sync += wstate.eq(1)
797
798 sync += log_data.eq(Cat(
799 ra_valid, access_ok, req_is_miss, req_is_hit,
800 lway, wstate, r.hit_nia[2:6], r.fetch_failed,
801 stall_out, bus.stall, r.wb.cyc, r.wb.stb,
802 r.real_addr[3:6], bus.ack, i_out.insn, i_out.valid
803 ))
804 comb += log_out.eq(log_data)
805
806 def elaborate(self, platform):
807
808 m = Module()
809 comb = m.d.comb
810
811 # Storage. Hopefully "cache_rows" is a BRAM, the rest is LUTs
812 cache_tags = CacheTagArray()
813 cache_valids = CacheValidsArray()
814
815 # TLB Array
816 itlb = TLBArray()
817 itlb_valid = TLBValidArray()
818
819 # TODO to be passed to nmigen as ram attributes
820 # attribute ram_style of itlb_tags : signal is "distributed";
821 # attribute ram_style of itlb_ptes : signal is "distributed";
822
823 # Privilege bit from PTE EAA field
824 eaa_priv = Signal()
825
826 r = RegInternal()
827
828 # Async signal on incoming request
829 req_index = Signal(INDEX_BITS)
830 req_row = Signal(ROW_BITS)
831 req_hit_way = Signal(WAY_BITS)
832 req_tag = Signal(TAG_BITS)
833 req_is_hit = Signal()
834 req_is_miss = Signal()
835 req_laddr = Signal(64)
836
837 tlb_req_index = Signal(TLB_BITS)
838 real_addr = Signal(REAL_ADDR_BITS)
839 ra_valid = Signal()
840 priv_fault = Signal()
841 access_ok = Signal()
842 use_previous = Signal()
843
844 cache_out_row = Signal(ROW_SIZE_BITS)
845
846 plru_victim = Signal(WAY_BITS)
847 replace_way = Signal(WAY_BITS)
848
849 self.tlbmem = Memory(depth=TLB_SIZE, width=TLB_EA_TAG_BITS+TLB_PTE_BITS)
850
851 # call sub-functions putting everything together,
852 # using shared signals established above
853 self.rams(m, r, cache_out_row, use_previous, replace_way, req_row)
854 self.maybe_plrus(m, r, plru_victim)
855 self.itlb_lookup(m, tlb_req_index, itlb, itlb_valid, real_addr,
856 ra_valid, eaa_priv, priv_fault,
857 access_ok)
858 self.itlb_update(m, itlb, itlb_valid)
859 self.icache_comb(m, use_previous, r, req_index, req_row, req_hit_way,
860 req_tag, real_addr, req_laddr,
861 cache_tags, cache_valids,
862 access_ok, req_is_hit, req_is_miss,
863 replace_way, plru_victim, cache_out_row)
864 self.icache_hit(m, use_previous, r, req_is_hit, req_hit_way,
865 req_index, req_tag, real_addr)
866 self.icache_miss(m, r, req_is_miss, req_index,
867 req_laddr, req_tag, replace_way,
868 cache_tags, cache_valids,
869 access_ok, real_addr)
870 #self.icache_log(m, log_out, req_hit_way, ra_valid, access_ok,
871 # req_is_miss, req_is_hit, lway, wstate, r)
872
873 # don't connect up to FetchUnitInterface so that some unit tests
874 # can continue to operate
875 if not self.use_fetch_iface:
876 return m
877
878 # connect to FetchUnitInterface. FetchUnitInterface is undocumented
879 # so needs checking and iterative revising
880 i_in, bus, i_out = self.i_in, self.bus, self.i_out
881 comb += i_in.req.eq(self.a_i_valid)
882 comb += i_in.nia.eq(self.a_pc_i)
883 comb += self.stall_in.eq(self.a_stall_i)
884 comb += self.f_fetch_err_o.eq(i_out.fetch_failed)
885 comb += self.f_badaddr_o.eq(i_out.nia)
886 comb += self.f_instr_o.eq(i_out.insn)
887 comb += self.f_busy_o.eq(~i_out.valid) # probably
888
889 # TODO, connect icache wb_in/wb_out to "standard" nmigen Wishbone bus
890 ibus = self.ibus
891 comb += ibus.adr.eq(self.bus.adr)
892 comb += ibus.dat_w.eq(self.bus.dat_w)
893 comb += ibus.sel.eq(self.bus.sel)
894 comb += ibus.cyc.eq(self.bus.cyc)
895 comb += ibus.stb.eq(self.bus.stb)
896 comb += ibus.we.eq(self.bus.we)
897
898 comb += self.bus.dat_r.eq(ibus.dat_r)
899 comb += self.bus.ack.eq(ibus.ack)
900 if hasattr(ibus, "stall"):
901 comb += self.bus.stall.eq(ibus.stall)
902 else:
903 # fake-up the wishbone stall signal to comply with pipeline mode
904 # same thing is done in dcache.py
905 comb += self.bus.stall.eq(self.bus.cyc & ~self.bus.ack)
906
907 return m
908
909
910 def icache_sim(dut):
911 i_in = dut.i_in
912 i_out = dut.i_out
913 m_out = dut.m_in
914
915 yield i_in.priv_mode.eq(1)
916 yield i_in.req.eq(0)
917 yield i_in.nia.eq(0)
918 yield i_in.stop_mark.eq(0)
919 yield m_out.tlbld.eq(0)
920 yield m_out.tlbie.eq(0)
921 yield m_out.addr.eq(0)
922 yield m_out.pte.eq(0)
923 yield
924 yield
925 yield
926 yield
927
928 # miss, stalls for a bit
929 yield i_in.req.eq(1)
930 yield i_in.nia.eq(Const(0x0000000000000004, 64))
931 yield
932 valid = yield i_out.valid
933 while not valid:
934 yield
935 valid = yield i_out.valid
936 yield i_in.req.eq(0)
937
938 insn = yield i_out.insn
939 nia = yield i_out.nia
940 assert insn == 0x00000001, \
941 "insn @%x=%x expected 00000001" % (nia, insn)
942 yield i_in.req.eq(0)
943 yield
944
945 # hit
946 yield i_in.req.eq(1)
947 yield i_in.nia.eq(Const(0x0000000000000008, 64))
948 yield
949 valid = yield i_out.valid
950 while not valid:
951 yield
952 valid = yield i_out.valid
953 yield i_in.req.eq(0)
954
955 nia = yield i_out.nia
956 insn = yield i_out.insn
957 yield
958 assert insn == 0x00000002, \
959 "insn @%x=%x expected 00000002" % (nia, insn)
960
961 # another miss
962 yield i_in.req.eq(1)
963 yield i_in.nia.eq(Const(0x0000000000000040, 64))
964 yield
965 valid = yield i_out.valid
966 while not valid:
967 yield
968 valid = yield i_out.valid
969 yield i_in.req.eq(0)
970
971 nia = yield i_in.nia
972 insn = yield i_out.insn
973 assert insn == 0x00000010, \
974 "insn @%x=%x expected 00000010" % (nia, insn)
975
976 # test something that aliases (this only works because
977 # the unit test SRAM has a depth of 512)
978 yield i_in.req.eq(1)
979 yield i_in.nia.eq(Const(0x0000000000000100, 64))
980 yield
981 yield
982 valid = yield i_out.valid
983 assert not valid
984 for i in range(30):
985 yield
986 yield
987 insn = yield i_out.insn
988 valid = yield i_out.valid
989 insn = yield i_out.insn
990 assert valid
991 assert insn == 0x00000040, \
992 "insn @%x=%x expected 00000040" % (nia, insn)
993 yield i_in.req.eq(0)
994
995
996 def test_icache(mem):
997 from soc.config.test.test_loadstore import TestMemPspec
998 pspec = TestMemPspec(addr_wid=32,
999 mask_wid=8,
1000 reg_wid=64,
1001 )
1002 dut = ICache(pspec)
1003
1004 memory = Memory(width=64, depth=512, init=mem)
1005 sram = SRAM(memory=memory, granularity=8)
1006
1007 m = Module()
1008
1009 m.submodules.icache = dut
1010 m.submodules.sram = sram
1011
1012 m.d.comb += sram.bus.cyc.eq(dut.bus.cyc)
1013 m.d.comb += sram.bus.stb.eq(dut.bus.stb)
1014 m.d.comb += sram.bus.we.eq(dut.bus.we)
1015 m.d.comb += sram.bus.sel.eq(dut.bus.sel)
1016 m.d.comb += sram.bus.adr.eq(dut.bus.adr)
1017 m.d.comb += sram.bus.dat_w.eq(dut.bus.dat_w)
1018
1019 m.d.comb += dut.bus.ack.eq(sram.bus.ack)
1020 m.d.comb += dut.bus.dat_r.eq(sram.bus.dat_r)
1021
1022 # nmigen Simulation
1023 sim = Simulator(m)
1024 sim.add_clock(1e-6)
1025
1026 sim.add_sync_process(wrap(icache_sim(dut)))
1027 with sim.write_vcd('test_icache.vcd'):
1028 sim.run()
1029
1030
1031 if __name__ == '__main__':
1032 from soc.config.test.test_loadstore import TestMemPspec
1033 pspec = TestMemPspec(addr_wid=64,
1034 mask_wid=8,
1035 reg_wid=64,
1036 )
1037 dut = ICache(pspec)
1038 vl = rtlil.convert(dut, ports=[])
1039 with open("test_icache.il", "w") as f:
1040 f.write(vl)
1041
1042 # set up memory so that every 32-bit word holds incrementing values 0 1 2 ...
1043 mem = []
1044 for i in range(512):
1045 mem.append((i*2) | ((i*2+1)<<32))
1046
1047 test_icache(mem)