1 """ICache
2
3 based on Anton Blanchard microwatt icache.vhdl
4
5 Set associative icache
6
7 TODO (in no specific order):
8 * Add debug interface to inspect cache content
9 * Add snoop/invalidate path
10 * Add multi-hit error detection
11 * Pipelined bus interface (wb or axi)
12 * Maybe add parity? There are a few bits free in each BRAM row on Xilinx
13 * Add optimization: service hits on partially loaded lines
14 * Add optimization: (maybe) interrupt reload on flush/redirect
15 * Check if playing with the geometry of the cache tags allows for more
16 efficient use of distributed RAM and less logic/muxes. Currently we
17 write TAG_BITS width which may not match full ram blocks and might
18 cause muxes to be inferred for "partial writes".
19 * Check if making the read size of PLRU a ROM helps utilization
20
21 Links:
22
23 * https://bugs.libre-soc.org/show_bug.cgi?id=485
24 * https://libre-soc.org/irclog-microwatt/%23microwatt.2021-12-07.log.html
25 (discussion about brams for ECP5)
26
27 """
28
29 from enum import (Enum, unique)
30 from nmigen import (Module, Signal, Elaboratable, Cat, Array, Const, Repl,
31 Record)
32 from nmigen.cli import main, rtlil
33 from nmutil.iocontrol import RecordObject
34 from nmigen.utils import log2_int
35 from nmigen.lib.coding import Decoder
36 from nmutil.util import Display
37
38 #from nmutil.plru import PLRU
39 from soc.experiment.plru import PLRU, PLRUs
40 from soc.experiment.cache_ram import CacheRam
41
42 from soc.experiment.mem_types import (Fetch1ToICacheType,
43 ICacheToDecode1Type,
44 MMUToICacheType)
45
46 from soc.experiment.wb_types import (WB_ADDR_BITS, WB_DATA_BITS,
47 WB_SEL_BITS, WBAddrType, WBDataType,
48 WBSelType, WBMasterOut, WBSlaveOut,
49 )
50
51 from nmigen_soc.wishbone.bus import Interface
52 from soc.minerva.units.fetch import FetchUnitInterface
53
54
55 # for test
56 from soc.bus.sram import SRAM
57 from nmigen import Memory
58 from nmutil.util import wrap
59 from nmigen.cli import main, rtlil
60
61 # NOTE: to use cxxsim, export NMIGEN_SIM_MODE=cxxsim from the shell
62 # Also, check out the cxxsim nmigen branch, and latest yosys from git
63 from nmutil.sim_tmp_alternative import Simulator, Settle
64
65
66 SIM = 0
67 LINE_SIZE = 64
68 # BRAM organisation: We never access more than wishbone_data_bits
69 # at a time so to save resources we make the array only that wide,
70 # and use consecutive indices to make a cache "line"
71 #
72 # ROW_SIZE is the width in bytes of the BRAM (based on WB, so 64-bits)
73 ROW_SIZE = WB_DATA_BITS // 8
74 # Number of lines in a set
75 NUM_LINES = 64
76 # Number of ways
77 NUM_WAYS = 2
78 # L1 ITLB number of entries (direct mapped)
79 TLB_SIZE = 64
80 # L1 ITLB log_2(page_size)
81 TLB_LG_PGSZ = 12
82 # Number of real address bits that we store
83 REAL_ADDR_BITS = 56
84 # Non-zero to enable log data collection
85 LOG_LENGTH = 0
86
87 ROW_SIZE_BITS = ROW_SIZE * 8
88 # ROW_PER_LINE is the number of row (wishbone) transactions in a line
89 ROW_PER_LINE = LINE_SIZE // ROW_SIZE
90 # BRAM_ROWS is the number of rows in BRAM needed to represent the full icache
91 BRAM_ROWS = NUM_LINES * ROW_PER_LINE
92 # INSN_PER_ROW is the number of 32bit instructions per BRAM row
93 INSN_PER_ROW = ROW_SIZE_BITS // 32
94
95 # Bit fields counts in the address
96 #
97 # INSN_BITS is the number of bits to select an instruction in a row
98 INSN_BITS = log2_int(INSN_PER_ROW)
99 # ROW_BITS is the number of bits to select a row
100 ROW_BITS = log2_int(BRAM_ROWS)
101 # ROW_LINE_BITS is the number of bits to select a row within a line
102 ROW_LINE_BITS = log2_int(ROW_PER_LINE)
103 # LINE_OFF_BITS is the number of bits for the offset in a cache line
104 LINE_OFF_BITS = log2_int(LINE_SIZE)
105 # ROW_OFF_BITS is the number of bits for the offset in a row
106 ROW_OFF_BITS = log2_int(ROW_SIZE)
107 # INDEX_BITS is the number of bits to select a cache line
108 INDEX_BITS = log2_int(NUM_LINES)
109 # SET_SIZE_BITS is the log base 2 of the set size
110 SET_SIZE_BITS = LINE_OFF_BITS + INDEX_BITS
111 # TAG_BITS is the number of bits of the tag part of the address
112 TAG_BITS = REAL_ADDR_BITS - SET_SIZE_BITS
113 # TAG_WIDTH is the width in bits of each way of the tag RAM
114 TAG_WIDTH = TAG_BITS + 7 - ((TAG_BITS + 7) % 8)
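# (illustrative note, not from the original: this rounds TAG_BITS up to the
#  next multiple of 8, e.g. TAG_BITS=44 gives TAG_WIDTH=48, so that each
#  way's tag can fill whole bytes of a RAM row)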
115
116 # WAY_BITS is the number of bits to select a way
117 WAY_BITS = log2_int(NUM_WAYS)
118 TAG_RAM_WIDTH = TAG_BITS * NUM_WAYS
119
120 # L1 ITLB
121 TLB_BITS = log2_int(TLB_SIZE)
122 TLB_EA_TAG_BITS = 64 - (TLB_LG_PGSZ + TLB_BITS)
123 TLB_PTE_BITS = 64
124
125 print("BRAM_ROWS =", BRAM_ROWS)
126 print("INDEX_BITS =", INDEX_BITS)
127 print("INSN_BITS =", INSN_BITS)
128 print("INSN_PER_ROW =", INSN_PER_ROW)
129 print("LINE_SIZE =", LINE_SIZE)
130 print("LINE_OFF_BITS =", LINE_OFF_BITS)
131 print("LOG_LENGTH =", LOG_LENGTH)
132 print("NUM_LINES =", NUM_LINES)
133 print("NUM_WAYS =", NUM_WAYS)
134 print("REAL_ADDR_BITS =", REAL_ADDR_BITS)
135 print("ROW_BITS =", ROW_BITS)
136 print("ROW_OFF_BITS =", ROW_OFF_BITS)
137 print("ROW_LINE_BITS =", ROW_LINE_BITS)
138 print("ROW_PER_LINE =", ROW_PER_LINE)
139 print("ROW_SIZE =", ROW_SIZE)
140 print("ROW_SIZE_BITS =", ROW_SIZE_BITS)
141 print("SET_SIZE_BITS =", SET_SIZE_BITS)
142 print("SIM =", SIM)
143 print("TAG_BITS =", TAG_BITS)
144 print("TAG_RAM_WIDTH =", TAG_RAM_WIDTH)
145 print("TAG_WIDTH =", TAG_WIDTH)
146 print("TLB_BITS =", TLB_BITS)
147 print("TLB_EA_TAG_BITS =", TLB_EA_TAG_BITS)
148 print("TLB_LG_PGSZ =", TLB_LG_PGSZ)
149 print("TLB_PTE_BITS =", TLB_PTE_BITS)
150 print("TLB_SIZE =", TLB_SIZE)
151 print("WAY_BITS =", WAY_BITS)
152
153 # from microwatt/utils.vhdl
154 def ispow2(n):
155 return n != 0 and (n & (n - 1)) == 0
156
157 assert LINE_SIZE % ROW_SIZE == 0
158 assert ispow2(LINE_SIZE), "LINE_SIZE not power of 2"
159 assert ispow2(NUM_LINES), "NUM_LINES not power of 2"
160 assert ispow2(ROW_PER_LINE), "ROW_PER_LINE not power of 2"
161 assert ispow2(INSN_PER_ROW), "INSN_PER_ROW not power of 2"
162 assert (ROW_BITS == (INDEX_BITS + ROW_LINE_BITS)), \
163 "geometry bits don't add up"
164 assert (LINE_OFF_BITS == (ROW_OFF_BITS + ROW_LINE_BITS)), \
165 "geometry bits don't add up"
166 assert (REAL_ADDR_BITS == (TAG_BITS + INDEX_BITS + LINE_OFF_BITS)), \
167 "geometry bits don't add up"
168 assert (REAL_ADDR_BITS == (TAG_BITS + ROW_BITS + ROW_OFF_BITS)), \
169 "geometry bits don't add up"
170
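# Worked example (illustrative, not in the original): with the default
# geometry above (LINE_SIZE=64, ROW_SIZE=8, NUM_LINES=64, NUM_WAYS=2,
# REAL_ADDR_BITS=56) the derived constants come out as below; these asserts
# simply restate the geometry checks with concrete numbers.
if (LINE_SIZE, ROW_SIZE, NUM_LINES, NUM_WAYS, REAL_ADDR_BITS) == \
        (64, 8, 64, 2, 56):
    assert ROW_PER_LINE == 8 and BRAM_ROWS == 512 and INSN_PER_ROW == 2
    assert ROW_OFF_BITS == 3 and ROW_LINE_BITS == 3 and LINE_OFF_BITS == 6
    assert INDEX_BITS == 6 and ROW_BITS == 9 and SET_SIZE_BITS == 12
    assert TAG_BITS == 44 and WAY_BITS == 1
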
171 # Example of layout for 32 lines of 64 bytes:
172 #
173 # .. tag |index| line |
174 # .. | row | |
175 # .. | | | |00| zero (2)
176 # .. | | |-| | INSN_BITS (1)
177 # .. | |---| | ROW_LINE_BITS (3)
178 # .. | |--- - --| LINE_OFF_BITS (6)
179 # .. | |- --| ROW_OFF_BITS (3)
180 # .. |----- ---| | ROW_BITS (8)
181 # .. |-----| | INDEX_BITS (5)
182 # .. --------| | TAG_BITS (53)
183
184 # The cache data BRAM organized as described above for each way
185 #subtype cache_row_t is std_ulogic_vector(ROW_SIZE_BITS-1 downto 0);
186 #
187 # The cache tags LUTRAM has a row per set. Vivado is a pain and will
188 # not handle a clean (commented) definition of the cache tags as a 3d
189 # memory. For now, work around it by putting all of a line's tags (one per way) in a single signal.
190 def CacheTagArray():
191 tag_layout = [('valid', NUM_WAYS),
192 ('tag', TAG_RAM_WIDTH),
193 ]
194 return Array(Record(tag_layout, name="tag%d" % x) for x in range(NUM_LINES))
195
196 def RowPerLineValidArray():
197 return Array(Signal(name="rows_valid_%d" %x) \
198 for x in range(ROW_PER_LINE))
199
200
201 # TODO to be passed to nmigen as ram attributes
202 # attribute ram_style : string;
203 # attribute ram_style of cache_tags : signal is "distributed";
204
205 def TLBValidArray():
206 return Array(Signal(name="tlb_valid%d" % x)
207 for x in range(TLB_SIZE))
208
209 def TLBArray():
210 tlb_layout = [
211 ('tag', TLB_EA_TAG_BITS),
212 ('pte', TLB_PTE_BITS)
213 ]
214 return Array(Record(tlb_layout, name="tlb%d" % x) for x in range(TLB_SIZE))
215
216 # PLRU output interface
217 def PLRUOut():
218 return Array(Signal(WAY_BITS, name="plru_out_%d" %x) \
219 for x in range(NUM_LINES))
220
221 # Return the cache line index (tag index) for an address
222 def get_index(addr):
223 return addr[LINE_OFF_BITS:SET_SIZE_BITS]
224
225 # Return the cache row index (data memory) for an address
226 def get_row(addr):
227 return addr[ROW_OFF_BITS:SET_SIZE_BITS]
228
229 # Return the index of a row within a line
230 def get_row_of_line(row):
231 return row[:ROW_BITS][:ROW_LINE_BITS]
232
233 # Returns whether the given address lies in the last row of a line
234 def is_last_row_addr(addr, last):
235 return addr[ROW_OFF_BITS:LINE_OFF_BITS] == last
236
237 # Returns whether this is the last row of a line
238 def is_last_row(row, last):
239 return get_row_of_line(row) == last
240
241 # Return the next row in the current cache line. We use a dedicated
242 # function in order to limit the size of the generated adder to be
243 # only the bits within a cache line (3 bits with default settings)
244 def next_row(row):
245 row_v = row[0:ROW_LINE_BITS] + 1
246 return Cat(row_v[:ROW_LINE_BITS], row[ROW_LINE_BITS:])
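# (example, illustrative: with ROW_LINE_BITS=3 a row value of 0b000_111
#  increments to 0b000_000 -- the upper line-index bits pass through
#  unchanged, so only a 3-bit adder is generated)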
247
248 # Read the instruction word for the given address
249 # in the current cache row
250 def read_insn_word(addr, data):
251 word = addr[2:INSN_BITS+2]
252 return data.word_select(word, 32)
253
254 # Get the tag value from the address
255 def get_tag(addr):
256 return addr[SET_SIZE_BITS:REAL_ADDR_BITS]
257
258 # Read a tag from a tag memory row
259 def read_tag(way, tagset):
260 return tagset.word_select(way, TAG_BITS)  # TAG_RAM_WIDTH packs TAG_BITS per way
261
262 # Write a tag to tag memory row
263 def write_tag(way, tagset, tag):
264 return read_tag(way, tagset).eq(tag)
265
266 # Simple hash for direct-mapped TLB index
267 def hash_ea(addr):
268 hsh = (addr[TLB_LG_PGSZ:TLB_LG_PGSZ + TLB_BITS] ^
269 addr[TLB_LG_PGSZ + TLB_BITS:TLB_LG_PGSZ + 2 * TLB_BITS ] ^
270 addr[TLB_LG_PGSZ + 2 * TLB_BITS:TLB_LG_PGSZ + 3 * TLB_BITS])
271 return hsh
272
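# Illustrative software model of hash_ea (not part of the original code):
# the same XOR-fold expressed on a plain Python integer, handy for checking
# the hardware hash in a unit test.  The helper name is hypothetical and
# the function is not used by the module itself.
def _hash_ea_model(addr):
    mask = (1 << TLB_BITS) - 1
    return ((addr >> TLB_LG_PGSZ) ^
            (addr >> (TLB_LG_PGSZ + TLB_BITS)) ^
            (addr >> (TLB_LG_PGSZ + 2 * TLB_BITS))) & mask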
273
274 # Cache reload state machine
275 @unique
276 class State(Enum):
277 IDLE = 0
278 CLR_TAG = 1
279 WAIT_ACK = 2
280
281
282 class RegInternal(RecordObject):
283 def __init__(self):
284 super().__init__()
285 # Cache hit state (Latches for 1 cycle BRAM access)
286 self.hit_way = Signal(WAY_BITS)
287 self.hit_nia = Signal(64)
288 self.hit_smark = Signal()
289 self.hit_valid = Signal()
290
291 # Cache miss state (reload state machine)
292 self.state = Signal(State, reset=State.IDLE)
293 self.wb = WBMasterOut("wb")
294 self.req_adr = Signal(64)
295 self.store_way = Signal(WAY_BITS)
296 self.store_index = Signal(INDEX_BITS)
297 self.store_row = Signal(ROW_BITS)
298 self.store_tag = Signal(TAG_BITS)
299 self.store_valid = Signal()
300 self.end_row_ix = Signal(ROW_LINE_BITS)
301 self.rows_valid = RowPerLineValidArray()
302
303 # TLB miss state
304 self.fetch_failed = Signal()
305
306
307 class ICache(FetchUnitInterface, Elaboratable):
308 """64 bit set associative icache. All instructions are 4B aligned."""
309 def __init__(self, pspec):
310 FetchUnitInterface.__init__(self, pspec)
311 self.i_in = Fetch1ToICacheType(name="i_in")
312 self.i_out = ICacheToDecode1Type(name="i_out")
313
314 self.m_in = MMUToICacheType(name="m_in")
315
316 self.stall_in = Signal()
317 self.stall_out = Signal()
318 self.flush_in = Signal()
319 self.inval_in = Signal()
320
321 # standard naming (wired to non-standard for compatibility)
322 self.bus = Interface(addr_width=32,
323 data_width=64,
324 granularity=8,
325 features={'stall'},
326 alignment=0,
327 name="icache_wb")
328
329 self.log_out = Signal(54)
330
331 # use FetchUnitInterface, helps keep some unit tests running
332 self.use_fetch_iface = False
333
334 def use_fetch_interface(self):
335 self.use_fetch_iface = True
336
337 # Generate a cache RAM for each way
338 def rams(self, m, r, cache_out_row, use_previous,
339 replace_way, req_row):
340
341 comb = m.d.comb
342 sync = m.d.sync
343
344 bus, stall_in = self.bus, self.stall_in
345
346 # read condition (for every cache ram)
347 do_read = Signal()
348 comb += do_read.eq(~(stall_in | use_previous))
349
350 rd_addr = Signal(ROW_BITS)
351 wr_addr = Signal(ROW_BITS)
352 comb += rd_addr.eq(req_row)
353 comb += wr_addr.eq(r.store_row)
354
355 # binary-to-unary converters: replace-way enabled by bus.ack,
356 # hit-way left permanently enabled
357 m.submodules.replace_way_e = re = Decoder(NUM_WAYS)
358 m.submodules.hit_way_e = he = Decoder(NUM_WAYS)
359 comb += re.i.eq(replace_way)
360 comb += re.n.eq(~bus.ack)
361 comb += he.i.eq(r.hit_way)
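# (example, illustrative: with NUM_WAYS=2, replace_way=1 and bus.ack high,
#  re.o becomes 0b10 and selects way 1 for the refill write; while bus.ack
#  is low, re.n forces re.o to 0 so no way is written)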
362
363 for i in range(NUM_WAYS):
364 do_write = Signal(name="do_wr_%d" % i)
365 d_out = Signal(ROW_SIZE_BITS, name="d_out_%d" % i)
366 wr_sel = Signal(ROW_SIZE, name="wr_sel_%d" % i)
367
368 way = CacheRam(ROW_BITS, ROW_SIZE_BITS, TRACE=True, ram_num=i)
369 m.submodules["cacheram_%d" % i] = way
370
371 comb += way.rd_en.eq(do_read)
372 comb += way.rd_addr.eq(rd_addr)
373 comb += d_out.eq(way.rd_data_o)
374 comb += way.wr_sel.eq(wr_sel)
375 comb += way.wr_addr.eq(wr_addr)
376 comb += way.wr_data.eq(bus.dat_r)
377
378 comb += do_write.eq(re.o[i])
379
380 with m.If(do_write):
381 sync += Display("cache write adr: %x data: %lx",
382 wr_addr, way.wr_data)
383
384 with m.If(he.o[i]):
385 comb += cache_out_row.eq(d_out)
386 with m.If(do_read):
387 sync += Display("cache read adr: %x data: %x",
388 req_row, d_out)
389
390 comb += wr_sel.eq(Repl(do_write, ROW_SIZE))
391
392 # Generate PLRUs
393 def maybe_plrus(self, m, r, plru_victim):
394 comb = m.d.comb
395
396 if NUM_WAYS <= 1:  # no PLRU needed for a single way
397 return
398
399
400 m.submodules.plrus = plru = PLRUs(NUM_LINES, WAY_BITS)
401 comb += plru.way.eq(r.hit_way)
402 comb += plru.valid.eq(r.hit_valid)
403 comb += plru.index.eq(get_index(r.hit_nia))
404 comb += plru.isel.eq(r.store_index) # select victim
405 comb += plru_victim.eq(plru.o_index) # selected victim
406
407 # TLB hit detection and real address generation
408 def itlb_lookup(self, m, tlb_req_index, itlb, itlb_valid,
409 real_addr, ra_valid, eaa_priv,
410 priv_fault, access_ok):
411
412 comb = m.d.comb
413
414 i_in = self.i_in
415
416 pte = Signal(TLB_PTE_BITS)
417 ttag = Signal(TLB_EA_TAG_BITS)
418
419 comb += tlb_req_index.eq(hash_ea(i_in.nia))
420 comb += pte.eq(itlb[tlb_req_index].pte)
421 comb += ttag.eq(itlb[tlb_req_index].tag)
422
423 with m.If(i_in.virt_mode):
424 comb += real_addr.eq(Cat(i_in.nia[:TLB_LG_PGSZ],
425 pte[TLB_LG_PGSZ:REAL_ADDR_BITS]))
426
427 with m.If(ttag == i_in.nia[TLB_LG_PGSZ + TLB_BITS:64]):
428 comb += ra_valid.eq(itlb_valid[tlb_req_index])
429
430 comb += eaa_priv.eq(pte[3])
431
432 with m.Else():
433 comb += real_addr.eq(i_in.nia[:REAL_ADDR_BITS])
434 comb += ra_valid.eq(1)
435 comb += eaa_priv.eq(1)
436
437 # No IAMR, so no KUEP support for now
438 comb += priv_fault.eq(eaa_priv & ~i_in.priv_mode)
439 comb += access_ok.eq(ra_valid & ~priv_fault)
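# (illustrative note: in virtual mode the low TLB_LG_PGSZ=12 bits of the
#  real address are the page offset taken straight from i_in.nia and the
#  upper bits come from the cached PTE; in real mode the nia is used
#  directly and eaa_priv is forced to 1, so only privileged code may fetch)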
440
441 # iTLB update
442 def itlb_update(self, m, itlb, itlb_valid):
443 comb = m.d.comb
444 sync = m.d.sync
445
446 m_in = self.m_in
447
448 wr_index = Signal(TLB_BITS)
449 comb += wr_index.eq(hash_ea(m_in.addr))
450
451 with m.If(m_in.tlbie & m_in.doall):
452 # Clear all valid bits
453 for i in range(TLB_SIZE):
454 sync += itlb_valid[i].eq(0)
455
456 with m.Elif(m_in.tlbie):
457 # Clear entry regardless of hit or miss
458 sync += itlb_valid[wr_index].eq(0)
459
460 with m.Elif(m_in.tlbld):
461 sync += itlb[wr_index].tag.eq(m_in.addr[TLB_LG_PGSZ + TLB_BITS:64])
462 sync += itlb[wr_index].pte.eq(m_in.pte)
463 sync += itlb_valid[wr_index].eq(1)
464
465 # Cache hit detection, output to fetch2 and other misc logic
466 def icache_comb(self, m, use_previous, r, req_index, req_row,
467 req_hit_way, req_tag, real_addr, req_laddr,
468 cache_tags, access_ok,
469 req_is_hit, req_is_miss, replace_way,
470 plru_victim, cache_out_row):
471
472 comb = m.d.comb
473
474 i_in, i_out, bus = self.i_in, self.i_out, self.bus
475 flush_in, stall_out = self.flush_in, self.stall_out
476
477 is_hit = Signal()
478 hit_way = Signal(WAY_BITS)
479
480 # i_in.sequential means that i_in.nia this cycle is 4 more than
481 # last cycle. If we read more than 32 bits at a time, had a
482 # cache hit last cycle, and we don't want the first 32-bit chunk
483 # then we can keep the data we read last cycle and just use that.
484 with m.If(i_in.nia[2:INSN_BITS+2] != 0):
485 comb += use_previous.eq(i_in.sequential & r.hit_valid)
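# (example, illustrative: with INSN_PER_ROW=2, a sequential fetch of the
#  odd word of a row re-uses the row already read for the even word on the
#  previous cycle, so the BRAM does not need to be read again)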
486
487 # Extract line, row and tag from request
488 comb += req_index.eq(get_index(i_in.nia))
489 comb += req_row.eq(get_row(i_in.nia))
490 comb += req_tag.eq(get_tag(real_addr))
491
492 # Calculate address of beginning of cache row, will be
493 # used for cache miss processing if needed
494 comb += req_laddr.eq(Cat(
495 Const(0, ROW_OFF_BITS),
496 real_addr[ROW_OFF_BITS:REAL_ADDR_BITS],
497 ))
498
499 # Test if pending request is a hit on any way
500 hitcond = Signal()
501 comb += hitcond.eq((r.state == State.WAIT_ACK)
502 & (req_index == r.store_index)
503 & r.rows_valid[req_row % ROW_PER_LINE]
504 )
505 # the store-way Decoder is only active while i_in.req is asserted
506 cvb = Signal(NUM_WAYS)
507 ctag = Signal(TAG_RAM_WIDTH)
508 comb += ctag.eq(cache_tags[req_index].tag)
509 comb += cvb.eq(cache_tags[req_index].valid)
510 m.submodules.store_way_e = se = Decoder(NUM_WAYS)
511 comb += se.i.eq(r.store_way)
512 comb += se.n.eq(~i_in.req)
513 for i in range(NUM_WAYS):
514 tagi = Signal(TAG_BITS, name="tag_i%d" % i)
515 hit_test = Signal(name="hit_test%d" % i)
516 is_tag_hit = Signal(name="is_tag_hit_%d" % i)
517 comb += tagi.eq(read_tag(i, ctag))
518 comb += hit_test.eq(se.o[i])
519 comb += is_tag_hit.eq((cvb[i] | (hitcond & hit_test)) &
520 (tagi == req_tag))
521 with m.If(is_tag_hit):
522 comb += hit_way.eq(i)
523 comb += is_hit.eq(1)
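# (illustrative note: the hitcond/store_way term above lets a fetch hit on
#  the line currently being reloaded, provided the row it needs has already
#  arrived -- see r.rows_valid handling in WAIT_ACK)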
524
525 # Generate the "hit" and "miss" signals
526 # for the synchronous blocks
527 with m.If(i_in.req & access_ok & ~flush_in):
528 comb += req_is_hit.eq(is_hit)
529 comb += req_is_miss.eq(~is_hit)
530
531 comb += req_hit_way.eq(hit_way)
532
533 # The way to replace on a miss
534 with m.If(r.state == State.CLR_TAG):
535 comb += replace_way.eq(plru_victim)
536 with m.Else():
537 comb += replace_way.eq(r.store_way)
538
539 # Output instruction from current cache row
540 #
541 # Note: This is a mild violation of our design principle of
542 # having pipeline stages output from a clean latch. In this
543 # case we output the result of a mux. The alternative would
544 # be to output an entire row, which I prefer not to do just yet
545 # as it would force fetch2 to know about some of the cache
546 # geometry information.
547 comb += i_out.insn.eq(read_insn_word(r.hit_nia, cache_out_row))
548 comb += i_out.valid.eq(r.hit_valid)
549 comb += i_out.nia.eq(r.hit_nia)
550 comb += i_out.stop_mark.eq(r.hit_smark)
551 comb += i_out.fetch_failed.eq(r.fetch_failed)
552
553 # Stall fetch1 if we have a miss on cache or TLB
554 # or a protection fault
555 comb += stall_out.eq(~(is_hit & access_ok))
556
557 # Wishbone requests output (from the cache miss reload machine)
558 comb += bus.we.eq(r.wb.we)
559 comb += bus.adr.eq(r.wb.adr)
560 comb += bus.sel.eq(r.wb.sel)
561 comb += bus.stb.eq(r.wb.stb)
562 comb += bus.dat_w.eq(r.wb.dat)
563 comb += bus.cyc.eq(r.wb.cyc)
564
565 # Cache hit synchronous machine
566 def icache_hit(self, m, use_previous, r, req_is_hit, req_hit_way,
567 req_index, req_tag, real_addr):
568 sync = m.d.sync
569
570 i_in, stall_in = self.i_in, self.stall_in
571 flush_in = self.flush_in
572
573 # keep outputs to fetch2 unchanged on a stall
574 # except that flush or reset sets valid to 0.
575 # If use_previous, keep the same data as last
576 # cycle and use the second half.
577 with m.If(stall_in | use_previous):
578 with m.If(flush_in):
579 sync += r.hit_valid.eq(0)
580 with m.Else():
581 # On a hit, latch the request for the next cycle,
582 # when the BRAM data will be available on the
583 # cache_out output of the corresponding way
584 sync += r.hit_valid.eq(req_is_hit)
585
586 with m.If(req_is_hit):
587 sync += r.hit_way.eq(req_hit_way)
588 sync += Display("cache hit nia:%x IR:%x SM:%x idx:%x tag:%x "
589 "way:%x RA:%x", i_in.nia, i_in.virt_mode,
590 i_in.stop_mark, req_index, req_tag,
591 req_hit_way, real_addr)
592
593 with m.If(~stall_in):
594 # Send stop marks and NIA down regardless of validity
595 sync += r.hit_smark.eq(i_in.stop_mark)
596 sync += r.hit_nia.eq(i_in.nia)
597
598 def icache_miss_idle(self, m, r, req_is_miss, req_laddr,
599 req_index, req_tag, replace_way, real_addr):
600 comb = m.d.comb
601 sync = m.d.sync
602
603 i_in = self.i_in
604
605 # Reset per-row valid flags, only used in WAIT_ACK
606 for i in range(ROW_PER_LINE):
607 sync += r.rows_valid[i].eq(0)
608
609 # We need to read a cache line
610 with m.If(req_is_miss):
611 sync += Display(
612 "cache miss nia:%x IR:%x SM:%x idx:%x "
613 " way:%x tag:%x RA:%x", i_in.nia,
614 i_in.virt_mode, i_in.stop_mark, req_index,
615 replace_way, req_tag, real_addr)
616
617 # Keep track of our index and way for subsequent stores
618 st_row = Signal(ROW_BITS)
619 comb += st_row.eq(get_row(req_laddr))
620 sync += r.store_index.eq(req_index)
621 sync += r.store_row.eq(st_row)
622 sync += r.store_tag.eq(req_tag)
623 sync += r.store_valid.eq(1)
624 sync += r.end_row_ix.eq(get_row_of_line(st_row) - 1)
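# (illustrative note: the reload starts at the row that missed, wraps
#  around the line, and finishes on the row just before it, hence
#  end_row_ix = starting row - 1 modulo ROW_PER_LINE)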
625
626 # Prep for first wishbone read. We calculate the address
627 # of the start of the cache line and start the WB cycle.
628 sync += r.req_adr.eq(req_laddr)
629 sync += r.wb.cyc.eq(1)
630 sync += r.wb.stb.eq(1)
631
632 # Track that we had one request sent
633 sync += r.state.eq(State.CLR_TAG)
634
635 def icache_miss_clr_tag(self, m, r, replace_way,
636 req_index,
637 tagset, cache_tags):
638 comb = m.d.comb
639 sync = m.d.sync
640
641 # Get victim way from plru
642 sync += r.store_way.eq(replace_way)
643
644 # Force misses on that way while reloading that line
645 cv = Signal(NUM_WAYS)
646 comb += cv.eq(cache_tags[req_index].valid)
647 comb += cv.bit_select(replace_way, 1).eq(0)
648 sync += cache_tags[req_index].valid.eq(cv)
649
650 for i in range(NUM_WAYS):
651 with m.If(i == replace_way):
652 comb += tagset.eq(cache_tags[r.store_index].tag)
653 comb += write_tag(i, tagset, r.store_tag)
654 sync += cache_tags[r.store_index].tag.eq(tagset)
655
656 sync += r.state.eq(State.WAIT_ACK)
657
658 def icache_miss_wait_ack(self, m, r, replace_way, inval_in,
659 cache_tags, stbs_done):
660 comb = m.d.comb
661 sync = m.d.sync
662
663 bus = self.bus
664
665 # Requests are all sent if stb is 0
666 stbs_zero = Signal()
667 comb += stbs_zero.eq(r.wb.stb == 0)
668 comb += stbs_done.eq(stbs_zero)
669
670 # If we are still sending requests, was one accepted?
671 with m.If(~bus.stall & ~stbs_zero):
672 # That was the last word? We are done sending.
673 # Clear stb and set stbs_done so we can handle
674 # an eventual last ack on the same cycle.
675 with m.If(is_last_row_addr(r.req_adr, r.end_row_ix)):
676 sync += Display("IS_LAST_ROW_ADDR r.wb.addr:%x "
677 "r.end_row_ix:%x r.wb.stb:%x stbs_zero:%x "
678 "stbs_done:%x", r.wb.adr, r.end_row_ix,
679 r.wb.stb, stbs_zero, stbs_done)
680 sync += r.wb.stb.eq(0)
681 comb += stbs_done.eq(1)
682
683 # Calculate the next row address
684 rarange = Signal(LINE_OFF_BITS - ROW_OFF_BITS)
685 comb += rarange.eq(r.req_adr[ROW_OFF_BITS:LINE_OFF_BITS] + 1)
686 sync += r.req_adr[ROW_OFF_BITS:LINE_OFF_BITS].eq(rarange)
687 sync += Display("RARANGE r.req_adr:%x rarange:%x "
688 "stbs_zero:%x stbs_done:%x",
689 r.req_adr, rarange, stbs_zero, stbs_done)
690
691 # Incoming acks processing
692 with m.If(bus.ack):
693 sync += Display("WB_IN_ACK data:%x stbs_zero:%x "
694 "stbs_done:%x",
695 bus.dat_r, stbs_zero, stbs_done)
696
697 sync += r.rows_valid[r.store_row % ROW_PER_LINE].eq(1)
698
699 # Check for completion
700 with m.If(stbs_done & is_last_row(r.store_row, r.end_row_ix)):
701 # Complete wishbone cycle
702 sync += r.wb.cyc.eq(0)
703 # be nice, clear addr
704 sync += r.req_adr.eq(0)
705
706 # Cache line is now valid
707 cv = Signal(NUM_WAYS)
708 comb += cv.eq(cache_tags[r.store_index].valid)
709 comb += cv.bit_select(replace_way, 1).eq(
710 r.store_valid & ~inval_in)
711 sync += cache_tags[r.store_index].valid.eq(cv)
712
713 sync += r.state.eq(State.IDLE)
714
715 # move on to next request in row
716 # Increment store row counter
717 sync += r.store_row.eq(next_row(r.store_row))
718
719 # Cache miss/reload synchronous machine
720 def icache_miss(self, m, r, req_is_miss,
721 req_index, req_laddr, req_tag, replace_way,
722 cache_tags, access_ok, real_addr):
723 comb = m.d.comb
724 sync = m.d.sync
725
726 i_in, bus, m_in = self.i_in, self.bus, self.m_in
727 stall_in, flush_in = self.stall_in, self.flush_in
728 inval_in = self.inval_in
729
730 tagset = Signal(TAG_RAM_WIDTH)
731 stbs_done = Signal()
732
733 comb += r.wb.sel.eq(-1)
734 comb += r.wb.adr.eq(r.req_adr[3:])
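# (illustrative note: bus.adr is a 64-bit-word address, so the byte address
#  in req_adr is shifted right by 3; sel=-1 requests all 8 byte lanes)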
735
736 # Process cache invalidations
737 with m.If(inval_in):
738 for i in range(NUM_LINES):
739 sync += cache_tags[i].valid.eq(0)
740 sync += r.store_valid.eq(0)
741
742 # Main state machine
743 with m.Switch(r.state):
744
745 with m.Case(State.IDLE):
746 self.icache_miss_idle(m, r, req_is_miss, req_laddr,
747 req_index, req_tag, replace_way,
748 real_addr)
749
750 with m.Case(State.CLR_TAG, State.WAIT_ACK):
751 with m.If(r.state == State.CLR_TAG):
752 self.icache_miss_clr_tag(m, r, replace_way,
753 req_index, tagset, cache_tags)
754
755 self.icache_miss_wait_ack(m, r, replace_way, inval_in,
756 cache_tags, stbs_done)
757
758 # TLB miss and protection fault processing
759 with m.If(flush_in | m_in.tlbld):
760 sync += r.fetch_failed.eq(0)
761 with m.Elif(i_in.req & ~access_ok & ~stall_in):
762 sync += r.fetch_failed.eq(1)
763
764 # icache_log: if LOG_LENGTH > 0 generate
765 def icache_log(self, m, req_hit_way, ra_valid, access_ok,
766 req_is_miss, req_is_hit, lway, wstate, r):
767 comb = m.d.comb
768 sync = m.d.sync
769
770 bus, i_out = self.bus, self.i_out
771 log_out, stall_out = self.log_out, self.stall_out
772
773 # Output data to logger
774 for i in range(LOG_LENGTH):
775 log_data = Signal(54)
776 lway = Signal(WAY_BITS)
777 wstate = Signal()
778
779 sync += lway.eq(req_hit_way)
780 sync += wstate.eq(0)
781
782 with m.If(r.state != State.IDLE):
783 sync += wstate.eq(1)
784
785 sync += log_data.eq(Cat(
786 ra_valid, access_ok, req_is_miss, req_is_hit,
787 lway, wstate, r.hit_nia[2:6], r.fetch_failed,
788 stall_out, bus.stall, r.wb.cyc, r.wb.stb,
789 r.real_addr[3:6], bus.ack, i_out.insn, i_out.valid
790 ))
791 comb += log_out.eq(log_data)
792
793 def elaborate(self, platform):
794
795 m = Module()
796 comb = m.d.comb
797
798 # Storage. The cache data rows (CacheRam, see rams()) should become BRAM, the rest is LUTs
799 cache_tags = CacheTagArray()
800
801 # TLB Array
802 itlb = TLBArray()
803 itlb_valid = TLBValidArray()
804
805 # TODO to be passed to nmigen as ram attributes
806 # attribute ram_style of itlb_tags : signal is "distributed";
807 # attribute ram_style of itlb_ptes : signal is "distributed";
808
809 # Privilege bit from PTE EAA field
810 eaa_priv = Signal()
811
812 r = RegInternal()
813
814 # Async signals on incoming request
815 req_index = Signal(INDEX_BITS)
816 req_row = Signal(ROW_BITS)
817 req_hit_way = Signal(WAY_BITS)
818 req_tag = Signal(TAG_BITS)
819 req_is_hit = Signal()
820 req_is_miss = Signal()
821 req_laddr = Signal(64)
822
823 tlb_req_index = Signal(TLB_BITS)
824 real_addr = Signal(REAL_ADDR_BITS)
825 ra_valid = Signal()
826 priv_fault = Signal()
827 access_ok = Signal()
828 use_previous = Signal()
829
830 cache_out_row = Signal(ROW_SIZE_BITS)
831
832 plru_victim = Signal(WAY_BITS)
833 replace_way = Signal(WAY_BITS)
834
835 # call sub-functions putting everything together,
836 # using shared signals established above
837 self.rams(m, r, cache_out_row, use_previous, replace_way, req_row)
838 self.maybe_plrus(m, r, plru_victim)
839 self.itlb_lookup(m, tlb_req_index, itlb, itlb_valid, real_addr,
840 ra_valid, eaa_priv, priv_fault,
841 access_ok)
842 self.itlb_update(m, itlb, itlb_valid)
843 self.icache_comb(m, use_previous, r, req_index, req_row, req_hit_way,
844 req_tag, real_addr, req_laddr,
845 cache_tags, access_ok, req_is_hit, req_is_miss,
846 replace_way, plru_victim, cache_out_row)
847 self.icache_hit(m, use_previous, r, req_is_hit, req_hit_way,
848 req_index, req_tag, real_addr)
849 self.icache_miss(m, r, req_is_miss, req_index,
850 req_laddr, req_tag, replace_way, cache_tags,
851 access_ok, real_addr)
852 #self.icache_log(m, log_out, req_hit_way, ra_valid, access_ok,
853 # req_is_miss, req_is_hit, lway, wstate, r)
854
855 # don't connect up to FetchUnitInterface so that some unit tests
856 # can continue to operate
857 if not self.use_fetch_iface:
858 return m
859
860 # connect to FetchUnitInterface. FetchUnitInterface is undocumented
861 # so needs checking and iterative revising
862 i_in, bus, i_out = self.i_in, self.bus, self.i_out
863 comb += i_in.req.eq(self.a_i_valid)
864 comb += i_in.nia.eq(self.a_pc_i)
865 comb += self.stall_in.eq(self.a_stall_i)
866 comb += self.f_fetch_err_o.eq(i_out.fetch_failed)
867 comb += self.f_badaddr_o.eq(i_out.nia)
868 comb += self.f_instr_o.eq(i_out.insn)
869 comb += self.f_busy_o.eq(~i_out.valid) # probably
870
871 # TODO, connect icache wb_in/wb_out to "standard" nmigen Wishbone bus
872 ibus = self.ibus
873 comb += ibus.adr.eq(self.bus.adr)
874 comb += ibus.dat_w.eq(self.bus.dat_w)
875 comb += ibus.sel.eq(self.bus.sel)
876 comb += ibus.cyc.eq(self.bus.cyc)
877 comb += ibus.stb.eq(self.bus.stb)
878 comb += ibus.we.eq(self.bus.we)
879
880 comb += self.bus.dat_r.eq(ibus.dat_r)
881 comb += self.bus.ack.eq(ibus.ack)
882 if hasattr(ibus, "stall"):
883 comb += self.bus.stall.eq(ibus.stall)
884 else:
885 # fake-up the wishbone stall signal to comply with pipeline mode
886 # same thing is done in dcache.py
887 comb += self.bus.stall.eq(self.bus.cyc & ~self.bus.ack)
888
889 return m
890
891
892 def icache_sim(dut):
893 i_in = dut.i_in
894 i_out = dut.i_out
895 m_out = dut.m_in
896
897 yield i_in.priv_mode.eq(1)
898 yield i_in.req.eq(0)
899 yield i_in.nia.eq(0)
900 yield i_in.stop_mark.eq(0)
901 yield m_out.tlbld.eq(0)
902 yield m_out.tlbie.eq(0)
903 yield m_out.addr.eq(0)
904 yield m_out.pte.eq(0)
905 yield
906 yield
907 yield
908 yield
909
910 # miss, stalls for a bit
911 yield i_in.req.eq(1)
912 yield i_in.nia.eq(Const(0x0000000000000004, 64))
913 yield
914 valid = yield i_out.valid
915 while not valid:
916 yield
917 valid = yield i_out.valid
918 yield i_in.req.eq(0)
919
920 insn = yield i_out.insn
921 nia = yield i_out.nia
922 assert insn == 0x00000001, \
923 "insn @%x=%x expected 00000001" % (nia, insn)
924 yield i_in.req.eq(0)
925 yield
926
927 # hit
928 yield i_in.req.eq(1)
929 yield i_in.nia.eq(Const(0x0000000000000008, 64))
930 yield
931 valid = yield i_out.valid
932 while not valid:
933 yield
934 valid = yield i_out.valid
935 yield i_in.req.eq(0)
936
937 nia = yield i_out.nia
938 insn = yield i_out.insn
939 yield
940 assert insn == 0x00000002, \
941 "insn @%x=%x expected 00000002" % (nia, insn)
942
943 # another miss
944 yield i_in.req.eq(1)
945 yield i_in.nia.eq(Const(0x0000000000000040, 64))
946 yield
947 valid = yield i_out.valid
948 while not valid:
949 yield
950 valid = yield i_out.valid
951 yield i_in.req.eq(0)
952
953 nia = yield i_out.nia
954 insn = yield i_out.insn
955 assert insn == 0x00000010, \
956 "insn @%x=%x expected 00000010" % (nia, insn)
957
958 # test something that aliases (this only works because
959 # the unit test SRAM is a depth of 512)
960 yield i_in.req.eq(1)
961 yield i_in.nia.eq(Const(0x0000000000000100, 64))
962 yield
963 yield
964 valid = yield i_out.valid
965 assert not valid
966 for i in range(30):
967 yield
968 yield
969 insn = yield i_out.insn
970 valid = yield i_out.valid
971 insn = yield i_out.insn
972 assert valid
973 assert insn == 0x00000040, \
974 "insn @%x=%x expected 00000040" % (nia, insn)
975 yield i_in.req.eq(0)
976
977
978 def test_icache(mem):
979 from soc.config.test.test_loadstore import TestMemPspec
980 pspec = TestMemPspec(addr_wid=32,
981 mask_wid=8,
982 reg_wid=64,
983 )
984 dut = ICache(pspec)
985
986 memory = Memory(width=64, depth=512, init=mem)
987 sram = SRAM(memory=memory, granularity=8)
988
989 m = Module()
990
991 m.submodules.icache = dut
992 m.submodules.sram = sram
993
994 m.d.comb += sram.bus.cyc.eq(dut.bus.cyc)
995 m.d.comb += sram.bus.stb.eq(dut.bus.stb)
996 m.d.comb += sram.bus.we.eq(dut.bus.we)
997 m.d.comb += sram.bus.sel.eq(dut.bus.sel)
998 m.d.comb += sram.bus.adr.eq(dut.bus.adr)
999 m.d.comb += sram.bus.dat_w.eq(dut.bus.dat_w)
1000
1001 m.d.comb += dut.bus.ack.eq(sram.bus.ack)
1002 m.d.comb += dut.bus.dat_r.eq(sram.bus.dat_r)
1003
1004 # nmigen Simulation
1005 sim = Simulator(m)
1006 sim.add_clock(1e-6)
1007
1008 sim.add_sync_process(wrap(icache_sim(dut)))
1009 with sim.write_vcd('test_icache.vcd'):
1010 sim.run()
1011
1012
1013 if __name__ == '__main__':
1014 from soc.config.test.test_loadstore import TestMemPspec
1015 pspec = TestMemPspec(addr_wid=64,
1016 mask_wid=8,
1017 reg_wid=64,
1018 )
1019 dut = ICache(pspec)
1020 vl = rtlil.convert(dut, ports=[])
1021 with open("test_icache.il", "w") as f:
1022 f.write(vl)
1023
1024 # set up memory every 32-bits with incrementing values 0 1 2 ...
1025 mem = []
1026 for i in range(512):
1027 mem.append((i*2) | ((i*2+1)<<32))
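# (illustrative note: with this fill pattern the 32-bit word at byte
#  address A reads back as A//4 -- e.g. 0x4 -> 1, 0x40 -> 0x10,
#  0x100 -> 0x40 -- which is what the asserts in icache_sim() check)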
1028
1029 test_icache(mem)