"""ICache

based on Anton Blanchard microwatt icache.vhdl

Set associative icache

TODO (in no specific order):
* Add debug interface to inspect cache content
* Add snoop/invalidate path
* Add multi-hit error detection
* Pipelined bus interface (wb or axi)
* Maybe add parity? There's a few bits free in each BRAM row on Xilinx
* Add optimization: service hits on partially loaded lines
* Add optimization: (maybe) interrupt reload on flush/redirect
* Check if playing with the geometry of the cache tags allows for more
  efficient use of distributed RAM and less logic/muxes. Currently we
  write TAG_BITS width which may not match full ram blocks and might
  cause muxes to be inferred for "partial writes".
* Check if making the read size of PLRU a ROM helps utilization
"""

from enum import (Enum, unique)
from nmigen import (Module, Signal, Elaboratable, Cat, Array, Const, Repl,
                    Record)
from nmigen.cli import main, rtlil
from nmutil.iocontrol import RecordObject
from nmigen.utils import log2_int
from nmigen.lib.coding import Decoder
from nmutil.util import Display

#from nmutil.plru import PLRU
from soc.experiment.cache_ram import CacheRam
from soc.experiment.plru import PLRU

from soc.experiment.mem_types import (Fetch1ToICacheType,
                                      ICacheToDecode1Type,
                                      MMUToICacheType)

from soc.experiment.wb_types import (WB_ADDR_BITS, WB_DATA_BITS,
                                     WB_SEL_BITS, WBAddrType, WBDataType,
                                     WBSelType, WBMasterOut, WBSlaveOut,
                                     )

from nmigen_soc.wishbone.bus import Interface

# for test
from soc.bus.sram import SRAM
from nmigen import Memory
from nmutil.util import wrap

# NOTE: to use cxxsim, export NMIGEN_SIM_MODE=cxxsim from the shell
# Also, check out the cxxsim nmigen branch, and latest yosys from git
from nmutil.sim_tmp_alternative import Simulator, Settle


SIM = 0
LINE_SIZE = 64
# BRAM organisation: We never access more than wishbone_data_bits
# at a time so to save resources we make the array only that wide,
# and use consecutive indices to make a cache "line"
#
# ROW_SIZE is the width in bytes of the BRAM (based on WB, so 64-bits)
ROW_SIZE = WB_DATA_BITS // 8
# Number of lines in a set
NUM_LINES = 16
# Number of ways
NUM_WAYS = 4
# L1 ITLB number of entries (direct mapped)
TLB_SIZE = 64
# L1 ITLB log_2(page_size)
TLB_LG_PGSZ = 12
# Number of real address bits that we store
REAL_ADDR_BITS = 56
# Non-zero to enable log data collection
LOG_LENGTH = 0

ROW_SIZE_BITS = ROW_SIZE * 8
# ROW_PER_LINE is the number of row (wishbone) transactions in a line
ROW_PER_LINE = LINE_SIZE // ROW_SIZE
# BRAM_ROWS is the number of rows in BRAM needed to represent the full icache
BRAM_ROWS = NUM_LINES * ROW_PER_LINE
# INSN_PER_ROW is the number of 32bit instructions per BRAM row
INSN_PER_ROW = ROW_SIZE_BITS // 32

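# With the default geometry above (WB_DATA_BITS=64, LINE_SIZE=64,
# NUM_LINES=16): ROW_SIZE is 8 bytes, ROW_PER_LINE=8, BRAM_ROWS=128
# and INSN_PER_ROW=2, as reported by the print statements below.
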
# Bit fields counts in the address
#
# INSN_BITS is the number of bits to select an instruction in a row
INSN_BITS = log2_int(INSN_PER_ROW)
# ROW_BITS is the number of bits to select a row
ROW_BITS = log2_int(BRAM_ROWS)
# ROW_LINE_BITS is the number of bits to select a row within a line
ROW_LINE_BITS = log2_int(ROW_PER_LINE)
# LINE_OFF_BITS is the number of bits for the offset in a cache line
LINE_OFF_BITS = log2_int(LINE_SIZE)
# ROW_OFF_BITS is the number of bits for the offset in a row
ROW_OFF_BITS = log2_int(ROW_SIZE)
# INDEX_BITS is the number of bits to select a cache line
INDEX_BITS = log2_int(NUM_LINES)
# SET_SIZE_BITS is the log base 2 of the set size
SET_SIZE_BITS = LINE_OFF_BITS + INDEX_BITS
# TAG_BITS is the number of bits of the tag part of the address
TAG_BITS = REAL_ADDR_BITS - SET_SIZE_BITS
# TAG_WIDTH is the width in bits of each way of the tag RAM,
# rounded up to the next multiple of 8 (e.g. TAG_BITS=46 -> TAG_WIDTH=48)
TAG_WIDTH = TAG_BITS + 7 - ((TAG_BITS + 7) % 8)

# WAY_BITS is the number of bits to select a way
WAY_BITS = log2_int(NUM_WAYS)
# each way occupies one byte-aligned TAG_WIDTH slot of the tag RAM row
# (read_tag/write_tag below use word_select with TAG_WIDTH)
TAG_RAM_WIDTH = TAG_WIDTH * NUM_WAYS

# L1 ITLB
TLB_BITS = log2_int(TLB_SIZE)
TLB_EA_TAG_BITS = 64 - (TLB_LG_PGSZ + TLB_BITS)
TLB_PTE_BITS = 64

print("BRAM_ROWS =", BRAM_ROWS)
print("INDEX_BITS =", INDEX_BITS)
print("INSN_BITS =", INSN_BITS)
print("INSN_PER_ROW =", INSN_PER_ROW)
print("LINE_SIZE =", LINE_SIZE)
print("LINE_OFF_BITS =", LINE_OFF_BITS)
print("LOG_LENGTH =", LOG_LENGTH)
print("NUM_LINES =", NUM_LINES)
print("NUM_WAYS =", NUM_WAYS)
print("REAL_ADDR_BITS =", REAL_ADDR_BITS)
print("ROW_BITS =", ROW_BITS)
print("ROW_OFF_BITS =", ROW_OFF_BITS)
print("ROW_LINE_BITS =", ROW_LINE_BITS)
print("ROW_PER_LINE =", ROW_PER_LINE)
print("ROW_SIZE =", ROW_SIZE)
print("ROW_SIZE_BITS =", ROW_SIZE_BITS)
print("SET_SIZE_BITS =", SET_SIZE_BITS)
print("SIM =", SIM)
print("TAG_BITS =", TAG_BITS)
print("TAG_RAM_WIDTH =", TAG_RAM_WIDTH)
print("TLB_BITS =", TLB_BITS)
print("TLB_EA_TAG_BITS =", TLB_EA_TAG_BITS)
print("TLB_LG_PGSZ =", TLB_LG_PGSZ)
print("TLB_PTE_BITS =", TLB_PTE_BITS)
print("TLB_SIZE =", TLB_SIZE)
print("WAY_BITS =", WAY_BITS)

# from microwatt/utils.vhdl
def ispow2(n):
    return n != 0 and (n & (n - 1)) == 0

assert LINE_SIZE % ROW_SIZE == 0
assert ispow2(LINE_SIZE), "LINE_SIZE not power of 2"
assert ispow2(NUM_LINES), "NUM_LINES not power of 2"
assert ispow2(ROW_PER_LINE), "ROW_PER_LINE not power of 2"
assert ispow2(INSN_PER_ROW), "INSN_PER_ROW not power of 2"
assert (ROW_BITS == (INDEX_BITS + ROW_LINE_BITS)), \
    "geometry bits don't add up"
assert (LINE_OFF_BITS == (ROW_OFF_BITS + ROW_LINE_BITS)), \
    "geometry bits don't add up"
assert (REAL_ADDR_BITS == (TAG_BITS + INDEX_BITS + LINE_OFF_BITS)), \
    "geometry bits don't add up"
assert (REAL_ADDR_BITS == (TAG_BITS + ROW_BITS + ROW_OFF_BITS)), \
    "geometry bits don't add up"

# Example of layout for 32 lines of 64 bytes:
#
# ..  tag    |index|  line  |
# ..         |   row   |    |
# ..         |     |   | |00| zero          (2)
# ..         |     |   |-|  | INSN_BITS     (1)
# ..         |     |---|    | ROW_LINE_BITS (3)
# ..         |     |--- - --| LINE_OFF_BITS (6)
# ..         |         |- --| ROW_OFF_BITS  (3)
# ..         |----- ---|    | ROW_BITS      (8)
# ..         |-----|        | INDEX_BITS    (5)
# .. --------|              | TAG_BITS      (53)

# The cache data BRAM organized as described above for each way
#subtype cache_row_t is std_ulogic_vector(ROW_SIZE_BITS-1 downto 0);
#
# The cache tags LUTRAM has a row per set. Vivado is a pain and will
# not handle a clean (commented) definition of the cache tags as a 3d
# memory. For now, work around it by putting all the tags of a set
# into a single TAG_RAM_WIDTH-wide row.
def CacheTagArray():
    tag_layout = [('valid', 1),
                  ('tag', TAG_RAM_WIDTH),
                  ]
    return Array(Record(tag_layout, name="tag%d" % x)
                 for x in range(NUM_LINES))

def RowPerLineValidArray():
    return Array(Signal(name="rows_valid_%d" % x)
                 for x in range(ROW_PER_LINE))


# TODO to be passed to nmigen as ram attributes
# attribute ram_style : string;
# attribute ram_style of cache_tags : signal is "distributed";

def TLBArray():
    tlb_layout = [('valid', 1),
                  ('tag', TLB_EA_TAG_BITS),
                  ('pte', TLB_PTE_BITS)
                  ]
    return Array(Record(tlb_layout, name="tlb%d" % x) for x in range(TLB_SIZE))

# Cache RAM interface
def CacheRamOut():
    return Array(Signal(ROW_SIZE_BITS, name="cache_out_%d" % x)
                 for x in range(NUM_WAYS))

# PLRU output interface
def PLRUOut():
    return Array(Signal(WAY_BITS, name="plru_out_%d" % x)
                 for x in range(NUM_LINES))

# Return the cache line index (tag index) for an address
def get_index(addr):
    return addr[LINE_OFF_BITS:SET_SIZE_BITS]

# Return the cache row index (data memory) for an address
def get_row(addr):
    return addr[ROW_OFF_BITS:SET_SIZE_BITS]

# Return the index of a row within a line
def get_row_of_line(row):
    return row[:ROW_LINE_BITS]

# Returns whether this is the last row of a line
def is_last_row_addr(addr, last):
    return addr[ROW_OFF_BITS:LINE_OFF_BITS] == last

# Returns whether this is the last row of a line
def is_last_row(row, last):
    return get_row_of_line(row) == last

# Return the next row in the current cache line. We use a dedicated
# function in order to limit the size of the generated adder to be
# only the bits within a cache line (3 bits with default settings)
def next_row(row):
    row_v = row[0:ROW_LINE_BITS] + 1
    return Cat(row_v[:ROW_LINE_BITS], row[ROW_LINE_BITS:])

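# A minimal pure-Python sketch of next_row on plain integers (for
# illustration only, not used by the hardware): only the low
# ROW_LINE_BITS of the row counter increment and wrap, while the upper
# bits (the line index) pass through unchanged, which is what keeps
# the generated adder narrow.
def next_row_model(row):
    wrap_mask = (1 << ROW_LINE_BITS) - 1
    return (row & ~wrap_mask) | ((row + 1) & wrap_mask)
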
# Read the instruction word for the given address
# in the current cache row
def read_insn_word(addr, data):
    word = addr[2:INSN_BITS+2]
    return data.word_select(word, 32)

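# Pure-Python equivalent of read_insn_word, for illustration only:
# address bits [2:2+INSN_BITS] select which 32-bit instruction of the
# ROW_SIZE_BITS-wide row to return.
def read_insn_word_model(addr, row_data):
    word = (addr >> 2) & ((1 << INSN_BITS) - 1)
    return (row_data >> (word * 32)) & 0xffffffff
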
# Get the tag value from the address
def get_tag(addr):
    return addr[SET_SIZE_BITS:REAL_ADDR_BITS]

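# Illustrative pure-Python decomposition of a real address into the
# index/row/tag fields extracted by get_index, get_row and get_tag
# above (a sketch on plain integers, not part of the hardware):
def decompose_addr_model(addr):
    index = (addr >> LINE_OFF_BITS) & ((1 << INDEX_BITS) - 1)
    row = (addr >> ROW_OFF_BITS) & ((1 << ROW_BITS) - 1)
    tag = (addr >> SET_SIZE_BITS) & ((1 << TAG_BITS) - 1)
    return index, row, tag
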
# Read a tag from a tag memory row
def read_tag(way, tagset):
    return tagset.word_select(way, TAG_WIDTH)[:TAG_BITS]

# Write a tag to tag memory row
def write_tag(way, tagset, tag):
    return read_tag(way, tagset).eq(tag)

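# Pure-Python view of the tag-RAM packing assumed by read_tag (a
# sketch, not used by the hardware): each way's tag sits in its own
# byte-aligned TAG_WIDTH slot of the TAG_RAM_WIDTH-wide row, with the
# tag itself occupying the low TAG_BITS of that slot.
def read_tag_model(way, tagset):
    return (tagset >> (way * TAG_WIDTH)) & ((1 << TAG_BITS) - 1)
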
# Simple hash for direct-mapped TLB index
def hash_ea(addr):
    hsh = (addr[TLB_LG_PGSZ:TLB_LG_PGSZ + TLB_BITS] ^
           addr[TLB_LG_PGSZ + TLB_BITS:TLB_LG_PGSZ + 2 * TLB_BITS] ^
           addr[TLB_LG_PGSZ + 2 * TLB_BITS:TLB_LG_PGSZ + 3 * TLB_BITS])
    return hsh

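# The same XOR-fold on a plain integer, for illustration only: three
# TLB_BITS-wide slices of the effective address, starting at the page
# offset, fold into one direct-mapped TLB index.
def hash_ea_model(addr):
    mask = (1 << TLB_BITS) - 1
    return (((addr >> TLB_LG_PGSZ) ^
             (addr >> (TLB_LG_PGSZ + TLB_BITS)) ^
             (addr >> (TLB_LG_PGSZ + 2 * TLB_BITS))) & mask)
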

# Cache reload state machine
@unique
class State(Enum):
    IDLE = 0
    CLR_TAG = 1
    WAIT_ACK = 2


class RegInternal(RecordObject):
    def __init__(self):
        super().__init__()
        # Cache hit state (Latches for 1 cycle BRAM access)
        self.hit_way = Signal(WAY_BITS)
        self.hit_nia = Signal(64)
        self.hit_smark = Signal()
        self.hit_valid = Signal()

        # Cache miss state (reload state machine)
        self.state = Signal(State, reset=State.IDLE)
        self.wb = WBMasterOut("wb")
        self.req_adr = Signal(64)
        self.store_way = Signal(WAY_BITS)
        self.store_index = Signal(INDEX_BITS)
        self.store_row = Signal(ROW_BITS)
        self.store_tag = Signal(TAG_BITS)
        self.store_valid = Signal()
        self.end_row_ix = Signal(ROW_LINE_BITS)
        self.rows_valid = RowPerLineValidArray()

        # TLB miss state
        self.fetch_failed = Signal()


class ICache(Elaboratable):
    """64 bit direct mapped icache. All instructions are 4B aligned."""
    def __init__(self):
        self.i_in = Fetch1ToICacheType(name="i_in")
        self.i_out = ICacheToDecode1Type(name="i_out")

        self.m_in = MMUToICacheType(name="m_in")

        self.stall_in = Signal()
        self.stall_out = Signal()
        self.flush_in = Signal()
        self.inval_in = Signal()

        # standard naming (wired to non-standard for compatibility)
        self.bus = Interface(addr_width=32,
                             data_width=64,
                             granularity=8,
                             features={'stall'},
                             alignment=0,
                             name="icache")

        self.log_out = Signal(54)

    # Generate a cache RAM for each way
    def rams(self, m, r, cache_out_row, use_previous,
             replace_way, req_row):

        comb = m.d.comb
        sync = m.d.sync

        bus, stall_in = self.bus, self.stall_in

        # binary-to-unary decoders turn the way numbers into one-hot
        # per-way enables, avoiding a comparator/mux chain per way
        m.submodules.replace_way_e = re = Decoder(NUM_WAYS)
        m.submodules.hit_way_e = he = Decoder(NUM_WAYS)
        comb += re.i.eq(replace_way)
        comb += he.i.eq(r.hit_way)

        for i in range(NUM_WAYS):
            do_read = Signal(name="do_rd_%d" % i)
            do_write = Signal(name="do_wr_%d" % i)
            rd_addr = Signal(ROW_BITS)
            wr_addr = Signal(ROW_BITS)
            d_out = Signal(ROW_SIZE_BITS, name="d_out_%d" % i)
            wr_sel = Signal(ROW_SIZE)

            way = CacheRam(ROW_BITS, ROW_SIZE_BITS, TRACE=True, ram_num=i)
            m.submodules["cacheram_%d" % i] = way

            comb += way.rd_en.eq(do_read)
            comb += way.rd_addr.eq(rd_addr)
            comb += d_out.eq(way.rd_data_o)
            comb += way.wr_sel.eq(wr_sel)
            comb += way.wr_addr.eq(wr_addr)
            comb += way.wr_data.eq(bus.dat_r)

            comb += do_read.eq(~(stall_in | use_previous))
            comb += do_write.eq(bus.ack & re.o[i])

            with m.If(do_write):
                sync += Display("cache write adr: %x data: %lx",
                                wr_addr, way.wr_data)

            with m.If(he.o[i]):
                comb += cache_out_row.eq(d_out)
                with m.If(do_read):
                    sync += Display("cache read adr: %x data: %x",
                                    req_row, d_out)

            comb += rd_addr.eq(req_row)
            comb += wr_addr.eq(r.store_row)
            comb += wr_sel.eq(Repl(do_write, ROW_SIZE))

    # Generate PLRUs
    def maybe_plrus(self, m, r, plru_victim):
        comb = m.d.comb

        if NUM_WAYS > 1:  # elaboration-time constant: no PLRU for one way
            m.submodules.plru_e = e = Decoder(NUM_LINES)
            comb += e.i.eq(get_index(r.hit_nia))

            for i in range(NUM_LINES):
                plru = PLRU(WAY_BITS)
                m.submodules["plru_%d" % i] = plru

                # PLRU interface
                with m.If(e.o[i]):
                    comb += plru.acc_en.eq(r.hit_valid)

                comb += plru.acc_i.eq(r.hit_way)
                comb += plru_victim[i].eq(plru.lru_o)

    # TLB hit detection and real address generation
    def itlb_lookup(self, m, tlb_req_index, itlb,
                    real_addr, ra_valid, eaa_priv,
                    priv_fault, access_ok):

        comb = m.d.comb

        i_in = self.i_in

        pte = Signal(TLB_PTE_BITS)
        ttag = Signal(TLB_EA_TAG_BITS)

        comb += tlb_req_index.eq(hash_ea(i_in.nia))
        comb += pte.eq(itlb[tlb_req_index].pte)
        comb += ttag.eq(itlb[tlb_req_index].tag)

        with m.If(i_in.virt_mode):
            comb += real_addr.eq(Cat(i_in.nia[:TLB_LG_PGSZ],
                                     pte[TLB_LG_PGSZ:REAL_ADDR_BITS]))

            with m.If(ttag == i_in.nia[TLB_LG_PGSZ + TLB_BITS:64]):
                comb += ra_valid.eq(itlb[tlb_req_index].valid)

            comb += eaa_priv.eq(pte[3])

        with m.Else():
            comb += real_addr.eq(i_in.nia[:REAL_ADDR_BITS])
            comb += ra_valid.eq(1)
            comb += eaa_priv.eq(1)

        # No IAMR, so no KUEP support for now
        comb += priv_fault.eq(eaa_priv & ~i_in.priv_mode)
        comb += access_ok.eq(ra_valid & ~priv_fault)

    # iTLB update
    def itlb_update(self, m, itlb):
        comb = m.d.comb
        sync = m.d.sync

        m_in = self.m_in

        wr_index = Signal(TLB_BITS)  # direct-mapped index is TLB_BITS wide
        comb += wr_index.eq(hash_ea(m_in.addr))

        with m.If(m_in.tlbie & m_in.doall):
            # Clear all valid bits
            for i in range(TLB_SIZE):
                sync += itlb[i].valid.eq(0)

        with m.Elif(m_in.tlbie):
            # Clear entry regardless of hit or miss
            sync += itlb[wr_index].valid.eq(0)

        with m.Elif(m_in.tlbld):
            sync += itlb[wr_index].tag.eq(m_in.addr[TLB_LG_PGSZ + TLB_BITS:64])
            sync += itlb[wr_index].pte.eq(m_in.pte)
            sync += itlb[wr_index].valid.eq(1)

    # Cache hit detection, output to fetch2 and other misc logic
    def icache_comb(self, m, use_previous, r, req_index, req_row,
                    req_hit_way, req_tag, real_addr, req_laddr,
                    cache_tags, access_ok,
                    req_is_hit, req_is_miss, replace_way,
                    plru_victim, cache_out_row):

        comb = m.d.comb

        i_in, i_out, bus = self.i_in, self.i_out, self.bus
        flush_in, stall_out = self.flush_in, self.stall_out

        is_hit = Signal()
        hit_way = Signal(WAY_BITS)

        # i_in.sequential means that i_in.nia this cycle is 4 more than
        # last cycle. If we read more than 32 bits at a time, had a
        # cache hit last cycle, and we don't want the first 32-bit chunk
        # then we can keep the data we read last cycle and just use that.
        with m.If(i_in.nia[2:INSN_BITS+2] != 0):
            comb += use_previous.eq(i_in.sequential & r.hit_valid)

        # Extract line, row and tag from request
        comb += req_index.eq(get_index(i_in.nia))
        comb += req_row.eq(get_row(i_in.nia))
        comb += req_tag.eq(get_tag(real_addr))

        # Calculate address of beginning of cache row, will be
        # used for cache miss processing if needed
        comb += req_laddr.eq(Cat(Const(0, ROW_OFF_BITS),
                                 real_addr[ROW_OFF_BITS:REAL_ADDR_BITS]))

        # Test if pending request is a hit on any way
        hitcond = Signal()
        comb += hitcond.eq((r.state == State.WAIT_ACK)
                           & (req_index == r.store_index)
                           & r.rows_valid[req_row % ROW_PER_LINE])
        with m.If(i_in.req):
            cvb = Signal(NUM_WAYS)
            ctag = Signal(TAG_RAM_WIDTH)
            comb += ctag.eq(cache_tags[req_index].tag)
            comb += cvb.eq(cache_tags[req_index].valid)
            m.submodules.store_way_e = se = Decoder(NUM_WAYS)
            comb += se.i.eq(r.store_way)
            for i in range(NUM_WAYS):
                tagi = Signal(TAG_BITS, name="tag_i%d" % i)
                hit_test = Signal(name="hit_test%d" % i)
                is_tag_hit = Signal(name="is_tag_hit_%d" % i)
                comb += tagi.eq(read_tag(i, ctag))
                comb += hit_test.eq(se.o[i])
                comb += is_tag_hit.eq((cvb[i] | (hitcond & hit_test)) &
                                      (tagi == req_tag))
                with m.If(is_tag_hit):
                    comb += hit_way.eq(i)
                    comb += is_hit.eq(1)

        # Generate the "hit" and "miss" signals
        # for the synchronous blocks
        with m.If(i_in.req & access_ok & ~flush_in):
            comb += req_is_hit.eq(is_hit)
            comb += req_is_miss.eq(~is_hit)

        comb += req_hit_way.eq(hit_way)

        # The way to replace on a miss
        with m.If(r.state == State.CLR_TAG):
            comb += replace_way.eq(plru_victim[r.store_index])
        with m.Else():
            comb += replace_way.eq(r.store_way)

        # Output instruction from current cache row
        #
        # Note: This is a mild violation of our design principle of
        # having pipeline stages output from a clean latch. In this
        # case we output the result of a mux. The alternative would
        # be to output an entire row, which I prefer not to do just yet
        # as it would force fetch2 to know about some of the cache
        # geometry information.
        comb += i_out.insn.eq(read_insn_word(r.hit_nia, cache_out_row))
        comb += i_out.valid.eq(r.hit_valid)
        comb += i_out.nia.eq(r.hit_nia)
        comb += i_out.stop_mark.eq(r.hit_smark)
        comb += i_out.fetch_failed.eq(r.fetch_failed)

        # Stall fetch1 if we have a miss on cache or TLB
        # or a protection fault
        comb += stall_out.eq(~(is_hit & access_ok))

        # Wishbone requests output (from the cache miss reload machine)
        comb += bus.we.eq(r.wb.we)
        comb += bus.adr.eq(r.wb.adr)
        comb += bus.sel.eq(r.wb.sel)
        comb += bus.stb.eq(r.wb.stb)
        comb += bus.dat_w.eq(r.wb.dat)
        comb += bus.cyc.eq(r.wb.cyc)

    # Cache hit synchronous machine
    def icache_hit(self, m, use_previous, r, req_is_hit, req_hit_way,
                   req_index, req_tag, real_addr):
        sync = m.d.sync

        i_in, stall_in = self.i_in, self.stall_in
        flush_in = self.flush_in

        # keep outputs to fetch2 unchanged on a stall
        # except that flush or reset sets valid to 0
        # If use_previous, keep the same data as last
        # cycle and use the second half
        with m.If(stall_in | use_previous):
            with m.If(flush_in):
                sync += r.hit_valid.eq(0)
        with m.Else():
            # On a hit, latch the request for the next cycle,
            # when the BRAM data will be available on the
            # cache_out output of the corresponding way
            sync += r.hit_valid.eq(req_is_hit)

            with m.If(req_is_hit):
                sync += r.hit_way.eq(req_hit_way)
                sync += Display("cache hit nia:%x IR:%x SM:%x idx:%x tag:%x "
                                "way:%x RA:%x", i_in.nia, i_in.virt_mode,
                                i_in.stop_mark, req_index, req_tag,
                                req_hit_way, real_addr)

        with m.If(~stall_in):
            # Send stop marks and NIA down regardless of validity
            sync += r.hit_smark.eq(i_in.stop_mark)
            sync += r.hit_nia.eq(i_in.nia)

    def icache_miss_idle(self, m, r, req_is_miss, req_laddr,
                         req_index, req_tag, replace_way, real_addr):
        comb = m.d.comb
        sync = m.d.sync

        i_in = self.i_in

        # Reset per-row valid flags, only used in WAIT_ACK
        for i in range(ROW_PER_LINE):
            sync += r.rows_valid[i].eq(0)

        # We need to read a cache line
        with m.If(req_is_miss):
            sync += Display("cache miss nia:%x IR:%x SM:%x idx:%x "
                            " way:%x tag:%x RA:%x", i_in.nia,
                            i_in.virt_mode, i_in.stop_mark, req_index,
                            replace_way, req_tag, real_addr)

            # Keep track of our index and way for subsequent stores
            st_row = Signal(ROW_BITS)
            comb += st_row.eq(get_row(req_laddr))
            sync += r.store_index.eq(req_index)
            sync += r.store_row.eq(st_row)
            sync += r.store_tag.eq(req_tag)
            sync += r.store_valid.eq(1)
            sync += r.end_row_ix.eq(get_row_of_line(st_row) - 1)

            # Prep for first wishbone read. We calculate the address
            # of the start of the cache line and start the WB cycle.
            sync += r.req_adr.eq(req_laddr)
            sync += r.wb.cyc.eq(1)
            sync += r.wb.stb.eq(1)

            # Track that we had one request sent
            sync += r.state.eq(State.CLR_TAG)

    def icache_miss_clr_tag(self, m, r, replace_way,
                            req_index,
                            tagset, cache_tags):
        comb = m.d.comb
        sync = m.d.sync

        # Get victim way from plru
        sync += r.store_way.eq(replace_way)

        # Force misses on that way while reloading that line
        cv = Signal(NUM_WAYS)  # one valid bit per way
        comb += cv.eq(cache_tags[req_index].valid)
        comb += cv.bit_select(replace_way, 1).eq(0)
        sync += cache_tags[req_index].valid.eq(cv)

        for i in range(NUM_WAYS):
            with m.If(i == replace_way):
                comb += tagset.eq(cache_tags[r.store_index].tag)
                comb += write_tag(i, tagset, r.store_tag)
                sync += cache_tags[r.store_index].tag.eq(tagset)

        sync += r.state.eq(State.WAIT_ACK)

    def icache_miss_wait_ack(self, m, r, replace_way, inval_in,
                             cache_tags, stbs_done):
        comb = m.d.comb
        sync = m.d.sync

        bus = self.bus

        # Requests are all sent if stb is 0
        stbs_zero = Signal()
        comb += stbs_zero.eq(r.wb.stb == 0)
        comb += stbs_done.eq(stbs_zero)

        # If we are still sending requests, was one accepted?
        with m.If(~bus.stall & ~stbs_zero):
            # That was the last word? We are done sending.
            # Clear stb and set stbs_done so we can handle
            # an eventual last ack on the same cycle.
            with m.If(is_last_row_addr(r.req_adr, r.end_row_ix)):
                sync += Display("IS_LAST_ROW_ADDR r.wb.addr:%x "
                                "r.end_row_ix:%x r.wb.stb:%x stbs_zero:%x "
                                "stbs_done:%x", r.wb.adr, r.end_row_ix,
                                r.wb.stb, stbs_zero, stbs_done)
                sync += r.wb.stb.eq(0)
                comb += stbs_done.eq(1)

            # Calculate the next row address
            rarange = Signal(LINE_OFF_BITS - ROW_OFF_BITS)
            comb += rarange.eq(r.req_adr[ROW_OFF_BITS:LINE_OFF_BITS] + 1)
            sync += r.req_adr[ROW_OFF_BITS:LINE_OFF_BITS].eq(rarange)
            sync += Display("RARANGE r.req_adr:%x rarange:%x "
                            "stbs_zero:%x stbs_done:%x",
                            r.req_adr, rarange, stbs_zero, stbs_done)

        # Incoming acks processing
        with m.If(bus.ack):
            sync += Display("WB_IN_ACK data:%x stbs_zero:%x "
                            "stbs_done:%x",
                            bus.dat_r, stbs_zero, stbs_done)

            sync += r.rows_valid[r.store_row % ROW_PER_LINE].eq(1)

            # Check for completion
            with m.If(stbs_done & is_last_row(r.store_row, r.end_row_ix)):
                # Complete wishbone cycle
                sync += r.wb.cyc.eq(0)
                # be nice, clear addr
                sync += r.req_adr.eq(0)

                # Cache line is now valid
                cv = Signal(NUM_WAYS)  # one valid bit per way
                comb += cv.eq(cache_tags[r.store_index].valid)
                comb += cv.bit_select(replace_way, 1).eq(
                    r.store_valid & ~inval_in)
                sync += cache_tags[r.store_index].valid.eq(cv)

                sync += r.state.eq(State.IDLE)

            # move on to next request in row
            # Increment store row counter
            sync += r.store_row.eq(next_row(r.store_row))

    # Cache miss/reload synchronous machine
    def icache_miss(self, m, r, req_is_miss,
                    req_index, req_laddr, req_tag, replace_way,
                    cache_tags, access_ok, real_addr):
        comb = m.d.comb
        sync = m.d.sync

        i_in, bus, m_in = self.i_in, self.bus, self.m_in
        stall_in, flush_in = self.stall_in, self.flush_in
        inval_in = self.inval_in

        tagset = Signal(TAG_RAM_WIDTH)
        stbs_done = Signal()

        comb += r.wb.sel.eq(-1)
        comb += r.wb.adr.eq(r.req_adr[3:])

        # Process cache invalidations
        with m.If(inval_in):
            for i in range(NUM_LINES):
                sync += cache_tags[i].valid.eq(0)
            sync += r.store_valid.eq(0)

        # Main state machine
        with m.Switch(r.state):

            with m.Case(State.IDLE):
                self.icache_miss_idle(m, r, req_is_miss, req_laddr,
                                      req_index, req_tag, replace_way,
                                      real_addr)

            with m.Case(State.CLR_TAG, State.WAIT_ACK):
                with m.If(r.state == State.CLR_TAG):
                    self.icache_miss_clr_tag(m, r, replace_way,
                                             req_index, tagset, cache_tags)

                self.icache_miss_wait_ack(m, r, replace_way, inval_in,
                                          cache_tags, stbs_done)

        # TLB miss and protection fault processing
        with m.If(flush_in | m_in.tlbld):
            sync += r.fetch_failed.eq(0)
        with m.Elif(i_in.req & ~access_ok & ~stall_in):
            sync += r.fetch_failed.eq(1)

    # icache_log: if LOG_LENGTH > 0 generate
    def icache_log(self, m, req_hit_way, ra_valid, access_ok,
                   req_is_miss, req_is_hit, lway, wstate, r):
        comb = m.d.comb
        sync = m.d.sync

        bus, i_out = self.bus, self.i_out
        log_out, stall_out = self.log_out, self.stall_out

        # Output data to logger
        for i in range(LOG_LENGTH):
            log_data = Signal(54)
            lway = Signal(WAY_BITS)
            wstate = Signal()

            sync += lway.eq(req_hit_way)
            sync += wstate.eq(0)

            with m.If(r.state != State.IDLE):
                sync += wstate.eq(1)

            sync += log_data.eq(Cat(
                ra_valid, access_ok, req_is_miss, req_is_hit,
                lway, wstate, r.hit_nia[2:6], r.fetch_failed,
                stall_out, bus.stall, r.wb.cyc, r.wb.stb,
                r.real_addr[3:6], bus.ack, i_out.insn, i_out.valid))
            comb += log_out.eq(log_data)

    def elaborate(self, platform):

        m = Module()
        comb = m.d.comb

        # Storage. Hopefully "cache_rows" is a BRAM, the rest is LUTs
        cache_tags = CacheTagArray()

        # TLB Array
        itlb = TLBArray()

        # TODO to be passed to nmigen as ram attributes
        # attribute ram_style of itlb_tags : signal is "distributed";
        # attribute ram_style of itlb_ptes : signal is "distributed";

        # Privilege bit from PTE EAA field
        eaa_priv = Signal()

        r = RegInternal()

        # Async signal on incoming request
        req_index = Signal(INDEX_BITS)
        req_row = Signal(ROW_BITS)
        req_hit_way = Signal(WAY_BITS)
        req_tag = Signal(TAG_BITS)
        req_is_hit = Signal()
        req_is_miss = Signal()
        req_laddr = Signal(64)

        tlb_req_index = Signal(TLB_BITS)  # direct-mapped TLB index
        real_addr = Signal(REAL_ADDR_BITS)
        ra_valid = Signal()
        priv_fault = Signal()
        access_ok = Signal()
        use_previous = Signal()

        cache_out_row = Signal(ROW_SIZE_BITS)

        plru_victim = PLRUOut()
        replace_way = Signal(WAY_BITS)

        # fake-up the wishbone stall signal to comply with pipeline mode
        # same thing is done in dcache.py
        comb += self.bus.stall.eq(self.bus.cyc & ~self.bus.ack)

        # call sub-functions putting everything together,
        # using shared signals established above
        self.rams(m, r, cache_out_row, use_previous, replace_way, req_row)
        self.maybe_plrus(m, r, plru_victim)
        self.itlb_lookup(m, tlb_req_index, itlb, real_addr,
                         ra_valid, eaa_priv, priv_fault,
                         access_ok)
        self.itlb_update(m, itlb)
        self.icache_comb(m, use_previous, r, req_index, req_row, req_hit_way,
                         req_tag, real_addr, req_laddr,
                         cache_tags, access_ok, req_is_hit, req_is_miss,
                         replace_way, plru_victim, cache_out_row)
        self.icache_hit(m, use_previous, r, req_is_hit, req_hit_way,
                        req_index, req_tag, real_addr)
        self.icache_miss(m, r, req_is_miss, req_index,
                         req_laddr, req_tag, replace_way, cache_tags,
                         access_ok, real_addr)
        #self.icache_log(m, log_out, req_hit_way, ra_valid, access_ok,
        #                req_is_miss, req_is_hit, lway, wstate, r)

        return m


def icache_sim(dut):
    i_in = dut.i_in
    i_out = dut.i_out
    m_out = dut.m_in  # MMU-to-icache port: the testbench drives it

    yield i_in.priv_mode.eq(1)
    yield i_in.req.eq(0)
    yield i_in.nia.eq(0)
    yield i_in.stop_mark.eq(0)
    yield m_out.tlbld.eq(0)
    yield m_out.tlbie.eq(0)
    yield m_out.addr.eq(0)
    yield m_out.pte.eq(0)
    yield
    yield
    yield
    yield

    # miss, stalls for a bit
    yield i_in.req.eq(1)
    yield i_in.nia.eq(Const(0x0000000000000004, 64))
    yield
    valid = yield i_out.valid
    while not valid:
        yield
        valid = yield i_out.valid
    yield i_in.req.eq(0)

    insn = yield i_out.insn
    nia = yield i_out.nia
    assert insn == 0x00000001, \
        "insn @%x=%x expected 00000001" % (nia, insn)
    yield i_in.req.eq(0)
    yield

    # hit
    yield i_in.req.eq(1)
    yield i_in.nia.eq(Const(0x0000000000000008, 64))
    yield
    valid = yield i_out.valid
    while not valid:
        yield
        valid = yield i_out.valid
    yield i_in.req.eq(0)

    nia = yield i_out.nia
    insn = yield i_out.insn
    yield
    assert insn == 0x00000002, \
        "insn @%x=%x expected 00000002" % (nia, insn)

    # another miss
    yield i_in.req.eq(1)
    yield i_in.nia.eq(Const(0x0000000000000040, 64))
    yield
    valid = yield i_out.valid
    while not valid:
        yield
        valid = yield i_out.valid
    yield i_in.req.eq(0)

    nia = yield i_in.nia
    insn = yield i_out.insn
    assert insn == 0x00000010, \
        "insn @%x=%x expected 00000010" % (nia, insn)

    # test something that aliases (this only works because
    # the unit test SRAM is a depth of 512)
    yield i_in.req.eq(1)
    yield i_in.nia.eq(Const(0x0000000000000100, 64))
    yield
    yield
    valid = yield i_out.valid
    assert not valid  # not a hit yet: the line must be reloaded first
    for i in range(30):
        yield
    yield
    insn = yield i_out.insn
    valid = yield i_out.valid
    insn = yield i_out.insn
    assert valid
    assert insn == 0x00000040, \
        "insn @%x=%x expected 00000040" % (nia, insn)
    yield i_in.req.eq(0)


def test_icache(mem):
    dut = ICache()

    memory = Memory(width=64, depth=512, init=mem)
    sram = SRAM(memory=memory, granularity=8)

    m = Module()

    m.submodules.icache = dut
    m.submodules.sram = sram

    m.d.comb += sram.bus.cyc.eq(dut.bus.cyc)
    m.d.comb += sram.bus.stb.eq(dut.bus.stb)
    m.d.comb += sram.bus.we.eq(dut.bus.we)
    m.d.comb += sram.bus.sel.eq(dut.bus.sel)
    m.d.comb += sram.bus.adr.eq(dut.bus.adr)
    m.d.comb += sram.bus.dat_w.eq(dut.bus.dat_w)

    m.d.comb += dut.bus.ack.eq(sram.bus.ack)
    m.d.comb += dut.bus.dat_r.eq(sram.bus.dat_r)

    # nmigen Simulation
    sim = Simulator(m)
    sim.add_clock(1e-6)

    sim.add_sync_process(wrap(icache_sim(dut)))
    with sim.write_vcd('test_icache.vcd'):
        sim.run()


if __name__ == '__main__':
    dut = ICache()
    vl = rtlil.convert(dut, ports=[])
    with open("test_icache.il", "w") as f:
        f.write(vl)

    # set up memory every 32-bits with incrementing values 0 1 2 ...
    # (each 64-bit memory entry packs two consecutive 32-bit values)
    mem = []
    for i in range(512):
        mem.append((i*2) | ((i*2+1)<<32))

    test_icache(mem)