1 """ICache
2
3 based on Anton Blanchard microwatt icache.vhdl
4
5 Set associative icache
6
7 TODO (in no specific order):
8 * Add debug interface to inspect cache content
9 * Add snoop/invalidate path
10 * Add multi-hit error detection
11 * Pipelined bus interface (wb or axi)
12 * Maybe add parity? There's a few bits free in each BRAM row on Xilinx
13 * Add optimization: service hits on partially loaded lines
14 * Add optimization: (maybe) interrupt reload on fluch/redirect
15 * Check if playing with the geometry of the cache tags allow for more
16 efficient use of distributed RAM and less logic/muxes. Currently we
17 write TAG_BITS width which may not match full ram blocks and might
18 cause muxes to be inferred for "partial writes".
19 * Check if making the read size of PLRU a ROM helps utilization
20 """
21
22 from enum import (Enum, unique)
23 from nmigen import (Module, Signal, Elaboratable, Cat, Array, Const, Repl)
24 from nmigen.cli import main, rtlil
25 from nmutil.iocontrol import RecordObject
26 from nmigen.utils import log2_int
27 from nmutil.util import Display
28
29 #from nmutil.plru import PLRU
30 from soc.experiment.cache_ram import CacheRam
31 from soc.experiment.plru import PLRU
32
33 from soc.experiment.mem_types import (Fetch1ToICacheType,
34 ICacheToDecode1Type,
35 MMUToICacheType)
36
37 from soc.experiment.wb_types import (WB_ADDR_BITS, WB_DATA_BITS,
38 WB_SEL_BITS, WBAddrType, WBDataType,
39 WBSelType, WBMasterOut, WBSlaveOut,
40 )
41
42 from nmigen_soc.wishbone.bus import Interface
43
44 # for test
45 from soc.bus.sram import SRAM
46 from nmigen import Memory
47 from nmutil.util import wrap
48 from nmigen.cli import main, rtlil
49
50 # NOTE: to use cxxsim, export NMIGEN_SIM_MODE=cxxsim from the shell
51 # Also, check out the cxxsim nmigen branch, and latest yosys from git
52 from nmutil.sim_tmp_alternative import Simulator, Settle
53
54
SIM = 0
LINE_SIZE = 64
# BRAM organisation: We never access more than wishbone_data_bits
# at a time so to save resources we make the array only that wide,
# and use consecutive indices to make a cache "line"
#
# ROW_SIZE is the width in bytes of the BRAM (based on WB, so 64-bits)
ROW_SIZE = WB_DATA_BITS // 8
# Number of lines in a set
NUM_LINES = 16
# Number of ways
NUM_WAYS = 4
# L1 ITLB number of entries (direct mapped)
TLB_SIZE = 64
# L1 ITLB log_2(page_size)
TLB_LG_PGSZ = 12
# Number of real address bits that we store
REAL_ADDR_BITS = 56
# Non-zero to enable log data collection
LOG_LENGTH = 0

ROW_SIZE_BITS = ROW_SIZE * 8
# ROW_PER_LINE is the number of row (wishbone) transactions in a line
ROW_PER_LINE = LINE_SIZE // ROW_SIZE
# BRAM_ROWS is the number of rows in BRAM needed to represent the full icache
BRAM_ROWS = NUM_LINES * ROW_PER_LINE
# INSN_PER_ROW is the number of 32bit instructions per BRAM row
INSN_PER_ROW = ROW_SIZE_BITS // 32

# Bit field counts in the address
#
# INSN_BITS is the number of bits to select an instruction in a row
INSN_BITS = log2_int(INSN_PER_ROW)
# ROW_BITS is the number of bits to select a row
ROW_BITS = log2_int(BRAM_ROWS)
# ROW_LINE_BITS is the number of bits to select a row within a line
ROW_LINE_BITS = log2_int(ROW_PER_LINE)
# LINE_OFF_BITS is the number of bits for the offset in a cache line
LINE_OFF_BITS = log2_int(LINE_SIZE)
# ROW_OFF_BITS is the number of bits for the offset in a row
ROW_OFF_BITS = log2_int(ROW_SIZE)
# INDEX_BITS is the number of bits to select a cache line
INDEX_BITS = log2_int(NUM_LINES)
# SET_SIZE_BITS is the log base 2 of the set size
SET_SIZE_BITS = LINE_OFF_BITS + INDEX_BITS
# TAG_BITS is the number of bits of the tag part of the address
TAG_BITS = REAL_ADDR_BITS - SET_SIZE_BITS
# TAG_WIDTH is the width in bits of each way of the tag RAM
TAG_WIDTH = TAG_BITS + 7 - ((TAG_BITS + 7) % 8)
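# Worked example, using the default geometry above:
#   LINE_OFF_BITS = log2(64) = 6, INDEX_BITS = log2(16) = 4
#   SET_SIZE_BITS = 6 + 4 = 10
#   TAG_BITS      = 56 - 10 = 46
#   TAG_WIDTH     = 46 + 7 - ((46 + 7) % 8) = 48, i.e. the 46-bit tag
#                   padded up to a whole number of bytes per way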

# WAY_BITS is the number of bits to select a way
WAY_BITS = log2_int(NUM_WAYS)
# TAG_RAM_WIDTH is the width of one tag RAM row: one byte-padded
# TAG_WIDTH slot per way, matching read_tag/write_tag below
TAG_RAM_WIDTH = TAG_WIDTH * NUM_WAYS

# L1 ITLB
TLB_BITS = log2_int(TLB_SIZE)
TLB_EA_TAG_BITS = 64 - (TLB_LG_PGSZ + TLB_BITS)
TLB_PTE_BITS = 64

print("BRAM_ROWS =", BRAM_ROWS)
print("INDEX_BITS =", INDEX_BITS)
print("INSN_BITS =", INSN_BITS)
print("INSN_PER_ROW =", INSN_PER_ROW)
print("LINE_SIZE =", LINE_SIZE)
print("LINE_OFF_BITS =", LINE_OFF_BITS)
print("LOG_LENGTH =", LOG_LENGTH)
print("NUM_LINES =", NUM_LINES)
print("NUM_WAYS =", NUM_WAYS)
print("REAL_ADDR_BITS =", REAL_ADDR_BITS)
print("ROW_BITS =", ROW_BITS)
print("ROW_OFF_BITS =", ROW_OFF_BITS)
print("ROW_LINE_BITS =", ROW_LINE_BITS)
print("ROW_PER_LINE =", ROW_PER_LINE)
print("ROW_SIZE =", ROW_SIZE)
print("ROW_SIZE_BITS =", ROW_SIZE_BITS)
print("SET_SIZE_BITS =", SET_SIZE_BITS)
print("SIM =", SIM)
print("TAG_BITS =", TAG_BITS)
print("TAG_RAM_WIDTH =", TAG_RAM_WIDTH)
print("TAG_WIDTH =", TAG_WIDTH)
print("TLB_BITS =", TLB_BITS)
print("TLB_EA_TAG_BITS =", TLB_EA_TAG_BITS)
print("TLB_LG_PGSZ =", TLB_LG_PGSZ)
print("TLB_PTE_BITS =", TLB_PTE_BITS)
print("TLB_SIZE =", TLB_SIZE)
print("WAY_BITS =", WAY_BITS)

# from microwatt/utils.vhdl
def ispow2(n):
    return n != 0 and (n & (n - 1)) == 0

assert LINE_SIZE % ROW_SIZE == 0
assert ispow2(LINE_SIZE), "LINE_SIZE not power of 2"
assert ispow2(NUM_LINES), "NUM_LINES not power of 2"
assert ispow2(ROW_PER_LINE), "ROW_PER_LINE not power of 2"
assert ispow2(INSN_PER_ROW), "INSN_PER_ROW not power of 2"
assert (ROW_BITS == (INDEX_BITS + ROW_LINE_BITS)), \
    "geometry bits don't add up"
assert (LINE_OFF_BITS == (ROW_OFF_BITS + ROW_LINE_BITS)), \
    "geometry bits don't add up"
assert (REAL_ADDR_BITS == (TAG_BITS + INDEX_BITS + LINE_OFF_BITS)), \
    "geometry bits don't add up"
assert (REAL_ADDR_BITS == (TAG_BITS + ROW_BITS + ROW_OFF_BITS)), \
    "geometry bits don't add up"

# Example of layout for 32 lines of 64 bytes:
#
# ..  tag    |index|  line  |
# ..         |   row   |    |
# ..         |     |   | |00| zero          (2)
# ..         |     |   |-|  | INSN_BITS     (1)
# ..         |     |---|    | ROW_LINE_BITS (3)
# ..         |     |--- - --| LINE_OFF_BITS (6)
# ..         |         |- --| ROW_OFF_BITS  (3)
# ..         |----- ---|    | ROW_BITS      (8)
# ..         |-----|        | INDEX_BITS    (5)
# .. --------|              | TAG_BITS      (53)

# The cache data BRAM organized as described above for each way
#subtype cache_row_t is std_ulogic_vector(ROW_SIZE_BITS-1 downto 0);
#
# The cache tags LUTRAM has a row per set. Vivado is a pain and will
# not handle a clean (commented) definition of the cache tags as a 3d
# memory. For now, work around it by putting all the tags for a set
# in a single row (one TAG_WIDTH slot per way).
def CacheTagArray():
    return Array(Signal(TAG_RAM_WIDTH, name="cachetag_%d" % x) \
                 for x in range(NUM_LINES))

# The cache valid bits
def CacheValidBitsArray():
    return Array(Signal(NUM_WAYS, name="cachevalid_%d" % x) \
                 for x in range(NUM_LINES))

def RowPerLineValidArray():
    return Array(Signal(name="rows_valid_%d" % x) \
                 for x in range(ROW_PER_LINE))


# TODO to be passed to nmigen as ram attributes
# attribute ram_style : string;
# attribute ram_style of cache_tags : signal is "distributed";


def TLBValidBitsArray():
    return Array(Signal(name="tlbvalid_%d" % x) \
                 for x in range(TLB_SIZE))

def TLBTagArray():
    return Array(Signal(TLB_EA_TAG_BITS, name="tlbtag_%d" % x) \
                 for x in range(TLB_SIZE))

def TLBPtesArray():
    return Array(Signal(TLB_PTE_BITS, name="tlbptes_%d" % x) \
                 for x in range(TLB_SIZE))

# Cache RAM interface
def CacheRamOut():
    return Array(Signal(ROW_SIZE_BITS, name="cache_out_%d" % x) \
                 for x in range(NUM_WAYS))

# PLRU output interface
def PLRUOut():
    return Array(Signal(WAY_BITS, name="plru_out_%d" % x) \
                 for x in range(NUM_LINES))

# Return the cache line index (tag index) for an address
def get_index(addr):
    return addr[LINE_OFF_BITS:SET_SIZE_BITS]

# Return the cache row index (data memory) for an address
def get_row(addr):
    return addr[ROW_OFF_BITS:SET_SIZE_BITS]

# Return the index of a row within a line
def get_row_of_line(row):
    return row[:ROW_LINE_BITS]

# Returns whether this is the last row of a line
def is_last_row_addr(addr, last):
    return addr[ROW_OFF_BITS:LINE_OFF_BITS] == last

# Returns whether this is the last row of a line
def is_last_row(row, last):
    return get_row_of_line(row) == last

# Return the next row in the current cache line. We use a dedicated
# function in order to limit the size of the generated adder to be
# only the bits within a cache line (3 bits with default settings)
def next_row(row):
    row_v = row[0:ROW_LINE_BITS] + 1
    return Cat(row_v[:ROW_LINE_BITS], row[ROW_LINE_BITS:])
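# e.g. with ROW_LINE_BITS = 3, next_row(0b01_111) == 0b01_000: only the
# low three bits increment (and wrap); the upper index bits are unchanged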

# Read the instruction word for the given address
# in the current cache row
def read_insn_word(addr, data):
    word = addr[2:INSN_BITS+2]
    return data.word_select(word, 32)

# Get the tag value from the address
def get_tag(addr):
    return addr[SET_SIZE_BITS:REAL_ADDR_BITS]
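# Worked example of the address split for the default geometry:
#   get_index -> addr[6:10]   selects the cache line (set)
#   get_row   -> addr[3:10]   selects the BRAM row
#   get_tag   -> addr[10:56]  is compared against the stored tags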

# Read a tag from a tag memory row
def read_tag(way, tagset):
    return tagset.word_select(way, TAG_WIDTH)[:TAG_BITS]

# Write a tag to tag memory row
def write_tag(way, tagset, tag):
    return read_tag(way, tagset).eq(tag)

# Simple hash for direct-mapped TLB index
def hash_ea(addr):
    hsh = (addr[TLB_LG_PGSZ:TLB_LG_PGSZ + TLB_BITS] ^
           addr[TLB_LG_PGSZ + TLB_BITS:TLB_LG_PGSZ + 2 * TLB_BITS] ^
           addr[TLB_LG_PGSZ + 2 * TLB_BITS:TLB_LG_PGSZ + 3 * TLB_BITS])
    return hsh
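# With TLB_LG_PGSZ = 12 and TLB_BITS = 6 this XOR-folds three 6-bit
# fields of the effective page number into one direct-mapped index:
#   index = addr[12:18] ^ addr[18:24] ^ addr[24:30]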


# Cache reload state machine
@unique
class State(Enum):
    IDLE = 0
    CLR_TAG = 1
    WAIT_ACK = 2


class RegInternal(RecordObject):
    def __init__(self):
        super().__init__()
        # Cache hit state (Latches for 1 cycle BRAM access)
        self.hit_way = Signal(WAY_BITS)
        self.hit_nia = Signal(64)
        self.hit_smark = Signal()
        self.hit_valid = Signal()

        # Cache miss state (reload state machine)
        self.state = Signal(State, reset=State.IDLE)
        self.wb = WBMasterOut("wb")
        self.req_adr = Signal(64)
        self.store_way = Signal(WAY_BITS)
        self.store_index = Signal(INDEX_BITS)
        self.store_row = Signal(ROW_BITS)
        self.store_tag = Signal(TAG_BITS)
        self.store_valid = Signal()
        self.end_row_ix = Signal(ROW_LINE_BITS)
        self.rows_valid = RowPerLineValidArray()

        # TLB miss state
        self.fetch_failed = Signal()


class ICache(Elaboratable):
    """64 bit set associative icache. All instructions are 4B aligned."""
    def __init__(self):
        self.i_in = Fetch1ToICacheType(name="i_in")
        self.i_out = ICacheToDecode1Type(name="i_out")

        self.m_in = MMUToICacheType(name="m_in")

        self.stall_in = Signal()
        self.stall_out = Signal()
        self.flush_in = Signal()
        self.inval_in = Signal()

        # standard naming (wired to non-standard for compatibility)
        self.bus = Interface(addr_width=32,
                             data_width=64,
                             granularity=8,
                             features={'stall'},
                             alignment=0,
                             name="icache")

        self.log_out = Signal(54)


    # Generate a cache RAM for each way
    def rams(self, m, r, cache_out_row, use_previous,
             replace_way, req_row):

        comb = m.d.comb
        sync = m.d.sync

        bus, stall_in = self.bus, self.stall_in

        for i in range(NUM_WAYS):
            do_read = Signal(name="do_rd_%d" % i)
            do_write = Signal(name="do_wr_%d" % i)
            rd_addr = Signal(ROW_BITS)
            wr_addr = Signal(ROW_BITS)
            d_out = Signal(ROW_SIZE_BITS, name="d_out_%d" % i)
            wr_sel = Signal(ROW_SIZE)

            way = CacheRam(ROW_BITS, ROW_SIZE_BITS, True, ram_num=i)
            setattr(m.submodules, "cacheram_%d" % i, way)

            comb += way.rd_en.eq(do_read)
            comb += way.rd_addr.eq(rd_addr)
            comb += d_out.eq(way.rd_data_o)
            comb += way.wr_sel.eq(wr_sel)
            comb += way.wr_addr.eq(wr_addr)
            comb += way.wr_data.eq(bus.dat_r)

            comb += do_read.eq(~(stall_in | use_previous))
            comb += do_write.eq(bus.ack & (replace_way == i))

            with m.If(do_write):
                sync += Display("cache write adr: %x data: %lx",
                                wr_addr, way.wr_data)

            with m.If(r.hit_way == i):
                comb += cache_out_row.eq(d_out)
                with m.If(do_read):
                    sync += Display("cache read adr: %x data: %x",
                                    req_row, d_out)

            comb += rd_addr.eq(req_row)
            comb += wr_addr.eq(r.store_row)
            comb += wr_sel.eq(Repl(do_write, ROW_SIZE))

    # Generate PLRUs
    def maybe_plrus(self, m, r, plru_victim):
        comb = m.d.comb

        if NUM_WAYS > 1:
            for i in range(NUM_LINES):
                plru = PLRU(WAY_BITS)
                setattr(m.submodules, "plru_%d" % i, plru)

                # PLRU interface: record the way of the last hit on
                # this line, and report the victim way for replacement
                with m.If(get_index(r.hit_nia) == i):
                    comb += plru.acc_en.eq(r.hit_valid)

                comb += plru.acc_i.eq(r.hit_way)
                comb += plru_victim[i].eq(plru.lru_o)

    # TLB hit detection and real address generation
    def itlb_lookup(self, m, tlb_req_index, itlb_ptes, itlb_tags,
                    real_addr, itlb_valid_bits, ra_valid, eaa_priv,
                    priv_fault, access_ok):

        comb = m.d.comb

        i_in = self.i_in

        pte = Signal(TLB_PTE_BITS)
        ttag = Signal(TLB_EA_TAG_BITS)

        comb += tlb_req_index.eq(hash_ea(i_in.nia))
        comb += pte.eq(itlb_ptes[tlb_req_index])
        comb += ttag.eq(itlb_tags[tlb_req_index])

        with m.If(i_in.virt_mode):
            comb += real_addr.eq(Cat(i_in.nia[:TLB_LG_PGSZ],
                                     pte[TLB_LG_PGSZ:REAL_ADDR_BITS]))

            with m.If(ttag == i_in.nia[TLB_LG_PGSZ + TLB_BITS:64]):
                comb += ra_valid.eq(itlb_valid_bits[tlb_req_index])

            comb += eaa_priv.eq(pte[3])

        with m.Else():
            comb += real_addr.eq(i_in.nia[:REAL_ADDR_BITS])
            comb += ra_valid.eq(1)
            comb += eaa_priv.eq(1)

        # No IAMR, so no KUEP support for now
        comb += priv_fault.eq(eaa_priv & ~i_in.priv_mode)
        comb += access_ok.eq(ra_valid & ~priv_fault)

    # iTLB update
    def itlb_update(self, m, itlb_valid_bits, itlb_tags, itlb_ptes):
        comb = m.d.comb
        sync = m.d.sync

        m_in = self.m_in

        wr_index = Signal(TLB_BITS)
        comb += wr_index.eq(hash_ea(m_in.addr))

        with m.If(m_in.tlbie & m_in.doall):
            # Clear all valid bits
            for i in range(TLB_SIZE):
                sync += itlb_valid_bits[i].eq(0)

        with m.Elif(m_in.tlbie):
            # Clear entry regardless of hit or miss
            sync += itlb_valid_bits[wr_index].eq(0)

        with m.Elif(m_in.tlbld):
            sync += itlb_tags[wr_index].eq(
                m_in.addr[TLB_LG_PGSZ + TLB_BITS:64])
            sync += itlb_ptes[wr_index].eq(m_in.pte)
            sync += itlb_valid_bits[wr_index].eq(1)

    # Cache hit detection, output to fetch2 and other misc logic
    def icache_comb(self, m, use_previous, r, req_index, req_row,
                    req_hit_way, req_tag, real_addr, req_laddr,
                    cache_valid_bits, cache_tags, access_ok,
                    req_is_hit, req_is_miss, replace_way,
                    plru_victim, cache_out_row):

        comb = m.d.comb

        i_in, i_out, bus = self.i_in, self.i_out, self.bus
        flush_in, stall_out = self.flush_in, self.stall_out

        is_hit = Signal()
        hit_way = Signal(WAY_BITS)

        # i_in.sequential means that i_in.nia this cycle is 4 more than
        # last cycle. If we read more than 32 bits at a time, had a
        # cache hit last cycle, and we don't want the first 32-bit chunk
        # then we can keep the data we read last cycle and just use that.
        with m.If(i_in.nia[2:INSN_BITS+2] != 0):
            comb += use_previous.eq(i_in.sequential & r.hit_valid)

        # Extract line, row and tag from request
        comb += req_index.eq(get_index(i_in.nia))
        comb += req_row.eq(get_row(i_in.nia))
        comb += req_tag.eq(get_tag(real_addr))

        # Calculate address of beginning of cache row, will be
        # used for cache miss processing if needed
        comb += req_laddr.eq(Cat(Const(0, ROW_OFF_BITS),
                                 real_addr[ROW_OFF_BITS:REAL_ADDR_BITS]))

        # Test if pending request is a hit on any way
        hitcond = Signal()
        comb += hitcond.eq((r.state == State.WAIT_ACK)
                           & (req_index == r.store_index)
                           & r.rows_valid[req_row % ROW_PER_LINE])
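        # hitcond lets the way-compare loop below treat the line that is
        # currently being reloaded as a hit, provided the row this request
        # needs has already been written to the BRAM (see rows_valid
        # handling in WAIT_ACK)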
        with m.If(i_in.req):
            cvb = Signal(NUM_WAYS)
            ctag = Signal(TAG_RAM_WIDTH)
            comb += ctag.eq(cache_tags[req_index])
            comb += cvb.eq(cache_valid_bits[req_index])
            for i in range(NUM_WAYS):
                tagi = Signal(TAG_BITS, name="tag_i%d" % i)
                comb += tagi.eq(read_tag(i, ctag))
                hit_test = Signal(name="hit_test%d" % i)
                comb += hit_test.eq(i == r.store_way)
                with m.If((cvb[i] | (hitcond & hit_test))
                          & (tagi == req_tag)):
                    comb += hit_way.eq(i)
                    comb += is_hit.eq(1)

        # Generate the "hit" and "miss" signals
        # for the synchronous blocks
        with m.If(i_in.req & access_ok & ~flush_in):
            comb += req_is_hit.eq(is_hit)
            comb += req_is_miss.eq(~is_hit)

        comb += req_hit_way.eq(hit_way)

        # The way to replace on a miss
        with m.If(r.state == State.CLR_TAG):
            comb += replace_way.eq(plru_victim[r.store_index])
        with m.Else():
            comb += replace_way.eq(r.store_way)

        # Output instruction from current cache row
        #
        # Note: This is a mild violation of our design principle of
        # having pipeline stages output from a clean latch. In this
        # case we output the result of a mux. The alternative would
        # be to output an entire row, which I prefer not to do just yet
        # as it would force fetch2 to know about some of the cache
        # geometry information.
        comb += i_out.insn.eq(read_insn_word(r.hit_nia, cache_out_row))
        comb += i_out.valid.eq(r.hit_valid)
        comb += i_out.nia.eq(r.hit_nia)
        comb += i_out.stop_mark.eq(r.hit_smark)
        comb += i_out.fetch_failed.eq(r.fetch_failed)

        # Stall fetch1 if we have a miss on cache or TLB
        # or a protection fault
        comb += stall_out.eq(~(is_hit & access_ok))

        # Wishbone requests output (from the cache miss reload machine)
        comb += bus.we.eq(r.wb.we)
        comb += bus.adr.eq(r.wb.adr)
        comb += bus.sel.eq(r.wb.sel)
        comb += bus.stb.eq(r.wb.stb)
        comb += bus.dat_w.eq(r.wb.dat)
        comb += bus.cyc.eq(r.wb.cyc)

    # Cache hit synchronous machine
    def icache_hit(self, m, use_previous, r, req_is_hit, req_hit_way,
                   req_index, req_tag, real_addr):
        sync = m.d.sync

        i_in, stall_in = self.i_in, self.stall_in
        flush_in = self.flush_in

        # keep outputs to fetch2 unchanged on a stall
        # except that flush or reset sets valid to 0
        # If use_previous, keep the same data as last
        # cycle and use the second half
        with m.If(stall_in | use_previous):
            with m.If(flush_in):
                sync += r.hit_valid.eq(0)
        with m.Else():
            # On a hit, latch the request for the next cycle,
            # when the BRAM data will be available on the
            # cache_out output of the corresponding way
            sync += r.hit_valid.eq(req_is_hit)

            with m.If(req_is_hit):
                sync += r.hit_way.eq(req_hit_way)
                sync += Display("cache hit nia:%x IR:%x SM:%x idx:%x "
                                "tag:%x way:%x RA:%x",
                                i_in.nia, i_in.virt_mode, i_in.stop_mark,
                                req_index, req_tag, req_hit_way, real_addr)

        with m.If(~stall_in):
            # Send stop marks and NIA down regardless of validity
            sync += r.hit_smark.eq(i_in.stop_mark)
            sync += r.hit_nia.eq(i_in.nia)

    def icache_miss_idle(self, m, r, req_is_miss, req_laddr,
                         req_index, req_tag, replace_way, real_addr):
        comb = m.d.comb
        sync = m.d.sync

        i_in = self.i_in

        # Reset per-row valid flags, only used in WAIT_ACK
        for i in range(ROW_PER_LINE):
            sync += r.rows_valid[i].eq(0)

        # We need to read a cache line
        with m.If(req_is_miss):
            sync += Display("cache miss nia:%x IR:%x SM:%x idx:%x "
                            "way:%x tag:%x RA:%x",
                            i_in.nia, i_in.virt_mode, i_in.stop_mark,
                            req_index, replace_way, req_tag, real_addr)

            # Keep track of our index and way for subsequent stores
            st_row = Signal(ROW_BITS)
            comb += st_row.eq(get_row(req_laddr))
            sync += r.store_index.eq(req_index)
            sync += r.store_row.eq(st_row)
            sync += r.store_tag.eq(req_tag)
            sync += r.store_valid.eq(1)
            sync += r.end_row_ix.eq(get_row_of_line(st_row) - 1)

            # Prep for first wishbone read. We calculate the address
            # of the start of the cache line and start the WB cycle.
            sync += r.req_adr.eq(req_laddr)
            sync += r.wb.cyc.eq(1)
            sync += r.wb.stb.eq(1)

            # Track that we had one request sent
            sync += r.state.eq(State.CLR_TAG)

    def icache_miss_clr_tag(self, m, r, replace_way,
                            cache_valid_bits, req_index,
                            tagset, cache_tags):

        comb = m.d.comb
        sync = m.d.sync

        # Get victim way from plru
        sync += r.store_way.eq(replace_way)
        # Force misses on that way while reloading that line
        cv = Signal(NUM_WAYS)
        comb += cv.eq(cache_valid_bits[req_index])
        comb += cv.bit_select(replace_way, 1).eq(0)
        sync += cache_valid_bits[req_index].eq(cv)

        for i in range(NUM_WAYS):
            with m.If(i == replace_way):
                comb += tagset.eq(cache_tags[r.store_index])
                comb += write_tag(i, tagset, r.store_tag)
                sync += cache_tags[r.store_index].eq(tagset)

        sync += r.state.eq(State.WAIT_ACK)

    def icache_miss_wait_ack(self, m, r, replace_way, inval_in,
                             stbs_done, cache_valid_bits):
        comb = m.d.comb
        sync = m.d.sync

        bus = self.bus

        # Requests are all sent if stb is 0
        stbs_zero = Signal()
        comb += stbs_zero.eq(r.wb.stb == 0)
        comb += stbs_done.eq(stbs_zero)

        # If we are still sending requests, was one accepted?
        with m.If(~bus.stall & ~stbs_zero):
            # That was the last word? We are done sending.
            # Clear stb and set stbs_done so we can handle
            # an eventual last ack on the same cycle.
            with m.If(is_last_row_addr(r.req_adr, r.end_row_ix)):
                sync += Display("IS_LAST_ROW_ADDR r.wb.adr:%x "
                                "r.end_row_ix:%x r.wb.stb:%x stbs_zero:%x "
                                "stbs_done:%x", r.wb.adr, r.end_row_ix,
                                r.wb.stb, stbs_zero, stbs_done)
                sync += r.wb.stb.eq(0)
                comb += stbs_done.eq(1)

            # Calculate the next row address
            rarange = Signal(LINE_OFF_BITS - ROW_OFF_BITS)
            comb += rarange.eq(r.req_adr[ROW_OFF_BITS:LINE_OFF_BITS] + 1)
            sync += r.req_adr[ROW_OFF_BITS:LINE_OFF_BITS].eq(rarange)
            sync += Display("RARANGE r.req_adr:%x rarange:%x "
                            "stbs_zero:%x stbs_done:%x",
                            r.req_adr, rarange, stbs_zero, stbs_done)

        # Incoming acks processing
        with m.If(bus.ack):
            sync += Display("WB_IN_ACK data:%x stbs_zero:%x "
                            "stbs_done:%x",
                            bus.dat_r, stbs_zero, stbs_done)

            sync += r.rows_valid[r.store_row % ROW_PER_LINE].eq(1)

            # Check for completion
            with m.If(stbs_done & is_last_row(r.store_row, r.end_row_ix)):
                # Complete wishbone cycle
                sync += r.wb.cyc.eq(0)
                # be nice, clear addr
                sync += r.req_adr.eq(0)

                # Cache line is now valid
                cv = Signal(NUM_WAYS)
                comb += cv.eq(cache_valid_bits[r.store_index])
                comb += cv.bit_select(replace_way, 1).eq(
                    r.store_valid & ~inval_in)
                sync += cache_valid_bits[r.store_index].eq(cv)

                sync += r.state.eq(State.IDLE)

            # move on to next request in row
            # Increment store row counter
            sync += r.store_row.eq(next_row(r.store_row))

    # Cache miss/reload synchronous machine
    def icache_miss(self, m, cache_valid_bits, r, req_is_miss,
                    req_index, req_laddr, req_tag, replace_way,
                    cache_tags, access_ok, real_addr):
        comb = m.d.comb
        sync = m.d.sync

        i_in, bus, m_in = self.i_in, self.bus, self.m_in
        stall_in, flush_in = self.stall_in, self.flush_in
        inval_in = self.inval_in

        tagset = Signal(TAG_RAM_WIDTH)
        stbs_done = Signal()

        comb += r.wb.sel.eq(-1)
        comb += r.wb.adr.eq(r.req_adr[3:])

        # Process cache invalidations
        with m.If(inval_in):
            for i in range(NUM_LINES):
                sync += cache_valid_bits[i].eq(0)
            sync += r.store_valid.eq(0)

        # Main state machine
        with m.Switch(r.state):

            with m.Case(State.IDLE):
                self.icache_miss_idle(m, r, req_is_miss, req_laddr,
                                      req_index, req_tag, replace_way,
                                      real_addr)

            with m.Case(State.CLR_TAG, State.WAIT_ACK):
                with m.If(r.state == State.CLR_TAG):
                    self.icache_miss_clr_tag(m, r, replace_way,
                                             cache_valid_bits, req_index,
                                             tagset, cache_tags)

                self.icache_miss_wait_ack(m, r, replace_way, inval_in,
                                          stbs_done, cache_valid_bits)

        # TLB miss and protection fault processing
        with m.If(flush_in | m_in.tlbld):
            sync += r.fetch_failed.eq(0)
        with m.Elif(i_in.req & ~access_ok & ~stall_in):
            sync += r.fetch_failed.eq(1)

    # icache_log: if LOG_LENGTH > 0 generate
    def icache_log(self, m, req_hit_way, ra_valid, access_ok,
                   req_is_miss, req_is_hit, r, real_addr):
        comb = m.d.comb
        sync = m.d.sync

        bus, i_out = self.bus, self.i_out
        log_out, stall_out = self.log_out, self.stall_out

        # Output data to logger
        for i in range(LOG_LENGTH):
            log_data = Signal(54)
            lway = Signal(WAY_BITS)
            wstate = Signal()

            sync += lway.eq(req_hit_way)
            sync += wstate.eq(0)

            with m.If(r.state != State.IDLE):
                sync += wstate.eq(1)

            sync += log_data.eq(Cat(
                ra_valid, access_ok, req_is_miss, req_is_hit,
                lway, wstate, r.hit_nia[2:6], r.fetch_failed,
                stall_out, bus.stall, r.wb.cyc, r.wb.stb,
                real_addr[3:6], bus.ack, i_out.insn, i_out.valid))
            comb += log_out.eq(log_data)

    def elaborate(self, platform):

        m = Module()
        comb = m.d.comb

        # Storage. Hopefully "cache_rows" is a BRAM, the rest is LUTs
        cache_tags = CacheTagArray()
        cache_valid_bits = CacheValidBitsArray()

        itlb_valid_bits = TLBValidBitsArray()
        itlb_tags = TLBTagArray()
        itlb_ptes = TLBPtesArray()
        # TODO to be passed to nmigen as ram attributes
        # attribute ram_style of itlb_tags : signal is "distributed";
        # attribute ram_style of itlb_ptes : signal is "distributed";

        # Privilege bit from PTE EAA field
        eaa_priv = Signal()

        r = RegInternal()

        # Async signals on incoming request
        req_index = Signal(INDEX_BITS)
        req_row = Signal(ROW_BITS)
        req_hit_way = Signal(WAY_BITS)
        req_tag = Signal(TAG_BITS)
        req_is_hit = Signal()
        req_is_miss = Signal()
        req_laddr = Signal(64)

        tlb_req_index = Signal(TLB_BITS)
        real_addr = Signal(REAL_ADDR_BITS)
        ra_valid = Signal()
        priv_fault = Signal()
        access_ok = Signal()
        use_previous = Signal()

        cache_out_row = Signal(ROW_SIZE_BITS)

        plru_victim = PLRUOut()
        replace_way = Signal(WAY_BITS)

        # fake-up the wishbone stall signal to comply with pipeline mode
        # same thing is done in dcache.py
        comb += self.bus.stall.eq(self.bus.cyc & ~self.bus.ack)
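        # (this makes the bus behave as a classic non-pipelined target:
        # stall stays asserted from request until ack, so only one
        # access is ever in flight; a real pipelined slave would drive
        # stall itself)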

        # call sub-functions putting everything together,
        # using shared signals established above
        self.rams(m, r, cache_out_row, use_previous, replace_way, req_row)
        self.maybe_plrus(m, r, plru_victim)
        self.itlb_lookup(m, tlb_req_index, itlb_ptes, itlb_tags, real_addr,
                         itlb_valid_bits, ra_valid, eaa_priv, priv_fault,
                         access_ok)
        self.itlb_update(m, itlb_valid_bits, itlb_tags, itlb_ptes)
        self.icache_comb(m, use_previous, r, req_index, req_row, req_hit_way,
                         req_tag, real_addr, req_laddr, cache_valid_bits,
                         cache_tags, access_ok, req_is_hit, req_is_miss,
                         replace_way, plru_victim, cache_out_row)
        self.icache_hit(m, use_previous, r, req_is_hit, req_hit_way,
                        req_index, req_tag, real_addr)
        self.icache_miss(m, cache_valid_bits, r, req_is_miss, req_index,
                         req_laddr, req_tag, replace_way, cache_tags,
                         access_ok, real_addr)
        #self.icache_log(m, req_hit_way, ra_valid, access_ok,
        #                req_is_miss, req_is_hit, r, real_addr)

        return m


def icache_sim(dut):
    i_in = dut.i_in
    i_out = dut.i_out
    m_in = dut.m_in

    yield i_in.priv_mode.eq(1)
    yield i_in.req.eq(0)
    yield i_in.nia.eq(0)
    yield i_in.stop_mark.eq(0)
    yield m_in.tlbld.eq(0)
    yield m_in.tlbie.eq(0)
    yield m_in.addr.eq(0)
    yield m_in.pte.eq(0)
    yield
    yield
    yield
    yield

    # miss, stalls for a bit
    yield i_in.req.eq(1)
    yield i_in.nia.eq(Const(0x0000000000000004, 64))
    yield
    valid = yield i_out.valid
    while not valid:
        yield
        valid = yield i_out.valid
    yield i_in.req.eq(0)

    insn = yield i_out.insn
    nia = yield i_out.nia
    assert insn == 0x00000001, \
        "insn @%x=%x expected 00000001" % (nia, insn)
    yield i_in.req.eq(0)
    yield

    # hit
    yield i_in.req.eq(1)
    yield i_in.nia.eq(Const(0x0000000000000008, 64))
    yield
    valid = yield i_out.valid
    while not valid:
        yield
        valid = yield i_out.valid
    yield i_in.req.eq(0)

    nia = yield i_out.nia
    insn = yield i_out.insn
    yield
    assert insn == 0x00000002, \
        "insn @%x=%x expected 00000002" % (nia, insn)

    # another miss
    yield i_in.req.eq(1)
    yield i_in.nia.eq(Const(0x0000000000000040, 64))
    yield
    valid = yield i_out.valid
    while not valid:
        yield
        valid = yield i_out.valid
    yield i_in.req.eq(0)

    nia = yield i_in.nia
    insn = yield i_out.insn
    assert insn == 0x00000010, \
        "insn @%x=%x expected 00000010" % (nia, insn)

    # test something that aliases (this only works because
    # the unit test SRAM is a depth of 512)
    yield i_in.req.eq(1)
    yield i_in.nia.eq(Const(0x0000000000000100, 64))
    yield
    yield
    valid = yield i_out.valid
    assert not valid
    for i in range(30):
        yield
    yield
    insn = yield i_out.insn
    valid = yield i_out.valid
    assert valid
    assert insn == 0x00000040, \
        "insn @%x=%x expected 00000040" % (nia, insn)
    yield i_in.req.eq(0)


def test_icache(mem):
    dut = ICache()

    memory = Memory(width=64, depth=512, init=mem)
    sram = SRAM(memory=memory, granularity=8)

    m = Module()

    m.submodules.icache = dut
    m.submodules.sram = sram

    m.d.comb += sram.bus.cyc.eq(dut.bus.cyc)
    m.d.comb += sram.bus.stb.eq(dut.bus.stb)
    m.d.comb += sram.bus.we.eq(dut.bus.we)
    m.d.comb += sram.bus.sel.eq(dut.bus.sel)
    m.d.comb += sram.bus.adr.eq(dut.bus.adr)
    m.d.comb += sram.bus.dat_w.eq(dut.bus.dat_w)

    m.d.comb += dut.bus.ack.eq(sram.bus.ack)
    m.d.comb += dut.bus.dat_r.eq(sram.bus.dat_r)

    # nmigen Simulation
    sim = Simulator(m)
    sim.add_clock(1e-6)

    sim.add_sync_process(wrap(icache_sim(dut)))
    with sim.write_vcd('test_icache.vcd'):
        sim.run()

if __name__ == '__main__':
    dut = ICache()
    vl = rtlil.convert(dut, ports=[])
    with open("test_icache.il", "w") as f:
        f.write(vl)

    # set up memory every 32-bits with incrementing values 0 1 2 ...
    mem = []
    for i in range(512):
        mem.append((i*2) | ((i*2+1)<<32))

    test_icache(mem)