1 """ICache
2
3 based on Anton Blanchard microwatt icache.vhdl
4
5 Set associative icache
6
7 TODO (in no specific order):
8 * Add debug interface to inspect cache content
9 * Add snoop/invalidate path
10 * Add multi-hit error detection
11 * Pipelined bus interface (wb or axi)
12 * Maybe add parity? There's a few bits free in each BRAM row on Xilinx
13 * Add optimization: service hits on partially loaded lines
14 * Add optimization: (maybe) interrupt reload on flush/redirect
15 * Check if playing with the geometry of the cache tags allows for more
16 efficient use of distributed RAM and less logic/muxes. Currently we
17 write TAG_BITS width which may not match full ram blocks and might
18 cause muxes to be inferred for "partial writes".
19 * Check if making the read size of PLRU a ROM helps utilization
20
21 Links:
22
23 * https://bugs.libre-soc.org/show_bug.cgi?id=485
24 * https://libre-soc.org/irclog-microwatt/%23microwatt.2021-12-07.log.html
25 (discussion about brams for ECP5)
26
27 """
28
29 from enum import (Enum, unique)
30 from nmigen import (Module, Signal, Elaboratable, Cat, Array, Const, Repl,
31 Record)
32 from nmigen.cli import main, rtlil
33 from nmutil.iocontrol import RecordObject
34 from nmigen.utils import log2_int
35 from nmigen.lib.coding import Decoder
36 from nmutil.util import Display
37 from nmutil.latch import SRLatch
38
39 #from nmutil.plru import PLRU
40 from soc.experiment.plru import PLRU, PLRUs
41 from soc.experiment.cache_ram import CacheRam
42
43 from soc.experiment.mem_types import (Fetch1ToICacheType,
44 ICacheToDecode1Type,
45 MMUToICacheType)
46
47 from soc.experiment.wb_types import (WB_ADDR_BITS, WB_DATA_BITS,
48 WB_SEL_BITS, WBAddrType, WBDataType,
49 WBSelType, WBMasterOut, WBSlaveOut,
50 )
51
52 from nmigen_soc.wishbone.bus import Interface
53 from soc.minerva.units.fetch import FetchUnitInterface
54
55
56 # for test
57 from soc.bus.sram import SRAM
58 from nmigen import Memory
59 from nmutil.util import wrap
60 from nmigen.cli import main, rtlil
61
62 # NOTE: to use cxxsim, export NMIGEN_SIM_MODE=cxxsim from the shell
63 # Also, check out the cxxsim nmigen branch, and latest yosys from git
64 from nmutil.sim_tmp_alternative import Simulator, Settle
65
66 # from microwatt/utils.vhdl
67 def ispow2(n):
68 return n != 0 and (n & (n - 1)) == 0
69
70 SIM = 0
71 # Non-zero to enable log data collection
72 LOG_LENGTH = 0
73
74 class ICacheConfig:
75 def __init__(self, XLEN = 64,
76 LINE_SIZE = 64,
77 NUM_LINES = 64, # Number of lines in a set
78 NUM_WAYS = 2, # Number of ways
79 TLB_SIZE = 64, # L1 ITLB number of entries
80 TLB_LG_PGSZ = 12): # L1 ITLB log_2(page_size)
81 self.XLEN = XLEN
82 self.LINE_SIZE = LINE_SIZE
83 self.NUM_LINES = NUM_LINES
84 self.NUM_WAYS = NUM_WAYS
85 self.TLB_SIZE = TLB_SIZE
86 self.TLB_LG_PGSZ = TLB_LG_PGSZ
87
88 # BRAM organisation: We never access more than wishbone_data_bits
89 # at a time so to save resources we make the array only that wide,
90 # and use consecutive indices to make a cache "line"
91 #
92 # self.ROW_SIZE is the width in bytes of the BRAM
93 # (based on WB, so 64-bits)
94 self.ROW_SIZE = WB_DATA_BITS // 8
95 # Number of real address bits that we store
96 self.REAL_ADDR_BITS = XLEN-8 # 56 for XLEN=64
97
98 self.ROW_SIZE_BITS = self.ROW_SIZE * 8
99 # ROW_PER_LINE is the number of row (wishbone) transactions in a line
100 self.ROW_PER_LINE = self.LINE_SIZE // self.ROW_SIZE
101 # BRAM_ROWS is the number of rows in BRAM
102 # needed to represent the full icache
103 self.BRAM_ROWS = self.NUM_LINES * self.ROW_PER_LINE
104 # INSN_PER_ROW is the number of 32bit instructions per BRAM row
105 self.INSN_PER_ROW = self.ROW_SIZE_BITS // 32
106
107 # Bit fields counts in the address
108 #
109 # INSN_BITS is the number of bits to select an instruction in a row
110 self.INSN_BITS = log2_int(self.INSN_PER_ROW)
111 # ROW_BITS is the number of bits to select a row
112 self.ROW_BITS = log2_int(self.BRAM_ROWS)
113 # ROW_LINE_BITS is the number of bits to select a row within a line
114 self.ROW_LINE_BITS = log2_int(self.ROW_PER_LINE)
115 # LINE_OFF_BITS is the number of bits for the offset in a cache line
116 self.LINE_OFF_BITS = log2_int(self.LINE_SIZE)
117 # ROW_OFF_BITS is the number of bits for the offset in a row
118 self.ROW_OFF_BITS = log2_int(self.ROW_SIZE)
119 # INDEX_BITS is the number of bits to select a cache line
120 self.INDEX_BITS = log2_int(self.NUM_LINES)
121 # SET_SIZE_BITS is the log base 2 of the set size
122 self.SET_SIZE_BITS = self.LINE_OFF_BITS + self.INDEX_BITS
123 # TAG_BITS is the number of bits of the tag part of the address
124 self.TAG_BITS = self.REAL_ADDR_BITS - self.SET_SIZE_BITS
125 # TAG_WIDTH is the width in bits of each way of the tag RAM
126 self.TAG_WIDTH = self.TAG_BITS + 7 - ((self.TAG_BITS + 7) % 8)
127
128 # WAY_BITS is the number of bits to select a way
129 self.WAY_BITS = log2_int(self.NUM_WAYS)
130 self.TAG_RAM_WIDTH = self.TAG_BITS * self.NUM_WAYS
131
132 # L1 ITLB
133 self.TL_BITS = log2_int(self.TLB_SIZE)
134 self.TLB_EA_TAG_BITS = XLEN - (self.TLB_LG_PGSZ + self.TL_BITS)
135 self.TLB_PTE_BITS = XLEN
136
137 print("self.XLEN =", self.XLEN)
138 print("self.BRAM_ROWS =", self.BRAM_ROWS)
139 print("self.INDEX_BITS =", self.INDEX_BITS)
140 print("self.INSN_BITS =", self.INSN_BITS)
141 print("self.INSN_PER_ROW =", self.INSN_PER_ROW)
142 print("self.LINE_SIZE =", self.LINE_SIZE)
143 print("self.LINE_OFF_BITS =", self.LINE_OFF_BITS)
144 print("LOG_LENGTH =", LOG_LENGTH)
145 print("self.NUM_LINES =", self.NUM_LINES)
146 print("self.NUM_WAYS =", self.NUM_WAYS)
147 print("self.REAL_ADDR_BITS =", self.REAL_ADDR_BITS)
148 print("self.ROW_BITS =", self.ROW_BITS)
149 print("self.ROW_OFF_BITS =", self.ROW_OFF_BITS)
150 print("self.ROW_LINE_BITS =", self.ROW_LINE_BITS)
151 print("self.ROW_PER_LINE =", self.ROW_PER_LINE)
152 print("self.ROW_SIZE =", self.ROW_SIZE)
153 print("self.ROW_SIZE_BITS =", self.ROW_SIZE_BITS)
154 print("self.SET_SIZE_BITS =", self.SET_SIZE_BITS)
155 print("SIM =", SIM)
156 print("self.TAG_BITS =", self.TAG_BITS)
157 print("self.TAG_RAM_WIDTH =", self.TAG_RAM_WIDTH)
158 print("self.TAG_BITS =", self.TAG_BITS)
159 print("self.TL_BITS =", self.TL_BITS)
160 print("self.TLB_EA_TAG_BITS =", self.TLB_EA_TAG_BITS)
161 print("self.TLB_LG_PGSZ =", self.TLB_LG_PGSZ)
162 print("self.TLB_PTE_BITS =", self.TLB_PTE_BITS)
163 print("self.TLB_SIZE =", self.TLB_SIZE)
164 print("self.WAY_BITS =", self.WAY_BITS)
165 print()
166
167 assert self.LINE_SIZE % self.ROW_SIZE == 0
168 assert ispow2(self.LINE_SIZE), "self.LINE_SIZE not power of 2"
169 assert ispow2(self.NUM_LINES), "self.NUM_LINES not power of 2"
170 assert ispow2(self.ROW_PER_LINE), "self.ROW_PER_LINE not power of 2"
171 assert ispow2(self.INSN_PER_ROW), "self.INSN_PER_ROW not power of 2"
172 assert (self.ROW_BITS == (self.INDEX_BITS + self.ROW_LINE_BITS)), \
173 "geometry bits don't add up"
174 assert (self.LINE_OFF_BITS ==
175 (self.ROW_OFF_BITS + self.ROW_LINE_BITS)), \
176 "geometry bits don't add up"
177 assert (self.REAL_ADDR_BITS ==
178 (self.TAG_BITS + self.INDEX_BITS + self.LINE_OFF_BITS)), \
179 "geometry bits don't add up"
180 assert (self.REAL_ADDR_BITS ==
181 (self.TAG_BITS + self.ROW_BITS + self.ROW_OFF_BITS)), \
182 "geometry bits don't add up"
183
184 # Example of layout for 32 lines of 64 bytes:
185 #
186 # .. tag |index| line |
187 # .. | row | |
188 # .. | | | |00| zero (2)
189 # .. | | |-| | self.INSN_BITS (1)
190 # .. | |---| | self.ROW_LINE_BITS (3)
191 # .. | |--- - --| self.LINE_OFF_BITS (6)
192 # .. | |- --| self.ROW_OFF_BITS (3)
193 # .. |----- ---| | self.ROW_BITS (8)
194 # .. |-----| | self.INDEX_BITS (5)
195 # .. --------| | self.TAG_BITS (53)
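# As a second worked example (illustration only, derived from the formulas
# above): with the ICacheConfig defaults (XLEN=64, LINE_SIZE=64, NUM_LINES=64,
# NUM_WAYS=2, TLB_SIZE=64) and the 64-bit wishbone data bus (ROW_SIZE=8),
# the geometry comes out as:
#   ROW_PER_LINE=8  BRAM_ROWS=512  INSN_PER_ROW=2  INSN_BITS=1
#   ROW_BITS=9  ROW_LINE_BITS=3  LINE_OFF_BITS=6  ROW_OFF_BITS=3
#   INDEX_BITS=6  SET_SIZE_BITS=12  TAG_BITS=44  TAG_WIDTH=48  TAG_RAM_WIDTH=88
#   WAY_BITS=1  TL_BITS=6  TLB_EA_TAG_BITS=46  TLB_PTE_BITS=64
# (note the ICache class below overrides NUM_LINES and TLB_SIZE with smaller
# values, so the numbers printed at runtime will differ)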
196
197 # The cache data BRAM organized as described above for each way
198 #subtype cache_row_t is std_ulogic_vector(self.ROW_SIZE_BITS-1 downto 0);
199 #
200 def RowPerLineValidArray(self):
201 return Array(Signal(name="rows_valid_%d" %x) \
202 for x in range(self.ROW_PER_LINE))
203
204
205 # TODO to be passed to nmigen as ram attributes
206 # attribute ram_style : string;
207 # attribute ram_style of cache_tags : signal is "distributed";
208
209 def TLBRecord(self, name):
210 tlb_layout = [ ('tag', self.TLB_EA_TAG_BITS),
211 ('pte', self.TLB_PTE_BITS)
212 ]
213 return Record(tlb_layout, name=name)
214
215 def TLBArray(self):
216 return Array(self.TLBRecord("tlb%d" % x) for x in range(self.TLB_SIZE))
217
218 # PLRU output interface
219 def PLRUOut(self):
220 return Array(Signal(self.WAY_BITS, name="plru_out_%d" %x) \
221 for x in range(self.NUM_LINES))
222
223 # Return the cache line index (tag index) for an address
224 def get_index(self, addr):
225 return addr[self.LINE_OFF_BITS:self.SET_SIZE_BITS]
226
227 # Return the cache row index (data memory) for an address
228 def get_row(self, addr):
229 return addr[self.ROW_OFF_BITS:self.SET_SIZE_BITS]
230
231 # Return the index of a row within a line
232 def get_row_of_line(self, row):
233 return row[:self.ROW_BITS][:self.ROW_LINE_BITS]
234
235 # Returns whether this is the last row of a line
236 def is_last_row_addr(self, addr, last):
237 return addr[self.ROW_OFF_BITS:self.LINE_OFF_BITS] == last
238
239 # Returns whether this is the last row of a line
240 def is_last_row(self, row, last):
241 return self.get_row_of_line(row) == last
242
243 # Return the next row in the current cache line. We use a dedicated
244 # function in order to limit the size of the generated adder to be
245 # only the bits within a cache line (3 bits with default settings)
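# (e.g. with ROW_LINE_BITS=3 the adder is only 3 bits wide: row 0b..._111
# wraps to 0b..._000, and the upper ROW_BITS-3 bits of the row number are
# passed through unchanged by the Cat() below)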
246 def next_row(self, row):
247 row_v = row[0:self.ROW_LINE_BITS] + 1
248 return Cat(row_v[:self.ROW_LINE_BITS], row[self.ROW_LINE_BITS:])
249
250 # Read the instruction word for the given address
251 # in the current cache row
252 def read_insn_word(self, addr, data):
253 word = addr[2:self.INSN_BITS+2]
254 return data.word_select(word, 32)
255
256 # Get the tag value from the address
257 def get_tag(self, addr):
258 return addr[self.SET_SIZE_BITS:self.REAL_ADDR_BITS]
259
260 # Read a tag from a tag memory row
261 def read_tag(self, way, tagset):
262 return tagset.word_select(way, self.TAG_BITS)
263
264 # Write a tag to tag memory row
265 def write_tag(self, way, tagset, tag):
266 return self.read_tag(way, tagset).eq(tag)
267
268 # Simple hash for direct-mapped TLB index
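# (with the ICacheConfig defaults, TLB_LG_PGSZ=12 and TL_BITS=6, this folds
# addr[12:18] ^ addr[18:24] ^ addr[24:30] into the 6-bit TLB index)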
269 def hash_ea(self, addr):
270 hsh = (addr[self.TLB_LG_PGSZ:self.TLB_LG_PGSZ + self.TL_BITS] ^
271 addr[self.TLB_LG_PGSZ + self.TL_BITS:
272 self.TLB_LG_PGSZ + 2 * self.TL_BITS ] ^
273 addr[self.TLB_LG_PGSZ + 2 * self.TL_BITS:
274 self.TLB_LG_PGSZ + 3 * self.TL_BITS])
275 return hsh
276
277
278 # Cache reload state machine
279 @unique
280 class State(Enum):
281 IDLE = 0
282 CLR_TAG = 1
283 WAIT_ACK = 2
284
285
286 class RegInternal(RecordObject):
287 def __init__(self, cfg):
288 super().__init__()
289 # Cache hit state (Latches for 1 cycle BRAM access)
290 self.hit_way = Signal(cfg.WAY_BITS)
291 self.hit_nia = Signal(64)
292 self.hit_smark = Signal()
293 self.hit_valid = Signal()
294
295 # Cache miss state (reload state machine)
296 self.state = Signal(State, reset=State.IDLE)
297 self.wb = WBMasterOut("wb")
298 self.req_adr = Signal(64)
299 self.store_way = Signal(cfg.WAY_BITS)
300 self.store_index = Signal(cfg.INDEX_BITS)
301 self.store_row = Signal(cfg.ROW_BITS)
302 self.store_tag = Signal(cfg.TAG_BITS)
303 self.store_valid = Signal()
304 self.end_row_ix = Signal(cfg.ROW_LINE_BITS)
305 self.rows_valid = cfg.RowPerLineValidArray()
306
307 # TLB miss state
308 self.fetch_failed = Signal()
309
310
311 class ICache(FetchUnitInterface, Elaboratable, ICacheConfig):
312 """64 bit direct mapped icache. All instructions are 4B aligned."""
313 def __init__(self, pspec):
314 FetchUnitInterface.__init__(self, pspec)
315 self.i_in = Fetch1ToICacheType(name="i_in")
316 self.i_out = ICacheToDecode1Type(name="i_out")
317
318 self.m_in = MMUToICacheType(name="m_in")
319
320 self.stall_in = Signal()
321 self.stall_out = Signal()
322 self.flush_in = Signal()
323 self.inval_in = Signal()
324
325 # standard naming (wired to non-standard for compatibility)
326 self.bus = Interface(addr_width=32,
327 data_width=64,
328 granularity=8,
329 features={'stall'},
330 #alignment=0,
331 name="icache_wb")
332
333 self.log_out = Signal(54)
334
335 # use FetchUnitInterface, helps keep some unit tests running
336 self.use_fetch_iface = False
337
338 # test if small cache is to be enabled
339 self.small_cache = (hasattr(pspec, "small_cache") and
340 (pspec.small_cache == True))
341 # test if microwatt compatibility is to be enabled
342 self.microwatt_compat = (hasattr(pspec, "microwatt_compat") and
343 (pspec.microwatt_compat == True))
344 # test if fabric compatibility is to be enabled
345 self.fabric_compat = (hasattr(pspec, "fabric_compat") and
346 (pspec.fabric_compat == True))
347
348 XLEN = pspec.XLEN
349 LINE_SIZE = 64
350 TLB_SIZE = 8
351 NUM_LINES = 8
352 NUM_WAYS = 2
353 if self.small_cache:
354 # reduce way sizes and num lines to ridiculously small
355 NUM_LINES = 2
356 NUM_WAYS = 1
357 TLB_SIZE = 2
358 if self.microwatt_compat or self.fabric_compat:
359 # reduce way sizes
360 NUM_WAYS = 1
361
362 ICacheConfig.__init__(self, LINE_SIZE=LINE_SIZE,
363 XLEN=XLEN,
364 NUM_LINES = NUM_LINES,
365 NUM_WAYS = NUM_WAYS,
366 TLB_SIZE=TLB_SIZE
367 )
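# Note: small_cache, microwatt_compat and fabric_compat are read from the
# pspec as plain attributes; when they are absent (as in the unit test at the
# bottom of this file) the default 2-way, 8-line geometry above is used.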
368
369 def use_fetch_interface(self):
370 self.use_fetch_iface = True
371
372 # Generate a cache RAM for each way
373 def rams(self, m, r, cache_out_row, use_previous,
374 replace_way, req_row):
375
376 comb = m.d.comb
377 sync = m.d.sync
378
379 bus, stall_in = self.bus, self.stall_in
380
381 # read condition (for every cache ram)
382 do_read = Signal()
383 comb += do_read.eq(~(stall_in | use_previous))
384
385 rd_addr = Signal(self.ROW_BITS)
386 wr_addr = Signal(self.ROW_BITS)
387 comb += rd_addr.eq(req_row)
388 comb += wr_addr.eq(r.store_row)
389
390 # binary-to-unary converters: replace-way enabled by bus.ack,
391 # hit-way left permanently enabled
392 m.submodules.replace_way_e = re = Decoder(self.NUM_WAYS)
393 m.submodules.hit_way_e = he = Decoder(self.NUM_WAYS)
394 comb += re.i.eq(replace_way)
395 comb += re.n.eq(~bus.ack)
396 comb += he.i.eq(r.hit_way)
397
398 for i in range(self.NUM_WAYS):
399 do_write = Signal(name="do_wr_%d" % i)
400 d_out = Signal(self.ROW_SIZE_BITS, name="d_out_%d" % i)
401 wr_sel = Signal(self.ROW_SIZE, name="wr_sel_%d" % i)
402
403 way = CacheRam(self.ROW_BITS, self.ROW_SIZE_BITS,
404 TRACE=True, ram_num=i)
405 m.submodules["cacheram_%d" % i] = way
406
407 comb += way.rd_en.eq(do_read)
408 comb += way.rd_addr.eq(rd_addr)
409 comb += d_out.eq(way.rd_data_o)
410 comb += way.wr_sel.eq(wr_sel)
411 comb += way.wr_addr.eq(wr_addr)
412 comb += way.wr_data.eq(bus.dat_r)
413
414 comb += do_write.eq(re.o[i])
415
416 with m.If(do_write):
417 sync += Display("cache write adr: %x data: %lx",
418 wr_addr, way.wr_data)
419
420 with m.If(he.o[i]):
421 comb += cache_out_row.eq(d_out)
422 with m.If(do_read):
423 sync += Display("cache read adr: %x data: %x",
424 req_row, d_out)
425
426 comb += wr_sel.eq(Repl(do_write, self.ROW_SIZE))
427
428 # Generate PLRUs
429 def maybe_plrus(self, m, r, plru_victim):
430 comb = m.d.comb
431
432 if self.NUM_WAYS == 0:
433 return
434
435
436 m.submodules.plrus = plru = PLRUs("itag", self.NUM_LINES,
437 self.WAY_BITS)
438 comb += plru.way.eq(r.hit_way)
439 comb += plru.valid.eq(r.hit_valid)
440 comb += plru.index.eq(self.get_index(r.hit_nia))
441 comb += plru.isel.eq(r.store_index) # select victim
442 comb += plru_victim.eq(plru.o_index) # selected victim
443
444 # TLB hit detection and real address generation
445 def itlb_lookup(self, m, tlb_req_index, itlb, itlb_valid,
446 real_addr, ra_valid, eaa_priv,
447 priv_fault, access_ok):
448
449 comb = m.d.comb
450
451 i_in = self.i_in
452
453 # use an *asynchronous* Memory read port here (combinatorial)
454 m.submodules.rd_tlb = rd_tlb = self.tlbmem.read_port(domain="comb")
455 tlb = self.TLBRecord("tlb_rdport")
456 pte, ttag = tlb.pte, tlb.tag
457
458 comb += tlb_req_index.eq(self.hash_ea(i_in.nia))
459 comb += rd_tlb.addr.eq(tlb_req_index)
460 comb += tlb.eq(rd_tlb.data)
461
462 with m.If(i_in.virt_mode):
463 comb += real_addr.eq(Cat(i_in.nia[:self.TLB_LG_PGSZ],
464 pte[self.TLB_LG_PGSZ:self.REAL_ADDR_BITS]))
465
466 with m.If(ttag == i_in.nia[self.TLB_LG_PGSZ + self.TL_BITS:64]):
467 comb += ra_valid.eq(itlb_valid.q.bit_select(tlb_req_index, 1))
468
469 comb += eaa_priv.eq(pte[3])
470
471 with m.Else():
472 comb += real_addr.eq(i_in.nia[:self.REAL_ADDR_BITS])
473 comb += ra_valid.eq(1)
474 comb += eaa_priv.eq(1)
475
476 # No IAMR, so no KUEP support for now
477 comb += priv_fault.eq(eaa_priv & ~i_in.priv_mode)
478 comb += access_ok.eq(ra_valid & ~priv_fault)
479
480 # iTLB update
481 def itlb_update(self, m, itlb, itlb_valid):
482 comb = m.d.comb
483 sync = m.d.sync
484
485 m_in = self.m_in
486
487 wr_index = Signal(self.TL_BITS)
488 wr_unary = Signal(self.TLB_SIZE)
489 comb += wr_index.eq(self.hash_ea(m_in.addr))
490 comb += wr_unary.eq(1<<wr_index)
491
492 m.submodules.wr_tlb = wr_tlb = self.tlbmem.write_port()
493 sync += itlb_valid.s.eq(0)
494 sync += itlb_valid.r.eq(0)
495
496 with m.If(m_in.tlbie & m_in.doall):
497 # Clear all valid bits
498 sync += itlb_valid.r.eq(-1)
499
500 with m.Elif(m_in.tlbie):
501 # Clear entry regardless of hit or miss
502 sync += itlb_valid.r.eq(wr_unary)
503
504 with m.Elif(m_in.tlbld):
505 tlb = self.TLBRecord("tlb_wrport")
506 comb += tlb.tag.eq(m_in.addr[self.TLB_LG_PGSZ + self.TL_BITS:64])
507 comb += tlb.pte.eq(m_in.pte)
508 comb += wr_tlb.en.eq(1)
509 comb += wr_tlb.addr.eq(wr_index)
510 comb += wr_tlb.data.eq(tlb)
511 sync += itlb_valid.s.eq(wr_unary)
512
513 # Cache hit detection, output to fetch2 and other misc logic
514 def icache_comb(self, m, use_previous, r, req_index, req_row,
515 req_hit_way, req_tag, real_addr, req_laddr,
516 cache_valids, access_ok,
517 req_is_hit, req_is_miss, replace_way,
518 plru_victim, cache_out_row):
519
520 comb = m.d.comb
521 m.submodules.rd_tag = rd_tag = self.tagmem.read_port(domain="comb")
522
523 i_in, i_out, bus = self.i_in, self.i_out, self.bus
524 flush_in, stall_out = self.flush_in, self.stall_out
525
526 is_hit = Signal()
527 hit_way = Signal(self.WAY_BITS)
528
529 # i_in.sequential means that i_in.nia this cycle is 4 more than
530 # last cycle. If we read more than 32 bits at a time, had a
531 # cache hit last cycle, and we don't want the first 32-bit chunk
532 # then we can keep the data we read last cycle and just use that.
533 with m.If(i_in.nia[2:self.INSN_BITS+2] != 0):
534 comb += use_previous.eq(i_in.sequential & r.hit_valid)
535
536 # Extract line, row and tag from request
537 comb += req_index.eq(self.get_index(i_in.nia))
538 comb += req_row.eq(self.get_row(i_in.nia))
539 comb += req_tag.eq(self.get_tag(real_addr))
540
541 # Calculate address of beginning of cache row, will be
542 # used for cache miss processing if needed
543 comb += req_laddr.eq(Cat(
544 Const(0, self.ROW_OFF_BITS),
545 real_addr[self.ROW_OFF_BITS:self.REAL_ADDR_BITS],
546 ))
547
548 # Test if pending request is a hit on any way
549 hitcond = Signal()
550 rowvalid = Signal()
551 comb += rowvalid.eq(r.rows_valid[req_row % self.ROW_PER_LINE])
552 comb += hitcond.eq((r.state == State.WAIT_ACK) &
553 (req_index == r.store_index) &
554 rowvalid
555 )
556 # i_in.req asserts Decoder active
557 cvb = Signal(self.NUM_WAYS)
558 ctag = Signal(self.TAG_RAM_WIDTH)
559 comb += rd_tag.addr.eq(req_index)
560 comb += ctag.eq(rd_tag.data)
561 comb += cvb.eq(cache_valids.q.word_select(req_index, self.NUM_WAYS))
562 m.submodules.store_way_e = se = Decoder(self.NUM_WAYS)
563 comb += se.i.eq(r.store_way)
564 comb += se.n.eq(~i_in.req)
565 for i in range(self.NUM_WAYS):
566 tagi = Signal(self.TAG_BITS, name="tag_i%d" % i)
567 hit_test = Signal(name="hit_test%d" % i)
568 is_tag_hit = Signal(name="is_tag_hit_%d" % i)
569 comb += tagi.eq(self.read_tag(i, ctag))
570 comb += hit_test.eq(se.o[i])
571 comb += is_tag_hit.eq((cvb[i] | (hitcond & hit_test)) &
572 (tagi == req_tag))
573 with m.If(is_tag_hit):
574 comb += hit_way.eq(i)
575 comb += is_hit.eq(1)
576
577 # Generate the "hit" and "miss" signals
578 # for the synchronous blocks
579 with m.If(i_in.req & access_ok & ~flush_in):
580 comb += req_is_hit.eq(is_hit)
581 comb += req_is_miss.eq(~is_hit)
582
583 comb += req_hit_way.eq(hit_way)
584
585 # The way to replace on a miss
586 with m.If(r.state == State.CLR_TAG):
587 comb += replace_way.eq(plru_victim)
588 with m.Else():
589 comb += replace_way.eq(r.store_way)
590
591 # Output instruction from current cache row
592 #
593 # Note: This is a mild violation of our design principle of
594 # having pipeline stages output from a clean latch. In this
595 # case we output the result of a mux. The alternative would
596 # be to output an entire row, which I prefer not to do just yet
597 # as it would force fetch2 to know about some of the cache
598 # geometry information.
599 comb += i_out.insn.eq(self.read_insn_word(r.hit_nia, cache_out_row))
600 comb += i_out.valid.eq(r.hit_valid)
601 comb += i_out.nia.eq(r.hit_nia)
602 comb += i_out.stop_mark.eq(r.hit_smark)
603 comb += i_out.fetch_failed.eq(r.fetch_failed)
604
605 # Stall fetch1 if we have a miss on cache or TLB
606 # or a protection fault
607 comb += stall_out.eq(~(is_hit & access_ok))
608
609 # Wishbone requests output (from the cache miss reload machine)
610 comb += bus.we.eq(r.wb.we)
611 comb += bus.adr.eq(r.wb.adr)
612 comb += bus.sel.eq(r.wb.sel)
613 comb += bus.stb.eq(r.wb.stb)
614 comb += bus.dat_w.eq(r.wb.dat)
615 comb += bus.cyc.eq(r.wb.cyc)
616
617 # Cache hit synchronous machine
618 def icache_hit(self, m, use_previous, r, req_is_hit, req_hit_way,
619 req_index, req_tag, real_addr):
620 sync = m.d.sync
621
622 i_in, stall_in = self.i_in, self.stall_in
623 flush_in = self.flush_in
624
625 # keep outputs to fetch2 unchanged on a stall
626 # except that flush or reset sets valid to 0
627 # If use_previous, keep the same data as last
628 # cycle and use the second half
629 with m.If(stall_in | use_previous):
630 with m.If(flush_in):
631 sync += r.hit_valid.eq(0)
632 with m.Else():
633 # On a hit, latch the request for the next cycle,
634 # when the BRAM data will be available on the
635 # cache_out output of the corresponding way
636 sync += r.hit_valid.eq(req_is_hit)
637
638 with m.If(req_is_hit):
639 sync += r.hit_way.eq(req_hit_way)
640 sync += Display("cache hit nia:%x IR:%x SM:%x idx:%x tag:%x "
641 "way:%x RA:%x", i_in.nia, i_in.virt_mode,
642 i_in.stop_mark, req_index, req_tag,
643 req_hit_way, real_addr)
644
645 with m.If(~stall_in):
646 # Send stop marks and NIA down regardless of validity
647 sync += r.hit_smark.eq(i_in.stop_mark)
648 sync += r.hit_nia.eq(i_in.nia)
649
650 def icache_miss_idle(self, m, r, req_is_miss, req_laddr,
651 req_index, req_tag, replace_way, real_addr):
652 comb = m.d.comb
653 sync = m.d.sync
654
655 i_in = self.i_in
656
657 # Reset per-row valid flags, only used in WAIT_ACK
658 for i in range(self.ROW_PER_LINE):
659 sync += r.rows_valid[i].eq(0)
660
661 # We need to read a cache line
662 with m.If(req_is_miss):
663 sync += Display(
664 "cache miss nia:%x IR:%x SM:%x idx:%x "
665 " way:%x tag:%x RA:%x", i_in.nia,
666 i_in.virt_mode, i_in.stop_mark, req_index,
667 replace_way, req_tag, real_addr)
668
669 # Keep track of our index and way for subsequent stores
670 st_row = Signal(self.ROW_BITS)
671 comb += st_row.eq(self.get_row(req_laddr))
672 sync += r.store_index.eq(req_index)
673 sync += r.store_row.eq(st_row)
674 sync += r.store_tag.eq(req_tag)
675 sync += r.store_valid.eq(1)
676 sync += r.end_row_ix.eq(self.get_row_of_line(st_row) - 1)
677
678 # Prep for first wishbone read. We calculate the address
679 # of the start of the cache line and start the WB cycle.
680 sync += r.req_adr.eq(req_laddr)
681 sync += r.wb.cyc.eq(1)
682 sync += r.wb.stb.eq(1)
683
684 # Track that we had one request sent
685 sync += r.state.eq(State.CLR_TAG)
686
687 def icache_miss_clr_tag(self, m, r, replace_way,
688 req_index,
689 cache_valids):
690 comb = m.d.comb
691 sync = m.d.sync
692 m.submodules.wr_tag = wr_tag = self.tagmem.write_port(
693 granularity=self.TAG_BITS)
694
695 # Get victim way from plru
696 sync += r.store_way.eq(replace_way)
697
698 # Force misses on that way while reloading that line
699 idx = req_index*self.NUM_WAYS + replace_way # 2D index, 1st dim: self.NUM_WAYS
700 comb += cache_valids.r.eq(1<<idx)
701
702 # use write-port "granularity" to select the tag to write to
703 # TODO: the Memory should be multiplied up (by NUM_TAGS)
704 tagset = Signal(self.TAG_RAM_WIDTH)
705 comb += tagset.eq(r.store_tag << (replace_way*self.TAG_BITS))
706 comb += wr_tag.en.eq(1<<replace_way)
707 comb += wr_tag.addr.eq(r.store_index)
708 comb += wr_tag.data.eq(tagset)
709
710 sync += r.state.eq(State.WAIT_ACK)
711
712 def icache_miss_wait_ack(self, m, r, replace_way, inval_in,
713 cache_valids):
714 comb = m.d.comb
715 sync = m.d.sync
716
717 bus = self.bus
718
719 # If we are still sending requests, was one accepted?
720 with m.If(~bus.stall & r.wb.stb):
721 # That was the last word? We are done sending. Clear stb
722 with m.If(self.is_last_row_addr(r.req_adr, r.end_row_ix)):
723 sync += Display("IS_LAST_ROW_ADDR r.wb.addr:%x "
724 "r.end_row_ix:%x r.wb.stb:%x",
725 r.wb.adr, r.end_row_ix, r.wb.stb)
726 sync += r.wb.stb.eq(0)
727
728 # Calculate the next row address
729 rarange = Signal(self.LINE_OFF_BITS - self.ROW_OFF_BITS)
730 comb += rarange.eq(r.req_adr[self.ROW_OFF_BITS:
731 self.LINE_OFF_BITS] + 1)
732 sync += r.req_adr[self.ROW_OFF_BITS:self.LINE_OFF_BITS].eq(rarange)
733 sync += Display("RARANGE r.req_adr:%x rarange:%x "
734 "r.wb.stb:%x",
735 r.req_adr, rarange, r.wb.stb)
736
737 # Incoming acks processing
738 with m.If(bus.ack):
739 sync += Display("WB_IN_ACK data:%x", bus.dat_r)
740
741 sync += r.rows_valid[r.store_row % self.ROW_PER_LINE].eq(1)
742
743 # Check for completion
744 with m.If(self.is_last_row(r.store_row, r.end_row_ix)):
745 # Complete wishbone cycle
746 sync += r.wb.cyc.eq(0)
747 # be nice, clear addr
748 sync += r.req_adr.eq(0)
749
750 # Cache line is now valid
751 idx = r.store_index*self.NUM_WAYS + replace_way # 2D index again
752 valid = r.store_valid & ~inval_in
753 comb += cache_valids.s.eq(1<<idx)
754 sync += r.state.eq(State.IDLE)
755
756 # move on to next request in row
757 # Increment store row counter
758 sync += r.store_row.eq(self.next_row(r.store_row))
759
760 # Cache miss/reload synchronous machine
761 def icache_miss(self, m, r, req_is_miss,
762 req_index, req_laddr, req_tag, replace_way,
763 cache_valids, access_ok, real_addr):
764 comb = m.d.comb
765 sync = m.d.sync
766
767 i_in, bus, m_in = self.i_in, self.bus, self.m_in
768 stall_in, flush_in = self.stall_in, self.flush_in
769 inval_in = self.inval_in
770
771 comb += r.wb.sel.eq(-1)
772 comb += r.wb.adr.eq(r.req_adr[3:])
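# (the nmigen-soc wishbone Interface address is word-addressed: with the
# 64-bit data bus, the 3 byte-offset bits of req_adr are dropped here)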
773
774 # Process cache invalidations
775 with m.If(inval_in):
776 comb += cache_valids.r.eq(-1)
777 sync += r.store_valid.eq(0)
778
779 # Main state machine
780 with m.Switch(r.state):
781
782 with m.Case(State.IDLE):
783 self.icache_miss_idle(m, r, req_is_miss, req_laddr,
784 req_index, req_tag, replace_way,
785 real_addr)
786
787 with m.Case(State.CLR_TAG, State.WAIT_ACK):
788 with m.If(r.state == State.CLR_TAG):
789 self.icache_miss_clr_tag(m, r, replace_way,
790 req_index,
791 cache_valids)
792
793 self.icache_miss_wait_ack(m, r, replace_way, inval_in,
794 cache_valids)
795
796 # TLB miss and protection fault processing
797 with m.If(flush_in | m_in.tlbld):
798 sync += r.fetch_failed.eq(0)
799 with m.Elif(i_in.req & ~access_ok & ~stall_in):
800 sync += r.fetch_failed.eq(1)
801
802 # icache_log: if LOG_LENGTH > 0 generate
803 def icache_log(self, m, req_hit_way, ra_valid, access_ok,
804 req_is_miss, req_is_hit, lway, wstate, r):
805 comb = m.d.comb
806 sync = m.d.sync
807
808 bus, i_out = self.bus, self.i_out
809 log_out, stall_out = self.log_out, self.stall_out
810
811 # Output data to logger
812 for i in range(LOG_LENGTH):
813 log_data = Signal(54)
814 lway = Signal(self.WAY_BITS)
815 wstate = Signal()
816
817 sync += lway.eq(req_hit_way)
818 sync += wstate.eq(0)
819
820 with m.If(r.state != State.IDLE):
821 sync += wstate.eq(1)
822
823 sync += log_data.eq(Cat(
824 ra_valid, access_ok, req_is_miss, req_is_hit,
825 lway, wstate, r.hit_nia[2:6], r.fetch_failed,
826 stall_out, bus.stall, r.wb.cyc, r.wb.stb,
827 r.real_addr[3:6], bus.ack, i_out.insn, i_out.valid
828 ))
829 comb += log_out.eq(log_data)
830
831 def elaborate(self, platform):
832
833 m = Module()
834 comb = m.d.comb
835
836 # Cache-Ways "valid" indicators. This is a 2D Signal, indexed
837 # by the number of ways and the number of lines.
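# bit (line_index * NUM_WAYS + way) is the valid flag for that way of that
# line: see the idx calculation in icache_miss_clr_tag and
# icache_miss_wait_ack, and the word_select() in icache_comb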
838 vec = SRLatch(sync=True, llen=self.NUM_WAYS*self.NUM_LINES,
839 name="cachevalids")
840 m.submodules.cache_valids = cache_valids = vec
841
842 # TLB Array
843 itlb = self.TLBArray()
844 vec = SRLatch(sync=False, llen=self.TLB_SIZE, name="tlbvalids")
845 m.submodules.itlb_valids = itlb_valid = vec
846
847 # TODO to be passed to nmigen as ram attributes
848 # attribute ram_style of itlb_tags : signal is "distributed";
849 # attribute ram_style of itlb_ptes : signal is "distributed";
850
851 # Privilege bit from PTE EAA field
852 eaa_priv = Signal()
853
854 r = RegInternal(self)
855
856 # Async signal on incoming request
857 req_index = Signal(self.INDEX_BITS)
858 req_row = Signal(self.ROW_BITS)
859 req_hit_way = Signal(self.WAY_BITS)
860 req_tag = Signal(self.TAG_BITS)
861 req_is_hit = Signal()
862 req_is_miss = Signal()
863 req_laddr = Signal(64)
864
865 tlb_req_index = Signal(self.TL_BITS)
866 real_addr = Signal(self.REAL_ADDR_BITS)
867 ra_valid = Signal()
868 priv_fault = Signal()
869 access_ok = Signal()
870 use_previous = Signal()
871
872 cache_out_row = Signal(self.ROW_SIZE_BITS)
873
874 plru_victim = Signal(self.WAY_BITS)
875 replace_way = Signal(self.WAY_BITS)
876
877 self.tlbmem = Memory(depth=self.TLB_SIZE,
878 width=self.TLB_EA_TAG_BITS+self.TLB_PTE_BITS,
879 #attrs={'syn_ramstyle': "block_ram"}
880 )
881 self.tagmem = Memory(depth=self.NUM_LINES,
882 width=self.TAG_RAM_WIDTH,
883 #attrs={'syn_ramstyle': "block_ram"}
884 )
885
886 # call sub-functions putting everything together,
887 # using shared signals established above
888 self.rams(m, r, cache_out_row, use_previous, replace_way, req_row)
889 self.maybe_plrus(m, r, plru_victim)
890 self.itlb_lookup(m, tlb_req_index, itlb, itlb_valid, real_addr,
891 ra_valid, eaa_priv, priv_fault,
892 access_ok)
893 self.itlb_update(m, itlb, itlb_valid)
894 self.icache_comb(m, use_previous, r, req_index, req_row, req_hit_way,
895 req_tag, real_addr, req_laddr,
896 cache_valids,
897 access_ok, req_is_hit, req_is_miss,
898 replace_way, plru_victim, cache_out_row)
899 self.icache_hit(m, use_previous, r, req_is_hit, req_hit_way,
900 req_index, req_tag, real_addr)
901 self.icache_miss(m, r, req_is_miss, req_index,
902 req_laddr, req_tag, replace_way,
903 cache_valids,
904 access_ok, real_addr)
905 #self.icache_log(m, log_out, req_hit_way, ra_valid, access_ok,
906 # req_is_miss, req_is_hit, lway, wstate, r)
907
908 # don't connect up to FetchUnitInterface so that some unit tests
909 # can continue to operate
910 if not self.use_fetch_iface:
911 return m
912
913 # connect to FetchUnitInterface. FetchUnitInterface is undocumented
914 # so needs checking and iterative revising
915 i_in, bus, i_out = self.i_in, self.bus, self.i_out
916 comb += i_in.req.eq(self.a_i_valid)
917 comb += i_in.nia.eq(self.a_pc_i)
918 comb += self.stall_in.eq(self.a_stall_i)
919 comb += self.f_fetch_err_o.eq(i_out.fetch_failed)
920 comb += self.f_badaddr_o.eq(i_out.nia)
921 comb += self.f_instr_o.eq(i_out.insn)
922 comb += self.f_busy_o.eq(~i_out.valid) # probably
923
924 # TODO, connect icache wb_in/wb_out to "standard" nmigen Wishbone bus
925 ibus = self.ibus
926 comb += ibus.adr.eq(self.bus.adr)
927 comb += ibus.dat_w.eq(self.bus.dat_w)
928 comb += ibus.sel.eq(self.bus.sel)
929 comb += ibus.cyc.eq(self.bus.cyc)
930 comb += ibus.stb.eq(self.bus.stb)
931 comb += ibus.we.eq(self.bus.we)
932
933 comb += self.bus.dat_r.eq(ibus.dat_r)
934 comb += self.bus.ack.eq(ibus.ack)
935 if hasattr(ibus, "stall"):
936 comb += self.bus.stall.eq(ibus.stall)
937 else:
938 # fake-up the wishbone stall signal to comply with pipeline mode
939 # same thing is done in dcache.py
940 comb += self.bus.stall.eq(self.bus.cyc & ~self.bus.ack)
941
942 return m
943
944
945 def icache_sim(dut):
946 i_in = dut.i_in
947 i_out = dut.i_out
948 m_out = dut.m_in
949
950 yield i_in.priv_mode.eq(1)
951 yield i_in.req.eq(0)
952 yield i_in.nia.eq(0)
953 yield i_in.stop_mark.eq(0)
954 yield m_out.tlbld.eq(0)
955 yield m_out.tlbie.eq(0)
956 yield m_out.addr.eq(0)
957 yield m_out.pte.eq(0)
958 yield
959 yield
960 yield
961 yield
962
963 # miss, stalls for a bit
964 yield i_in.req.eq(1)
965 yield i_in.nia.eq(Const(0x0000000000000004, 64))
966 yield
967 valid = yield i_out.valid
968 while not valid:
969 yield
970 valid = yield i_out.valid
971 yield i_in.req.eq(0)
972
973 insn = yield i_out.insn
974 nia = yield i_out.nia
975 assert insn == 0x00000001, \
976 "insn @%x=%x expected 00000001" % (nia, insn)
977 yield i_in.req.eq(0)
978 yield
979
980 # hit
981 yield i_in.req.eq(1)
982 yield i_in.nia.eq(Const(0x0000000000000008, 64))
983 yield
984 valid = yield i_out.valid
985 while not valid:
986 yield
987 valid = yield i_out.valid
988 yield i_in.req.eq(0)
989
990 nia = yield i_out.nia
991 insn = yield i_out.insn
992 yield
993 assert insn == 0x00000002, \
994 "insn @%x=%x expected 00000002" % (nia, insn)
995
996 # another miss
997 yield i_in.req.eq(1)
998 yield i_in.nia.eq(Const(0x0000000000000040, 64))
999 yield
1000 valid = yield i_out.valid
1001 while not valid:
1002 yield
1003 valid = yield i_out.valid
1004 yield i_in.req.eq(0)
1005
1006 nia = yield i_in.nia
1007 insn = yield i_out.insn
1008 assert insn == 0x00000010, \
1009 "insn @%x=%x expected 00000010" % (nia, insn)
1010
1011 # test something that aliases (this only works because
1012 # the unit test SRAM is a depth of 512)
1013 yield i_in.req.eq(1)
1014 yield i_in.nia.eq(Const(0x0000000000000100, 64))
1015 yield
1016 yield
1017 valid = yield i_out.valid
1018 assert not valid
1019 for i in range(30):
1020 yield
1021 yield
1022 insn = yield i_out.insn
1023 valid = yield i_out.valid
1024 insn = yield i_out.insn
1025 assert valid
1026 assert insn == 0x00000040, \
1027 "insn @%x=%x expected 00000040" % (nia, insn)
1028 yield i_in.req.eq(0)
1029
1030
1031 def test_icache(mem):
1032 from soc.config.test.test_loadstore import TestMemPspec
1033 pspec = TestMemPspec(addr_wid=32,
1034 mask_wid=8,
1035 reg_wid=64,
1036 XLEN=32,
1037 )
1038 dut = ICache(pspec)
1039
1040 memory = Memory(width=64, depth=512, init=mem)
1041 sram = SRAM(memory=memory, granularity=8)
1042
1043 m = Module()
1044
1045 m.submodules.icache = dut
1046 m.submodules.sram = sram
1047
1048 m.d.comb += sram.bus.cyc.eq(dut.bus.cyc)
1049 m.d.comb += sram.bus.stb.eq(dut.bus.stb)
1050 m.d.comb += sram.bus.we.eq(dut.bus.we)
1051 m.d.comb += sram.bus.sel.eq(dut.bus.sel)
1052 m.d.comb += sram.bus.adr.eq(dut.bus.adr)
1053 m.d.comb += sram.bus.dat_w.eq(dut.bus.dat_w)
1054
1055 m.d.comb += dut.bus.ack.eq(sram.bus.ack)
1056 m.d.comb += dut.bus.dat_r.eq(sram.bus.dat_r)
1057
1058 # nmigen Simulation
1059 sim = Simulator(m)
1060 sim.add_clock(1e-6)
1061
1062 sim.add_sync_process(wrap(icache_sim(dut)))
1063 with sim.write_vcd('test_icache.vcd'):
1064 sim.run()
1065
1066
1067 if __name__ == '__main__':
1068 from soc.config.test.test_loadstore import TestMemPspec
1069 pspec = TestMemPspec(addr_wid=64,
1070 mask_wid=8,
1071 XLEN=32,
1072 reg_wid=64,
1073 )
1074 dut = ICache(pspec)
1075 vl = rtlil.convert(dut, ports=[])
1076 with open("test_icache.il", "w") as f:
1077 f.write(vl)
1078
1079 # set up memory every 32-bits with incrementing values 0 1 2 ...
1080 mem = []
1081 for i in range(512):
1082 mem.append((i*2) | ((i*2+1)<<32))
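# i.e. 32-bit word N of the test memory holds the value N, which is what
# icache_sim() checks: nia 0x4 -> word 1 -> insn 0x00000001, nia 0x40 ->
# word 0x10 -> 0x00000010, and the aliasing read at nia 0x100 -> 0x00000040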
1083
1084 test_icache(mem)