1 """ICache
2
3 based on Anton Blanchard microwatt icache.vhdl
4
5 Set associative icache
6
7 TODO (in no specific order):
8 * Add debug interface to inspect cache content
9 * Add snoop/invalidate path
10 * Add multi-hit error detection
11 * Pipelined bus interface (wb or axi)
12 * Maybe add parity? There's a few bits free in each BRAM row on Xilinx
13 * Add optimization: service hits on partially loaded lines
14 * Add optimization: (maybe) interrupt reload on fluch/redirect
15 * Check if playing with the geometry of the cache tags allow for more
16 efficient use of distributed RAM and less logic/muxes. Currently we
17 write TAG_BITS width which may not match full ram blocks and might
18 cause muxes to be inferred for "partial writes".
19 * Check if making the read size of PLRU a ROM helps utilization
20
21 Links:
22
23 * https://bugs.libre-soc.org/show_bug.cgi?id=485
24 * https://libre-soc.org/irclog-microwatt/%23microwatt.2021-12-07.log.html
25 (discussion about brams for ECP5)
26
27 """

from enum import (Enum, unique)
from nmigen import (Module, Signal, Elaboratable, Cat, Array, Const, Repl,
                    Record)
from nmigen.cli import main, rtlil
from nmutil.iocontrol import RecordObject
from nmigen.utils import log2_int
from nmigen.lib.coding import Decoder
from nmutil.util import Display
from nmutil.latch import SRLatch

#from nmutil.plru import PLRU
from soc.experiment.plru import PLRU, PLRUs
from soc.experiment.cache_ram import CacheRam

from soc.experiment.mem_types import (Fetch1ToICacheType,
                                      ICacheToDecode1Type,
                                      MMUToICacheType)

from soc.experiment.wb_types import (WB_ADDR_BITS, WB_DATA_BITS,
                                     WB_SEL_BITS, WBAddrType, WBDataType,
                                     WBSelType, WBMasterOut, WBSlaveOut,
                                     )

from nmigen_soc.wishbone.bus import Interface
from soc.minerva.units.fetch import FetchUnitInterface


# for test
from soc.bus.sram import SRAM
from nmigen import Memory
from nmutil.util import wrap

# NOTE: to use cxxsim, export NMIGEN_SIM_MODE=cxxsim from the shell
# Also, check out the cxxsim nmigen branch, and latest yosys from git
from nmutil.sim_tmp_alternative import Simulator, Settle

# from microwatt/utils.vhdl
def ispow2(n):
    return n != 0 and (n & (n - 1)) == 0
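
# examples: ispow2(64) -> True, ispow2(48) -> False, ispow2(0) -> False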

SIM = 0
# Non-zero to enable log data collection
LOG_LENGTH = 0

class ICacheConfig:
    def __init__(self, XLEN        = 64,
                       LINE_SIZE   = 64,
                       NUM_LINES   = 64,  # Number of lines in a set
                       NUM_WAYS    = 2,   # Number of ways
                       TLB_SIZE    = 64,  # L1 ITLB number of entries
                       TLB_LG_PGSZ = 12): # L1 ITLB log_2(page_size)
        self.XLEN = XLEN
        self.LINE_SIZE = LINE_SIZE
        self.NUM_LINES = NUM_LINES
        self.NUM_WAYS = NUM_WAYS
        self.TLB_SIZE = TLB_SIZE
        self.TLB_LG_PGSZ = TLB_LG_PGSZ

        # BRAM organisation: We never access more than wishbone_data_bits
        # at a time so to save resources we make the array only that wide,
        # and use consecutive indices to make a cache "line"
        #
        # self.ROW_SIZE is the width in bytes of the BRAM
        # (based on WB, so 64-bits)
        self.ROW_SIZE = WB_DATA_BITS // 8
        # Number of real address bits that we store
        self.REAL_ADDR_BITS = XLEN-8  # 56 for XLEN=64

        self.ROW_SIZE_BITS = self.ROW_SIZE * 8
        # ROW_PER_LINE is the number of row (wishbone) transactions in a line
        self.ROW_PER_LINE = self.LINE_SIZE // self.ROW_SIZE
        # BRAM_ROWS is the number of rows in BRAM
        # needed to represent the full icache
        self.BRAM_ROWS = self.NUM_LINES * self.ROW_PER_LINE
        # INSN_PER_ROW is the number of 32bit instructions per BRAM row
        self.INSN_PER_ROW = self.ROW_SIZE_BITS // 32

        # Bit field counts in the address
        #
        # INSN_BITS is the number of bits to select an instruction in a row
        self.INSN_BITS = log2_int(self.INSN_PER_ROW)
        # ROW_BITS is the number of bits to select a row
        self.ROW_BITS = log2_int(self.BRAM_ROWS)
        # ROW_LINE_BITS is the number of bits to select a row within a line
        self.ROW_LINE_BITS = log2_int(self.ROW_PER_LINE)
        # LINE_OFF_BITS is the number of bits for the offset in a cache line
        self.LINE_OFF_BITS = log2_int(self.LINE_SIZE)
        # ROW_OFF_BITS is the number of bits for the offset in a row
        self.ROW_OFF_BITS = log2_int(self.ROW_SIZE)
        # INDEX_BITS is the number of bits to select a cache line
        self.INDEX_BITS = log2_int(self.NUM_LINES)
        # SET_SIZE_BITS is the log base 2 of the set size
        self.SET_SIZE_BITS = self.LINE_OFF_BITS + self.INDEX_BITS
        # TAG_BITS is the number of bits of the tag part of the address
        self.TAG_BITS = self.REAL_ADDR_BITS - self.SET_SIZE_BITS
        # TAG_WIDTH is the width in bits of each way of the tag RAM
        # (TAG_BITS rounded up to a multiple of 8)
        self.TAG_WIDTH = self.TAG_BITS + 7 - ((self.TAG_BITS + 7) % 8)

        # WAY_BITS is the number of bits to select a way
        self.WAY_BITS = log2_int(self.NUM_WAYS)
        self.TAG_RAM_WIDTH = self.TAG_BITS * self.NUM_WAYS

        # L1 ITLB
        self.TL_BITS = log2_int(self.TLB_SIZE)
        self.TLB_EA_TAG_BITS = XLEN - (self.TLB_LG_PGSZ + self.TL_BITS)
        self.TLB_PTE_BITS = XLEN
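
        # Worked example, assuming the defaults above (XLEN=64, LINE_SIZE=64,
        # NUM_LINES=64, NUM_WAYS=2) and a 64-bit wishbone data bus:
        #   ROW_SIZE=8, ROW_PER_LINE=8, BRAM_ROWS=512, INSN_PER_ROW=2,
        #   INSN_BITS=1, ROW_BITS=9, ROW_LINE_BITS=3, LINE_OFF_BITS=6,
        #   ROW_OFF_BITS=3, INDEX_BITS=6, SET_SIZE_BITS=12,
        #   TAG_BITS=56-12=44, TAG_RAM_WIDTH=88, WAY_BITS=1,
        #   TL_BITS=6, TLB_EA_TAG_BITS=64-(12+6)=46.
        # These match the values the print()s below report.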

        print("self.XLEN =", self.XLEN)
        print("self.BRAM_ROWS =", self.BRAM_ROWS)
        print("self.INDEX_BITS =", self.INDEX_BITS)
        print("self.INSN_BITS =", self.INSN_BITS)
        print("self.INSN_PER_ROW =", self.INSN_PER_ROW)
        print("self.LINE_SIZE =", self.LINE_SIZE)
        print("self.LINE_OFF_BITS =", self.LINE_OFF_BITS)
        print("LOG_LENGTH =", LOG_LENGTH)
        print("self.NUM_LINES =", self.NUM_LINES)
        print("self.NUM_WAYS =", self.NUM_WAYS)
        print("self.REAL_ADDR_BITS =", self.REAL_ADDR_BITS)
        print("self.ROW_BITS =", self.ROW_BITS)
        print("self.ROW_OFF_BITS =", self.ROW_OFF_BITS)
        print("self.ROW_LINE_BITS =", self.ROW_LINE_BITS)
        print("self.ROW_PER_LINE =", self.ROW_PER_LINE)
        print("self.ROW_SIZE =", self.ROW_SIZE)
        print("self.ROW_SIZE_BITS =", self.ROW_SIZE_BITS)
        print("self.SET_SIZE_BITS =", self.SET_SIZE_BITS)
        print("SIM =", SIM)
        print("self.TAG_BITS =", self.TAG_BITS)
        print("self.TAG_RAM_WIDTH =", self.TAG_RAM_WIDTH)
        print("self.TL_BITS =", self.TL_BITS)
        print("self.TLB_EA_TAG_BITS =", self.TLB_EA_TAG_BITS)
        print("self.TLB_LG_PGSZ =", self.TLB_LG_PGSZ)
        print("self.TLB_PTE_BITS =", self.TLB_PTE_BITS)
        print("self.TLB_SIZE =", self.TLB_SIZE)
        print("self.WAY_BITS =", self.WAY_BITS)
        print()

        assert self.LINE_SIZE % self.ROW_SIZE == 0
        assert ispow2(self.LINE_SIZE), "self.LINE_SIZE not power of 2"
        assert ispow2(self.NUM_LINES), "self.NUM_LINES not power of 2"
        assert ispow2(self.ROW_PER_LINE), "self.ROW_PER_LINE not power of 2"
        assert ispow2(self.INSN_PER_ROW), "self.INSN_PER_ROW not power of 2"
        assert (self.ROW_BITS == (self.INDEX_BITS + self.ROW_LINE_BITS)), \
            "geometry bits don't add up"
        assert (self.LINE_OFF_BITS ==
                (self.ROW_OFF_BITS + self.ROW_LINE_BITS)), \
            "geometry bits don't add up"
        assert (self.REAL_ADDR_BITS ==
                (self.TAG_BITS + self.INDEX_BITS + self.LINE_OFF_BITS)), \
            "geometry bits don't add up"
        assert (self.REAL_ADDR_BITS ==
                (self.TAG_BITS + self.ROW_BITS + self.ROW_OFF_BITS)), \
            "geometry bits don't add up"

    # Example of layout for 32 lines of 64 bytes:
    #
    # ..  tag    |index|  line  |
    # ..         |   row   |    |
    # ..         |     |   | |00| zero                 (2)
    # ..         |     |   |-|  | self.INSN_BITS       (1)
    # ..         |     |---|    | self.ROW_LINE_BITS   (3)
    # ..         |     |--- - --| self.LINE_OFF_BITS   (6)
    # ..         |         |- --| self.ROW_OFF_BITS    (3)
    # ..         |----- ---|    | self.ROW_BITS        (8)
    # ..         |-----|        | self.INDEX_BITS      (5)
    # .. --------|              | self.TAG_BITS        (53)

    # The cache data BRAM, organized as described above, for each way
    #subtype cache_row_t is std_ulogic_vector(self.ROW_SIZE_BITS-1 downto 0);
    #
    def RowPerLineValidArray(self):
        return Array(Signal(name="rows_valid_%d" % x)
                     for x in range(self.ROW_PER_LINE))


    # TODO: to be passed to nmigen as ram attributes
    # attribute ram_style : string;
    # attribute ram_style of cache_tags : signal is "distributed";

    def TLBRecord(self, name):
        tlb_layout = [('tag', self.TLB_EA_TAG_BITS),
                      ('pte', self.TLB_PTE_BITS)
                      ]
        return Record(tlb_layout, name=name)

    def TLBArray(self):
        return Array(self.TLBRecord("tlb%d" % x)
                     for x in range(self.TLB_SIZE))

    # PLRU output interface
    def PLRUOut(self):
        return Array(Signal(self.WAY_BITS, name="plru_out_%d" % x)
                     for x in range(self.NUM_LINES))

    # Return the cache line index (tag index) for an address
    def get_index(self, addr):
        return addr[self.LINE_OFF_BITS:self.SET_SIZE_BITS]

    # Return the cache row index (data memory) for an address
    def get_row(self, addr):
        return addr[self.ROW_OFF_BITS:self.SET_SIZE_BITS]

    # Return the index of a row within a line
    def get_row_of_line(self, row):
        return row[:self.ROW_BITS][:self.ROW_LINE_BITS]

    # Returns whether the row given by its address is the last row of a line
    def is_last_row_addr(self, addr, last):
        return addr[self.ROW_OFF_BITS:self.LINE_OFF_BITS] == last

    # Returns whether this is the last row of a line
    def is_last_row(self, row, last):
        return self.get_row_of_line(row) == last

    # Return the next row in the current cache line.  We use a dedicated
    # function in order to limit the size of the generated adder to be
    # only the bits within a cache line (3 bits with default settings)
    def next_row(self, row):
        row_v = row[0:self.ROW_LINE_BITS] + 1
        return Cat(row_v[:self.ROW_LINE_BITS], row[self.ROW_LINE_BITS:])
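
    # Example, assuming ROW_LINE_BITS == 3 (the default): next_row of
    # row 0b101_110 is 0b101_111, and next_row of 0b101_111 wraps back
    # to 0b101_000 - the upper (index) bits pass through unchanged, so
    # only a 3-bit adder is generated.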

    # Read the instruction word for the given address
    # in the current cache row
    def read_insn_word(self, addr, data):
        word = addr[2:self.INSN_BITS+2]
        return data.word_select(word, 32)
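
    # With the default 64-bit rows (INSN_PER_ROW == 2, INSN_BITS == 1),
    # addr bit 2 selects the low or high 32-bit word of the row:
    # e.g. nia 0x4 returns data[32:64].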

    # Get the tag value from the address
    def get_tag(self, addr):
        return addr[self.SET_SIZE_BITS:self.REAL_ADDR_BITS]

    # Read a tag from a tag memory row
    def read_tag(self, way, tagset):
        return tagset.word_select(way, self.TAG_BITS)

    # Write a tag to tag memory row
    def write_tag(self, way, tagset, tag):
        return self.read_tag(way, tagset).eq(tag)

    # Simple hash for direct-mapped TLB index
    def hash_ea(self, addr):
        hsh = (addr[self.TLB_LG_PGSZ:self.TLB_LG_PGSZ + self.TL_BITS] ^
               addr[self.TLB_LG_PGSZ + self.TL_BITS:
                    self.TLB_LG_PGSZ + 2 * self.TL_BITS] ^
               addr[self.TLB_LG_PGSZ + 2 * self.TL_BITS:
                    self.TLB_LG_PGSZ + 3 * self.TL_BITS])
        return hsh
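
    # e.g. with the default TLB_LG_PGSZ=12 and TL_BITS=6 this XOR-folds
    # three 6-bit fields of the effective page number:
    #   index = addr[12:18] ^ addr[18:24] ^ addr[24:30]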


# Cache reload state machine
@unique
class State(Enum):
    IDLE = 0
    CLR_TAG = 1
    WAIT_ACK = 2


class RegInternal(RecordObject):
    def __init__(self, cfg):
        super().__init__()
        # Cache hit state (Latches for 1 cycle BRAM access)
        self.hit_way = Signal(cfg.WAY_BITS)
        self.hit_nia = Signal(64)
        self.hit_smark = Signal()
        self.hit_valid = Signal()

        # Cache miss state (reload state machine)
        self.state = Signal(State, reset=State.IDLE)
        self.wb = WBMasterOut("wb")
        self.req_adr = Signal(64)
        self.store_way = Signal(cfg.WAY_BITS)
        self.store_index = Signal(cfg.INDEX_BITS)
        self.store_row = Signal(cfg.ROW_BITS)
        self.store_tag = Signal(cfg.TAG_BITS)
        self.store_valid = Signal()
        self.end_row_ix = Signal(cfg.ROW_LINE_BITS)
        self.rows_valid = cfg.RowPerLineValidArray()

        # TLB miss state
        self.fetch_failed = Signal()


class ICache(FetchUnitInterface, Elaboratable, ICacheConfig):
    """64 bit set associative icache. All instructions are 4B aligned."""
    def __init__(self, pspec):
        FetchUnitInterface.__init__(self, pspec)
        self.i_in = Fetch1ToICacheType(name="i_in")
        self.i_out = ICacheToDecode1Type(name="i_out")

        self.m_in = MMUToICacheType(name="m_in")

        self.stall_in = Signal()
        self.stall_out = Signal()
        self.flush_in = Signal()
        self.inval_in = Signal()

        # standard naming (wired to non-standard for compatibility)
        self.bus = Interface(addr_width=32,
                             data_width=64,
                             granularity=8,
                             features={'stall'},
                             #alignment=0,
                             name="icache_wb")

        self.log_out = Signal(54)

        # use FetchUnitInterface, helps keep some unit tests running
        self.use_fetch_iface = False

        # test if microwatt compatibility is to be enabled
        self.microwatt_compat = (hasattr(pspec, "microwatt_compat") and
                                 (pspec.microwatt_compat == True))

        XLEN = pspec.XLEN

        if self.microwatt_compat:
            # reduce way sizes and num lines
            ICacheConfig.__init__(self, LINE_SIZE=XLEN,
                                  XLEN=XLEN,
                                  NUM_LINES=4,
                                  NUM_WAYS=1,
                                  TLB_SIZE=4  # needs device-tree update
                                  )
        else:
            ICacheConfig.__init__(self, LINE_SIZE=XLEN, XLEN=XLEN)

    def use_fetch_interface(self):
        self.use_fetch_iface = True

    # Generate a cache RAM for each way
    def rams(self, m, r, cache_out_row, use_previous,
             replace_way, req_row):

        comb = m.d.comb
        sync = m.d.sync

        bus, stall_in = self.bus, self.stall_in

        # read condition (for every cache ram)
        do_read = Signal()
        comb += do_read.eq(~(stall_in | use_previous))

        rd_addr = Signal(self.ROW_BITS)
        wr_addr = Signal(self.ROW_BITS)
        comb += rd_addr.eq(req_row)
        comb += wr_addr.eq(r.store_row)

        # binary-to-unary converters: replace-way enabled by bus.ack,
        # hit-way left permanently enabled
        m.submodules.replace_way_e = re = Decoder(self.NUM_WAYS)
        m.submodules.hit_way_e = he = Decoder(self.NUM_WAYS)
        comb += re.i.eq(replace_way)
        comb += re.n.eq(~bus.ack)
        comb += he.i.eq(r.hit_way)

        for i in range(self.NUM_WAYS):
            do_write = Signal(name="do_wr_%d" % i)
            d_out = Signal(self.ROW_SIZE_BITS, name="d_out_%d" % i)
            wr_sel = Signal(self.ROW_SIZE, name="wr_sel_%d" % i)

            way = CacheRam(self.ROW_BITS, self.ROW_SIZE_BITS,
                           TRACE=True, ram_num=i)
            m.submodules["cacheram_%d" % i] = way

            comb += way.rd_en.eq(do_read)
            comb += way.rd_addr.eq(rd_addr)
            comb += d_out.eq(way.rd_data_o)
            comb += way.wr_sel.eq(wr_sel)
            comb += way.wr_addr.eq(wr_addr)
            comb += way.wr_data.eq(bus.dat_r)

            comb += do_write.eq(re.o[i])

            with m.If(do_write):
                sync += Display("cache write adr: %x data: %lx",
                                wr_addr, way.wr_data)

            with m.If(he.o[i]):
                comb += cache_out_row.eq(d_out)
                with m.If(do_read):
                    sync += Display("cache read adr: %x data: %x",
                                    req_row, d_out)

            comb += wr_sel.eq(Repl(do_write, self.ROW_SIZE))

    # Generate PLRUs
    def maybe_plrus(self, m, r, plru_victim):
        comb = m.d.comb

        if self.NUM_WAYS == 0:
            return

        m.submodules.plrus = plru = PLRUs("itag", self.NUM_LINES,
                                          self.WAY_BITS)
        comb += plru.way.eq(r.hit_way)
        comb += plru.valid.eq(r.hit_valid)
        comb += plru.index.eq(self.get_index(r.hit_nia))
        comb += plru.isel.eq(r.store_index)  # select victim
        comb += plru_victim.eq(plru.o_index)  # selected victim

    # TLB hit detection and real address generation
    def itlb_lookup(self, m, tlb_req_index, itlb, itlb_valid,
                    real_addr, ra_valid, eaa_priv,
                    priv_fault, access_ok):

        comb = m.d.comb

        i_in = self.i_in

        # use an *asynchronous* Memory read port here (combinatorial)
        m.submodules.rd_tlb = rd_tlb = self.tlbmem.read_port(domain="comb")
        tlb = self.TLBRecord("tlb_rdport")
        pte, ttag = tlb.pte, tlb.tag

        comb += tlb_req_index.eq(self.hash_ea(i_in.nia))
        comb += rd_tlb.addr.eq(tlb_req_index)
        comb += tlb.eq(rd_tlb.data)

        with m.If(i_in.virt_mode):
            comb += real_addr.eq(Cat(i_in.nia[:self.TLB_LG_PGSZ],
                                 pte[self.TLB_LG_PGSZ:self.REAL_ADDR_BITS]))

            with m.If(ttag == i_in.nia[self.TLB_LG_PGSZ + self.TL_BITS:64]):
                comb += ra_valid.eq(itlb_valid.q.bit_select(tlb_req_index, 1))

            comb += eaa_priv.eq(pte[3])

        with m.Else():
            comb += real_addr.eq(i_in.nia[:self.REAL_ADDR_BITS])
            comb += ra_valid.eq(1)
            comb += eaa_priv.eq(1)

        # No IAMR, so no KUEP support for now
        comb += priv_fault.eq(eaa_priv & ~i_in.priv_mode)
        comb += access_ok.eq(ra_valid & ~priv_fault)

    # iTLB update
    def itlb_update(self, m, itlb, itlb_valid):
        comb = m.d.comb
        sync = m.d.sync

        m_in = self.m_in

        wr_index = Signal(self.TL_BITS)
        wr_unary = Signal(self.TLB_SIZE)
        comb += wr_index.eq(self.hash_ea(m_in.addr))
        comb += wr_unary.eq(1 << wr_index)

        m.submodules.wr_tlb = wr_tlb = self.tlbmem.write_port()
        sync += itlb_valid.s.eq(0)
        sync += itlb_valid.r.eq(0)

        with m.If(m_in.tlbie & m_in.doall):
            # Clear all valid bits
            sync += itlb_valid.r.eq(-1)

        with m.Elif(m_in.tlbie):
            # Clear entry regardless of hit or miss
            sync += itlb_valid.r.eq(wr_unary)

        with m.Elif(m_in.tlbld):
            tlb = self.TLBRecord("tlb_wrport")
            comb += tlb.tag.eq(m_in.addr[self.TLB_LG_PGSZ + self.TL_BITS:64])
            comb += tlb.pte.eq(m_in.pte)
            comb += wr_tlb.en.eq(1)
            comb += wr_tlb.addr.eq(wr_index)
            comb += wr_tlb.data.eq(tlb)
            sync += itlb_valid.s.eq(wr_unary)

    # Cache hit detection, output to fetch2 and other misc logic
    def icache_comb(self, m, use_previous, r, req_index, req_row,
                    req_hit_way, req_tag, real_addr, req_laddr,
                    cache_valids, access_ok,
                    req_is_hit, req_is_miss, replace_way,
                    plru_victim, cache_out_row):

        comb = m.d.comb
        m.submodules.rd_tag = rd_tag = self.tagmem.read_port(domain="comb")

        i_in, i_out, bus = self.i_in, self.i_out, self.bus
        flush_in, stall_out = self.flush_in, self.stall_out

        is_hit = Signal()
        hit_way = Signal(self.WAY_BITS)

        # i_in.sequential means that i_in.nia this cycle is 4 more than
        # last cycle.  If we read more than 32 bits at a time, had a
        # cache hit last cycle, and we don't want the first 32-bit chunk
        # then we can keep the data we read last cycle and just use that.
        with m.If(i_in.nia[2:self.INSN_BITS+2] != 0):
            comb += use_previous.eq(i_in.sequential & r.hit_valid)

        # Extract line, row and tag from request
        comb += req_index.eq(self.get_index(i_in.nia))
        comb += req_row.eq(self.get_row(i_in.nia))
        comb += req_tag.eq(self.get_tag(real_addr))

        # Calculate address of beginning of cache row, will be
        # used for cache miss processing if needed
        comb += req_laddr.eq(Cat(
                 Const(0, self.ROW_OFF_BITS),
                 real_addr[self.ROW_OFF_BITS:self.REAL_ADDR_BITS],
                 ))

        # Test if pending request is a hit on any way
        hitcond = Signal()
        rowvalid = Signal()
        comb += rowvalid.eq(r.rows_valid[req_row % self.ROW_PER_LINE])
        comb += hitcond.eq((r.state == State.WAIT_ACK) &
                           (req_index == r.store_index) &
                           rowvalid)
        # i_in.req asserts the store-way Decoder active
        cvb = Signal(self.NUM_WAYS)
        ctag = Signal(self.TAG_RAM_WIDTH)
        comb += rd_tag.addr.eq(req_index)
        comb += ctag.eq(rd_tag.data)
        comb += cvb.eq(cache_valids.q.word_select(req_index, self.NUM_WAYS))
        m.submodules.store_way_e = se = Decoder(self.NUM_WAYS)
        comb += se.i.eq(r.store_way)
        comb += se.n.eq(~i_in.req)
        for i in range(self.NUM_WAYS):
            tagi = Signal(self.TAG_BITS, name="tag_i%d" % i)
            hit_test = Signal(name="hit_test%d" % i)
            is_tag_hit = Signal(name="is_tag_hit_%d" % i)
            comb += tagi.eq(self.read_tag(i, ctag))
            comb += hit_test.eq(se.o[i])
            comb += is_tag_hit.eq((cvb[i] | (hitcond & hit_test)) &
                                  (tagi == req_tag))
            with m.If(is_tag_hit):
                comb += hit_way.eq(i)
                comb += is_hit.eq(1)

        # Generate the "hit" and "miss" signals
        # for the synchronous blocks
        with m.If(i_in.req & access_ok & ~flush_in):
            comb += req_is_hit.eq(is_hit)
            comb += req_is_miss.eq(~is_hit)

        comb += req_hit_way.eq(hit_way)

        # The way to replace on a miss
        with m.If(r.state == State.CLR_TAG):
            comb += replace_way.eq(plru_victim)
        with m.Else():
            comb += replace_way.eq(r.store_way)

        # Output instruction from current cache row
        #
        # Note: This is a mild violation of our design principle of
        # having pipeline stages output from a clean latch.  In this
        # case we output the result of a mux.  The alternative would
        # be to output an entire row, which I prefer not to do just yet
        # as it would force fetch2 to know about some of the cache
        # geometry information.
        comb += i_out.insn.eq(self.read_insn_word(r.hit_nia, cache_out_row))
        comb += i_out.valid.eq(r.hit_valid)
        comb += i_out.nia.eq(r.hit_nia)
        comb += i_out.stop_mark.eq(r.hit_smark)
        comb += i_out.fetch_failed.eq(r.fetch_failed)

        # Stall fetch1 if we have a miss on cache or TLB
        # or a protection fault
        comb += stall_out.eq(~(is_hit & access_ok))

        # Wishbone requests output (from the cache miss reload machine)
        comb += bus.we.eq(r.wb.we)
        comb += bus.adr.eq(r.wb.adr)
        comb += bus.sel.eq(r.wb.sel)
        comb += bus.stb.eq(r.wb.stb)
        comb += bus.dat_w.eq(r.wb.dat)
        comb += bus.cyc.eq(r.wb.cyc)

    # Cache hit synchronous machine
    def icache_hit(self, m, use_previous, r, req_is_hit, req_hit_way,
                   req_index, req_tag, real_addr):
        sync = m.d.sync

        i_in, stall_in = self.i_in, self.stall_in
        flush_in = self.flush_in

        # keep outputs to fetch2 unchanged on a stall
        # except that flush or reset sets valid to 0
        # If use_previous, keep the same data as last
        # cycle and use the second half
        with m.If(stall_in | use_previous):
            with m.If(flush_in):
                sync += r.hit_valid.eq(0)
        with m.Else():
            # On a hit, latch the request for the next cycle,
            # when the BRAM data will be available on the
            # cache_out output of the corresponding way
            sync += r.hit_valid.eq(req_is_hit)

            with m.If(req_is_hit):
                sync += r.hit_way.eq(req_hit_way)
                sync += Display("cache hit nia:%x IR:%x SM:%x idx:%x tag:%x "
                                "way:%x RA:%x", i_in.nia, i_in.virt_mode,
                                i_in.stop_mark, req_index, req_tag,
                                req_hit_way, real_addr)

        with m.If(~stall_in):
            # Send stop marks and NIA down regardless of validity
            sync += r.hit_smark.eq(i_in.stop_mark)
            sync += r.hit_nia.eq(i_in.nia)

    def icache_miss_idle(self, m, r, req_is_miss, req_laddr,
                         req_index, req_tag, replace_way, real_addr):
        comb = m.d.comb
        sync = m.d.sync

        i_in = self.i_in

        # Reset per-row valid flags, only used in WAIT_ACK
        for i in range(self.ROW_PER_LINE):
            sync += r.rows_valid[i].eq(0)

        # We need to read a cache line
        with m.If(req_is_miss):
            sync += Display(
                     "cache miss nia:%x IR:%x SM:%x idx:%x "
                     " way:%x tag:%x RA:%x", i_in.nia,
                     i_in.virt_mode, i_in.stop_mark, req_index,
                     replace_way, req_tag, real_addr)

            # Keep track of our index and way for subsequent stores
            st_row = Signal(self.ROW_BITS)
            comb += st_row.eq(self.get_row(req_laddr))
            sync += r.store_index.eq(req_index)
            sync += r.store_row.eq(st_row)
            sync += r.store_tag.eq(req_tag)
            sync += r.store_valid.eq(1)
            sync += r.end_row_ix.eq(self.get_row_of_line(st_row) - 1)

            # Prep for first wishbone read.  We calculate the address
            # of the start of the cache line and start the WB cycle.
            sync += r.req_adr.eq(req_laddr)
            sync += r.wb.cyc.eq(1)
            sync += r.wb.stb.eq(1)

            # Track that we had one request sent
            sync += r.state.eq(State.CLR_TAG)

    def icache_miss_clr_tag(self, m, r, replace_way,
                            req_index,
                            cache_valids):
        comb = m.d.comb
        sync = m.d.sync
        m.submodules.wr_tag = wr_tag = self.tagmem.write_port(
                                            granularity=self.TAG_BITS)

        # Get victim way from plru
        sync += r.store_way.eq(replace_way)

        # Force misses on that way while reloading that line
        # (2D index, 1st dimension: self.NUM_WAYS)
        idx = req_index*self.NUM_WAYS + replace_way
        comb += cache_valids.r.eq(1 << idx)

        # use write-port "granularity" to select the tag to write to
        # TODO: the Memory should be multiplied-up (by NUM_TAGS)
        tagset = Signal(self.TAG_RAM_WIDTH)
        comb += tagset.eq(r.store_tag << (replace_way*self.TAG_BITS))
        comb += wr_tag.en.eq(1 << replace_way)
        comb += wr_tag.addr.eq(r.store_index)
        comb += wr_tag.data.eq(tagset)

        sync += r.state.eq(State.WAIT_ACK)

    def icache_miss_wait_ack(self, m, r, replace_way, inval_in,
                             cache_valids):
        comb = m.d.comb
        sync = m.d.sync

        bus = self.bus

        # If we are still sending requests, was one accepted?
        with m.If(~bus.stall & r.wb.stb):
            # That was the last word?  We are done sending.  Clear stb
            with m.If(self.is_last_row_addr(r.req_adr, r.end_row_ix)):
                sync += Display("IS_LAST_ROW_ADDR r.wb.addr:%x "
                                "r.end_row_ix:%x r.wb.stb:%x",
                                r.wb.adr, r.end_row_ix, r.wb.stb)
                sync += r.wb.stb.eq(0)

            # Calculate the next row address
            rarange = Signal(self.LINE_OFF_BITS - self.ROW_OFF_BITS)
            comb += rarange.eq(r.req_adr[self.ROW_OFF_BITS:
                                         self.LINE_OFF_BITS] + 1)
            sync += r.req_adr[self.ROW_OFF_BITS:self.LINE_OFF_BITS].eq(rarange)
            sync += Display("RARANGE r.req_adr:%x rarange:%x "
                            "r.wb.stb:%x",
                            r.req_adr, rarange, r.wb.stb)

        # Incoming acks processing
        with m.If(bus.ack):
            sync += Display("WB_IN_ACK data:%x", bus.dat_r)

            sync += r.rows_valid[r.store_row % self.ROW_PER_LINE].eq(1)

            # Check for completion
            with m.If(self.is_last_row(r.store_row, r.end_row_ix)):
                # Complete wishbone cycle
                sync += r.wb.cyc.eq(0)
                # be nice, clear addr
                sync += r.req_adr.eq(0)

                # Cache line is now valid (2D index again), unless an
                # invalidation arrived while the line was being reloaded
                idx = r.store_index*self.NUM_WAYS + replace_way
                valid = r.store_valid & ~inval_in
                with m.If(valid):
                    comb += cache_valids.s.eq(1 << idx)
                sync += r.state.eq(State.IDLE)

            # move on to next request in row
            # Increment store row counter
            sync += r.store_row.eq(self.next_row(r.store_row))

    # Cache miss/reload synchronous machine
    def icache_miss(self, m, r, req_is_miss,
                    req_index, req_laddr, req_tag, replace_way,
                    cache_valids, access_ok, real_addr):
        comb = m.d.comb
        sync = m.d.sync

        i_in, bus, m_in = self.i_in, self.bus, self.m_in
        stall_in, flush_in = self.stall_in, self.flush_in
        inval_in = self.inval_in

        comb += r.wb.sel.eq(-1)
        comb += r.wb.adr.eq(r.req_adr[3:])

        # Process cache invalidations
        with m.If(inval_in):
            comb += cache_valids.r.eq(-1)
            sync += r.store_valid.eq(0)

        # Main state machine
        with m.Switch(r.state):

            with m.Case(State.IDLE):
                self.icache_miss_idle(m, r, req_is_miss, req_laddr,
                                      req_index, req_tag, replace_way,
                                      real_addr)

            with m.Case(State.CLR_TAG, State.WAIT_ACK):
                with m.If(r.state == State.CLR_TAG):
                    self.icache_miss_clr_tag(m, r, replace_way,
                                             req_index,
                                             cache_valids)

                self.icache_miss_wait_ack(m, r, replace_way, inval_in,
                                          cache_valids)

        # TLB miss and protection fault processing
        with m.If(flush_in | m_in.tlbld):
            sync += r.fetch_failed.eq(0)
        with m.Elif(i_in.req & ~access_ok & ~stall_in):
            sync += r.fetch_failed.eq(1)

    # icache_log: if LOG_LENGTH > 0 generate
    def icache_log(self, m, req_hit_way, ra_valid, access_ok,
                   req_is_miss, req_is_hit, lway, wstate, r):
        comb = m.d.comb
        sync = m.d.sync

        bus, i_out = self.bus, self.i_out
        log_out, stall_out = self.log_out, self.stall_out

        # Output data to logger
        for i in range(LOG_LENGTH):
            log_data = Signal(54)
            lway = Signal(self.WAY_BITS)
            wstate = Signal()

            sync += lway.eq(req_hit_way)
            sync += wstate.eq(0)

            with m.If(r.state != State.IDLE):
                sync += wstate.eq(1)

            sync += log_data.eq(Cat(
                     ra_valid, access_ok, req_is_miss, req_is_hit,
                     lway, wstate, r.hit_nia[2:6], r.fetch_failed,
                     stall_out, bus.stall, r.wb.cyc, r.wb.stb,
                     r.real_addr[3:6], bus.ack, i_out.insn, i_out.valid
                     ))
            comb += log_out.eq(log_data)

    def elaborate(self, platform):

        m = Module()
        comb = m.d.comb

        # Cache-Ways "valid" indicators.  this is a 2D Signal, by the
        # number of ways and the number of lines.
        vec = SRLatch(sync=True, llen=self.NUM_WAYS*self.NUM_LINES,
                      name="cachevalids")
        m.submodules.cache_valids = cache_valids = vec

        # TLB Array
        itlb = self.TLBArray()
        vec = SRLatch(sync=False, llen=self.TLB_SIZE, name="tlbvalids")
        m.submodules.itlb_valids = itlb_valid = vec

        # TODO: to be passed to nmigen as ram attributes
        # attribute ram_style of itlb_tags : signal is "distributed";
        # attribute ram_style of itlb_ptes : signal is "distributed";

        # Privilege bit from PTE EAA field
        eaa_priv = Signal()

        r = RegInternal(self)

        # Async signals on incoming request
        req_index = Signal(self.INDEX_BITS)
        req_row = Signal(self.ROW_BITS)
        req_hit_way = Signal(self.WAY_BITS)
        req_tag = Signal(self.TAG_BITS)
        req_is_hit = Signal()
        req_is_miss = Signal()
        req_laddr = Signal(64)

        tlb_req_index = Signal(self.TL_BITS)
        real_addr = Signal(self.REAL_ADDR_BITS)
        ra_valid = Signal()
        priv_fault = Signal()
        access_ok = Signal()
        use_previous = Signal()

        cache_out_row = Signal(self.ROW_SIZE_BITS)

        plru_victim = Signal(self.WAY_BITS)
        replace_way = Signal(self.WAY_BITS)

        self.tlbmem = Memory(depth=self.TLB_SIZE,
                             width=self.TLB_EA_TAG_BITS+self.TLB_PTE_BITS,
                             #attrs={'syn_ramstyle': "block_ram"}
                             )
        self.tagmem = Memory(depth=self.NUM_LINES,
                             width=self.TAG_RAM_WIDTH,
                             #attrs={'syn_ramstyle': "block_ram"}
                             )

        # call sub-functions putting everything together,
        # using shared signals established above
        self.rams(m, r, cache_out_row, use_previous, replace_way, req_row)
        self.maybe_plrus(m, r, plru_victim)
        self.itlb_lookup(m, tlb_req_index, itlb, itlb_valid, real_addr,
                         ra_valid, eaa_priv, priv_fault,
                         access_ok)
        self.itlb_update(m, itlb, itlb_valid)
        self.icache_comb(m, use_previous, r, req_index, req_row, req_hit_way,
                         req_tag, real_addr, req_laddr,
                         cache_valids,
                         access_ok, req_is_hit, req_is_miss,
                         replace_way, plru_victim, cache_out_row)
        self.icache_hit(m, use_previous, r, req_is_hit, req_hit_way,
                        req_index, req_tag, real_addr)
        self.icache_miss(m, r, req_is_miss, req_index,
                         req_laddr, req_tag, replace_way,
                         cache_valids,
                         access_ok, real_addr)
        #self.icache_log(m, log_out, req_hit_way, ra_valid, access_ok,
        #                req_is_miss, req_is_hit, lway, wstate, r)

        # don't connect up to FetchUnitInterface so that some unit tests
        # can continue to operate
        if not self.use_fetch_iface:
            return m

        # connect to FetchUnitInterface. FetchUnitInterface is undocumented
        # so needs checking and iterative revising
        i_in, bus, i_out = self.i_in, self.bus, self.i_out
        comb += i_in.req.eq(self.a_i_valid)
        comb += i_in.nia.eq(self.a_pc_i)
        comb += self.stall_in.eq(self.a_stall_i)
        comb += self.f_fetch_err_o.eq(i_out.fetch_failed)
        comb += self.f_badaddr_o.eq(i_out.nia)
        comb += self.f_instr_o.eq(i_out.insn)
        comb += self.f_busy_o.eq(~i_out.valid)  # probably

        # TODO: connect icache wb_in/wb_out to "standard" nmigen Wishbone bus
        ibus = self.ibus
        comb += ibus.adr.eq(self.bus.adr)
        comb += ibus.dat_w.eq(self.bus.dat_w)
        comb += ibus.sel.eq(self.bus.sel)
        comb += ibus.cyc.eq(self.bus.cyc)
        comb += ibus.stb.eq(self.bus.stb)
        comb += ibus.we.eq(self.bus.we)

        comb += self.bus.dat_r.eq(ibus.dat_r)
        comb += self.bus.ack.eq(ibus.ack)
        if hasattr(ibus, "stall"):
            comb += self.bus.stall.eq(ibus.stall)
        else:
            # fake-up the wishbone stall signal to comply with pipeline mode
            # same thing is done in dcache.py
            comb += self.bus.stall.eq(self.bus.cyc & ~self.bus.ack)

        return m


def icache_sim(dut):
    i_in = dut.i_in
    i_out = dut.i_out
    m_out = dut.m_in

    yield i_in.priv_mode.eq(1)
    yield i_in.req.eq(0)
    yield i_in.nia.eq(0)
    yield i_in.stop_mark.eq(0)
    yield m_out.tlbld.eq(0)
    yield m_out.tlbie.eq(0)
    yield m_out.addr.eq(0)
    yield m_out.pte.eq(0)
    yield
    yield
    yield
    yield

    # miss, stalls for a bit
    yield i_in.req.eq(1)
    yield i_in.nia.eq(Const(0x0000000000000004, 64))
    yield
    valid = yield i_out.valid
    while not valid:
        yield
        valid = yield i_out.valid
    yield i_in.req.eq(0)

    insn = yield i_out.insn
    nia = yield i_out.nia
    assert insn == 0x00000001, \
        "insn @%x=%x expected 00000001" % (nia, insn)
    yield i_in.req.eq(0)
    yield

    # hit
    yield i_in.req.eq(1)
    yield i_in.nia.eq(Const(0x0000000000000008, 64))
    yield
    valid = yield i_out.valid
    while not valid:
        yield
        valid = yield i_out.valid
    yield i_in.req.eq(0)

    nia = yield i_out.nia
    insn = yield i_out.insn
    yield
    assert insn == 0x00000002, \
        "insn @%x=%x expected 00000002" % (nia, insn)

    # another miss
    yield i_in.req.eq(1)
    yield i_in.nia.eq(Const(0x0000000000000040, 64))
    yield
    valid = yield i_out.valid
    while not valid:
        yield
        valid = yield i_out.valid
    yield i_in.req.eq(0)

    nia = yield i_in.nia
    insn = yield i_out.insn
    assert insn == 0x00000010, \
        "insn @%x=%x expected 00000010" % (nia, insn)

    # test something that aliases (this only works because
    # the unit test SRAM is a depth of 512)
    yield i_in.req.eq(1)
    yield i_in.nia.eq(Const(0x0000000000000100, 64))
    yield
    yield
    valid = yield i_out.valid
    assert not valid
    for i in range(30):
        yield
    yield
    insn = yield i_out.insn
    valid = yield i_out.valid
    insn = yield i_out.insn
    assert valid
    assert insn == 0x00000040, \
        "insn @%x=%x expected 00000040" % (nia, insn)
    yield i_in.req.eq(0)

def test_icache(mem):
    from soc.config.test.test_loadstore import TestMemPspec
    pspec = TestMemPspec(addr_wid=32,
                         mask_wid=8,
                         reg_wid=64,
                         XLEN=32,
                         )
    dut = ICache(pspec)

    memory = Memory(width=64, depth=512, init=mem)
    sram = SRAM(memory=memory, granularity=8)

    m = Module()

    m.submodules.icache = dut
    m.submodules.sram = sram

    m.d.comb += sram.bus.cyc.eq(dut.bus.cyc)
    m.d.comb += sram.bus.stb.eq(dut.bus.stb)
    m.d.comb += sram.bus.we.eq(dut.bus.we)
    m.d.comb += sram.bus.sel.eq(dut.bus.sel)
    m.d.comb += sram.bus.adr.eq(dut.bus.adr)
    m.d.comb += sram.bus.dat_w.eq(dut.bus.dat_w)

    m.d.comb += dut.bus.ack.eq(sram.bus.ack)
    m.d.comb += dut.bus.dat_r.eq(sram.bus.dat_r)

    # nmigen Simulation
    sim = Simulator(m)
    sim.add_clock(1e-6)

    sim.add_sync_process(wrap(icache_sim(dut)))
    with sim.write_vcd('test_icache.vcd'):
        sim.run()


if __name__ == '__main__':
    from soc.config.test.test_loadstore import TestMemPspec
    pspec = TestMemPspec(addr_wid=64,
                         mask_wid=8,
                         XLEN=32,
                         reg_wid=64,
                         )
    dut = ICache(pspec)
    vl = rtlil.convert(dut, ports=[])
    with open("test_icache.il", "w") as f:
        f.write(vl)

    # set up memory every 32-bits with incrementing values 0 1 2 ...
    mem = []
    for i in range(512):
        mem.append((i*2) | ((i*2+1) << 32))
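
    # Each 64-bit word i thus holds instruction value 2*i in its low
    # half and 2*i+1 in its high half: e.g. nia 0x40 is word 8, whose
    # low half is 16 (0x10), matching the asserts in icache_sim above.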

    test_icache(mem)