Revert "read last row from r.wb.adr not r.req_adr in icache"
[soc.git] / src / soc / experiment / icache.py
1 """ICache
2
3 based on Anton Blanchard microwatt icache.vhdl
4
5 Set associative icache
6
7 TODO (in no specific order):
8 * Add debug interface to inspect cache content
9 * Add snoop/invalidate path
10 * Add multi-hit error detection
11 * Pipelined bus interface (wb or axi)
12 * Maybe add parity? There's a few bits free in each BRAM row on Xilinx
13 * Add optimization: service hits on partially loaded lines
14 * Add optimization: (maybe) interrupt reload on fluch/redirect
15 * Check if playing with the geometry of the cache tags allow for more
16 efficient use of distributed RAM and less logic/muxes. Currently we
17 write TAG_BITS width which may not match full ram blocks and might
18 cause muxes to be inferred for "partial writes".
19 * Check if making the read size of PLRU a ROM helps utilization
20
21 Links:
22
23 * https://bugs.libre-soc.org/show_bug.cgi?id=485
24 * https://libre-soc.org/irclog-microwatt/%23microwatt.2021-12-07.log.html
25 (discussion about brams for ECP5)
26
27 """
28
29 from enum import (Enum, unique)
30 from nmigen import (Module, Signal, Elaboratable, Cat, Array, Const, Repl,
31 Record)
32 from nmigen.cli import main, rtlil
33 from nmutil.iocontrol import RecordObject
34 from nmigen.utils import log2_int
35 from nmigen.lib.coding import Decoder
36 from nmutil.util import Display
37 from nmutil.latch import SRLatch
38
39 #from nmutil.plru import PLRU
40 from soc.experiment.plru import PLRU, PLRUs
41 from soc.experiment.cache_ram import CacheRam
42
43 from soc.experiment.mem_types import (Fetch1ToICacheType,
44 ICacheToDecode1Type,
45 MMUToICacheType)
46
47 from soc.experiment.wb_types import (WB_ADDR_BITS, WB_DATA_BITS,
48 WB_SEL_BITS, WBAddrType, WBDataType,
49 WBSelType, WBMasterOut, WBSlaveOut,
50 )
51
52 from nmigen_soc.wishbone.bus import Interface
53 from soc.minerva.units.fetch import FetchUnitInterface
54
55
56 # for test
57 from soc.bus.sram import SRAM
58 from nmigen import Memory
59 from nmutil.util import wrap
60 from nmigen.cli import main, rtlil
61
62 # NOTE: to use cxxsim, export NMIGEN_SIM_MODE=cxxsim from the shell
63 # Also, check out the cxxsim nmigen branch, and latest yosys from git
64 from nmutil.sim_tmp_alternative import Simulator, Settle
65
# from microwatt/utils.vhdl
def ispow2(n):
    return n != 0 and (n & (n - 1)) == 0

SIM = 0
# Non-zero to enable log data collection
LOG_LENGTH = 0

class ICacheConfig:
    def __init__(self, XLEN        = 64,
                       LINE_SIZE   = 64,
                       NUM_LINES   = 64,  # Number of lines in a set
                       NUM_WAYS    = 2,   # Number of ways
                       TLB_SIZE    = 64,  # L1 ITLB number of entries
                       TLB_LG_PGSZ = 12): # L1 ITLB log_2(page_size)
        self.XLEN        = XLEN
        self.LINE_SIZE   = LINE_SIZE
        self.NUM_LINES   = NUM_LINES
        self.NUM_WAYS    = NUM_WAYS
        self.TLB_SIZE    = TLB_SIZE
        self.TLB_LG_PGSZ = TLB_LG_PGSZ

        # BRAM organisation: We never access more than wishbone_data_bits
        # at a time so to save resources we make the array only that wide,
        # and use consecutive indices to make a cache "line"
        #
        # self.ROW_SIZE is the width in bytes of the BRAM
        # (based on WB, so 64-bits)
        self.ROW_SIZE = WB_DATA_BITS // 8
        # Number of real address bits that we store
        self.REAL_ADDR_BITS = XLEN-8  # 56 for XLEN=64

        self.ROW_SIZE_BITS = self.ROW_SIZE * 8
        # ROW_PER_LINE is the number of row (wishbone) transactions in a line
        self.ROW_PER_LINE = self.LINE_SIZE // self.ROW_SIZE
        # BRAM_ROWS is the number of rows in BRAM
        # needed to represent the full icache
        self.BRAM_ROWS = self.NUM_LINES * self.ROW_PER_LINE
        # INSN_PER_ROW is the number of 32bit instructions per BRAM row
        self.INSN_PER_ROW = self.ROW_SIZE_BITS // 32

        # Bit fields counts in the address
        #
        # INSN_BITS is the number of bits to select an instruction in a row
        self.INSN_BITS = log2_int(self.INSN_PER_ROW)
        # ROW_BITS is the number of bits to select a row
        self.ROW_BITS = log2_int(self.BRAM_ROWS)
        # ROW_LINE_BITS is the number of bits to select a row within a line
        self.ROW_LINE_BITS = log2_int(self.ROW_PER_LINE)
        # LINE_OFF_BITS is the number of bits for the offset in a cache line
        self.LINE_OFF_BITS = log2_int(self.LINE_SIZE)
        # ROW_OFF_BITS is the number of bits for the offset in a row
        self.ROW_OFF_BITS = log2_int(self.ROW_SIZE)
        # INDEX_BITS is the number of bits to select a cache line
        self.INDEX_BITS = log2_int(self.NUM_LINES)
        # SET_SIZE_BITS is the log base 2 of the set size
        self.SET_SIZE_BITS = self.LINE_OFF_BITS + self.INDEX_BITS
        # TAG_BITS is the number of bits of the tag part of the address
        self.TAG_BITS = self.REAL_ADDR_BITS - self.SET_SIZE_BITS
        # TAG_WIDTH is the width in bits of each way of the tag RAM
        self.TAG_WIDTH = self.TAG_BITS + 7 - ((self.TAG_BITS + 7) % 8)
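        # (i.e. TAG_BITS rounded up to the next multiple of 8, so each
        # way's tag occupies a whole number of bytes of the tag RAM;
        # presumably intended to line up with byte-granular write enables)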

        # WAY_BITS is the number of bits to select a way
        self.WAY_BITS = log2_int(self.NUM_WAYS)
        self.TAG_RAM_WIDTH = self.TAG_BITS * self.NUM_WAYS

        # L1 ITLB
        self.TL_BITS = log2_int(self.TLB_SIZE)
        self.TLB_EA_TAG_BITS = XLEN - (self.TLB_LG_PGSZ + self.TL_BITS)
        self.TLB_PTE_BITS = XLEN

137 print("self.XLEN =", self.XLEN)
138 print("self.BRAM_ROWS =", self.BRAM_ROWS)
139 print("self.INDEX_BITS =", self.INDEX_BITS)
140 print("self.INSN_BITS =", self.INSN_BITS)
141 print("self.INSN_PER_ROW =", self.INSN_PER_ROW)
142 print("self.LINE_SIZE =", self.LINE_SIZE)
143 print("self.LINE_OFF_BITS =", self.LINE_OFF_BITS)
144 print("LOG_LENGTH =", LOG_LENGTH)
145 print("self.NUM_LINES =", self.NUM_LINES)
146 print("self.NUM_WAYS =", self.NUM_WAYS)
147 print("self.REAL_ADDR_BITS =", self.REAL_ADDR_BITS)
148 print("self.ROW_BITS =", self.ROW_BITS)
149 print("self.ROW_OFF_BITS =", self.ROW_OFF_BITS)
150 print("self.ROW_LINE_BITS =", self.ROW_LINE_BITS)
151 print("self.ROW_PER_LINE =", self.ROW_PER_LINE)
152 print("self.ROW_SIZE =", self.ROW_SIZE)
153 print("self.ROW_SIZE_BITS =", self.ROW_SIZE_BITS)
154 print("self.SET_SIZE_BITS =", self.SET_SIZE_BITS)
155 print("SIM =", SIM)
156 print("self.TAG_BITS =", self.TAG_BITS)
157 print("self.TAG_RAM_WIDTH =", self.TAG_RAM_WIDTH)
158 print("self.TAG_BITS =", self.TAG_BITS)
159 print("self.TL_BITS =", self.TL_BITS)
160 print("self.TLB_EA_TAG_BITS =", self.TLB_EA_TAG_BITS)
161 print("self.TLB_LG_PGSZ =", self.TLB_LG_PGSZ)
162 print("self.TLB_PTE_BITS =", self.TLB_PTE_BITS)
163 print("self.TLB_SIZE =", self.TLB_SIZE)
164 print("self.WAY_BITS =", self.WAY_BITS)
165 print()
166
        assert self.LINE_SIZE % self.ROW_SIZE == 0
        assert ispow2(self.LINE_SIZE), "self.LINE_SIZE not power of 2"
        assert ispow2(self.NUM_LINES), "self.NUM_LINES not power of 2"
        assert ispow2(self.ROW_PER_LINE), "self.ROW_PER_LINE not power of 2"
        assert ispow2(self.INSN_PER_ROW), "self.INSN_PER_ROW not power of 2"
        assert (self.ROW_BITS == (self.INDEX_BITS + self.ROW_LINE_BITS)), \
            "geometry bits don't add up"
        assert (self.LINE_OFF_BITS ==
                (self.ROW_OFF_BITS + self.ROW_LINE_BITS)), \
            "geometry bits don't add up"
        assert (self.REAL_ADDR_BITS ==
                (self.TAG_BITS + self.INDEX_BITS + self.LINE_OFF_BITS)), \
            "geometry bits don't add up"
        assert (self.REAL_ADDR_BITS ==
                (self.TAG_BITS + self.ROW_BITS + self.ROW_OFF_BITS)), \
            "geometry bits don't add up"

    # Example of layout for 32 lines of 64 bytes:
    #
    # ..  tag    |index| line  |
    # ..         |   row   |   |
    # ..         |     |   |00| zero                (2)
    # ..         |     |  |-|  | self.INSN_BITS      (1)
    # ..         |     |---|   | self.ROW_LINE_BITS  (3)
    # ..         |     |--- - --| self.LINE_OFF_BITS (6)
    # ..         |        |- --| self.ROW_OFF_BITS   (3)
    # ..         |----- ---|   | self.ROW_BITS       (8)
    # ..         |-----|       | self.INDEX_BITS     (5)
    # .. --------|             | self.TAG_BITS       (53)

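    # Worked example with the default constructor arguments (XLEN=64,
    # LINE_SIZE=64, NUM_LINES=64, NUM_WAYS=2, and WB_DATA_BITS=64):
    #   ROW_SIZE=8 bytes, ROW_PER_LINE=8, BRAM_ROWS=512, INSN_PER_ROW=2,
    #   INSN_BITS=1, ROW_BITS=9, ROW_LINE_BITS=3, LINE_OFF_BITS=6,
    #   ROW_OFF_BITS=3, INDEX_BITS=6, SET_SIZE_BITS=12,
    #   REAL_ADDR_BITS=56, TAG_BITS=44, WAY_BITS=1, TAG_RAM_WIDTH=88
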
    # The cache data BRAM organized as described above for each way
    #subtype cache_row_t is std_ulogic_vector(self.ROW_SIZE_BITS-1 downto 0);
    #
    def RowPerLineValidArray(self):
        return Array(Signal(name="rows_valid_%d" % x)
                     for x in range(self.ROW_PER_LINE))


    # TODO to be passed to nmigen as ram attributes
    # attribute ram_style : string;
    # attribute ram_style of cache_tags : signal is "distributed";

    def TLBRecord(self, name):
        tlb_layout = [('tag', self.TLB_EA_TAG_BITS),
                      ('pte', self.TLB_PTE_BITS)
                      ]
        return Record(tlb_layout, name=name)

    def TLBArray(self):
        return Array(self.TLBRecord("tlb%d" % x)
                     for x in range(self.TLB_SIZE))

    # PLRU output interface
    def PLRUOut(self):
        return Array(Signal(self.WAY_BITS, name="plru_out_%d" % x)
                     for x in range(self.NUM_LINES))

    # Return the cache line index (tag index) for an address
    def get_index(self, addr):
        return addr[self.LINE_OFF_BITS:self.SET_SIZE_BITS]

    # Return the cache row index (data memory) for an address
    def get_row(self, addr):
        return addr[self.ROW_OFF_BITS:self.SET_SIZE_BITS]

    # Return the index of a row within a line
    def get_row_of_line(self, row):
        return row[:self.ROW_BITS][:self.ROW_LINE_BITS]

    # Returns whether the address is in the last row of a line
    def is_last_row_addr(self, addr, last):
        return addr[self.ROW_OFF_BITS:self.LINE_OFF_BITS] == last

    # Returns whether this is the last row of a line
    def is_last_row(self, row, last):
        return self.get_row_of_line(row) == last

    # Return the next row in the current cache line. We use a dedicated
    # function in order to limit the size of the generated adder to be
    # only the bits within a cache line (3 bits with default settings)
    def next_row(self, row):
        row_v = row[0:self.ROW_LINE_BITS] + 1
        return Cat(row_v[:self.ROW_LINE_BITS], row[self.ROW_LINE_BITS:])
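    # e.g. with ROW_LINE_BITS=3, row 0b001_111 (15) becomes 0b001_000 (8):
    # only the three row-within-line bits increment and wrap; the
    # line-select bits above them never see a carry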

    # Read the instruction word for the given address
    # in the current cache row
    def read_insn_word(self, addr, data):
        word = addr[2:self.INSN_BITS+2]
        return data.word_select(word, 32)
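    # (with the default INSN_PER_ROW=2, a row is one 64-bit doubleword
    # holding two instructions, and addr[2] alone selects the low or
    # high 32-bit word)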

    # Get the tag value from the address
    def get_tag(self, addr):
        return addr[self.SET_SIZE_BITS:self.REAL_ADDR_BITS]

    # Read a tag from a tag memory row
    def read_tag(self, way, tagset):
        return tagset.word_select(way, self.TAG_BITS)

    # Write a tag to tag memory row
    def write_tag(self, way, tagset, tag):
        return self.read_tag(way, tagset).eq(tag)

    # Simple hash for direct-mapped TLB index
    def hash_ea(self, addr):
        hsh = (addr[self.TLB_LG_PGSZ:self.TLB_LG_PGSZ + self.TL_BITS] ^
               addr[self.TLB_LG_PGSZ + self.TL_BITS:
                    self.TLB_LG_PGSZ + 2 * self.TL_BITS] ^
               addr[self.TLB_LG_PGSZ + 2 * self.TL_BITS:
                    self.TLB_LG_PGSZ + 3 * self.TL_BITS])
        return hsh
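    # e.g. with TLB_LG_PGSZ=12 and TL_BITS=6 this XOR-folds address bits
    # [12:18] ^ [18:24] ^ [24:30] into a 6-bit index, spreading pages
    # that would otherwise alias across the direct-mapped TLB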


# Cache reload state machine
@unique
class State(Enum):
    IDLE     = 0
    CLR_TAG  = 1
    WAIT_ACK = 2


class RegInternal(RecordObject):
    def __init__(self, cfg):
        super().__init__()
        # Cache hit state (Latches for 1 cycle BRAM access)
        self.hit_way      = Signal(cfg.WAY_BITS)
        self.hit_nia      = Signal(64)
        self.hit_smark    = Signal()
        self.hit_valid    = Signal()

        # Cache miss state (reload state machine)
        self.state        = Signal(State, reset=State.IDLE)
        self.wb           = WBMasterOut("wb")
        self.req_adr      = Signal(64)
        self.store_way    = Signal(cfg.WAY_BITS)
        self.store_index  = Signal(cfg.INDEX_BITS)
        self.store_row    = Signal(cfg.ROW_BITS)
        self.store_tag    = Signal(cfg.TAG_BITS)
        self.store_valid  = Signal()
        self.end_row_ix   = Signal(cfg.ROW_LINE_BITS)
        self.rows_valid   = cfg.RowPerLineValidArray()

        # TLB miss state
        self.fetch_failed = Signal()


class ICache(FetchUnitInterface, Elaboratable, ICacheConfig):
    """64 bit set associative icache. All instructions are 4B aligned."""
    def __init__(self, pspec):
        FetchUnitInterface.__init__(self, pspec)
        self.i_in  = Fetch1ToICacheType(name="i_in")
        self.i_out = ICacheToDecode1Type(name="i_out")

        self.m_in  = MMUToICacheType(name="m_in")

        self.stall_in  = Signal()
        self.stall_out = Signal()
        self.flush_in  = Signal()
        self.inval_in  = Signal()

        # standard naming (wired to non-standard for compatibility)
        self.bus = Interface(addr_width=32,
                             data_width=64,
                             granularity=8,
                             features={'stall'},
                             #alignment=0,
                             name="icache_wb")

        self.log_out = Signal(54)

        # use FetchUnitInterface, helps keep some unit tests running
        self.use_fetch_iface = False

        # test if microwatt compatibility is to be enabled
        self.microwatt_compat = (hasattr(pspec, "microwatt_compat") and
                                 (pspec.microwatt_compat == True))

        XLEN = pspec.XLEN

        if self.microwatt_compat:
            # reduce way sizes and num lines
            ICacheConfig.__init__(self, LINE_SIZE=XLEN,
                                        XLEN=XLEN,
                                        NUM_LINES=4,
                                        NUM_WAYS=1,
                                        TLB_SIZE=4  # needs device-tree update
                                  )
        else:
            ICacheConfig.__init__(self, LINE_SIZE=XLEN, XLEN=XLEN)

    def use_fetch_interface(self):
        self.use_fetch_iface = True

    # Generate a cache RAM for each way
    def rams(self, m, r, cache_out_row, use_previous,
             replace_way, req_row):

        comb = m.d.comb
        sync = m.d.sync

        bus, stall_in = self.bus, self.stall_in

        # read condition (for every cache ram)
        do_read = Signal()
        comb += do_read.eq(~(stall_in | use_previous))

        rd_addr = Signal(self.ROW_BITS)
        wr_addr = Signal(self.ROW_BITS)
        comb += rd_addr.eq(req_row)
        comb += wr_addr.eq(r.store_row)

        # binary-to-unary converters: replace-way enabled by bus.ack,
        # hit-way left permanently enabled
        m.submodules.replace_way_e = re = Decoder(self.NUM_WAYS)
        m.submodules.hit_way_e = he = Decoder(self.NUM_WAYS)
        comb += re.i.eq(replace_way)
        comb += re.n.eq(~bus.ack)
        comb += he.i.eq(r.hit_way)

        for i in range(self.NUM_WAYS):
            do_write = Signal(name="do_wr_%d" % i)
            d_out    = Signal(self.ROW_SIZE_BITS, name="d_out_%d" % i)
            wr_sel   = Signal(self.ROW_SIZE, name="wr_sel_%d" % i)

            way = CacheRam(self.ROW_BITS, self.ROW_SIZE_BITS,
                           TRACE=True, ram_num=i)
            m.submodules["cacheram_%d" % i] = way

            comb += way.rd_en.eq(do_read)
            comb += way.rd_addr.eq(rd_addr)
            comb += d_out.eq(way.rd_data_o)
            comb += way.wr_sel.eq(wr_sel)
            comb += way.wr_addr.eq(wr_addr)
            comb += way.wr_data.eq(bus.dat_r)

            comb += do_write.eq(re.o[i])

            with m.If(do_write):
                sync += Display("cache write adr: %x data: %lx",
                                wr_addr, way.wr_data)

            with m.If(he.o[i]):
                comb += cache_out_row.eq(d_out)
                with m.If(do_read):
                    sync += Display("cache read adr: %x data: %x",
                                    req_row, d_out)

            comb += wr_sel.eq(Repl(do_write, self.ROW_SIZE))
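
    # Note on the write path above: replace_way_e's output is one-hot
    # and enabled only while bus.ack is asserted, so exactly one way's
    # BRAM is written per accepted wishbone beat, and wr_sel replicates
    # do_write across every byte lane: reloads always write whole rows,
    # never partial ones.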

    # Generate PLRUs
    def maybe_plrus(self, m, r, plru_victim):
        comb = m.d.comb

        if self.NUM_WAYS == 0:
            return

        m.submodules.plrus = plru = PLRUs(self.NUM_LINES, self.WAY_BITS)
        comb += plru.way.eq(r.hit_way)
        comb += plru.valid.eq(r.hit_valid)
        comb += plru.index.eq(self.get_index(r.hit_nia))
        comb += plru.isel.eq(r.store_index)    # select victim
        comb += plru_victim.eq(plru.o_index)   # selected victim

    # TLB hit detection and real address generation
    def itlb_lookup(self, m, tlb_req_index, itlb, itlb_valid,
                    real_addr, ra_valid, eaa_priv,
                    priv_fault, access_ok):

        comb = m.d.comb

        i_in = self.i_in

        # use an *asynchronous* Memory read port here (combinatorial)
        m.submodules.rd_tlb = rd_tlb = self.tlbmem.read_port(domain="comb")
        tlb = self.TLBRecord("tlb_rdport")
        pte, ttag = tlb.pte, tlb.tag

        comb += tlb_req_index.eq(self.hash_ea(i_in.nia))
        comb += rd_tlb.addr.eq(tlb_req_index)
        comb += tlb.eq(rd_tlb.data)

        with m.If(i_in.virt_mode):
            comb += real_addr.eq(Cat(i_in.nia[:self.TLB_LG_PGSZ],
                                     pte[self.TLB_LG_PGSZ:self.REAL_ADDR_BITS]))

            with m.If(ttag == i_in.nia[self.TLB_LG_PGSZ + self.TL_BITS:64]):
                comb += ra_valid.eq(itlb_valid.q.bit_select(tlb_req_index, 1))

            comb += eaa_priv.eq(pte[3])

        with m.Else():
            comb += real_addr.eq(i_in.nia[:self.REAL_ADDR_BITS])
            comb += ra_valid.eq(1)
            comb += eaa_priv.eq(1)

        # No IAMR, so no KUEP support for now
        comb += priv_fault.eq(eaa_priv & ~i_in.priv_mode)
        comb += access_ok.eq(ra_valid & ~priv_fault)

    # iTLB update
    def itlb_update(self, m, itlb, itlb_valid):
        comb = m.d.comb
        sync = m.d.sync

        m_in = self.m_in

        wr_index = Signal(self.TL_BITS)
        wr_unary = Signal(self.TLB_SIZE)
        comb += wr_index.eq(self.hash_ea(m_in.addr))
        comb += wr_unary.eq(1<<wr_index)

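        # itlb_valid is an SRLatch array: .s and .r are one-hot set/reset
        # strobes (wr_unary selects the entry), de-asserted by default
        # every cycle unless one of the TLB operations below fires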
        m.submodules.wr_tlb = wr_tlb = self.tlbmem.write_port()
        sync += itlb_valid.s.eq(0)
        sync += itlb_valid.r.eq(0)

        with m.If(m_in.tlbie & m_in.doall):
            # Clear all valid bits
            sync += itlb_valid.r.eq(-1)

        with m.Elif(m_in.tlbie):
            # Clear entry regardless of hit or miss
            sync += itlb_valid.r.eq(wr_unary)

        with m.Elif(m_in.tlbld):
            tlb = self.TLBRecord("tlb_wrport")
            comb += tlb.tag.eq(m_in.addr[self.TLB_LG_PGSZ + self.TL_BITS:64])
            comb += tlb.pte.eq(m_in.pte)
            comb += wr_tlb.en.eq(1)
            comb += wr_tlb.addr.eq(wr_index)
            comb += wr_tlb.data.eq(tlb)
            sync += itlb_valid.s.eq(wr_unary)

    # Cache hit detection, output to fetch2 and other misc logic
    def icache_comb(self, m, use_previous, r, req_index, req_row,
                    req_hit_way, req_tag, real_addr, req_laddr,
                    cache_valids, access_ok,
                    req_is_hit, req_is_miss, replace_way,
                    plru_victim, cache_out_row):

        comb = m.d.comb
        m.submodules.rd_tag = rd_tag = self.tagmem.read_port(domain="comb")

        i_in, i_out, bus = self.i_in, self.i_out, self.bus
        flush_in, stall_out = self.flush_in, self.stall_out

        is_hit  = Signal()
        hit_way = Signal(self.WAY_BITS)

        # i_in.sequential means that i_in.nia this cycle is 4 more than
        # last cycle.  If we read more than 32 bits at a time, had a
        # cache hit last cycle, and we don't want the first 32-bit chunk
        # then we can keep the data we read last cycle and just use that.
        with m.If(i_in.nia[2:self.INSN_BITS+2] != 0):
            comb += use_previous.eq(i_in.sequential & r.hit_valid)

        # Extract line, row and tag from request
        comb += req_index.eq(self.get_index(i_in.nia))
        comb += req_row.eq(self.get_row(i_in.nia))
        comb += req_tag.eq(self.get_tag(real_addr))

        # Calculate address of beginning of cache row, will be
        # used for cache miss processing if needed
        comb += req_laddr.eq(Cat(
                 Const(0, self.ROW_OFF_BITS),
                 real_addr[self.ROW_OFF_BITS:self.REAL_ADDR_BITS],
                ))

        # Test if pending request is a hit on any way
        hitcond = Signal()
        rowvalid = Signal()
        comb += rowvalid.eq(r.rows_valid[req_row % self.ROW_PER_LINE])
        comb += hitcond.eq((r.state == State.WAIT_ACK) &
                           (req_index == r.store_index) &
                           rowvalid
                          )
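        # (hitcond services hits on the line currently being reloaded:
        # once a row's rows_valid bit has been set by the reload machine,
        # a fetch from that row can hit without waiting for the whole
        # line to complete)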
        # i_in.req asserts Decoder active
        cvb = Signal(self.NUM_WAYS)
        ctag = Signal(self.TAG_RAM_WIDTH)
        comb += rd_tag.addr.eq(req_index)
        comb += ctag.eq(rd_tag.data)
        comb += cvb.eq(cache_valids.q.word_select(req_index, self.NUM_WAYS))
        m.submodules.store_way_e = se = Decoder(self.NUM_WAYS)
        comb += se.i.eq(r.store_way)
        comb += se.n.eq(~i_in.req)
        for i in range(self.NUM_WAYS):
            tagi = Signal(self.TAG_BITS, name="tag_i%d" % i)
            hit_test = Signal(name="hit_test%d" % i)
            is_tag_hit = Signal(name="is_tag_hit_%d" % i)
            comb += tagi.eq(self.read_tag(i, ctag))
            comb += hit_test.eq(se.o[i])
            comb += is_tag_hit.eq((cvb[i] | (hitcond & hit_test)) &
                                  (tagi == req_tag))
            with m.If(is_tag_hit):
                comb += hit_way.eq(i)
                comb += is_hit.eq(1)

        # Generate the "hit" and "miss" signals
        # for the synchronous blocks
        with m.If(i_in.req & access_ok & ~flush_in):
            comb += req_is_hit.eq(is_hit)
            comb += req_is_miss.eq(~is_hit)

        comb += req_hit_way.eq(hit_way)

        # The way to replace on a miss
        with m.If(r.state == State.CLR_TAG):
            comb += replace_way.eq(plru_victim)
        with m.Else():
            comb += replace_way.eq(r.store_way)

        # Output instruction from current cache row
        #
        # Note: This is a mild violation of our design principle of
        # having pipeline stages output from a clean latch.  In this
        # case we output the result of a mux.  The alternative would
        # be to output an entire row, which I prefer not to do just yet
        # as it would force fetch2 to know about some of the cache
        # geometry information.
        comb += i_out.insn.eq(self.read_insn_word(r.hit_nia, cache_out_row))
        comb += i_out.valid.eq(r.hit_valid)
        comb += i_out.nia.eq(r.hit_nia)
        comb += i_out.stop_mark.eq(r.hit_smark)
        comb += i_out.fetch_failed.eq(r.fetch_failed)

        # Stall fetch1 if we have a miss on cache or TLB
        # or a protection fault
        comb += stall_out.eq(~(is_hit & access_ok))

        # Wishbone requests output (from the cache miss reload machine)
        comb += bus.we.eq(r.wb.we)
        comb += bus.adr.eq(r.wb.adr)
        comb += bus.sel.eq(r.wb.sel)
        comb += bus.stb.eq(r.wb.stb)
        comb += bus.dat_w.eq(r.wb.dat)
        comb += bus.cyc.eq(r.wb.cyc)

    # Cache hit synchronous machine
    def icache_hit(self, m, use_previous, r, req_is_hit, req_hit_way,
                   req_index, req_tag, real_addr):
        sync = m.d.sync

        i_in, stall_in = self.i_in, self.stall_in
        flush_in = self.flush_in

        # keep outputs to fetch2 unchanged on a stall
        # except that flush or reset sets valid to 0
        # If use_previous, keep the same data as last
        # cycle and use the second half
        with m.If(stall_in | use_previous):
            with m.If(flush_in):
                sync += r.hit_valid.eq(0)
        with m.Else():
            # On a hit, latch the request for the next cycle,
            # when the BRAM data will be available on the
            # cache_out output of the corresponding way
            sync += r.hit_valid.eq(req_is_hit)

            with m.If(req_is_hit):
                sync += r.hit_way.eq(req_hit_way)
                sync += Display("cache hit nia:%x IR:%x SM:%x idx:%x tag:%x "
                                "way:%x RA:%x", i_in.nia, i_in.virt_mode,
                                i_in.stop_mark, req_index, req_tag,
                                req_hit_way, real_addr)

        with m.If(~stall_in):
            # Send stop marks and NIA down regardless of validity
            sync += r.hit_smark.eq(i_in.stop_mark)
            sync += r.hit_nia.eq(i_in.nia)

    def icache_miss_idle(self, m, r, req_is_miss, req_laddr,
                         req_index, req_tag, replace_way, real_addr):
        comb = m.d.comb
        sync = m.d.sync

        i_in = self.i_in

        # Reset per-row valid flags, only used in WAIT_ACK
        for i in range(self.ROW_PER_LINE):
            sync += r.rows_valid[i].eq(0)

        # We need to read a cache line
        with m.If(req_is_miss):
            sync += Display(
                     "cache miss nia:%x IR:%x SM:%x idx:%x "
                     " way:%x tag:%x RA:%x", i_in.nia,
                     i_in.virt_mode, i_in.stop_mark, req_index,
                     replace_way, req_tag, real_addr)

            # Keep track of our index and way for subsequent stores
            st_row = Signal(self.ROW_BITS)
            comb += st_row.eq(self.get_row(req_laddr))
            sync += r.store_index.eq(req_index)
            sync += r.store_row.eq(st_row)
            sync += r.store_tag.eq(req_tag)
            sync += r.store_valid.eq(1)
            sync += r.end_row_ix.eq(self.get_row_of_line(st_row) - 1)

            # Prep for first wishbone read.  We calculate the address
            # of the start of the cache line and start the WB cycle.
            sync += r.req_adr.eq(req_laddr)
            sync += r.wb.cyc.eq(1)
            sync += r.wb.stb.eq(1)

            # Track that we had one request sent
            sync += r.state.eq(State.CLR_TAG)

    def icache_miss_clr_tag(self, m, r, replace_way,
                            req_index,
                            cache_valids):
        comb = m.d.comb
        sync = m.d.sync
        m.submodules.wr_tag = wr_tag = self.tagmem.write_port(
                                            granularity=self.TAG_BITS)

        # Get victim way from plru
        sync += r.store_way.eq(replace_way)

        # Force misses on that way while reloading that line
        # (2D index, 1st dimension: NUM_WAYS)
        idx = req_index*self.NUM_WAYS + replace_way
        comb += cache_valids.r.eq(1<<idx)

        # use write-port "granularity" to select the tag to write to
        # TODO: the Memory should be multiplied-up (by NUM_TAGS)
        tagset = Signal(self.TAG_RAM_WIDTH)
        comb += tagset.eq(r.store_tag << (replace_way*self.TAG_BITS))
        comb += wr_tag.en.eq(1<<replace_way)
        comb += wr_tag.addr.eq(r.store_index)
        comb += wr_tag.data.eq(tagset)

        sync += r.state.eq(State.WAIT_ACK)

    def icache_miss_wait_ack(self, m, r, replace_way, inval_in,
                             cache_valids):
        comb = m.d.comb
        sync = m.d.sync

        bus = self.bus

        # If we are still sending requests, was one accepted?
        with m.If(~bus.stall & r.wb.stb):
            # That was the last word?  We are done sending.  Clear stb
            with m.If(self.is_last_row_addr(r.req_adr, r.end_row_ix)):
                sync += Display("IS_LAST_ROW_ADDR r.wb.adr:%x "
                                "r.end_row_ix:%x r.wb.stb:%x",
                                r.wb.adr, r.end_row_ix, r.wb.stb)
                sync += r.wb.stb.eq(0)

            # Calculate the next row address
            rarange = Signal(self.LINE_OFF_BITS - self.ROW_OFF_BITS)
            comb += rarange.eq(r.req_adr[self.ROW_OFF_BITS:
                                         self.LINE_OFF_BITS] + 1)
            sync += r.req_adr[self.ROW_OFF_BITS:self.LINE_OFF_BITS].eq(rarange)
            sync += Display("RARANGE r.req_adr:%x rarange:%x "
                            "r.wb.stb:%x",
                            r.req_adr, rarange, r.wb.stb)

        # Incoming acks processing
        with m.If(bus.ack):
            sync += Display("WB_IN_ACK data:%x", bus.dat_r)

            sync += r.rows_valid[r.store_row % self.ROW_PER_LINE].eq(1)

            # Check for completion
            with m.If(self.is_last_row(r.store_row, r.end_row_ix)):
                # Complete wishbone cycle
                sync += r.wb.cyc.eq(0)
                # be nice, clear addr
                sync += r.req_adr.eq(0)

                # Cache line is now valid (2D index again)
                idx = r.store_index*self.NUM_WAYS + replace_way
                valid = r.store_valid & ~inval_in
                comb += cache_valids.s.eq(1<<idx)
                sync += r.state.eq(State.IDLE)

            # move on to next request in row
            # Increment store row counter
            sync += r.store_row.eq(self.next_row(r.store_row))

    # Cache miss/reload synchronous machine
    def icache_miss(self, m, r, req_is_miss,
                    req_index, req_laddr, req_tag, replace_way,
                    cache_valids, access_ok, real_addr):
        comb = m.d.comb
        sync = m.d.sync

        i_in, bus, m_in = self.i_in, self.bus, self.m_in
        stall_in, flush_in = self.stall_in, self.flush_in
        inval_in = self.inval_in

        comb += r.wb.sel.eq(-1)
        comb += r.wb.adr.eq(r.req_adr[3:])
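        # (the wishbone bus is 64-bit word-addressed, hence dropping the
        # low 3 bits of the byte address; sel is all-ones because reloads
        # always transfer full rows)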

        # Process cache invalidations
        with m.If(inval_in):
            comb += cache_valids.r.eq(-1)
            sync += r.store_valid.eq(0)

        # Main state machine
        with m.Switch(r.state):

            with m.Case(State.IDLE):
                self.icache_miss_idle(m, r, req_is_miss, req_laddr,
                                      req_index, req_tag, replace_way,
                                      real_addr)

            with m.Case(State.CLR_TAG, State.WAIT_ACK):
                with m.If(r.state == State.CLR_TAG):
                    self.icache_miss_clr_tag(m, r, replace_way,
                                             req_index,
                                             cache_valids)

                self.icache_miss_wait_ack(m, r, replace_way, inval_in,
                                          cache_valids)

        # TLB miss and protection fault processing
        with m.If(flush_in | m_in.tlbld):
            sync += r.fetch_failed.eq(0)
        with m.Elif(i_in.req & ~access_ok & ~stall_in):
            sync += r.fetch_failed.eq(1)

    # icache_log: if LOG_LENGTH > 0 generate
    def icache_log(self, m, req_hit_way, ra_valid, access_ok,
                   req_is_miss, req_is_hit, lway, wstate, r):
        comb = m.d.comb
        sync = m.d.sync

        bus, i_out = self.bus, self.i_out
        log_out, stall_out = self.log_out, self.stall_out

        # Output data to logger
        for i in range(LOG_LENGTH):
            log_data = Signal(54)
            lway     = Signal(self.WAY_BITS)
            wstate   = Signal()

            sync += lway.eq(req_hit_way)
            sync += wstate.eq(0)

            with m.If(r.state != State.IDLE):
                sync += wstate.eq(1)

            sync += log_data.eq(Cat(
                     ra_valid, access_ok, req_is_miss, req_is_hit,
                     lway, wstate, r.hit_nia[2:6], r.fetch_failed,
                     stall_out, bus.stall, r.wb.cyc, r.wb.stb,
                     r.real_addr[3:6], bus.ack, i_out.insn, i_out.valid
                    ))
            comb += log_out.eq(log_data)

    def elaborate(self, platform):

        m = Module()
        comb = m.d.comb

        # Cache-Ways "valid" indicators.  This is a 2D Signal, by the
        # number of ways and the number of lines.
        vec = SRLatch(sync=True, llen=self.NUM_WAYS*self.NUM_LINES,
                      name="cachevalids")
        m.submodules.cache_valids = cache_valids = vec

        # TLB Array
        itlb = self.TLBArray()
        vec = SRLatch(sync=False, llen=self.TLB_SIZE, name="tlbvalids")
        m.submodules.itlb_valids = itlb_valid = vec

        # TODO to be passed to nmigen as ram attributes
        # attribute ram_style of itlb_tags : signal is "distributed";
        # attribute ram_style of itlb_ptes : signal is "distributed";

        # Privilege bit from PTE EAA field
        eaa_priv = Signal()

        r = RegInternal(self)

        # Async signals on incoming request
        req_index   = Signal(self.INDEX_BITS)
        req_row     = Signal(self.ROW_BITS)
        req_hit_way = Signal(self.WAY_BITS)
        req_tag     = Signal(self.TAG_BITS)
        req_is_hit  = Signal()
        req_is_miss = Signal()
        req_laddr   = Signal(64)

        tlb_req_index = Signal(self.TL_BITS)
        real_addr     = Signal(self.REAL_ADDR_BITS)
        ra_valid      = Signal()
        priv_fault    = Signal()
        access_ok     = Signal()
        use_previous  = Signal()

        cache_out_row = Signal(self.ROW_SIZE_BITS)

        plru_victim = Signal(self.WAY_BITS)
        replace_way = Signal(self.WAY_BITS)

        self.tlbmem = Memory(depth=self.TLB_SIZE,
                             width=self.TLB_EA_TAG_BITS+self.TLB_PTE_BITS,
                             #attrs={'syn_ramstyle': "block_ram"}
                            )
        self.tagmem = Memory(depth=self.NUM_LINES,
                             width=self.TAG_RAM_WIDTH,
                             #attrs={'syn_ramstyle': "block_ram"}
                            )

        # call sub-functions putting everything together,
        # using shared signals established above
        self.rams(m, r, cache_out_row, use_previous, replace_way, req_row)
        self.maybe_plrus(m, r, plru_victim)
        self.itlb_lookup(m, tlb_req_index, itlb, itlb_valid, real_addr,
                         ra_valid, eaa_priv, priv_fault,
                         access_ok)
        self.itlb_update(m, itlb, itlb_valid)
        self.icache_comb(m, use_previous, r, req_index, req_row, req_hit_way,
                         req_tag, real_addr, req_laddr,
                         cache_valids,
                         access_ok, req_is_hit, req_is_miss,
                         replace_way, plru_victim, cache_out_row)
        self.icache_hit(m, use_previous, r, req_is_hit, req_hit_way,
                        req_index, req_tag, real_addr)
        self.icache_miss(m, r, req_is_miss, req_index,
                         req_laddr, req_tag, replace_way,
                         cache_valids,
                         access_ok, real_addr)
        #self.icache_log(m, log_out, req_hit_way, ra_valid, access_ok,
        #                req_is_miss, req_is_hit, lway, wstate, r)

        # don't connect up to FetchUnitInterface so that some unit tests
        # can continue to operate
        if not self.use_fetch_iface:
            return m

        # connect to FetchUnitInterface.  FetchUnitInterface is undocumented
        # so needs checking and iterative revising
        i_in, bus, i_out = self.i_in, self.bus, self.i_out
        comb += i_in.req.eq(self.a_i_valid)
        comb += i_in.nia.eq(self.a_pc_i)
        comb += self.stall_in.eq(self.a_stall_i)
        comb += self.f_fetch_err_o.eq(i_out.fetch_failed)
        comb += self.f_badaddr_o.eq(i_out.nia)
        comb += self.f_instr_o.eq(i_out.insn)
        comb += self.f_busy_o.eq(~i_out.valid)  # probably

        # TODO, connect dcache wb_in/wb_out to "standard" nmigen Wishbone bus
        ibus = self.ibus
        comb += ibus.adr.eq(self.bus.adr)
        comb += ibus.dat_w.eq(self.bus.dat_w)
        comb += ibus.sel.eq(self.bus.sel)
        comb += ibus.cyc.eq(self.bus.cyc)
        comb += ibus.stb.eq(self.bus.stb)
        comb += ibus.we.eq(self.bus.we)

        comb += self.bus.dat_r.eq(ibus.dat_r)
        comb += self.bus.ack.eq(ibus.ack)
        if hasattr(ibus, "stall"):
            comb += self.bus.stall.eq(ibus.stall)
        else:
            # fake-up the wishbone stall signal to comply with pipeline mode
            # same thing is done in dcache.py
            comb += self.bus.stall.eq(self.bus.cyc & ~self.bus.ack)
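            # (holding stall for the whole of a not-yet-acked cycle
            # limits the master to one outstanding request at a time:
            # a correct, if conservative, adaptation of a classic-cycle
            # slave to pipelined mode)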

        return m


def icache_sim(dut):
    i_in = dut.i_in
    i_out = dut.i_out
    m_out = dut.m_in

    yield i_in.priv_mode.eq(1)
    yield i_in.req.eq(0)
    yield i_in.nia.eq(0)
    yield i_in.stop_mark.eq(0)
    yield m_out.tlbld.eq(0)
    yield m_out.tlbie.eq(0)
    yield m_out.addr.eq(0)
    yield m_out.pte.eq(0)
    yield
    yield
    yield
    yield

    # miss, stalls for a bit
    yield i_in.req.eq(1)
    yield i_in.nia.eq(Const(0x0000000000000004, 64))
    yield
    valid = yield i_out.valid
    while not valid:
        yield
        valid = yield i_out.valid
    yield i_in.req.eq(0)

    insn = yield i_out.insn
    nia = yield i_out.nia
    assert insn == 0x00000001, \
        "insn @%x=%x expected 00000001" % (nia, insn)
    yield i_in.req.eq(0)
    yield

    # hit
    yield i_in.req.eq(1)
    yield i_in.nia.eq(Const(0x0000000000000008, 64))
    yield
    valid = yield i_out.valid
    while not valid:
        yield
        valid = yield i_out.valid
    yield i_in.req.eq(0)

    nia = yield i_out.nia
    insn = yield i_out.insn
    yield
    assert insn == 0x00000002, \
        "insn @%x=%x expected 00000002" % (nia, insn)

    # another miss
    yield i_in.req.eq(1)
    yield i_in.nia.eq(Const(0x0000000000000040, 64))
    yield
    valid = yield i_out.valid
    while not valid:
        yield
        valid = yield i_out.valid
    yield i_in.req.eq(0)

    nia = yield i_out.nia
    insn = yield i_out.insn
    assert insn == 0x00000010, \
        "insn @%x=%x expected 00000010" % (nia, insn)

    # test something that aliases (this only works because
    # the unit test SRAM is a depth of 512)
    yield i_in.req.eq(1)
    yield i_in.nia.eq(Const(0x0000000000000100, 64))
    yield
    yield
    valid = yield i_out.valid
    assert not valid
    for i in range(30):
        yield
    yield
    insn = yield i_out.insn
    valid = yield i_out.valid
    insn = yield i_out.insn
    assert valid
    assert insn == 0x00000040, \
        "insn @%x=%x expected 00000040" % (nia, insn)
    yield i_in.req.eq(0)


def test_icache(mem):
    from soc.config.test.test_loadstore import TestMemPspec
    pspec = TestMemPspec(addr_wid=32,
                         mask_wid=8,
                         reg_wid=64,
                         XLEN=32,
                         )
    dut = ICache(pspec)

    memory = Memory(width=64, depth=512, init=mem)
    sram = SRAM(memory=memory, granularity=8)

    m = Module()

    m.submodules.icache = dut
    m.submodules.sram = sram

    m.d.comb += sram.bus.cyc.eq(dut.bus.cyc)
    m.d.comb += sram.bus.stb.eq(dut.bus.stb)
    m.d.comb += sram.bus.we.eq(dut.bus.we)
    m.d.comb += sram.bus.sel.eq(dut.bus.sel)
    m.d.comb += sram.bus.adr.eq(dut.bus.adr)
    m.d.comb += sram.bus.dat_w.eq(dut.bus.dat_w)

    m.d.comb += dut.bus.ack.eq(sram.bus.ack)
    m.d.comb += dut.bus.dat_r.eq(sram.bus.dat_r)

    # nmigen Simulation
    sim = Simulator(m)
    sim.add_clock(1e-6)

    sim.add_sync_process(wrap(icache_sim(dut)))
    with sim.write_vcd('test_icache.vcd'):
        sim.run()


if __name__ == '__main__':
    from soc.config.test.test_loadstore import TestMemPspec
    pspec = TestMemPspec(addr_wid=64,
                         mask_wid=8,
                         XLEN=32,
                         reg_wid=64,
                         )
    dut = ICache(pspec)
    vl = rtlil.convert(dut, ports=[])
    with open("test_icache.il", "w") as f:
        f.write(vl)

    # set up memory every 32-bits with incrementing values 0 1 2 ...
    mem = []
    for i in range(512):
        mem.append((i*2) | ((i*2+1)<<32))
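    # (64-bit word i holds 2*i in its low half and 2*i+1 in its high
    # half, i.e. the 32-bit value at byte address A is A//4 -- which is
    # what the asserts in icache_sim check: nia 0x4 -> insn 1, 0x8 -> 2,
    # 0x40 -> 0x10, 0x100 -> 0x40)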

    test_icache(mem)