1 """ICache
2
3 based on Anton Blanchard microwatt icache.vhdl
4
5 Set associative icache
6
7 TODO (in no specific order):
8 * Add debug interface to inspect cache content
9 * Add snoop/invalidate path
10 * Add multi-hit error detection
11 * Pipelined bus interface (wb or axi)
12 * Maybe add parity? There's a few bits free in each BRAM row on Xilinx
13 * Add optimization: service hits on partially loaded lines
14 * Add optimization: (maybe) interrupt reload on fluch/redirect
15 * Check if playing with the geometry of the cache tags allow for more
16 efficient use of distributed RAM and less logic/muxes. Currently we
17 write TAG_BITS width which may not match full ram blocks and might
18 cause muxes to be inferred for "partial writes".
19 * Check if making the read size of PLRU a ROM helps utilization
20
21 Links:
22
23 * https://bugs.libre-soc.org/show_bug.cgi?id=485
24 * https://libre-soc.org/irclog-microwatt/%23microwatt.2021-12-07.log.html
25 (discussion about brams for ECP5)
26
27 """

from enum import (Enum, unique)
from nmigen import (Module, Signal, Elaboratable, Cat, Array, Const, Repl,
                    Record)
from nmigen.cli import main, rtlil
from nmutil.iocontrol import RecordObject
from nmigen.utils import log2_int
from nmigen.lib.coding import Decoder
from nmutil.util import Display
from nmutil.latch import SRLatch

#from nmutil.plru import PLRU
from soc.experiment.plru import PLRU, PLRUs
from soc.experiment.cache_ram import CacheRam

from soc.experiment.mem_types import (Fetch1ToICacheType,
                                      ICacheToDecode1Type,
                                      MMUToICacheType)

from soc.experiment.wb_types import (WB_ADDR_BITS, WB_DATA_BITS,
                                     WB_SEL_BITS, WBAddrType, WBDataType,
                                     WBSelType, WBMasterOut, WBSlaveOut,
                                     )

from nmigen_soc.wishbone.bus import Interface
from soc.minerva.units.fetch import FetchUnitInterface


# for test
from soc.bus.sram import SRAM
from nmigen import Memory
from nmutil.util import wrap
from nmigen.cli import main, rtlil

# NOTE: to use cxxsim, export NMIGEN_SIM_MODE=cxxsim from the shell
# Also, check out the cxxsim nmigen branch, and latest yosys from git
from nmutil.sim_tmp_alternative import Simulator, Settle

# from microwatt/utils.vhdl
def ispow2(n):
    return n != 0 and (n & (n - 1)) == 0
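# quick examples: ispow2(64) -> True, ispow2(0) -> False, ispow2(24) -> False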

SIM = 0
# Non-zero to enable log data collection
LOG_LENGTH = 0

class ICacheConfig:
    def __init__(self, XLEN=64,
                       LINE_SIZE=64,
                       NUM_LINES=64,     # Number of lines in a set
                       NUM_WAYS=2,       # Number of ways
                       TLB_SIZE=64,      # L1 ITLB number of entries
                       TLB_LG_PGSZ=12):  # L1 ITLB log_2(page_size)
        self.XLEN = XLEN
        self.LINE_SIZE = LINE_SIZE
        self.NUM_LINES = NUM_LINES
        self.NUM_WAYS = NUM_WAYS
        self.TLB_SIZE = TLB_SIZE
        self.TLB_LG_PGSZ = TLB_LG_PGSZ

        # BRAM organisation: We never access more than wishbone_data_bits
        # at a time so to save resources we make the array only that wide,
        # and use consecutive indices to make a cache "line"
        #
        # self.ROW_SIZE is the width in bytes of the BRAM
        # (based on WB, so 64-bits)
        self.ROW_SIZE = WB_DATA_BITS // 8
        # Number of real address bits that we store
        self.REAL_ADDR_BITS = XLEN-8  # 56 for XLEN=64

        self.ROW_SIZE_BITS = self.ROW_SIZE * 8
        # ROW_PER_LINE is the number of row (wishbone) transactions in a line
        self.ROW_PER_LINE = self.LINE_SIZE // self.ROW_SIZE
        # BRAM_ROWS is the number of rows in BRAM
        # needed to represent the full icache
        self.BRAM_ROWS = self.NUM_LINES * self.ROW_PER_LINE
        # INSN_PER_ROW is the number of 32bit instructions per BRAM row
        self.INSN_PER_ROW = self.ROW_SIZE_BITS // 32

        # Bit field counts in the address
        #
        # INSN_BITS is the number of bits to select an instruction in a row
        self.INSN_BITS = log2_int(self.INSN_PER_ROW)
        # ROW_BITS is the number of bits to select a row
        self.ROW_BITS = log2_int(self.BRAM_ROWS)
        # ROW_LINE_BITS is the number of bits to select a row within a line
        self.ROW_LINE_BITS = log2_int(self.ROW_PER_LINE)
        # LINE_OFF_BITS is the number of bits for the offset in a cache line
        self.LINE_OFF_BITS = log2_int(self.LINE_SIZE)
        # ROW_OFF_BITS is the number of bits for the offset in a row
        self.ROW_OFF_BITS = log2_int(self.ROW_SIZE)
        # INDEX_BITS is the number of bits to select a cache line
        self.INDEX_BITS = log2_int(self.NUM_LINES)
        # SET_SIZE_BITS is the log base 2 of the set size
        self.SET_SIZE_BITS = self.LINE_OFF_BITS + self.INDEX_BITS
        # TAG_BITS is the number of bits of the tag part of the address
        self.TAG_BITS = self.REAL_ADDR_BITS - self.SET_SIZE_BITS
        # TAG_WIDTH is the width in bits of each way of the tag RAM
        self.TAG_WIDTH = self.TAG_BITS + 7 - ((self.TAG_BITS + 7) % 8)

        # WAY_BITS is the number of bits to select a way
        self.WAY_BITS = log2_int(self.NUM_WAYS)
        self.TAG_RAM_WIDTH = self.TAG_BITS * self.NUM_WAYS

        # L1 ITLB
        self.TL_BITS = log2_int(self.TLB_SIZE)
        self.TLB_EA_TAG_BITS = XLEN - (self.TLB_LG_PGSZ + self.TL_BITS)
        self.TLB_PTE_BITS = XLEN
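
        # worked example with the defaults above (XLEN=64, LINE_SIZE=64,
        # NUM_LINES=64, NUM_WAYS=2, 64-bit wishbone):
        #   ROW_SIZE=8, ROW_PER_LINE=8, BRAM_ROWS=512, INSN_PER_ROW=2
        #   INSN_BITS=1, ROW_BITS=9, ROW_LINE_BITS=3, LINE_OFF_BITS=6
        #   ROW_OFF_BITS=3, INDEX_BITS=6, SET_SIZE_BITS=12
        #   TAG_BITS=56-12=44, TAG_WIDTH=48 (rounded up to a whole byte)
        #   WAY_BITS=1, TAG_RAM_WIDTH=88, TL_BITS=6, TLB_EA_TAG_BITS=46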

        print("self.XLEN =", self.XLEN)
        print("self.BRAM_ROWS =", self.BRAM_ROWS)
        print("self.INDEX_BITS =", self.INDEX_BITS)
        print("self.INSN_BITS =", self.INSN_BITS)
        print("self.INSN_PER_ROW =", self.INSN_PER_ROW)
        print("self.LINE_SIZE =", self.LINE_SIZE)
        print("self.LINE_OFF_BITS =", self.LINE_OFF_BITS)
        print("LOG_LENGTH =", LOG_LENGTH)
        print("self.NUM_LINES =", self.NUM_LINES)
        print("self.NUM_WAYS =", self.NUM_WAYS)
        print("self.REAL_ADDR_BITS =", self.REAL_ADDR_BITS)
        print("self.ROW_BITS =", self.ROW_BITS)
        print("self.ROW_OFF_BITS =", self.ROW_OFF_BITS)
        print("self.ROW_LINE_BITS =", self.ROW_LINE_BITS)
        print("self.ROW_PER_LINE =", self.ROW_PER_LINE)
        print("self.ROW_SIZE =", self.ROW_SIZE)
        print("self.ROW_SIZE_BITS =", self.ROW_SIZE_BITS)
        print("self.SET_SIZE_BITS =", self.SET_SIZE_BITS)
        print("SIM =", SIM)
        print("self.TAG_BITS =", self.TAG_BITS)
        print("self.TAG_RAM_WIDTH =", self.TAG_RAM_WIDTH)
        print("self.TAG_WIDTH =", self.TAG_WIDTH)
        print("self.TL_BITS =", self.TL_BITS)
        print("self.TLB_EA_TAG_BITS =", self.TLB_EA_TAG_BITS)
        print("self.TLB_LG_PGSZ =", self.TLB_LG_PGSZ)
        print("self.TLB_PTE_BITS =", self.TLB_PTE_BITS)
        print("self.TLB_SIZE =", self.TLB_SIZE)
        print("self.WAY_BITS =", self.WAY_BITS)
        print()

        assert self.LINE_SIZE % self.ROW_SIZE == 0
        assert ispow2(self.LINE_SIZE), "self.LINE_SIZE not power of 2"
        assert ispow2(self.NUM_LINES), "self.NUM_LINES not power of 2"
        assert ispow2(self.ROW_PER_LINE), "self.ROW_PER_LINE not power of 2"
        assert ispow2(self.INSN_PER_ROW), "self.INSN_PER_ROW not power of 2"
        assert (self.ROW_BITS == (self.INDEX_BITS + self.ROW_LINE_BITS)), \
            "geometry bits don't add up"
        assert (self.LINE_OFF_BITS ==
                (self.ROW_OFF_BITS + self.ROW_LINE_BITS)), \
            "geometry bits don't add up"
        assert (self.REAL_ADDR_BITS ==
                (self.TAG_BITS + self.INDEX_BITS + self.LINE_OFF_BITS)), \
            "geometry bits don't add up"
        assert (self.REAL_ADDR_BITS ==
                (self.TAG_BITS + self.ROW_BITS + self.ROW_OFF_BITS)), \
            "geometry bits don't add up"

    # Example of layout for 32 lines of 64 bytes:
    #
    # ..  tag    |index|  line  |
    # ..         |   row   |    |
    # ..         |     |   | |00| zero                (2)
    # ..         |     |   |-|  | self.INSN_BITS      (1)
    # ..         |     |---|    | self.ROW_LINE_BITS  (3)
    # ..         |     |--- - --| self.LINE_OFF_BITS  (6)
    # ..         |         |- --| self.ROW_OFF_BITS   (3)
    # ..         |----- ---|    | self.ROW_BITS       (8)
    # ..         |-----|        | self.INDEX_BITS     (5)
    # .. --------|              | self.TAG_BITS       (45)

    # The cache data BRAM organized as described above for each way
    #subtype cache_row_t is std_ulogic_vector(self.ROW_SIZE_BITS-1 downto 0);
    #
    def RowPerLineValidArray(self):
        return Array(Signal(name="rows_valid_%d" % x)
                     for x in range(self.ROW_PER_LINE))

    # TODO to be passed to nmigen as ram attributes
    # attribute ram_style : string;
    # attribute ram_style of cache_tags : signal is "distributed";

    def TLBRecord(self, name):
        tlb_layout = [('tag', self.TLB_EA_TAG_BITS),
                      ('pte', self.TLB_PTE_BITS)
                      ]
        return Record(tlb_layout, name=name)

    def TLBArray(self):
        return Array(self.TLBRecord("tlb%d" % x) for x in range(self.TLB_SIZE))

    # PLRU output interface
    def PLRUOut(self):
        return Array(Signal(self.WAY_BITS, name="plru_out_%d" % x)
                     for x in range(self.NUM_LINES))

    # Return the cache line index (tag index) for an address
    def get_index(self, addr):
        return addr[self.LINE_OFF_BITS:self.SET_SIZE_BITS]

    # Return the cache row index (data memory) for an address
    def get_row(self, addr):
        return addr[self.ROW_OFF_BITS:self.SET_SIZE_BITS]

    # Return the index of a row within a line
    def get_row_of_line(self, row):
        return row[:self.ROW_BITS][:self.ROW_LINE_BITS]
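        # (e.g. ROW_BITS=9, ROW_LINE_BITS=3: the double slice just keeps
        #  the low 3 bits, i.e. the row's position within its line)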

    # Returns whether this is the last row of a line
    def is_last_row_addr(self, addr, last):
        return addr[self.ROW_OFF_BITS:self.LINE_OFF_BITS] == last

    # Returns whether this is the last row of a line
    def is_last_row(self, row, last):
        return self.get_row_of_line(row) == last

    # Return the next row in the current cache line. We use a dedicated
    # function in order to limit the size of the generated adder to be
    # only the bits within a cache line (3 bits with default settings)
    def next_row(self, row):
        row_v = row[0:self.ROW_LINE_BITS] + 1
        return Cat(row_v[:self.ROW_LINE_BITS], row[self.ROW_LINE_BITS:])
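        # (e.g. with ROW_LINE_BITS=3, row 0b101_111 becomes 0b101_000:
        #  only the low 3 bits increment and wrap, the line index above
        #  them is passed through unchanged)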

    # Read the instruction word for the given address
    # in the current cache row
    def read_insn_word(self, addr, data):
        word = addr[2:self.INSN_BITS+2]
        return data.word_select(word, 32)
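        # (with INSN_PER_ROW=2 this is a single bit, addr[2], selecting
        #  the low or high 32-bit instruction word of the 64-bit BRAM row)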

    # Get the tag value from the address
    def get_tag(self, addr):
        return addr[self.SET_SIZE_BITS:self.REAL_ADDR_BITS]

    # Read a tag from a tag memory row
    def read_tag(self, way, tagset):
        return tagset.word_select(way, self.TAG_BITS)

    # Write a tag to tag memory row
    def write_tag(self, way, tagset, tag):
        return self.read_tag(way, tagset).eq(tag)

    # Simple hash for direct-mapped TLB index
    def hash_ea(self, addr):
        hsh = (addr[self.TLB_LG_PGSZ:self.TLB_LG_PGSZ + self.TL_BITS] ^
               addr[self.TLB_LG_PGSZ + self.TL_BITS:
                    self.TLB_LG_PGSZ + 2 * self.TL_BITS] ^
               addr[self.TLB_LG_PGSZ + 2 * self.TL_BITS:
                    self.TLB_LG_PGSZ + 3 * self.TL_BITS])
        return hsh
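        # (XOR-folds three TL_BITS-wide fields of the effective page
        #  number: with TLB_LG_PGSZ=12 and TL_BITS=6 the index is
        #  addr[12:18] ^ addr[18:24] ^ addr[24:30])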


# Cache reload state machine
@unique
class State(Enum):
    IDLE = 0
    CLR_TAG = 1
    WAIT_ACK = 2


class RegInternal(RecordObject):
    def __init__(self, cfg):
        super().__init__()
        # Cache hit state (Latches for 1 cycle BRAM access)
        self.hit_way = Signal(cfg.WAY_BITS)
        self.hit_nia = Signal(64)
        self.hit_smark = Signal()
        self.hit_valid = Signal()

        # Cache miss state (reload state machine)
        self.state = Signal(State, reset=State.IDLE)
        self.wb = WBMasterOut("wb")
        self.req_adr = Signal(64)
        self.store_way = Signal(cfg.WAY_BITS)
        self.store_index = Signal(cfg.INDEX_BITS)
        self.store_row = Signal(cfg.ROW_BITS)
        self.store_tag = Signal(cfg.TAG_BITS)
        self.store_valid = Signal()
        self.end_row_ix = Signal(cfg.ROW_LINE_BITS)
        self.rows_valid = cfg.RowPerLineValidArray()

        # TLB miss state
        self.fetch_failed = Signal()


class ICache(FetchUnitInterface, Elaboratable, ICacheConfig):
    """64 bit set associative icache. All instructions are 4B aligned."""
    def __init__(self, pspec):
        FetchUnitInterface.__init__(self, pspec)
        self.i_in = Fetch1ToICacheType(name="i_in")
        self.i_out = ICacheToDecode1Type(name="i_out")

        self.m_in = MMUToICacheType(name="m_in")

        self.stall_in = Signal()
        self.stall_out = Signal()
        self.flush_in = Signal()
        self.inval_in = Signal()

        # standard naming (wired to non-standard for compatibility)
        self.bus = Interface(addr_width=32,
                             data_width=64,
                             granularity=8,
                             features={'stall'},
                             #alignment=0,
                             name="icache_wb")

        self.log_out = Signal(54)

        # use FetchUnitInterface, helps keep some unit tests running
        self.use_fetch_iface = False

        # check whether the small-cache option is enabled
        self.small_cache = (hasattr(pspec, "small_cache") and
                            (pspec.small_cache == True))
        # check whether microwatt compatibility is enabled
        self.microwatt_compat = (hasattr(pspec, "microwatt_compat") and
                                 (pspec.microwatt_compat == True))

        XLEN = pspec.XLEN
        LINE_SIZE = 64
        TLB_SIZE = 8
        NUM_LINES = 8
        NUM_WAYS = 2
        if self.small_cache:
            # reduce way sizes and num lines to ridiculously small
            NUM_LINES = 2
            NUM_WAYS = 1
            TLB_SIZE = 2
        if self.microwatt_compat:
            # reduce way sizes
            NUM_WAYS = 1

        ICacheConfig.__init__(self, LINE_SIZE=LINE_SIZE,
                              XLEN=XLEN,
                              NUM_LINES=NUM_LINES,
                              NUM_WAYS=NUM_WAYS,
                              TLB_SIZE=TLB_SIZE
                              )

    def use_fetch_interface(self):
        self.use_fetch_iface = True

    # Generate a cache RAM for each way
    def rams(self, m, r, cache_out_row, use_previous,
             replace_way, req_row):

        comb = m.d.comb
        sync = m.d.sync

        bus, stall_in = self.bus, self.stall_in

        # read condition (for every cache ram)
        do_read = Signal()
        comb += do_read.eq(~(stall_in | use_previous))

        rd_addr = Signal(self.ROW_BITS)
        wr_addr = Signal(self.ROW_BITS)
        comb += rd_addr.eq(req_row)
        comb += wr_addr.eq(r.store_row)

        # binary-to-unary converters: replace-way enabled by bus.ack,
        # hit-way left permanently enabled
        m.submodules.replace_way_e = re = Decoder(self.NUM_WAYS)
        m.submodules.hit_way_e = he = Decoder(self.NUM_WAYS)
        comb += re.i.eq(replace_way)
        comb += re.n.eq(~bus.ack)
        comb += he.i.eq(r.hit_way)
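        # (Decoder output is one-hot: e.g. replace_way=1 gives re.o=0b10,
        #  and asserting re.n forces re.o to all-zeros)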

        for i in range(self.NUM_WAYS):
            do_write = Signal(name="do_wr_%d" % i)
            d_out = Signal(self.ROW_SIZE_BITS, name="d_out_%d" % i)
            wr_sel = Signal(self.ROW_SIZE, name="wr_sel_%d" % i)

            way = CacheRam(self.ROW_BITS, self.ROW_SIZE_BITS,
                           TRACE=True, ram_num=i)
            m.submodules["cacheram_%d" % i] = way

            comb += way.rd_en.eq(do_read)
            comb += way.rd_addr.eq(rd_addr)
            comb += d_out.eq(way.rd_data_o)
            comb += way.wr_sel.eq(wr_sel)
            comb += way.wr_addr.eq(wr_addr)
            comb += way.wr_data.eq(bus.dat_r)

            comb += do_write.eq(re.o[i])

            with m.If(do_write):
                sync += Display("cache write adr: %x data: %lx",
                                wr_addr, way.wr_data)

            with m.If(he.o[i]):
                comb += cache_out_row.eq(d_out)
                with m.If(do_read):
                    sync += Display("cache read adr: %x data: %x",
                                    req_row, d_out)

            comb += wr_sel.eq(Repl(do_write, self.ROW_SIZE))

    # Generate PLRUs
    def maybe_plrus(self, m, r, plru_victim):
        comb = m.d.comb

        if self.NUM_WAYS == 0:
            return

        m.submodules.plrus = plru = PLRUs("itag", self.NUM_LINES,
                                          self.WAY_BITS)
        comb += plru.way.eq(r.hit_way)
        comb += plru.valid.eq(r.hit_valid)
        comb += plru.index.eq(self.get_index(r.hit_nia))
        comb += plru.isel.eq(r.store_index)   # select victim
        comb += plru_victim.eq(plru.o_index)  # selected victim

    # TLB hit detection and real address generation
    def itlb_lookup(self, m, tlb_req_index, itlb, itlb_valid,
                    real_addr, ra_valid, eaa_priv,
                    priv_fault, access_ok):

        comb = m.d.comb

        i_in = self.i_in

        # use an *asynchronous* Memory read port here (combinatorial)
        m.submodules.rd_tlb = rd_tlb = self.tlbmem.read_port(domain="comb")
        tlb = self.TLBRecord("tlb_rdport")
        pte, ttag = tlb.pte, tlb.tag

        comb += tlb_req_index.eq(self.hash_ea(i_in.nia))
        comb += rd_tlb.addr.eq(tlb_req_index)
        comb += tlb.eq(rd_tlb.data)

        with m.If(i_in.virt_mode):
            comb += real_addr.eq(Cat(i_in.nia[:self.TLB_LG_PGSZ],
                                     pte[self.TLB_LG_PGSZ:self.REAL_ADDR_BITS]))

            with m.If(ttag == i_in.nia[self.TLB_LG_PGSZ + self.TL_BITS:64]):
                comb += ra_valid.eq(itlb_valid.q.bit_select(tlb_req_index, 1))

            comb += eaa_priv.eq(pte[3])

        with m.Else():
            comb += real_addr.eq(i_in.nia[:self.REAL_ADDR_BITS])
            comb += ra_valid.eq(1)
            comb += eaa_priv.eq(1)

        # No IAMR, so no KUEP support for now
        comb += priv_fault.eq(eaa_priv & ~i_in.priv_mode)
        comb += access_ok.eq(ra_valid & ~priv_fault)

    # iTLB update
    def itlb_update(self, m, itlb, itlb_valid):
        comb = m.d.comb
        sync = m.d.sync

        m_in = self.m_in

        wr_index = Signal(self.TL_BITS)
        wr_unary = Signal(self.TLB_SIZE)
        comb += wr_index.eq(self.hash_ea(m_in.addr))
        comb += wr_unary.eq(1 << wr_index)

        m.submodules.wr_tlb = wr_tlb = self.tlbmem.write_port()
        sync += itlb_valid.s.eq(0)
        sync += itlb_valid.r.eq(0)

        with m.If(m_in.tlbie & m_in.doall):
            # Clear all valid bits
            sync += itlb_valid.r.eq(-1)

        with m.Elif(m_in.tlbie):
            # Clear entry regardless of hit or miss
            sync += itlb_valid.r.eq(wr_unary)

        with m.Elif(m_in.tlbld):
            tlb = self.TLBRecord("tlb_wrport")
            comb += tlb.tag.eq(m_in.addr[self.TLB_LG_PGSZ + self.TL_BITS:64])
            comb += tlb.pte.eq(m_in.pte)
            comb += wr_tlb.en.eq(1)
            comb += wr_tlb.addr.eq(wr_index)
            comb += wr_tlb.data.eq(tlb)
            sync += itlb_valid.s.eq(wr_unary)

    # Cache hit detection, output to fetch2 and other misc logic
    def icache_comb(self, m, use_previous, r, req_index, req_row,
                    req_hit_way, req_tag, real_addr, req_laddr,
                    cache_valids, access_ok,
                    req_is_hit, req_is_miss, replace_way,
                    plru_victim, cache_out_row):

        comb = m.d.comb
        m.submodules.rd_tag = rd_tag = self.tagmem.read_port(domain="comb")

        i_in, i_out, bus = self.i_in, self.i_out, self.bus
        flush_in, stall_out = self.flush_in, self.stall_out

        is_hit = Signal()
        hit_way = Signal(self.WAY_BITS)

        # i_in.sequential means that i_in.nia this cycle is 4 more than
        # last cycle. If we read more than 32 bits at a time, had a
        # cache hit last cycle, and we don't want the first 32-bit chunk,
        # then we can keep the data we read last cycle and just use that.
        with m.If(i_in.nia[2:self.INSN_BITS+2] != 0):
            comb += use_previous.eq(i_in.sequential & r.hit_valid)
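        # (e.g. a sequential fetch of nia 0x04 right after 0x00: bit 2 is
        #  now set, and the 64-bit row fetched for 0x00 already holds the
        #  wanted upper 32-bit word, so no new BRAM read is needed)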

        # Extract line, row and tag from request
        comb += req_index.eq(self.get_index(i_in.nia))
        comb += req_row.eq(self.get_row(i_in.nia))
        comb += req_tag.eq(self.get_tag(real_addr))

        # Calculate address of beginning of cache row, will be
        # used for cache miss processing if needed
        comb += req_laddr.eq(Cat(
                 Const(0, self.ROW_OFF_BITS),
                 real_addr[self.ROW_OFF_BITS:self.REAL_ADDR_BITS]))

        # Test if pending request is a hit on any way
        hitcond = Signal()
        rowvalid = Signal()
        comb += rowvalid.eq(r.rows_valid[req_row % self.ROW_PER_LINE])
        comb += hitcond.eq((r.state == State.WAIT_ACK) &
                           (req_index == r.store_index) &
                           rowvalid)
        # i_in.req asserts Decoder active
        cvb = Signal(self.NUM_WAYS)
        ctag = Signal(self.TAG_RAM_WIDTH)
        comb += rd_tag.addr.eq(req_index)
        comb += ctag.eq(rd_tag.data)
        comb += cvb.eq(cache_valids.q.word_select(req_index, self.NUM_WAYS))
        m.submodules.store_way_e = se = Decoder(self.NUM_WAYS)
        comb += se.i.eq(r.store_way)
        comb += se.n.eq(~i_in.req)
        for i in range(self.NUM_WAYS):
            tagi = Signal(self.TAG_BITS, name="tag_i%d" % i)
            hit_test = Signal(name="hit_test%d" % i)
            is_tag_hit = Signal(name="is_tag_hit_%d" % i)
            comb += tagi.eq(self.read_tag(i, ctag))
            comb += hit_test.eq(se.o[i])
            comb += is_tag_hit.eq((cvb[i] | (hitcond & hit_test)) &
                                  (tagi == req_tag))
            with m.If(is_tag_hit):
                comb += hit_way.eq(i)
                comb += is_hit.eq(1)

        # Generate the "hit" and "miss" signals
        # for the synchronous blocks
        with m.If(i_in.req & access_ok & ~flush_in):
            comb += req_is_hit.eq(is_hit)
            comb += req_is_miss.eq(~is_hit)

        comb += req_hit_way.eq(hit_way)

        # The way to replace on a miss
        with m.If(r.state == State.CLR_TAG):
            comb += replace_way.eq(plru_victim)
        with m.Else():
            comb += replace_way.eq(r.store_way)

        # Output instruction from current cache row
        #
        # Note: This is a mild violation of our design principle of
        # having pipeline stages output from a clean latch. In this
        # case we output the result of a mux. The alternative would
        # be to output an entire row, which I prefer not to do just yet
        # as it would force fetch2 to know about some of the cache
        # geometry information.
        comb += i_out.insn.eq(self.read_insn_word(r.hit_nia, cache_out_row))
        comb += i_out.valid.eq(r.hit_valid)
        comb += i_out.nia.eq(r.hit_nia)
        comb += i_out.stop_mark.eq(r.hit_smark)
        comb += i_out.fetch_failed.eq(r.fetch_failed)

        # Stall fetch1 if we have a miss on cache or TLB
        # or a protection fault
        comb += stall_out.eq(~(is_hit & access_ok))

        # Wishbone requests output (from the cache miss reload machine)
        comb += bus.we.eq(r.wb.we)
        comb += bus.adr.eq(r.wb.adr)
        comb += bus.sel.eq(r.wb.sel)
        comb += bus.stb.eq(r.wb.stb)
        comb += bus.dat_w.eq(r.wb.dat)
        comb += bus.cyc.eq(r.wb.cyc)

    # Cache hit synchronous machine
    def icache_hit(self, m, use_previous, r, req_is_hit, req_hit_way,
                   req_index, req_tag, real_addr):
        sync = m.d.sync

        i_in, stall_in = self.i_in, self.stall_in
        flush_in = self.flush_in

        # keep outputs to fetch2 unchanged on a stall
        # except that flush or reset sets valid to 0
        # If use_previous, keep the same data as last
        # cycle and use the second half
        with m.If(stall_in | use_previous):
            with m.If(flush_in):
                sync += r.hit_valid.eq(0)
        with m.Else():
            # On a hit, latch the request for the next cycle,
            # when the BRAM data will be available on the
            # cache_out output of the corresponding way
            sync += r.hit_valid.eq(req_is_hit)

            with m.If(req_is_hit):
                sync += r.hit_way.eq(req_hit_way)
                sync += Display("cache hit nia:%x IR:%x SM:%x idx:%x tag:%x "
                                "way:%x RA:%x", i_in.nia, i_in.virt_mode,
                                i_in.stop_mark, req_index, req_tag,
                                req_hit_way, real_addr)

        with m.If(~stall_in):
            # Send stop marks and NIA down regardless of validity
            sync += r.hit_smark.eq(i_in.stop_mark)
            sync += r.hit_nia.eq(i_in.nia)

    def icache_miss_idle(self, m, r, req_is_miss, req_laddr,
                         req_index, req_tag, replace_way, real_addr):
        comb = m.d.comb
        sync = m.d.sync

        i_in = self.i_in

        # Reset per-row valid flags, only used in WAIT_ACK
        for i in range(self.ROW_PER_LINE):
            sync += r.rows_valid[i].eq(0)

        # We need to read a cache line
        with m.If(req_is_miss):
            sync += Display(
                "cache miss nia:%x IR:%x SM:%x idx:%x "
                "way:%x tag:%x RA:%x", i_in.nia,
                i_in.virt_mode, i_in.stop_mark, req_index,
                replace_way, req_tag, real_addr)

            # Keep track of our index and way for subsequent stores
            st_row = Signal(self.ROW_BITS)
            comb += st_row.eq(self.get_row(req_laddr))
            sync += r.store_index.eq(req_index)
            sync += r.store_row.eq(st_row)
            sync += r.store_tag.eq(req_tag)
            sync += r.store_valid.eq(1)
            sync += r.end_row_ix.eq(self.get_row_of_line(st_row) - 1)

            # Prep for first wishbone read. We calculate the address
            # of the start of the cache line and start the WB cycle.
            sync += r.req_adr.eq(req_laddr)
            sync += r.wb.cyc.eq(1)
            sync += r.wb.stb.eq(1)

            # Track that we had one request sent
            sync += r.state.eq(State.CLR_TAG)

    def icache_miss_clr_tag(self, m, r, replace_way,
                            req_index,
                            cache_valids):
        comb = m.d.comb
        sync = m.d.sync
        m.submodules.wr_tag = wr_tag = self.tagmem.write_port(
                                            granularity=self.TAG_BITS)

        # Get victim way from plru
        sync += r.store_way.eq(replace_way)

        # Force misses on that way while reloading that line
        # (2D index: 1st dimension is self.NUM_WAYS)
        idx = req_index * self.NUM_WAYS + replace_way
        comb += cache_valids.r.eq(1 << idx)

        # use write-port "granularity" to select the tag to write to
        # TODO: the Memory should be multiplied-up (by NUM_TAGS)
        tagset = Signal(self.TAG_RAM_WIDTH)
        comb += tagset.eq(r.store_tag << (replace_way * self.TAG_BITS))
        comb += wr_tag.en.eq(1 << replace_way)
        comb += wr_tag.addr.eq(r.store_index)
        comb += wr_tag.data.eq(tagset)

        sync += r.state.eq(State.WAIT_ACK)

    def icache_miss_wait_ack(self, m, r, replace_way, inval_in,
                             cache_valids):
        comb = m.d.comb
        sync = m.d.sync

        bus = self.bus

        # If we are still sending requests, was one accepted?
        with m.If(~bus.stall & r.wb.stb):
            # That was the last word? We are done sending. Clear stb
            with m.If(self.is_last_row_addr(r.req_adr, r.end_row_ix)):
                sync += Display("IS_LAST_ROW_ADDR r.wb.addr:%x "
                                "r.end_row_ix:%x r.wb.stb:%x",
                                r.wb.adr, r.end_row_ix, r.wb.stb)
                sync += r.wb.stb.eq(0)

            # Calculate the next row address
            rarange = Signal(self.LINE_OFF_BITS - self.ROW_OFF_BITS)
            comb += rarange.eq(r.req_adr[self.ROW_OFF_BITS:
                                         self.LINE_OFF_BITS] + 1)
            sync += r.req_adr[self.ROW_OFF_BITS:self.LINE_OFF_BITS].eq(rarange)
            sync += Display("RARANGE r.req_adr:%x rarange:%x "
                            "r.wb.stb:%x",
                            r.req_adr, rarange, r.wb.stb)

        # Incoming acks processing
        with m.If(bus.ack):
            sync += Display("WB_IN_ACK data:%x", bus.dat_r)

            sync += r.rows_valid[r.store_row % self.ROW_PER_LINE].eq(1)

            # Check for completion
            with m.If(self.is_last_row(r.store_row, r.end_row_ix)):
                # Complete wishbone cycle
                sync += r.wb.cyc.eq(0)
                # be nice, clear addr
                sync += r.req_adr.eq(0)

                # Cache line is now valid
748 idx = r.store_index*self.NUM_WAYS + replace_way # 2D index again
749 valid = r.store_valid & ~inval_in
750 comb += cache_valids.s.eq(1<<idx)
                sync += r.state.eq(State.IDLE)

            # move on to next request in row
            # Increment store row counter
            sync += r.store_row.eq(self.next_row(r.store_row))

    # Cache miss/reload synchronous machine
    def icache_miss(self, m, r, req_is_miss,
                    req_index, req_laddr, req_tag, replace_way,
                    cache_valids, access_ok, real_addr):
        comb = m.d.comb
        sync = m.d.sync

        i_in, bus, m_in = self.i_in, self.bus, self.m_in
        stall_in, flush_in = self.stall_in, self.flush_in
        inval_in = self.inval_in

        comb += r.wb.sel.eq(-1)
        comb += r.wb.adr.eq(r.req_adr[3:])

        # Process cache invalidations
        with m.If(inval_in):
            comb += cache_valids.r.eq(-1)
            sync += r.store_valid.eq(0)

        # Main state machine
        with m.Switch(r.state):

            with m.Case(State.IDLE):
                self.icache_miss_idle(m, r, req_is_miss, req_laddr,
                                      req_index, req_tag, replace_way,
                                      real_addr)

            with m.Case(State.CLR_TAG, State.WAIT_ACK):
                with m.If(r.state == State.CLR_TAG):
                    self.icache_miss_clr_tag(m, r, replace_way,
                                             req_index,
                                             cache_valids)

                self.icache_miss_wait_ack(m, r, replace_way, inval_in,
                                          cache_valids)

        # TLB miss and protection fault processing
        with m.If(flush_in | m_in.tlbld):
            sync += r.fetch_failed.eq(0)
        with m.Elif(i_in.req & ~access_ok & ~stall_in):
            sync += r.fetch_failed.eq(1)

    # icache_log: if LOG_LENGTH > 0 generate
    def icache_log(self, m, req_hit_way, ra_valid, access_ok,
                   req_is_miss, req_is_hit, lway, wstate, r):
        comb = m.d.comb
        sync = m.d.sync

        bus, i_out = self.bus, self.i_out
        log_out, stall_out = self.log_out, self.stall_out

        # Output data to logger
        for i in range(LOG_LENGTH):
            log_data = Signal(54)
            lway = Signal(self.WAY_BITS)
            wstate = Signal()

            sync += lway.eq(req_hit_way)
            sync += wstate.eq(0)

            with m.If(r.state != State.IDLE):
                sync += wstate.eq(1)

            sync += log_data.eq(Cat(
                ra_valid, access_ok, req_is_miss, req_is_hit,
                lway, wstate, r.hit_nia[2:6], r.fetch_failed,
                stall_out, bus.stall, r.wb.cyc, r.wb.stb,
                r.real_addr[3:6], bus.ack, i_out.insn, i_out.valid
                ))
            comb += log_out.eq(log_data)

    def elaborate(self, platform):

        m = Module()
        comb = m.d.comb

        # Cache-Ways "valid" indicators. This is a 2D Signal, by the
        # number of ways and the number of lines.
        vec = SRLatch(sync=True, llen=self.NUM_WAYS*self.NUM_LINES,
                      name="cachevalids")
        m.submodules.cache_valids = cache_valids = vec
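        # (bit index into the latch is line*NUM_WAYS + way, matching the
        #  "2D index" computed in icache_miss_clr_tag / _wait_ack)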

        # TLB Array
        itlb = self.TLBArray()
        vec = SRLatch(sync=False, llen=self.TLB_SIZE, name="tlbvalids")
        m.submodules.itlb_valids = itlb_valid = vec

        # TODO to be passed to nmigen as ram attributes
        # attribute ram_style of itlb_tags : signal is "distributed";
        # attribute ram_style of itlb_ptes : signal is "distributed";

        # Privilege bit from PTE EAA field
        eaa_priv = Signal()

        r = RegInternal(self)

        # Async signals on incoming request
        req_index = Signal(self.INDEX_BITS)
        req_row = Signal(self.ROW_BITS)
        req_hit_way = Signal(self.WAY_BITS)
        req_tag = Signal(self.TAG_BITS)
        req_is_hit = Signal()
        req_is_miss = Signal()
        req_laddr = Signal(64)

        tlb_req_index = Signal(self.TL_BITS)
        real_addr = Signal(self.REAL_ADDR_BITS)
        ra_valid = Signal()
        priv_fault = Signal()
        access_ok = Signal()
        use_previous = Signal()

        cache_out_row = Signal(self.ROW_SIZE_BITS)

        plru_victim = Signal(self.WAY_BITS)
        replace_way = Signal(self.WAY_BITS)

        self.tlbmem = Memory(depth=self.TLB_SIZE,
                             width=self.TLB_EA_TAG_BITS+self.TLB_PTE_BITS,
                             #attrs={'syn_ramstyle': "block_ram"}
                             )
        self.tagmem = Memory(depth=self.NUM_LINES,
                             width=self.TAG_RAM_WIDTH,
                             #attrs={'syn_ramstyle': "block_ram"}
                             )

        # call sub-functions putting everything together,
        # using shared signals established above
        self.rams(m, r, cache_out_row, use_previous, replace_way, req_row)
        self.maybe_plrus(m, r, plru_victim)
        self.itlb_lookup(m, tlb_req_index, itlb, itlb_valid, real_addr,
                         ra_valid, eaa_priv, priv_fault,
                         access_ok)
        self.itlb_update(m, itlb, itlb_valid)
        self.icache_comb(m, use_previous, r, req_index, req_row, req_hit_way,
                         req_tag, real_addr, req_laddr,
                         cache_valids,
                         access_ok, req_is_hit, req_is_miss,
                         replace_way, plru_victim, cache_out_row)
        self.icache_hit(m, use_previous, r, req_is_hit, req_hit_way,
                        req_index, req_tag, real_addr)
        self.icache_miss(m, r, req_is_miss, req_index,
                         req_laddr, req_tag, replace_way,
                         cache_valids,
                         access_ok, real_addr)
        #self.icache_log(m, log_out, req_hit_way, ra_valid, access_ok,
        #                req_is_miss, req_is_hit, lway, wstate, r)

        # don't connect up to FetchUnitInterface so that some unit tests
        # can continue to operate
        if not self.use_fetch_iface:
            return m

        # connect to FetchUnitInterface. FetchUnitInterface is undocumented
        # so needs checking and iterative revising
        i_in, bus, i_out = self.i_in, self.bus, self.i_out
        comb += i_in.req.eq(self.a_i_valid)
        comb += i_in.nia.eq(self.a_pc_i)
        comb += self.stall_in.eq(self.a_stall_i)
        comb += self.f_fetch_err_o.eq(i_out.fetch_failed)
        comb += self.f_badaddr_o.eq(i_out.nia)
        comb += self.f_instr_o.eq(i_out.insn)
        comb += self.f_busy_o.eq(~i_out.valid)  # probably

        # TODO, connect icache wb_in/wb_out to "standard" nmigen Wishbone bus
        ibus = self.ibus
        comb += ibus.adr.eq(self.bus.adr)
        comb += ibus.dat_w.eq(self.bus.dat_w)
        comb += ibus.sel.eq(self.bus.sel)
        comb += ibus.cyc.eq(self.bus.cyc)
        comb += ibus.stb.eq(self.bus.stb)
        comb += ibus.we.eq(self.bus.we)

        comb += self.bus.dat_r.eq(ibus.dat_r)
        comb += self.bus.ack.eq(ibus.ack)
        if hasattr(ibus, "stall"):
            comb += self.bus.stall.eq(ibus.stall)
        else:
            # fake-up the wishbone stall signal to comply with pipeline mode
            # same thing is done in dcache.py
            comb += self.bus.stall.eq(self.bus.cyc & ~self.bus.ack)

        return m


def icache_sim(dut):
    i_in = dut.i_in
    i_out = dut.i_out
    m_out = dut.m_in

    yield i_in.priv_mode.eq(1)
    yield i_in.req.eq(0)
    yield i_in.nia.eq(0)
    yield i_in.stop_mark.eq(0)
    yield m_out.tlbld.eq(0)
    yield m_out.tlbie.eq(0)
    yield m_out.addr.eq(0)
    yield m_out.pte.eq(0)
    yield
    yield
    yield
    yield

    # miss, stalls for a bit
    yield i_in.req.eq(1)
    yield i_in.nia.eq(Const(0x0000000000000004, 64))
    yield
    valid = yield i_out.valid
    while not valid:
        yield
        valid = yield i_out.valid
    yield i_in.req.eq(0)

    insn = yield i_out.insn
    nia = yield i_out.nia
    assert insn == 0x00000001, \
        "insn @%x=%x expected 00000001" % (nia, insn)
    yield i_in.req.eq(0)
    yield

    # hit
    yield i_in.req.eq(1)
    yield i_in.nia.eq(Const(0x0000000000000008, 64))
    yield
    valid = yield i_out.valid
    while not valid:
        yield
        valid = yield i_out.valid
    yield i_in.req.eq(0)

    nia = yield i_out.nia
    insn = yield i_out.insn
    yield
    assert insn == 0x00000002, \
        "insn @%x=%x expected 00000002" % (nia, insn)

    # another miss
    yield i_in.req.eq(1)
    yield i_in.nia.eq(Const(0x0000000000000040, 64))
    yield
    valid = yield i_out.valid
    while not valid:
        yield
        valid = yield i_out.valid
    yield i_in.req.eq(0)

    nia = yield i_in.nia
    insn = yield i_out.insn
    assert insn == 0x00000010, \
        "insn @%x=%x expected 00000010" % (nia, insn)

    # test something that aliases (this only works because
    # the unit test SRAM is a depth of 512)
    yield i_in.req.eq(1)
    yield i_in.nia.eq(Const(0x0000000000000100, 64))
    yield
    yield
    valid = yield i_out.valid
    assert not valid  # (bitwise ~ on a Python int is always truthy)
    for i in range(30):
        yield
    yield
    insn = yield i_out.insn
    valid = yield i_out.valid
    insn = yield i_out.insn
    assert valid
    assert insn == 0x00000040, \
        "insn @%x=%x expected 00000040" % (nia, insn)
    yield i_in.req.eq(0)


def test_icache(mem):
    from soc.config.test.test_loadstore import TestMemPspec
    pspec = TestMemPspec(addr_wid=32,
                         mask_wid=8,
                         reg_wid=64,
                         XLEN=32,
                         )
    dut = ICache(pspec)

    memory = Memory(width=64, depth=512, init=mem)
    sram = SRAM(memory=memory, granularity=8)

    m = Module()

    m.submodules.icache = dut
    m.submodules.sram = sram

    m.d.comb += sram.bus.cyc.eq(dut.bus.cyc)
    m.d.comb += sram.bus.stb.eq(dut.bus.stb)
    m.d.comb += sram.bus.we.eq(dut.bus.we)
    m.d.comb += sram.bus.sel.eq(dut.bus.sel)
    m.d.comb += sram.bus.adr.eq(dut.bus.adr)
    m.d.comb += sram.bus.dat_w.eq(dut.bus.dat_w)

    m.d.comb += dut.bus.ack.eq(sram.bus.ack)
    m.d.comb += dut.bus.dat_r.eq(sram.bus.dat_r)

    # nmigen Simulation
    sim = Simulator(m)
    sim.add_clock(1e-6)

    sim.add_sync_process(wrap(icache_sim(dut)))
    with sim.write_vcd('test_icache.vcd'):
        sim.run()


if __name__ == '__main__':
    from soc.config.test.test_loadstore import TestMemPspec
    pspec = TestMemPspec(addr_wid=64,
                         mask_wid=8,
                         XLEN=32,
                         reg_wid=64,
                         )
    dut = ICache(pspec)
    vl = rtlil.convert(dut, ports=[])
    with open("test_icache.il", "w") as f:
        f.write(vl)

    # set up memory every 32-bits with incrementing values 0 1 2 ...
    mem = []
    for i in range(512):
        mem.append((i*2) | ((i*2+1) << 32))

    test_icache(mem)