"""ICache

based on Anton Blanchard microwatt icache.vhdl

TODO (in no specific order):
* Add debug interface to inspect cache content
* Add snoop/invalidate path
* Add multi-hit error detection
* Pipelined bus interface (wb or axi)
* Maybe add parity? There are a few bits free in each BRAM row on Xilinx
* Add optimization: service hits on partially loaded lines
* Add optimization: (maybe) interrupt reload on flush/redirect
* Check if playing with the geometry of the cache tags allows for more
  efficient use of distributed RAM and less logic/muxes. Currently we
  write TAG_BITS width which may not match full ram blocks and might
  cause muxes to be inferred for "partial writes".
* Check if making the read size of PLRU a ROM helps utilization

Links:
* https://bugs.libre-soc.org/show_bug.cgi?id=485
* https://libre-soc.org/irclog-microwatt/%23microwatt.2021-12-07.log.html
  (discussion about brams for ECP5)
"""
from enum import Enum, unique
from nmigen import (Module, Signal, Elaboratable, Cat, Array, Const, Repl,
                    Record)
from nmigen.cli import main, rtlil
from nmutil.iocontrol import RecordObject
from nmigen.utils import log2_int
from nmigen.lib.coding import Decoder
from nmutil.util import Display
from nmutil.latch import SRLatch

#from nmutil.plru import PLRU
from soc.experiment.plru import PLRU, PLRUs
from soc.experiment.cache_ram import CacheRam

from soc.experiment.mem_types import (Fetch1ToICacheType,
                                      ICacheToDecode1Type,
                                      MMUToICacheType)

from soc.experiment.wb_types import (WB_ADDR_BITS, WB_DATA_BITS,
                                     WB_SEL_BITS, WBAddrType, WBDataType,
                                     WBSelType, WBMasterOut, WBSlaveOut)

from nmigen_soc.wishbone.bus import Interface
from soc.minerva.units.fetch import FetchUnitInterface

from soc.bus.sram import SRAM
from nmigen import Memory
from nmutil.util import wrap

# NOTE: to use cxxsim, export NMIGEN_SIM_MODE=cxxsim from the shell
# Also, check out the cxxsim nmigen branch, and latest yosys from git
from nmutil.sim_tmp_alternative import Simulator, Settle
# from microwatt/utils.vhdl
def ispow2(n):
    return n != 0 and (n & (n - 1)) == 0
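# (bit trick: a power of two has exactly one set bit; n & (n - 1) clears
#  the lowest set bit, so the result is zero exactly when n is a power
#  of two, e.g. 0b1000 & 0b0111 == 0)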
# Non-zero to enable log data collection
LOG_LENGTH = 0  # (default assumed: logging disabled)
class ICacheConfig:
    def __init__(self, XLEN=64,
                       LINE_SIZE=64,
                       NUM_LINES=64,     # Number of lines in a set
                       NUM_WAYS=2,       # Number of ways
                       TLB_SIZE=64,      # L1 ITLB number of entries
                       TLB_LG_PGSZ=12):  # L1 ITLB log_2(page_size)
        self.XLEN = XLEN
        self.LINE_SIZE = LINE_SIZE
        self.NUM_LINES = NUM_LINES
        self.NUM_WAYS = NUM_WAYS
        self.TLB_SIZE = TLB_SIZE
        self.TLB_LG_PGSZ = TLB_LG_PGSZ
        # BRAM organisation: We never access more than wishbone_data_bits
        # at a time so to save resources we make the array only that wide,
        # and use consecutive indices to make a cache "line"
        #
        # self.ROW_SIZE is the width in bytes of the BRAM
        # (based on WB, so 64-bits)
        self.ROW_SIZE = WB_DATA_BITS // 8
        # Number of real address bits that we store
        self.REAL_ADDR_BITS = XLEN - 8  # 56 for XLEN=64

        self.ROW_SIZE_BITS = self.ROW_SIZE * 8
        # ROW_PER_LINE is the number of row (wishbone) transactions in a line
        self.ROW_PER_LINE = self.LINE_SIZE // self.ROW_SIZE
        # BRAM_ROWS is the number of rows in BRAM
        # needed to represent the full icache
        self.BRAM_ROWS = self.NUM_LINES * self.ROW_PER_LINE
        # INSN_PER_ROW is the number of 32bit instructions per BRAM row
        self.INSN_PER_ROW = self.ROW_SIZE_BITS // 32

        # Bit fields counts in the address
        #
        # INSN_BITS is the number of bits to select an instruction in a row
        self.INSN_BITS = log2_int(self.INSN_PER_ROW)
        # ROW_BITS is the number of bits to select a row
        self.ROW_BITS = log2_int(self.BRAM_ROWS)
        # ROW_LINE_BITS is the number of bits to select a row within a line
        self.ROW_LINE_BITS = log2_int(self.ROW_PER_LINE)
        # LINE_OFF_BITS is the number of bits for the offset in a cache line
        self.LINE_OFF_BITS = log2_int(self.LINE_SIZE)
        # ROW_OFF_BITS is the number of bits for the offset in a row
        self.ROW_OFF_BITS = log2_int(self.ROW_SIZE)
        # INDEX_BITS is the number of bits to select a cache line
        self.INDEX_BITS = log2_int(self.NUM_LINES)
        # SET_SIZE_BITS is the log base 2 of the set size
        self.SET_SIZE_BITS = self.LINE_OFF_BITS + self.INDEX_BITS
        # TAG_BITS is the number of bits of the tag part of the address
        self.TAG_BITS = self.REAL_ADDR_BITS - self.SET_SIZE_BITS
        # TAG_WIDTH is the width in bits of each way of the tag RAM
        self.TAG_WIDTH = self.TAG_BITS + 7 - ((self.TAG_BITS + 7) % 8)

        # WAY_BITS is the number of bits to select a way
        self.WAY_BITS = log2_int(self.NUM_WAYS)
        self.TAG_RAM_WIDTH = self.TAG_BITS * self.NUM_WAYS

        self.TL_BITS = log2_int(self.TLB_SIZE)
        self.TLB_EA_TAG_BITS = XLEN - (self.TLB_LG_PGSZ + self.TL_BITS)
        self.TLB_PTE_BITS = XLEN
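        # Worked example with the defaults above (XLEN=64, LINE_SIZE=64,
        # NUM_LINES=64, NUM_WAYS=2, TLB_SIZE=64, TLB_LG_PGSZ=12):
        #   ROW_SIZE     = 64//8 = 8 bytes    ROW_SIZE_BITS = 64
        #   ROW_PER_LINE = 64//8 = 8 wishbone reads per line
        #   BRAM_ROWS    = 64*8  = 512        INSN_PER_ROW  = 2
        #   INSN_BITS=1  ROW_BITS=9  ROW_LINE_BITS=3
        #   LINE_OFF_BITS=6  ROW_OFF_BITS=3  INDEX_BITS=6
        #   SET_SIZE_BITS=12  TAG_BITS = 56-12 = 44 (TAG_WIDTH rounds to 48,
        #   the next multiple of 8)
        #   TL_BITS=6  TLB_EA_TAG_BITS = 64-(12+6) = 46  TLB_PTE_BITS=64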
137 print("self.XLEN =", self
.XLEN
)
138 print("self.BRAM_ROWS =", self
.BRAM_ROWS
)
139 print("self.INDEX_BITS =", self
.INDEX_BITS
)
140 print("self.INSN_BITS =", self
.INSN_BITS
)
141 print("self.INSN_PER_ROW =", self
.INSN_PER_ROW
)
142 print("self.LINE_SIZE =", self
.LINE_SIZE
)
143 print("self.LINE_OFF_BITS =", self
.LINE_OFF_BITS
)
144 print("LOG_LENGTH =", LOG_LENGTH
)
145 print("self.NUM_LINES =", self
.NUM_LINES
)
146 print("self.NUM_WAYS =", self
.NUM_WAYS
)
147 print("self.REAL_ADDR_BITS =", self
.REAL_ADDR_BITS
)
148 print("self.ROW_BITS =", self
.ROW_BITS
)
149 print("self.ROW_OFF_BITS =", self
.ROW_OFF_BITS
)
150 print("self.ROW_LINE_BITS =", self
.ROW_LINE_BITS
)
151 print("self.ROW_PER_LINE =", self
.ROW_PER_LINE
)
152 print("self.ROW_SIZE =", self
.ROW_SIZE
)
153 print("self.ROW_SIZE_BITS =", self
.ROW_SIZE_BITS
)
154 print("self.SET_SIZE_BITS =", self
.SET_SIZE_BITS
)
156 print("self.TAG_BITS =", self
.TAG_BITS
)
157 print("self.TAG_RAM_WIDTH =", self
.TAG_RAM_WIDTH
)
158 print("self.TAG_BITS =", self
.TAG_BITS
)
159 print("self.TL_BITS =", self
.TL_BITS
)
160 print("self.TLB_EA_TAG_BITS =", self
.TLB_EA_TAG_BITS
)
161 print("self.TLB_LG_PGSZ =", self
.TLB_LG_PGSZ
)
162 print("self.TLB_PTE_BITS =", self
.TLB_PTE_BITS
)
163 print("self.TLB_SIZE =", self
.TLB_SIZE
)
164 print("self.WAY_BITS =", self
.WAY_BITS
)
        assert self.LINE_SIZE % self.ROW_SIZE == 0
        assert ispow2(self.LINE_SIZE), "self.LINE_SIZE not power of 2"
        assert ispow2(self.NUM_LINES), "self.NUM_LINES not power of 2"
        assert ispow2(self.ROW_PER_LINE), "self.ROW_PER_LINE not power of 2"
        assert ispow2(self.INSN_PER_ROW), "self.INSN_PER_ROW not power of 2"
        assert self.ROW_BITS == (self.INDEX_BITS + self.ROW_LINE_BITS), \
            "geometry bits don't add up"
        assert self.LINE_OFF_BITS == \
            (self.ROW_OFF_BITS + self.ROW_LINE_BITS), \
            "geometry bits don't add up"
        assert self.REAL_ADDR_BITS == \
            (self.TAG_BITS + self.INDEX_BITS + self.LINE_OFF_BITS), \
            "geometry bits don't add up"
        assert self.REAL_ADDR_BITS == \
            (self.TAG_BITS + self.ROW_BITS + self.ROW_OFF_BITS), \
            "geometry bits don't add up"
    # Example of layout for 32 lines of 64 bytes:
    #
    # ..  tag    |index|  line  |
    # ..         |     |   | |00| zero                (2)
    # ..         |     |   |-|  | self.INSN_BITS      (1)
    # ..         |     |---|    | self.ROW_LINE_BITS  (3)
    # ..         |     |--- - --| self.LINE_OFF_BITS  (6)
    # ..         |         |- --| self.ROW_OFF_BITS   (3)
    # ..         |----- ---|    | self.ROW_BITS       (8)
    # ..         |-----|        | self.INDEX_BITS     (5)
    # .. --------|              | self.TAG_BITS       (53)

    # The cache data BRAM organized as described above for each way
    #subtype cache_row_t is std_ulogic_vector(self.ROW_SIZE_BITS-1 downto 0);
    def RowPerLineValidArray(self):
        return Array(Signal(name="rows_valid_%d" % x)
                     for x in range(self.ROW_PER_LINE))
    # TODO to be passed to nmigen as ram attributes
    # attribute ram_style : string;
    # attribute ram_style of cache_tags : signal is "distributed";
    def TLBRecord(self, name):
        tlb_layout = [('tag', self.TLB_EA_TAG_BITS),
                      ('pte', self.TLB_PTE_BITS)]
        return Record(tlb_layout, name=name)

    def TLBArray(self):
        return Array(self.TLBRecord("tlb%d" % x)
                     for x in range(self.TLB_SIZE))
    # PLRU output interface
    def PLRUOut(self):
        return Array(Signal(self.WAY_BITS, name="plru_out_%d" % x)
                     for x in range(self.NUM_LINES))
    # Return the cache line index (tag index) for an address
    def get_index(self, addr):
        return addr[self.LINE_OFF_BITS:self.SET_SIZE_BITS]

    # Return the cache row index (data memory) for an address
    def get_row(self, addr):
        return addr[self.ROW_OFF_BITS:self.SET_SIZE_BITS]

    # Return the index of a row within a line
    def get_row_of_line(self, row):
        return row[:self.ROW_BITS][:self.ROW_LINE_BITS]

    # Returns whether this is the last row of a line
    def is_last_row_addr(self, addr, last):
        return addr[self.ROW_OFF_BITS:self.LINE_OFF_BITS] == last

    # Returns whether this is the last row of a line
    def is_last_row(self, row, last):
        return self.get_row_of_line(row) == last
    # Return the next row in the current cache line. We use a dedicated
    # function in order to limit the size of the generated adder to be
    # only the bits within a cache line (3 bits with default settings)
    def next_row(self, row):
        row_v = row[0:self.ROW_LINE_BITS] + 1
        return Cat(row_v[:self.ROW_LINE_BITS], row[self.ROW_LINE_BITS:])
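    # Example with the default ROW_LINE_BITS=3: next_row(0b000101_111)
    # returns 0b000101_000: only the low 3 bits pass through the adder
    # and wrap, while the line-select bits above them are passed through
    # unchanged, keeping the reload inside the current cache line.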
    # Read the instruction word for the given address
    # in the current cache row
    def read_insn_word(self, addr, data):
        word = addr[2:self.INSN_BITS+2]
        return data.word_select(word, 32)
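    # With the default INSN_BITS=1 (two 32-bit instructions per 64-bit
    # row), addr[2:3] picks the word: word_select(word, 32) is equivalent
    # to data[word*32:(word+1)*32].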
    # Get the tag value from the address
    def get_tag(self, addr):
        return addr[self.SET_SIZE_BITS:self.REAL_ADDR_BITS]

    # Read a tag from a tag memory row
    def read_tag(self, way, tagset):
        return tagset.word_select(way, self.TAG_BITS)

    # Write a tag to tag memory row
    def write_tag(self, way, tagset, tag):
        return self.read_tag(way, tagset).eq(tag)
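    # Example with the default geometry (LINE_OFF_BITS=6, SET_SIZE_BITS=12):
    # for real_addr 0x12340, get_index() = (0x12340 >> 6) & 0x3f = 13 and
    # get_tag() = 0x12340 >> 12 = 0x12; read_tag(way, tagset) then slices
    # bits [way*TAG_BITS : (way+1)*TAG_BITS] out of the per-line tag set.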
    # Simple hash for direct-mapped TLB index
    def hash_ea(self, addr):
        hsh = (addr[self.TLB_LG_PGSZ:self.TLB_LG_PGSZ + self.TL_BITS] ^
               addr[self.TLB_LG_PGSZ + self.TL_BITS:
                    self.TLB_LG_PGSZ + 2 * self.TL_BITS] ^
               addr[self.TLB_LG_PGSZ + 2 * self.TL_BITS:
                    self.TLB_LG_PGSZ + 3 * self.TL_BITS])
        return hsh
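    # With the defaults (TLB_LG_PGSZ=12, TL_BITS=6) the TLB index is
    # addr[12:18] ^ addr[18:24] ^ addr[24:30]: three 6-bit fields of the
    # effective page number folded together so that nearby pages spread
    # across the 64 direct-mapped TLB entries.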
# Cache reload state machine
@unique
class State(Enum):
    IDLE     = 0
    CLR_TAG  = 1
    WAIT_ACK = 2
class RegInternal(RecordObject):
    def __init__(self, cfg):
        super().__init__()
        # Cache hit state (Latches for 1 cycle BRAM access)
        self.hit_way = Signal(cfg.WAY_BITS)
        self.hit_nia = Signal(64)
        self.hit_smark = Signal()
        self.hit_valid = Signal()

        # Cache miss state (reload state machine)
        self.state = Signal(State, reset=State.IDLE)
        self.wb = WBMasterOut("wb")
        self.req_adr = Signal(64)
        self.store_way = Signal(cfg.WAY_BITS)
        self.store_index = Signal(cfg.INDEX_BITS)
        self.store_row = Signal(cfg.ROW_BITS)
        self.store_tag = Signal(cfg.TAG_BITS)
        self.store_valid = Signal()
        self.end_row_ix = Signal(cfg.ROW_LINE_BITS)
        self.rows_valid = cfg.RowPerLineValidArray()

        self.fetch_failed = Signal()
class ICache(FetchUnitInterface, Elaboratable, ICacheConfig):
    """64 bit set associative icache. All instructions are 4B aligned."""
    def __init__(self, pspec):
        FetchUnitInterface.__init__(self, pspec)
        self.i_in = Fetch1ToICacheType(name="i_in")
        self.i_out = ICacheToDecode1Type(name="i_out")

        self.m_in = MMUToICacheType(name="m_in")

        self.stall_in = Signal()
        self.stall_out = Signal()
        self.flush_in = Signal()
        self.inval_in = Signal()

        # standard naming (wired to non-standard for compatibility)
        # (remaining Interface parameters assumed: 64-bit data to match
        # WB_DATA_BITS, byte granularity, pipelined 'stall' feature)
        self.bus = Interface(addr_width=32,
                             data_width=64,
                             granularity=8,
                             features={'stall'},
                             name="icache_wb")

        self.log_out = Signal(54)

        # use FetchUnitInterface, helps keep some unit tests running
        self.use_fetch_iface = False
        # test if microwatt compatibility is to be enabled
        self.microwatt_compat = (hasattr(pspec, "microwatt_compat") and
                                 (pspec.microwatt_compat == True))

        XLEN = pspec.XLEN

        if self.microwatt_compat:
            # reduce way sizes and num lines
            # (NUM_LINES/NUM_WAYS values here are assumed: a minimal
            # single-way geometry for microwatt compatibility)
            ICacheConfig.__init__(self, LINE_SIZE=XLEN,
                                        XLEN=XLEN,
                                        NUM_LINES=4,
                                        NUM_WAYS=1,
                                        TLB_SIZE=4  # needs device-tree update
                                 )
        else:
            ICacheConfig.__init__(self, LINE_SIZE=XLEN, XLEN=XLEN)

    def use_fetch_interface(self):
        self.use_fetch_iface = True
    # Generate a cache RAM for each way
    def rams(self, m, r, cache_out_row, use_previous,
             replace_way, req_row):

        comb = m.d.comb
        sync = m.d.sync

        bus, stall_in = self.bus, self.stall_in

        # read condition (for every cache ram)
        do_read = Signal()
        comb += do_read.eq(~(stall_in | use_previous))

        rd_addr = Signal(self.ROW_BITS)
        wr_addr = Signal(self.ROW_BITS)
        comb += rd_addr.eq(req_row)
        comb += wr_addr.eq(r.store_row)
        # binary-to-unary converters: replace-way enabled by bus.ack,
        # hit-way left permanently enabled
        m.submodules.replace_way_e = re = Decoder(self.NUM_WAYS)
        m.submodules.hit_way_e = he = Decoder(self.NUM_WAYS)
        comb += re.i.eq(replace_way)
        comb += re.n.eq(~bus.ack)
        comb += he.i.eq(r.hit_way)
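        # (nmigen's Decoder drives o to the one-hot 1<<i only while its
        # active-low "n" input is 0: the replace-way select is therefore
        # qualified by bus.ack, while the hit-way decoder stays enabled.)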
        for i in range(self.NUM_WAYS):
            do_write = Signal(name="do_wr_%d" % i)
            d_out = Signal(self.ROW_SIZE_BITS, name="d_out_%d" % i)
            wr_sel = Signal(self.ROW_SIZE, name="wr_sel_%d" % i)

            way = CacheRam(self.ROW_BITS, self.ROW_SIZE_BITS,
                           TRACE=True, ram_num=i)
            m.submodules["cacheram_%d" % i] = way

            comb += way.rd_en.eq(do_read)
            comb += way.rd_addr.eq(rd_addr)
            comb += d_out.eq(way.rd_data_o)
            comb += way.wr_sel.eq(wr_sel)
            comb += way.wr_addr.eq(wr_addr)
            comb += way.wr_data.eq(bus.dat_r)

            comb += do_write.eq(re.o[i])

            with m.If(do_write):
                sync += Display("cache write adr: %x data: %lx",
                                wr_addr, way.wr_data)

            with m.If(he.o[i]):
                comb += cache_out_row.eq(d_out)
                with m.If(do_read):
                    sync += Display("cache read adr: %x data: %x",
                                    req_row, d_out)

            comb += wr_sel.eq(Repl(do_write, self.ROW_SIZE))
    def maybe_plrus(self, m, r, plru_victim):
        comb = m.d.comb

        if self.NUM_WAYS == 0:
            return

        m.submodules.plrus = plru = PLRUs(self.NUM_LINES, self.WAY_BITS)
        comb += plru.way.eq(r.hit_way)
        comb += plru.valid.eq(r.hit_valid)
        comb += plru.index.eq(self.get_index(r.hit_nia))
        comb += plru.isel.eq(r.store_index)  # select victim
        comb += plru_victim.eq(plru.o_index)  # selected victim
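        # (PLRUs instantiates one pseudo-LRU tracker per cache line: hits
        # update the tree selected by plru.index, while plru.isel reads out
        # the victim way for the line currently being reloaded.)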
    # TLB hit detection and real address generation
    def itlb_lookup(self, m, tlb_req_index, itlb, itlb_valid,
                    real_addr, ra_valid, eaa_priv,
                    priv_fault, access_ok):

        comb = m.d.comb

        i_in = self.i_in

        # use an *asynchronous* Memory read port here (combinatorial)
        m.submodules.rd_tlb = rd_tlb = self.tlbmem.read_port(domain="comb")
        tlb = self.TLBRecord("tlb_rdport")
        pte, ttag = tlb.pte, tlb.tag

        comb += tlb_req_index.eq(self.hash_ea(i_in.nia))
        comb += rd_tlb.addr.eq(tlb_req_index)
        comb += tlb.eq(rd_tlb.data)

        with m.If(i_in.virt_mode):
            comb += real_addr.eq(Cat(i_in.nia[:self.TLB_LG_PGSZ],
                                     pte[self.TLB_LG_PGSZ:
                                         self.REAL_ADDR_BITS]))

            with m.If(ttag == i_in.nia[self.TLB_LG_PGSZ + self.TL_BITS:64]):
                comb += ra_valid.eq(itlb_valid.q.bit_select(tlb_req_index, 1))

            comb += eaa_priv.eq(pte[3])

        with m.Else():
            comb += real_addr.eq(i_in.nia[:self.REAL_ADDR_BITS])
            comb += ra_valid.eq(1)
            comb += eaa_priv.eq(1)

        # No IAMR, so no KUEP support for now
        comb += priv_fault.eq(eaa_priv & ~i_in.priv_mode)
        comb += access_ok.eq(ra_valid & ~priv_fault)
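        # (in virt_mode the real address is Cat(page offset, PPN): the low
        # TLB_LG_PGSZ bits of the EA pass through untranslated and the
        # bits above them come from the PTE held in the TLB entry.)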
    def itlb_update(self, m, itlb, itlb_valid):
        comb = m.d.comb
        sync = m.d.sync

        m_in = self.m_in

        wr_index = Signal(self.TL_BITS)
        wr_unary = Signal(self.TLB_SIZE)
        comb += wr_index.eq(self.hash_ea(m_in.addr))
        comb += wr_unary.eq(1 << wr_index)

        m.submodules.wr_tlb = wr_tlb = self.tlbmem.write_port()
        sync += itlb_valid.s.eq(0)
        sync += itlb_valid.r.eq(0)

        with m.If(m_in.tlbie & m_in.doall):
            # Clear all valid bits
            sync += itlb_valid.r.eq(-1)

        with m.Elif(m_in.tlbie):
            # Clear entry regardless of hit or miss
            sync += itlb_valid.r.eq(wr_unary)

        with m.Elif(m_in.tlbld):
            tlb = self.TLBRecord("tlb_wrport")
            comb += tlb.tag.eq(m_in.addr[self.TLB_LG_PGSZ + self.TL_BITS:64])
            comb += tlb.pte.eq(m_in.pte)
            comb += wr_tlb.en.eq(1)
            comb += wr_tlb.addr.eq(wr_index)
            comb += wr_tlb.data.eq(tlb)
            sync += itlb_valid.s.eq(wr_unary)
    # Cache hit detection, output to fetch2 and other misc logic
    def icache_comb(self, m, use_previous, r, req_index, req_row,
                    req_hit_way, req_tag, real_addr, req_laddr,
                    cache_valids, access_ok,
                    req_is_hit, req_is_miss, replace_way,
                    plru_victim, cache_out_row):

        comb = m.d.comb
        m.submodules.rd_tag = rd_tag = self.tagmem.read_port(domain="comb")

        i_in, i_out, bus = self.i_in, self.i_out, self.bus
        flush_in, stall_out = self.flush_in, self.stall_out

        is_hit = Signal()
        hit_way = Signal(self.WAY_BITS)

        # i_in.sequential means that i_in.nia this cycle is 4 more than
        # last cycle. If we read more than 32 bits at a time, had a
        # cache hit last cycle, and we don't want the first 32-bit chunk
        # then we can keep the data we read last cycle and just use that.
        with m.If(i_in.nia[2:self.INSN_BITS+2] != 0):
            comb += use_previous.eq(i_in.sequential & r.hit_valid)

        # Extract line, row and tag from request
        comb += req_index.eq(self.get_index(i_in.nia))
        comb += req_row.eq(self.get_row(i_in.nia))
        comb += req_tag.eq(self.get_tag(real_addr))

        # Calculate address of beginning of cache row, will be
        # used for cache miss processing if needed
        comb += req_laddr.eq(Cat(
                 Const(0, self.ROW_OFF_BITS),
                 real_addr[self.ROW_OFF_BITS:self.REAL_ADDR_BITS]))
        # Test if pending request is a hit on any way
        hitcond = Signal()
        rowvalid = Signal()
        comb += rowvalid.eq(r.rows_valid[req_row % self.ROW_PER_LINE])
        comb += hitcond.eq((r.state == State.WAIT_ACK) &
                           (req_index == r.store_index) &
                           rowvalid)

        # i_in.req asserts Decoder active
        cvb = Signal(self.NUM_WAYS)
        ctag = Signal(self.TAG_RAM_WIDTH)
        comb += rd_tag.addr.eq(req_index)
        comb += ctag.eq(rd_tag.data)
        comb += cvb.eq(cache_valids.q.word_select(req_index, self.NUM_WAYS))
        m.submodules.store_way_e = se = Decoder(self.NUM_WAYS)
        comb += se.i.eq(r.store_way)
        comb += se.n.eq(~i_in.req)
        for i in range(self.NUM_WAYS):
            tagi = Signal(self.TAG_BITS, name="tag_i%d" % i)
            hit_test = Signal(name="hit_test%d" % i)
            is_tag_hit = Signal(name="is_tag_hit_%d" % i)
            comb += tagi.eq(self.read_tag(i, ctag))
            comb += hit_test.eq(se.o[i])
            comb += is_tag_hit.eq((cvb[i] | (hitcond & hit_test)) &
                                  (tagi == req_tag))
            with m.If(is_tag_hit):
                comb += hit_way.eq(i)
                comb += is_hit.eq(1)
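        # (hitcond covers hits on a partially-loaded line: a way whose
        # valid bit is still clear can match while we are in WAIT_ACK on
        # the same index, provided the requested row has already arrived.)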
        # Generate the "hit" and "miss" signals
        # for the synchronous blocks
        with m.If(i_in.req & access_ok & ~flush_in):
            comb += req_is_hit.eq(is_hit)
            comb += req_is_miss.eq(~is_hit)

        comb += req_hit_way.eq(hit_way)

        # The way to replace on a miss
        with m.If(r.state == State.CLR_TAG):
            comb += replace_way.eq(plru_victim)
        with m.Else():
            comb += replace_way.eq(r.store_way)
        # Output instruction from current cache row
        #
        # Note: This is a mild violation of our design principle of
        # having pipeline stages output from a clean latch. In this
        # case we output the result of a mux. The alternative would
        # be to output an entire row, which I prefer not to do just yet
        # as it would force fetch2 to know about some of the cache
        # geometry information.
        comb += i_out.insn.eq(self.read_insn_word(r.hit_nia, cache_out_row))
        comb += i_out.valid.eq(r.hit_valid)
        comb += i_out.nia.eq(r.hit_nia)
        comb += i_out.stop_mark.eq(r.hit_smark)
        comb += i_out.fetch_failed.eq(r.fetch_failed)

        # Stall fetch1 if we have a miss on cache or TLB
        # or a protection fault
        comb += stall_out.eq(~(is_hit & access_ok))

        # Wishbone requests output (from the cache miss reload machine)
        comb += bus.we.eq(r.wb.we)
        comb += bus.adr.eq(r.wb.adr)
        comb += bus.sel.eq(r.wb.sel)
        comb += bus.stb.eq(r.wb.stb)
        comb += bus.dat_w.eq(r.wb.dat)
        comb += bus.cyc.eq(r.wb.cyc)
    # Cache hit synchronous machine
    def icache_hit(self, m, use_previous, r, req_is_hit, req_hit_way,
                   req_index, req_tag, real_addr):
        sync = m.d.sync

        i_in, stall_in = self.i_in, self.stall_in
        flush_in = self.flush_in

        # keep outputs to fetch2 unchanged on a stall
        # except that flush or reset sets valid to 0
        # If use_previous, keep the same data as last
        # cycle and use the second half
        with m.If(stall_in | use_previous):
            with m.If(flush_in):
                sync += r.hit_valid.eq(0)
        with m.Else():
            # On a hit, latch the request for the next cycle,
            # when the BRAM data will be available on the
            # cache_out output of the corresponding way
            sync += r.hit_valid.eq(req_is_hit)

            with m.If(req_is_hit):
                sync += r.hit_way.eq(req_hit_way)
                sync += Display("cache hit nia:%x IR:%x SM:%x idx:%x tag:%x "
                                "way:%x RA:%x", i_in.nia, i_in.virt_mode,
                                i_in.stop_mark, req_index, req_tag,
                                req_hit_way, real_addr)

        with m.If(~stall_in):
            # Send stop marks and NIA down regardless of validity
            sync += r.hit_smark.eq(i_in.stop_mark)
            sync += r.hit_nia.eq(i_in.nia)
    def icache_miss_idle(self, m, r, req_is_miss, req_laddr,
                         req_index, req_tag, replace_way, real_addr):
        comb = m.d.comb
        sync = m.d.sync

        i_in = self.i_in

        # Reset per-row valid flags, only used in WAIT_ACK
        for i in range(self.ROW_PER_LINE):
            sync += r.rows_valid[i].eq(0)

        # We need to read a cache line
        with m.If(req_is_miss):
            sync += Display(
                     "cache miss nia:%x IR:%x SM:%x idx:%x "
                     " way:%x tag:%x RA:%x", i_in.nia,
                     i_in.virt_mode, i_in.stop_mark, req_index,
                     replace_way, req_tag, real_addr)

            # Keep track of our index and way for subsequent stores
            st_row = Signal(self.ROW_BITS)
            comb += st_row.eq(self.get_row(req_laddr))
            sync += r.store_index.eq(req_index)
            sync += r.store_row.eq(st_row)
            sync += r.store_tag.eq(req_tag)
            sync += r.store_valid.eq(1)
            sync += r.end_row_ix.eq(self.get_row_of_line(st_row) - 1)

            # Prep for first wishbone read.  We calculate the address
            # of the start of the cache line and start the WB cycle.
            sync += r.req_adr.eq(req_laddr)
            sync += r.wb.cyc.eq(1)
            sync += r.wb.stb.eq(1)

            # Track that we had one request sent
            sync += r.state.eq(State.CLR_TAG)
    def icache_miss_clr_tag(self, m, r, replace_way,
                            req_index, cache_valids):
        comb = m.d.comb
        sync = m.d.sync
        m.submodules.wr_tag = wr_tag = self.tagmem.write_port(
                                            granularity=self.TAG_BITS)

        # Get victim way from plru
        sync += r.store_way.eq(replace_way)

        # Force misses on that way while reloading that line
        idx = req_index*self.NUM_WAYS + replace_way  # 2D index, 1st dim: self.NUM_WAYS
        comb += cache_valids.r.eq(1 << idx)

        # use write-port "granularity" to select the tag to write to
        # TODO: the Memory should be multiplied-up (by NUM_TAGS)
        tagset = Signal(self.TAG_RAM_WIDTH)
        comb += tagset.eq(r.store_tag << (replace_way*self.TAG_BITS))
        comb += wr_tag.en.eq(1 << replace_way)
        comb += wr_tag.addr.eq(r.store_index)
        comb += wr_tag.data.eq(tagset)

        sync += r.state.eq(State.WAIT_ACK)
    def icache_miss_wait_ack(self, m, r, replace_way, inval_in,
                             cache_valids, stbs_done):
        comb = m.d.comb
        sync = m.d.sync

        bus = self.bus

        stbs_zero = Signal()

        # Requests are all sent if stb is 0
        comb += stbs_zero.eq(r.wb.stb == 0)
        comb += stbs_done.eq(stbs_zero)

        # If we are still sending requests, was one accepted?
        with m.If(~bus.stall & ~stbs_zero):
            # That was the last word? We are done sending.
            # Clear stb and set stbs_done so we can handle
            # an eventual last ack on the same cycle.
            with m.If(self.is_last_row_addr(r.req_adr, r.end_row_ix)):
                sync += Display("IS_LAST_ROW_ADDR r.wb.addr:%x "
                                "r.end_row_ix:%x r.wb.stb:%x stbs_zero:%x "
                                "stbs_done:%x", r.wb.adr, r.end_row_ix,
                                r.wb.stb, stbs_zero, stbs_done)
                sync += r.wb.stb.eq(0)
                comb += stbs_done.eq(1)

            # Calculate the next row address
            rarange = Signal(self.LINE_OFF_BITS - self.ROW_OFF_BITS)
            comb += rarange.eq(r.req_adr[self.ROW_OFF_BITS:
                                         self.LINE_OFF_BITS] + 1)
            sync += r.req_adr[self.ROW_OFF_BITS:self.LINE_OFF_BITS].eq(rarange)
            sync += Display("RARANGE r.req_adr:%x rarange:%x "
                            "stbs_zero:%x stbs_done:%x",
                            r.req_adr, rarange, stbs_zero, stbs_done)

        # Incoming acks processing
        with m.If(bus.ack):
            sync += Display("WB_IN_ACK data:%x stbs_zero:%x "
                            "stbs_done:%x",
                            bus.dat_r, stbs_zero, stbs_done)

            sync += r.rows_valid[r.store_row % self.ROW_PER_LINE].eq(1)

            # Check for completion
            with m.If(stbs_done & self.is_last_row(r.store_row,
                                                   r.end_row_ix)):
                # Complete wishbone cycle
                sync += r.wb.cyc.eq(0)
                # be nice, clear addr
                sync += r.req_adr.eq(0)

                # Cache line is now valid (unless the store was
                # invalidated in the meantime)
                idx = r.store_index*self.NUM_WAYS + replace_way  # 2D index again
                valid = r.store_valid & ~inval_in
                with m.If(valid):
                    comb += cache_valids.s.eq(1 << idx)
                sync += r.state.eq(State.IDLE)

            # move on to next request in row
            # Increment store row counter
            sync += r.store_row.eq(self.next_row(r.store_row))
    # Cache miss/reload synchronous machine
    def icache_miss(self, m, r, req_is_miss,
                    req_index, req_laddr, req_tag, replace_way,
                    cache_valids, access_ok, real_addr):
        comb = m.d.comb
        sync = m.d.sync

        i_in, bus, m_in = self.i_in, self.bus, self.m_in
        stall_in, flush_in = self.stall_in, self.flush_in
        inval_in = self.inval_in

        stbs_done = Signal()

        comb += r.wb.sel.eq(-1)
        comb += r.wb.adr.eq(r.req_adr[3:])
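        # (r.req_adr is a byte address; bus.adr on the 64-bit wishbone is
        # word-addressed, so the ROW_OFF_BITS=3 low bits are dropped here.)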
        # Process cache invalidations
        with m.If(inval_in):
            comb += cache_valids.r.eq(-1)
            sync += r.store_valid.eq(0)

        with m.Switch(r.state):

            with m.Case(State.IDLE):
                self.icache_miss_idle(m, r, req_is_miss, req_laddr,
                                      req_index, req_tag, replace_way,
                                      real_addr)

            with m.Case(State.CLR_TAG, State.WAIT_ACK):
                with m.If(r.state == State.CLR_TAG):
                    self.icache_miss_clr_tag(m, r, replace_way,
                                             req_index, cache_valids)

                self.icache_miss_wait_ack(m, r, replace_way, inval_in,
                                          cache_valids, stbs_done)

        # TLB miss and protection fault processing
        with m.If(flush_in | m_in.tlbld):
            sync += r.fetch_failed.eq(0)
        with m.Elif(i_in.req & ~access_ok & ~stall_in):
            sync += r.fetch_failed.eq(1)
    # icache_log: if LOG_LENGTH > 0 generate
    def icache_log(self, m, req_hit_way, ra_valid, access_ok,
                   req_is_miss, req_is_hit, lway, wstate, r):
        comb = m.d.comb
        sync = m.d.sync

        bus, i_out = self.bus, self.i_out
        log_out, stall_out = self.log_out, self.stall_out

        # Output data to logger
        for i in range(LOG_LENGTH):
            log_data = Signal(54)
            lway = Signal(self.WAY_BITS)
            wstate = Signal()

            sync += lway.eq(req_hit_way)
            sync += wstate.eq(0)

            with m.If(r.state != State.IDLE):
                sync += wstate.eq(1)

            sync += log_data.eq(Cat(
                     ra_valid, access_ok, req_is_miss, req_is_hit,
                     lway, wstate, r.hit_nia[2:6], r.fetch_failed,
                     stall_out, bus.stall, r.wb.cyc, r.wb.stb,
                     r.real_addr[3:6], bus.ack, i_out.insn, i_out.valid))
            comb += log_out.eq(log_data)
    def elaborate(self, platform):

        m = Module()
        comb = m.d.comb

        # Cache-Ways "valid" indicators.  This is a 2D Signal, by the
        # number of ways and the number of lines.
        vec = SRLatch(sync=True, llen=self.NUM_WAYS*self.NUM_LINES,
                      name="cachevalids")
        m.submodules.cache_valids = cache_valids = vec

        itlb = self.TLBArray()
        vec = SRLatch(sync=False, llen=self.TLB_SIZE, name="tlbvalids")
        m.submodules.itlb_valids = itlb_valid = vec

        # TODO to be passed to nmigen as ram attributes
        # attribute ram_style of itlb_tags : signal is "distributed";
        # attribute ram_style of itlb_ptes : signal is "distributed";

        # Privilege bit from PTE EAA field
        eaa_priv = Signal()

        r = RegInternal(self)
        # Async signal on incoming request
        req_index = Signal(self.INDEX_BITS)
        req_row = Signal(self.ROW_BITS)
        req_hit_way = Signal(self.WAY_BITS)
        req_tag = Signal(self.TAG_BITS)
        req_is_hit = Signal()
        req_is_miss = Signal()
        req_laddr = Signal(64)

        tlb_req_index = Signal(self.TL_BITS)
        real_addr = Signal(self.REAL_ADDR_BITS)
        ra_valid = Signal()
        priv_fault = Signal()
        access_ok = Signal()

        use_previous = Signal()

        cache_out_row = Signal(self.ROW_SIZE_BITS)

        plru_victim = Signal(self.WAY_BITS)
        replace_way = Signal(self.WAY_BITS)

        self.tlbmem = Memory(depth=self.TLB_SIZE,
                             width=self.TLB_EA_TAG_BITS+self.TLB_PTE_BITS,
                             #attrs={'syn_ramstyle': "block_ram"}
                            )
        self.tagmem = Memory(depth=self.NUM_LINES,
                             width=self.TAG_RAM_WIDTH,
                             #attrs={'syn_ramstyle': "block_ram"}
                            )
        # call sub-functions putting everything together,
        # using shared signals established above
        self.rams(m, r, cache_out_row, use_previous, replace_way, req_row)
        self.maybe_plrus(m, r, plru_victim)
        self.itlb_lookup(m, tlb_req_index, itlb, itlb_valid, real_addr,
                         ra_valid, eaa_priv, priv_fault,
                         access_ok)
        self.itlb_update(m, itlb, itlb_valid)
        self.icache_comb(m, use_previous, r, req_index, req_row, req_hit_way,
                         req_tag, real_addr, req_laddr,
                         cache_valids,
                         access_ok, req_is_hit, req_is_miss,
                         replace_way, plru_victim, cache_out_row)
        self.icache_hit(m, use_previous, r, req_is_hit, req_hit_way,
                        req_index, req_tag, real_addr)
        self.icache_miss(m, r, req_is_miss, req_index,
                         req_laddr, req_tag, replace_way,
                         cache_valids,
                         access_ok, real_addr)
        #self.icache_log(m, log_out, req_hit_way, ra_valid, access_ok,
        #                req_is_miss, req_is_hit, lway, wstate, r)
        # don't connect up to FetchUnitInterface so that some unit tests
        # can continue to operate
        if not self.use_fetch_iface:
            return m

        # connect to FetchUnitInterface. FetchUnitInterface is undocumented
        # so needs checking and iterative revising
        i_in, bus, i_out = self.i_in, self.bus, self.i_out
        comb += i_in.req.eq(self.a_i_valid)
        comb += i_in.nia.eq(self.a_pc_i)
        comb += self.stall_in.eq(self.a_stall_i)
        comb += self.f_fetch_err_o.eq(i_out.fetch_failed)
        comb += self.f_badaddr_o.eq(i_out.nia)
        comb += self.f_instr_o.eq(i_out.insn)
        comb += self.f_busy_o.eq(~i_out.valid)  # probably
        # TODO, connect dcache wb_in/wb_out to "standard" nmigen Wishbone bus
        ibus = self.ibus
        comb += ibus.adr.eq(self.bus.adr)
        comb += ibus.dat_w.eq(self.bus.dat_w)
        comb += ibus.sel.eq(self.bus.sel)
        comb += ibus.cyc.eq(self.bus.cyc)
        comb += ibus.stb.eq(self.bus.stb)
        comb += ibus.we.eq(self.bus.we)

        comb += self.bus.dat_r.eq(ibus.dat_r)
        comb += self.bus.ack.eq(ibus.ack)
        if hasattr(ibus, "stall"):
            comb += self.bus.stall.eq(ibus.stall)
        else:
            # fake-up the wishbone stall signal to comply with pipeline mode
            # same thing is done in dcache.py
            comb += self.bus.stall.eq(self.bus.cyc & ~self.bus.ack)

        return m
def icache_sim(dut):
    i_in = dut.i_in
    i_out = dut.i_out
    m_out = dut.m_in

    yield i_in.priv_mode.eq(1)
    yield i_in.req.eq(0)
    yield i_in.nia.eq(0)
    yield i_in.stop_mark.eq(0)
    yield m_out.tlbld.eq(0)
    yield m_out.tlbie.eq(0)
    yield m_out.addr.eq(0)
    yield m_out.pte.eq(0)
    yield
    yield
    yield
    yield

    # miss, stalls for a bit
    yield i_in.req.eq(1)
    yield i_in.nia.eq(Const(0x0000000000000004, 64))
    yield
    valid = yield i_out.valid
    while not valid:
        yield
        valid = yield i_out.valid

    insn = yield i_out.insn
    nia = yield i_out.nia
    assert insn == 0x00000001, \
        "insn @%x=%x expected 00000001" % (nia, insn)
    yield i_in.req.eq(0)
    yield

    # hit
    yield i_in.req.eq(1)
    yield i_in.nia.eq(Const(0x0000000000000008, 64))
    yield
    valid = yield i_out.valid
    while not valid:
        yield
        valid = yield i_out.valid

    nia = yield i_out.nia
    insn = yield i_out.insn
    yield
    assert insn == 0x00000002, \
        "insn @%x=%x expected 00000002" % (nia, insn)
    yield i_in.req.eq(0)

    # another miss
    yield i_in.req.eq(1)
    yield i_in.nia.eq(Const(0x0000000000000040, 64))
    yield
    valid = yield i_out.valid
    while not valid:
        yield
        valid = yield i_out.valid
    yield i_in.req.eq(0)

    nia = yield i_in.nia
    insn = yield i_out.insn
    assert insn == 0x00000010, \
        "insn @%x=%x expected 00000010" % (nia, insn)

    # test something that aliases (this only works because
    # the unit test SRAM is a depth of 512)
    yield i_in.req.eq(1)
    yield i_in.nia.eq(Const(0x0000000000000100, 64))
    yield
    yield
    valid = yield i_out.valid
    while not valid:
        yield
        valid = yield i_out.valid
    insn = yield i_out.insn
    valid = yield i_out.valid
    insn = yield i_out.insn
    assert valid
    assert insn == 0x00000040, \
        "insn @%x=%x expected 00000040" % (nia, insn)
    yield i_in.req.eq(0)
def test_icache(mem):
    from soc.config.test.test_loadstore import TestMemPspec
    # (pspec values beyond addr_wid are assumed; XLEN=64 matches the
    # 64-bit wishbone data path used by the test SRAM below)
    pspec = TestMemPspec(addr_wid=32,
                         mask_wid=8,
                         reg_wid=64,
                         XLEN=64)
    dut = ICache(pspec)

    memory = Memory(width=64, depth=512, init=mem)
    sram = SRAM(memory=memory, granularity=8)

    m = Module()
    m.submodules.icache = dut
    m.submodules.sram = sram
    m.d.comb += sram.bus.cyc.eq(dut.bus.cyc)
    m.d.comb += sram.bus.stb.eq(dut.bus.stb)
    m.d.comb += sram.bus.we.eq(dut.bus.we)
    m.d.comb += sram.bus.sel.eq(dut.bus.sel)
    m.d.comb += sram.bus.adr.eq(dut.bus.adr)
    m.d.comb += sram.bus.dat_w.eq(dut.bus.dat_w)

    m.d.comb += dut.bus.ack.eq(sram.bus.ack)
    m.d.comb += dut.bus.dat_r.eq(sram.bus.dat_r)

    sim = Simulator(m)
    sim.add_clock(1e-6)

    sim.add_sync_process(wrap(icache_sim(dut)))
    with sim.write_vcd('test_icache.vcd'):
        sim.run()
if __name__ == '__main__':
    from soc.config.test.test_loadstore import TestMemPspec
    # (pspec values beyond addr_wid are assumed, mirroring test_icache)
    pspec = TestMemPspec(addr_wid=64,
                         mask_wid=8,
                         reg_wid=64,
                         XLEN=64)
    dut = ICache(pspec)
    vl = rtlil.convert(dut, ports=[])
    with open("test_icache.il", "w") as f:
        f.write(vl)

    # set up memory every 32-bits with incrementing values 0 1 2 ...
    mem = []
    for i in range(512):
        mem.append((i*2) | ((i*2+1) << 32))

    test_icache(mem)