"""ICache

based on Anton Blanchard microwatt icache.vhdl

TODO (in no specific order):

* Add debug interface to inspect cache content
* Add snoop/invalidate path
* Add multi-hit error detection
* Pipelined bus interface (wb or axi)
* Maybe add parity? There are a few bits free in each BRAM row on Xilinx
* Add optimization: service hits on partially loaded lines
* Add optimization: (maybe) interrupt reload on flush/redirect
* Check if playing with the geometry of the cache tags allows for more
  efficient use of distributed RAM and less logic/muxes. Currently we
  write TAG_BITS width which may not match full ram blocks and might
  cause muxes to be inferred for "partial writes".
* Check if making the read size of the PLRU a ROM helps utilization
"""
from enum import (Enum, unique)

from nmigen import (Module, Signal, Elaboratable, Cat, Array, Const, Repl,
                    Record)
from nmigen.cli import main, rtlil
from nmutil.iocontrol import RecordObject
from nmigen.utils import log2_int
from nmutil.util import Display

#from nmutil.plru import PLRU
from soc.experiment.cache_ram import CacheRam
from soc.experiment.plru import PLRU

from soc.experiment.mem_types import (Fetch1ToICacheType,
                                      ICacheToDecode1Type,
                                      MMUToICacheType)

from soc.experiment.wb_types import (WB_ADDR_BITS, WB_DATA_BITS,
                                     WB_SEL_BITS, WBAddrType, WBDataType,
                                     WBSelType, WBMasterOut, WBSlaveOut)

from nmigen_soc.wishbone.bus import Interface

from soc.bus.sram import SRAM
from nmigen import Memory
from nmutil.util import wrap

# NOTE: to use cxxsim, export NMIGEN_SIM_MODE=cxxsim from the shell
# Also, check out the cxxsim nmigen branch, and latest yosys from git
from nmutil.sim_tmp_alternative import Simulator, Settle
# BRAM organisation: We never access more than wishbone_data_bits
# at a time so to save resources we make the array only that wide,
# and use consecutive indices to make a cache "line"
#
# ROW_SIZE is the width in bytes of the BRAM (based on WB, so 64-bits)
ROW_SIZE = WB_DATA_BITS // 8
# Cache line size in bytes
LINE_SIZE = 64
# Number of lines in a set
NUM_LINES = 16
# Number of ways
NUM_WAYS = 2
# L1 ITLB number of entries (direct mapped)
TLB_SIZE = 64
# L1 ITLB log_2(page_size)
TLB_LG_PGSZ = 12
# PTE width in bits
TLB_PTE_BITS = 64
# Number of real address bits that we store
REAL_ADDR_BITS = 56
# Non-zero to enable log data collection
LOG_LENGTH = 0

ROW_SIZE_BITS = ROW_SIZE * 8
# ROW_PER_LINE is the number of row (wishbone) transactions in a line
ROW_PER_LINE = LINE_SIZE // ROW_SIZE
# BRAM_ROWS is the number of rows in BRAM needed to represent the full icache
BRAM_ROWS = NUM_LINES * ROW_PER_LINE
# INSN_PER_ROW is the number of 32bit instructions per BRAM row
INSN_PER_ROW = ROW_SIZE_BITS // 32
# Bit fields counts in the address
#
# INSN_BITS is the number of bits to select an instruction in a row
INSN_BITS = log2_int(INSN_PER_ROW)
# ROW_BITS is the number of bits to select a row
ROW_BITS = log2_int(BRAM_ROWS)
# ROW_LINE_BITS is the number of bits to select a row within a line
ROW_LINE_BITS = log2_int(ROW_PER_LINE)
# LINE_OFF_BITS is the number of bits for the offset in a cache line
LINE_OFF_BITS = log2_int(LINE_SIZE)
# ROW_OFF_BITS is the number of bits for the offset in a row
ROW_OFF_BITS = log2_int(ROW_SIZE)
# INDEX_BITS is the number of bits to select a cache line
INDEX_BITS = log2_int(NUM_LINES)
# SET_SIZE_BITS is the log base 2 of the set size
SET_SIZE_BITS = LINE_OFF_BITS + INDEX_BITS
# TAG_BITS is the number of bits of the tag part of the address
TAG_BITS = REAL_ADDR_BITS - SET_SIZE_BITS
# TAG_WIDTH is the width in bits of each way of the tag RAM,
# rounded up to a whole number of bytes
TAG_WIDTH = TAG_BITS + 7 - ((TAG_BITS + 7) % 8)

# WAY_BITS is the number of bits to select a way
WAY_BITS = log2_int(NUM_WAYS)
# each way occupies TAG_WIDTH bits of the tag RAM row
# (read_tag() below does a word_select of TAG_WIDTH per way)
TAG_RAM_WIDTH = TAG_WIDTH * NUM_WAYS

# L1 ITLB
TLB_BITS = log2_int(TLB_SIZE)
TLB_EA_TAG_BITS = 64 - (TLB_LG_PGSZ + TLB_BITS)
print("BRAM_ROWS =", BRAM_ROWS)
print("INDEX_BITS =", INDEX_BITS)
print("INSN_BITS =", INSN_BITS)
print("INSN_PER_ROW =", INSN_PER_ROW)
print("LINE_SIZE =", LINE_SIZE)
print("LINE_OFF_BITS =", LINE_OFF_BITS)
print("LOG_LENGTH =", LOG_LENGTH)
print("NUM_LINES =", NUM_LINES)
print("NUM_WAYS =", NUM_WAYS)
print("REAL_ADDR_BITS =", REAL_ADDR_BITS)
print("ROW_BITS =", ROW_BITS)
print("ROW_OFF_BITS =", ROW_OFF_BITS)
print("ROW_LINE_BITS =", ROW_LINE_BITS)
print("ROW_PER_LINE =", ROW_PER_LINE)
print("ROW_SIZE =", ROW_SIZE)
print("ROW_SIZE_BITS =", ROW_SIZE_BITS)
print("SET_SIZE_BITS =", SET_SIZE_BITS)
print("TAG_BITS =", TAG_BITS)
print("TAG_RAM_WIDTH =", TAG_RAM_WIDTH)
print("TAG_WIDTH =", TAG_WIDTH)
print("TLB_BITS =", TLB_BITS)
print("TLB_EA_TAG_BITS =", TLB_EA_TAG_BITS)
print("TLB_LG_PGSZ =", TLB_LG_PGSZ)
print("TLB_PTE_BITS =", TLB_PTE_BITS)
print("TLB_SIZE =", TLB_SIZE)
print("WAY_BITS =", WAY_BITS)
# from microwatt/utils.vhdl
def ispow2(n):
    return n != 0 and (n & (n - 1)) == 0

assert LINE_SIZE % ROW_SIZE == 0
assert ispow2(LINE_SIZE), "LINE_SIZE not power of 2"
assert ispow2(NUM_LINES), "NUM_LINES not power of 2"
assert ispow2(ROW_PER_LINE), "ROW_PER_LINE not power of 2"
assert ispow2(INSN_PER_ROW), "INSN_PER_ROW not power of 2"
assert (ROW_BITS == (INDEX_BITS + ROW_LINE_BITS)), \
    "geometry bits don't add up"
assert (LINE_OFF_BITS == (ROW_OFF_BITS + ROW_LINE_BITS)), \
    "geometry bits don't add up"
assert (REAL_ADDR_BITS == (TAG_BITS + INDEX_BITS + LINE_OFF_BITS)), \
    "geometry bits don't add up"
assert (REAL_ADDR_BITS == (TAG_BITS + ROW_BITS + ROW_OFF_BITS)), \
    "geometry bits don't add up"
# Example of layout for 32 lines of 64 bytes:
#
# ..  tag    |index|  line  |
# ..         |   row   |    |
# ..         |     |   | |00| zero          (2)
# ..         |     |   |-|  | INSN_BITS     (1)
# ..         |     |---|    | ROW_LINE_BITS (3)
# ..         |     |--- - --| LINE_OFF_BITS (6)
# ..         |         |- --| ROW_OFF_BITS  (3)
# ..         |----- ---|    | ROW_BITS      (8)
# ..         |-----|        | INDEX_BITS    (5)
# .. --------|              | TAG_BITS      (53)
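# A worked check of the example above (note: the example assumes a 64-bit
# tagged address and 32 lines, not the NUM_LINES/REAL_ADDR_BITS defaults
# configured earlier): LINE_SIZE=64 gives LINE_OFF_BITS = log2_int(64) = 6
# and ROW_SIZE=8 gives ROW_OFF_BITS = 3, so ROW_LINE_BITS = 6 - 3 = 3;
# 32 lines gives INDEX_BITS = 5, hence ROW_BITS = 5 + 3 = 8 and
# TAG_BITS = 64 - 5 - 6 = 53.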
# The cache data BRAM organized as described above for each way
#subtype cache_row_t is std_ulogic_vector(ROW_SIZE_BITS-1 downto 0);
#
# The cache tags LUTRAM has a row per set. Vivado is a pain and will
# not handle a clean (commented) definition of the cache tags as a 3d
# memory. For now, work around it by putting all the tags
def CacheTagArray():
    return Array(Signal(TAG_RAM_WIDTH, name="cachetag_%d" % x)
                 for x in range(NUM_LINES))

# The cache valid bits
def CacheValidBitsArray():
    return Array(Signal(NUM_WAYS, name="cachevalid_%d" % x)
                 for x in range(NUM_LINES))

def RowPerLineValidArray():
    return Array(Signal(name="rows_valid_%d" % x)
                 for x in range(ROW_PER_LINE))

# TODO to be passed to nmigen as ram attributes
# attribute ram_style : string;
# attribute ram_style of cache_tags : signal is "distributed";
def TLBArray():
    tlb_layout = [('valid', 1),
                  ('tag', TLB_EA_TAG_BITS),
                  ('pte', TLB_PTE_BITS)
                 ]
    return Array(Record(tlb_layout, name="tlb%d" % x)
                 for x in range(TLB_SIZE))
# Cache RAM interface
def CacheRamOut():
    return Array(Signal(ROW_SIZE_BITS, name="cache_out_%d" % x)
                 for x in range(NUM_WAYS))

# PLRU output interface
def PLRUOut():
    return Array(Signal(WAY_BITS, name="plru_out_%d" % x)
                 for x in range(NUM_LINES))
# Return the cache line index (tag index) for an address
def get_index(addr):
    return addr[LINE_OFF_BITS:SET_SIZE_BITS]

# Return the cache row index (data memory) for an address
def get_row(addr):
    return addr[ROW_OFF_BITS:SET_SIZE_BITS]

# Return the index of a row within a line
def get_row_of_line(row):
    return row[:ROW_BITS][:ROW_LINE_BITS]

# Returns whether this is the last row of a line
def is_last_row_addr(addr, last):
    return addr[ROW_OFF_BITS:LINE_OFF_BITS] == last

# Returns whether this is the last row of a line
def is_last_row(row, last):
    return get_row_of_line(row) == last
# Return the next row in the current cache line. We use a dedicated
# function in order to limit the size of the generated adder to be
# only the bits within a cache line (3 bits with default settings)
def next_row(row):
    row_v = row[0:ROW_LINE_BITS] + 1
    return Cat(row_v[:ROW_LINE_BITS], row[ROW_LINE_BITS:])
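# e.g. with ROW_LINE_BITS=3, next_row(0b10111) gives 0b10000: the 3-bit
# adder wraps within the line, and the upper (line-select) bits of the
# row number pass through unchanged.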
# Read the instruction word for the given address
# in the current cache row
def read_insn_word(addr, data):
    word = addr[2:INSN_BITS+2]
    return data.word_select(word, 32)
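# e.g. with 64-bit rows, INSN_PER_ROW=2 and INSN_BITS=1, so addr[2:3]
# picks which of the two 32-bit words in `data` is returned (addr[0:2]
# is ignored: instructions are 4-byte aligned).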
# Get the tag value from the address
def get_tag(addr):
    return addr[SET_SIZE_BITS:REAL_ADDR_BITS]

# Read a tag from a tag memory row
def read_tag(way, tagset):
    return tagset.word_select(way, TAG_WIDTH)[:TAG_BITS]

# Write a tag to tag memory row
def write_tag(way, tagset, tag):
    return read_tag(way, tagset).eq(tag)
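# Tag packing sketch (assuming the REAL_ADDR_BITS=56, NUM_LINES=16 defaults
# above): TAG_BITS=46 rounds up to TAG_WIDTH=48, so way N's tag occupies
# bits [N*48 : N*48+46] of the tag RAM row, and each way starts on a byte
# boundary, which helps LUTRAM inference (see the TODO at the top).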
# Simple hash for direct-mapped TLB index
def hash_ea(addr):
    hsh = (addr[TLB_LG_PGSZ:TLB_LG_PGSZ + TLB_BITS] ^
           addr[TLB_LG_PGSZ + TLB_BITS:TLB_LG_PGSZ + 2 * TLB_BITS] ^
           addr[TLB_LG_PGSZ + 2 * TLB_BITS:TLB_LG_PGSZ + 3 * TLB_BITS])
    return hsh
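# i.e. an XOR-fold of 3*TLB_BITS bits of the effective page number down
# to a TLB_BITS index: with TLB_LG_PGSZ=12 and TLB_BITS=6 this is
# addr[12:18] ^ addr[18:24] ^ addr[24:30].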
# Cache reload state machine
@unique
class State(Enum):
    IDLE     = 0
    CLR_TAG  = 1
    WAIT_ACK = 2
class RegInternal(RecordObject):
    def __init__(self):
        super().__init__()
        # Cache hit state (Latches for 1 cycle BRAM access)
        self.hit_way      = Signal(WAY_BITS)
        self.hit_nia      = Signal(64)
        self.hit_smark    = Signal()
        self.hit_valid    = Signal()

        # Cache miss state (reload state machine)
        self.state        = Signal(State, reset=State.IDLE)
        self.wb           = WBMasterOut("wb")
        self.req_adr      = Signal(64)
        self.store_way    = Signal(WAY_BITS)
        self.store_index  = Signal(INDEX_BITS)
        self.store_row    = Signal(ROW_BITS)
        self.store_tag    = Signal(TAG_BITS)
        self.store_valid  = Signal()
        self.end_row_ix   = Signal(ROW_LINE_BITS)
        self.rows_valid   = RowPerLineValidArray()

        # TLB miss state
        self.fetch_failed = Signal()
class ICache(Elaboratable):
    """64 bit direct mapped icache. All instructions are 4B aligned."""
    def __init__(self):
        self.i_in  = Fetch1ToICacheType(name="i_in")
        self.i_out = ICacheToDecode1Type(name="i_out")

        self.m_in = MMUToICacheType(name="m_in")

        self.stall_in  = Signal()
        self.stall_out = Signal()
        self.flush_in  = Signal()
        self.inval_in  = Signal()

        # standard naming (wired to non-standard for compatibility)
        self.bus = Interface(addr_width=32,
                             data_width=64,
                             granularity=8,
                             features={'stall'},
                             alignment=0,
                             name="icache_wb")

        self.log_out = Signal(54)
    # Generate a cache RAM for each way
    def rams(self, m, r, cache_out_row, use_previous,
             replace_way, req_row):

        comb = m.d.comb
        sync = m.d.sync

        bus, stall_in = self.bus, self.stall_in

        for i in range(NUM_WAYS):
            do_read  = Signal(name="do_rd_%d" % i)
            do_write = Signal(name="do_wr_%d" % i)
            rd_addr  = Signal(ROW_BITS)
            wr_addr  = Signal(ROW_BITS)
            d_out    = Signal(ROW_SIZE_BITS, name="d_out_%d" % i)
            wr_sel   = Signal(ROW_SIZE)

            way = CacheRam(ROW_BITS, ROW_SIZE_BITS, True, ram_num=i)
            setattr(m.submodules, "cacheram_%d" % i, way)

            comb += way.rd_en.eq(do_read)
            comb += way.rd_addr.eq(rd_addr)
            comb += d_out.eq(way.rd_data_o)
            comb += way.wr_sel.eq(wr_sel)
            comb += way.wr_addr.eq(wr_addr)
            comb += way.wr_data.eq(bus.dat_r)

            comb += do_read.eq(~(stall_in | use_previous))
            comb += do_write.eq(bus.ack & (replace_way == i))

            with m.If(do_write):
                sync += Display("cache write adr: %x data: %lx",
                                wr_addr, way.wr_data)

            with m.If(r.hit_way == i):
                comb += cache_out_row.eq(d_out)
                with m.If(do_read):
                    sync += Display("cache read adr: %x data: %x",
                                    req_row, d_out)

            comb += rd_addr.eq(req_row)
            comb += wr_addr.eq(r.store_row)
            comb += wr_sel.eq(Repl(do_write, ROW_SIZE))
    def maybe_plrus(self, m, r, plru_victim):
        comb = m.d.comb

        with m.If(NUM_WAYS > 1):
            for i in range(NUM_LINES):
                plru_acc_i  = Signal(WAY_BITS)
                plru_acc_en = Signal()
                plru        = PLRU(WAY_BITS)
                setattr(m.submodules, "plru_%d" % i, plru)

                comb += plru.acc_i.eq(plru_acc_i)
                comb += plru.acc_en.eq(plru_acc_en)

                # PLRU interface
                with m.If(get_index(r.hit_nia) == i):
                    comb += plru.acc_en.eq(r.hit_valid)

                comb += plru.acc_i.eq(r.hit_way)
                comb += plru_victim[i].eq(plru.lru_o)
    # TLB hit detection and real address generation
    def itlb_lookup(self, m, tlb_req_index, itlb,
                    real_addr, ra_valid, eaa_priv,
                    priv_fault, access_ok):
        comb = m.d.comb

        i_in = self.i_in

        pte  = Signal(TLB_PTE_BITS)
        ttag = Signal(TLB_EA_TAG_BITS)

        comb += tlb_req_index.eq(hash_ea(i_in.nia))
        comb += pte.eq(itlb[tlb_req_index].pte)
        comb += ttag.eq(itlb[tlb_req_index].tag)

        with m.If(i_in.virt_mode):
            comb += real_addr.eq(Cat(i_in.nia[:TLB_LG_PGSZ],
                                     pte[TLB_LG_PGSZ:REAL_ADDR_BITS]))

            with m.If(ttag == i_in.nia[TLB_LG_PGSZ + TLB_BITS:64]):
                comb += ra_valid.eq(itlb[tlb_req_index].valid)

            comb += eaa_priv.eq(pte[3])

        with m.Else():
            comb += real_addr.eq(i_in.nia[:REAL_ADDR_BITS])
            comb += ra_valid.eq(1)
            comb += eaa_priv.eq(1)

        # No IAMR, so no KUEP support for now
        comb += priv_fault.eq(eaa_priv & ~i_in.priv_mode)
        comb += access_ok.eq(ra_valid & ~priv_fault)
    def itlb_update(self, m, itlb):
        comb = m.d.comb
        sync = m.d.sync

        m_in = self.m_in

        wr_index = Signal(TLB_SIZE)
        comb += wr_index.eq(hash_ea(m_in.addr))

        with m.If(m_in.tlbie & m_in.doall):
            # Clear all valid bits
            for i in range(TLB_SIZE):
                sync += itlb[i].valid.eq(0)

        with m.Elif(m_in.tlbie):
            # Clear entry regardless of hit or miss
            sync += itlb[wr_index].valid.eq(0)

        with m.Elif(m_in.tlbld):
            sync += itlb[wr_index].tag.eq(m_in.addr[TLB_LG_PGSZ + TLB_BITS:64])
            sync += itlb[wr_index].pte.eq(m_in.pte)
            sync += itlb[wr_index].valid.eq(1)
    # Cache hit detection, output to fetch2 and other misc logic
    def icache_comb(self, m, use_previous, r, req_index, req_row,
                    req_hit_way, req_tag, real_addr, req_laddr,
                    cache_valid_bits, cache_tags, access_ok,
                    req_is_hit, req_is_miss, replace_way,
                    plru_victim, cache_out_row):
        comb = m.d.comb

        i_in, i_out, bus = self.i_in, self.i_out, self.bus
        flush_in, stall_out = self.flush_in, self.stall_out

        is_hit  = Signal()
        hit_way = Signal(WAY_BITS)

        # i_in.sequential means that i_in.nia this cycle is 4 more than
        # last cycle.  If we read more than 32 bits at a time, had a
        # cache hit last cycle, and we don't want the first 32-bit chunk
        # then we can keep the data we read last cycle and just use that.
        with m.If(i_in.nia[2:INSN_BITS+2] != 0):
            comb += use_previous.eq(i_in.sequential & r.hit_valid)

        # Extract line, row and tag from request
        comb += req_index.eq(get_index(i_in.nia))
        comb += req_row.eq(get_row(i_in.nia))
        comb += req_tag.eq(get_tag(real_addr))

        # Calculate address of beginning of cache row, will be
        # used for cache miss processing if needed
        comb += req_laddr.eq(Cat(Const(0, ROW_OFF_BITS),
                                 real_addr[ROW_OFF_BITS:REAL_ADDR_BITS]))

        # Test if pending request is a hit on any way
        hitcond = Signal()
        comb += hitcond.eq((r.state == State.WAIT_ACK)
                           & (req_index == r.store_index)
                           & r.rows_valid[req_row % ROW_PER_LINE])

        cvb  = Signal(NUM_WAYS)
        ctag = Signal(TAG_RAM_WIDTH)
        comb += ctag.eq(cache_tags[req_index])
        comb += cvb.eq(cache_valid_bits[req_index])
        for i in range(NUM_WAYS):
            tagi = Signal(TAG_BITS, name="tag_i%d" % i)
            comb += tagi.eq(read_tag(i, ctag))
            hit_test = Signal(name="hit_test%d" % i)
            comb += hit_test.eq(i == r.store_way)
            with m.If((cvb[i] | (hitcond & hit_test)) & (tagi == req_tag)):
                comb += hit_way.eq(i)
                comb += is_hit.eq(1)

        # Generate the "hit" and "miss" signals
        # for the synchronous blocks
        with m.If(i_in.req & access_ok & ~flush_in):
            comb += req_is_hit.eq(is_hit)
            comb += req_is_miss.eq(~is_hit)

        comb += req_hit_way.eq(hit_way)

        # The way to replace on a miss
        with m.If(r.state == State.CLR_TAG):
            comb += replace_way.eq(plru_victim[r.store_index])
        with m.Else():
            comb += replace_way.eq(r.store_way)

        # Output instruction from current cache row
        #
        # Note: This is a mild violation of our design principle of
        # having pipeline stages output from a clean latch.  In this
        # case we output the result of a mux.  The alternative would
        # be to output an entire row, which I prefer not to do just yet
        # as it would force fetch2 to know about some of the cache
        # geometry information.
        comb += i_out.insn.eq(read_insn_word(r.hit_nia, cache_out_row))
        comb += i_out.valid.eq(r.hit_valid)
        comb += i_out.nia.eq(r.hit_nia)
        comb += i_out.stop_mark.eq(r.hit_smark)
        comb += i_out.fetch_failed.eq(r.fetch_failed)

        # Stall fetch1 if we have a miss on cache or TLB
        # or a protection fault
        comb += stall_out.eq(~(is_hit & access_ok))

        # Wishbone requests output (from the cache miss reload machine)
        comb += bus.we.eq(r.wb.we)
        comb += bus.adr.eq(r.wb.adr)
        comb += bus.sel.eq(r.wb.sel)
        comb += bus.stb.eq(r.wb.stb)
        comb += bus.dat_w.eq(r.wb.dat)
        comb += bus.cyc.eq(r.wb.cyc)
    # Cache hit synchronous machine
    def icache_hit(self, m, use_previous, r, req_is_hit, req_hit_way,
                   req_index, req_tag, real_addr):
        sync = m.d.sync

        i_in, stall_in = self.i_in, self.stall_in
        flush_in = self.flush_in

        # keep outputs to fetch2 unchanged on a stall
        # except that flush or reset sets valid to 0
        # If use_previous, keep the same data as last
        # cycle and use the second half
        with m.If(stall_in | use_previous):
            with m.If(flush_in):
                sync += r.hit_valid.eq(0)
        with m.Else():
            # On a hit, latch the request for the next cycle,
            # when the BRAM data will be available on the
            # cache_out output of the corresponding way
            sync += r.hit_valid.eq(req_is_hit)

            with m.If(req_is_hit):
                sync += r.hit_way.eq(req_hit_way)
                sync += Display(
                    "cache hit nia:%x IR:%x SM:%x idx:%x tag:%x "
                    "way:%x RA:%x", i_in.nia, i_in.virt_mode,
                    i_in.stop_mark, req_index, req_tag,
                    req_hit_way, real_addr)

        with m.If(~stall_in):
            # Send stop marks and NIA down regardless of validity
            sync += r.hit_smark.eq(i_in.stop_mark)
            sync += r.hit_nia.eq(i_in.nia)
    def icache_miss_idle(self, m, r, req_is_miss, req_laddr,
                         req_index, req_tag, replace_way, real_addr):
        comb = m.d.comb
        sync = m.d.sync

        i_in = self.i_in

        # Reset per-row valid flags, only used in WAIT_ACK
        for i in range(ROW_PER_LINE):
            sync += r.rows_valid[i].eq(0)

        # We need to read a cache line
        with m.If(req_is_miss):
            sync += Display(
                "cache miss nia:%x IR:%x SM:%x idx:%x "
                " way:%x tag:%x RA:%x", i_in.nia,
                i_in.virt_mode, i_in.stop_mark, req_index,
                replace_way, req_tag, real_addr)

            # Keep track of our index and way for subsequent stores
            st_row = Signal(ROW_BITS)
            comb += st_row.eq(get_row(req_laddr))
            sync += r.store_index.eq(req_index)
            sync += r.store_row.eq(st_row)
            sync += r.store_tag.eq(req_tag)
            sync += r.store_valid.eq(1)
            sync += r.end_row_ix.eq(get_row_of_line(st_row) - 1)

            # Prep for first wishbone read.  We calculate the address
            # of the start of the cache line and start the WB cycle.
            sync += r.req_adr.eq(req_laddr)
            sync += r.wb.cyc.eq(1)
            sync += r.wb.stb.eq(1)

            # Track that we had one request sent
            sync += r.state.eq(State.CLR_TAG)
    def icache_miss_clr_tag(self, m, r, replace_way,
                            cache_valid_bits, req_index,
                            tagset, cache_tags):
        comb = m.d.comb
        sync = m.d.sync

        # Get victim way from plru
        sync += r.store_way.eq(replace_way)

        # Force misses on that way while reloading that line
        cv = Signal(INDEX_BITS)
        comb += cv.eq(cache_valid_bits[req_index])
        comb += cv.bit_select(replace_way, 1).eq(0)
        sync += cache_valid_bits[req_index].eq(cv)

        for i in range(NUM_WAYS):
            with m.If(i == replace_way):
                comb += tagset.eq(cache_tags[r.store_index])
                comb += write_tag(i, tagset, r.store_tag)
                sync += cache_tags[r.store_index].eq(tagset)

        sync += r.state.eq(State.WAIT_ACK)
    def icache_miss_wait_ack(self, m, r, replace_way, inval_in,
                             stbs_done, cache_valid_bits):
        comb = m.d.comb
        sync = m.d.sync

        bus = self.bus

        # Requests are all sent if stb is 0
        stbs_zero = Signal()
        comb += stbs_zero.eq(r.wb.stb == 0)
        comb += stbs_done.eq(stbs_zero)

        # If we are still sending requests, was one accepted?
        with m.If(~bus.stall & ~stbs_zero):
            # That was the last word?  We are done sending.
            # Clear stb and set stbs_done so we can handle
            # an eventual last ack on the same cycle.
            with m.If(is_last_row_addr(r.req_adr, r.end_row_ix)):
                sync += Display(
                    "IS_LAST_ROW_ADDR r.wb.addr:%x "
                    "r.end_row_ix:%x r.wb.stb:%x stbs_zero:%x "
                    "stbs_done:%x", r.wb.adr, r.end_row_ix,
                    r.wb.stb, stbs_zero, stbs_done)
                sync += r.wb.stb.eq(0)
                comb += stbs_done.eq(1)

            # Calculate the next row address
            rarange = Signal(LINE_OFF_BITS - ROW_OFF_BITS)
            comb += rarange.eq(r.req_adr[ROW_OFF_BITS:LINE_OFF_BITS] + 1)
            sync += r.req_adr[ROW_OFF_BITS:LINE_OFF_BITS].eq(rarange)
            sync += Display("RARANGE r.req_adr:%x rarange:%x "
                            "stbs_zero:%x stbs_done:%x",
                            r.req_adr, rarange, stbs_zero, stbs_done)

        # Incoming acks processing
        with m.If(bus.ack):
            sync += Display("WB_IN_ACK data:%x stbs_zero:%x "
                            "stbs_done:%x",
                            bus.dat_r, stbs_zero, stbs_done)

            sync += r.rows_valid[r.store_row % ROW_PER_LINE].eq(1)

            # Check for completion
            with m.If(stbs_done & is_last_row(r.store_row, r.end_row_ix)):
                # Complete wishbone cycle
                sync += r.wb.cyc.eq(0)
                # be nice, clear addr
                sync += r.req_adr.eq(0)

                # Cache line is now valid
                cv = Signal(INDEX_BITS)
                comb += cv.eq(cache_valid_bits[r.store_index])
                comb += cv.bit_select(replace_way, 1).eq(
                         r.store_valid & ~inval_in)
                sync += cache_valid_bits[r.store_index].eq(cv)

                sync += r.state.eq(State.IDLE)

            # move on to next request in row
            # Increment store row counter
            sync += r.store_row.eq(next_row(r.store_row))
    # Cache miss/reload synchronous machine
    def icache_miss(self, m, cache_valid_bits, r, req_is_miss,
                    req_index, req_laddr, req_tag, replace_way,
                    cache_tags, access_ok, real_addr):
        comb = m.d.comb
        sync = m.d.sync

        i_in, bus, m_in = self.i_in, self.bus, self.m_in
        stall_in, flush_in = self.stall_in, self.flush_in
        inval_in = self.inval_in

        tagset    = Signal(TAG_RAM_WIDTH)
        stbs_done = Signal()

        comb += r.wb.sel.eq(-1)
        comb += r.wb.adr.eq(r.req_adr[3:])
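        # note: r.req_adr is a byte address while bus.adr is in 64-bit
        # words, hence the [3:] (a divide by 8, matching the data_width=64
        # and granularity=8 of the interface).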
        # Process cache invalidations
        with m.If(inval_in):
            for i in range(NUM_LINES):
                sync += cache_valid_bits[i].eq(0)
            sync += r.store_valid.eq(0)

        # Main state machine
        with m.Switch(r.state):

            with m.Case(State.IDLE):
                self.icache_miss_idle(m, r, req_is_miss, req_laddr,
                                      req_index, req_tag, replace_way,
                                      real_addr)

            with m.Case(State.CLR_TAG, State.WAIT_ACK):
                with m.If(r.state == State.CLR_TAG):
                    self.icache_miss_clr_tag(m, r, replace_way,
                                             cache_valid_bits, req_index,
                                             tagset, cache_tags)

                self.icache_miss_wait_ack(m, r, replace_way, inval_in,
                                          stbs_done, cache_valid_bits)

        # TLB miss and protection fault processing
        with m.If(flush_in | m_in.tlbld):
            sync += r.fetch_failed.eq(0)
        with m.Elif(i_in.req & ~access_ok & ~stall_in):
            sync += r.fetch_failed.eq(1)
    # icache_log: if LOG_LENGTH > 0 generate
    def icache_log(self, m, req_hit_way, ra_valid, access_ok,
                   req_is_miss, req_is_hit, lway, wstate, r):
        comb = m.d.comb
        sync = m.d.sync

        bus, i_out = self.bus, self.i_out
        log_out, stall_out = self.log_out, self.stall_out

        # Output data to logger
        for i in range(LOG_LENGTH):
            log_data = Signal(54)
            lway     = Signal(WAY_BITS)
            wstate   = Signal()

            sync += lway.eq(req_hit_way)
            sync += wstate.eq(0)

            with m.If(r.state != State.IDLE):
                sync += wstate.eq(1)

            sync += log_data.eq(Cat(
                     ra_valid, access_ok, req_is_miss, req_is_hit,
                     lway, wstate, r.hit_nia[2:6], r.fetch_failed,
                     stall_out, bus.stall, r.wb.cyc, r.wb.stb,
                     r.real_addr[3:6], bus.ack, i_out.insn, i_out.valid))
            comb += log_out.eq(log_data)
    def elaborate(self, platform):

        m    = Module()
        comb = m.d.comb

        # Storage. Hopefully "cache_rows" is a BRAM, the rest is LUTs
        cache_tags       = CacheTagArray()
        cache_valid_bits = CacheValidBitsArray()

        itlb = TLBArray()
        # TODO to be passed to nmigen as ram attributes
        # attribute ram_style of itlb_tags : signal is "distributed";
        # attribute ram_style of itlb_ptes : signal is "distributed";

        # Privilege bit from PTE EAA field
        eaa_priv = Signal()

        r = RegInternal()

        # Async signal on incoming request
        req_index   = Signal(INDEX_BITS)
        req_row     = Signal(ROW_BITS)
        req_hit_way = Signal(WAY_BITS)
        req_tag     = Signal(TAG_BITS)
        req_is_hit  = Signal()
        req_is_miss = Signal()
        req_laddr   = Signal(64)

        tlb_req_index = Signal(TLB_SIZE)
        real_addr     = Signal(REAL_ADDR_BITS)
        ra_valid      = Signal()
        priv_fault    = Signal()
        access_ok     = Signal()
        use_previous  = Signal()

        cache_out_row = Signal(ROW_SIZE_BITS)

        plru_victim = PLRUOut()
        replace_way = Signal(WAY_BITS)

        # fake-up the wishbone stall signal to comply with pipeline mode
        # same thing is done in dcache.py
        comb += self.bus.stall.eq(self.bus.cyc & ~self.bus.ack)
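        # i.e. emulate a classic (non-pipelined) slave: a request only
        # counts as accepted in the cycle its ack comes back, so at most
        # one transfer is ever in flight.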
        # call sub-functions putting everything together,
        # using shared signals established above
        self.rams(m, r, cache_out_row, use_previous, replace_way, req_row)
        self.maybe_plrus(m, r, plru_victim)
        self.itlb_lookup(m, tlb_req_index, itlb, real_addr,
                         ra_valid, eaa_priv, priv_fault,
                         access_ok)
        self.itlb_update(m, itlb)
        self.icache_comb(m, use_previous, r, req_index, req_row, req_hit_way,
                         req_tag, real_addr, req_laddr, cache_valid_bits,
                         cache_tags, access_ok, req_is_hit, req_is_miss,
                         replace_way, plru_victim, cache_out_row)
        self.icache_hit(m, use_previous, r, req_is_hit, req_hit_way,
                        req_index, req_tag, real_addr)
        self.icache_miss(m, cache_valid_bits, r, req_is_miss, req_index,
                         req_laddr, req_tag, replace_way, cache_tags,
                         access_ok, real_addr)
        #self.icache_log(m, log_out, req_hit_way, ra_valid, access_ok,
        #                req_is_miss, req_is_hit, lway, wstate, r)

        return m
def icache_sim(dut):
    i_in  = dut.i_in
    i_out = dut.i_out
    m_out = dut.m_in

    yield i_in.priv_mode.eq(1)
    yield i_in.req.eq(0)
    yield i_in.nia.eq(0)
    yield i_in.stop_mark.eq(0)
    yield m_out.tlbld.eq(0)
    yield m_out.tlbie.eq(0)
    yield m_out.addr.eq(0)
    yield m_out.pte.eq(0)
    yield
    yield
    yield
    yield

    # miss, stalls for a bit
    yield i_in.req.eq(1)
    yield i_in.nia.eq(Const(0x0000000000000004, 64))
    yield
    valid = yield i_out.valid
    while not valid:
        yield
        valid = yield i_out.valid
    yield

    insn = yield i_out.insn
    nia  = yield i_out.nia
    assert insn == 0x00000001, \
        "insn @%x=%x expected 00000001" % (nia, insn)
    yield i_in.req.eq(0)
    yield

    # hit
    yield i_in.req.eq(1)
    yield i_in.nia.eq(Const(0x0000000000000008, 64))
    yield
    valid = yield i_out.valid
    while not valid:
        yield
        valid = yield i_out.valid
    yield

    nia  = yield i_out.nia
    insn = yield i_out.insn
    assert insn == 0x00000002, \
        "insn @%x=%x expected 00000002" % (nia, insn)

    # another miss
    yield i_in.req.eq(1)
    yield i_in.nia.eq(Const(0x0000000000000040, 64))
    yield
    valid = yield i_out.valid
    while not valid:
        yield
        valid = yield i_out.valid
    yield

    nia  = yield i_out.nia
    insn = yield i_out.insn
    assert insn == 0x00000010, \
        "insn @%x=%x expected 00000010" % (nia, insn)

    # test something that aliases (this only works because
    # the unit test SRAM is a depth of 512)
    yield i_in.req.eq(1)
    yield i_in.nia.eq(Const(0x0000000000000100, 64))
    yield
    yield
    valid = yield i_out.valid
    assert ~valid
    for i in range(30):
        yield
    yield
    insn  = yield i_out.insn
    valid = yield i_out.valid
    insn  = yield i_out.insn
    assert valid
    assert insn == 0x00000040, \
        "insn @%x=%x expected 00000040" % (nia, insn)

    yield i_in.req.eq(0)
):
938 memory
= Memory(width
=64, depth
=512, init
=mem
)
939 sram
= SRAM(memory
=memory
, granularity
=8)
943 m
.submodules
.icache
= dut
944 m
.submodules
.sram
= sram
946 m
.d
.comb
+= sram
.bus
.cyc
.eq(dut
.bus
.cyc
)
947 m
.d
.comb
+= sram
.bus
.stb
.eq(dut
.bus
.stb
)
948 m
.d
.comb
+= sram
.bus
.we
.eq(dut
.bus
.we
)
949 m
.d
.comb
+= sram
.bus
.sel
.eq(dut
.bus
.sel
)
950 m
.d
.comb
+= sram
.bus
.adr
.eq(dut
.bus
.adr
)
951 m
.d
.comb
+= sram
.bus
.dat_w
.eq(dut
.bus
.dat_w
)
953 m
.d
.comb
+= dut
.bus
.ack
.eq(sram
.bus
.ack
)
954 m
.d
.comb
+= dut
.bus
.dat_r
.eq(sram
.bus
.dat_r
)
960 sim
.add_sync_process(wrap(icache_sim(dut
)))
961 with sim
.write_vcd('test_icache.vcd'):
964 if __name__
== '__main__':
966 vl
= rtlil
.convert(dut
, ports
=[])
967 with
open("test_icache.il", "w") as f
:
970 # set up memory every 32-bits with incrementing values 0 1 2 ...
973 mem
.append((i
*2) |
((i
*2+1)<<32))
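    # Note on the test pattern: with the init above, mem[0] is
    # 0x0000_0001_0000_0000, i.e. instruction 0x00000000 at byte address
    # 0x0 and 0x00000001 at 0x4, which is the value the first assert in
    # icache_sim() expects after the initial miss at nia=0x4.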