based on Anton Blanchard microwatt icache.vhdl

TODO (in no specific order):
* Add debug interface to inspect cache content
* Add snoop/invalidate path
* Add multi-hit error detection
* Pipelined bus interface (wb or axi)
* Maybe add parity? There are a few bits free in each BRAM row on Xilinx
* Add optimization: service hits on partially loaded lines
* Add optimization: (maybe) interrupt reload on flush/redirect
* Check if playing with the geometry of the cache tags allows for more
  efficient use of distributed RAM and less logic/muxes. Currently we
  write TAG_BITS width which may not match full ram blocks and might
  cause muxes to be inferred for "partial writes".
* Check if making the read size of PLRU a ROM helps utilization
from enum import Enum, unique
from nmigen import (Module, Signal, Elaboratable, Cat, Array, Const)
from nmigen.cli import main
from nmigen.cli import rtlil
from nmutil.iocontrol import RecordObject
from nmutil.byterev import byte_reverse
from nmutil.mask import Mask
from nmigen.utils import log2_int
from nmutil.util import Display

from soc.experiment.mem_types import (Fetch1ToICacheType,
                                      ICacheToDecode1Type,
                                      MMUToICacheType)

from soc.experiment.wb_types import (WB_ADDR_BITS, WB_DATA_BITS,
                                     WB_SEL_BITS, WBAddrType, WBDataType,
                                     WBSelType, WBMasterOut, WBSlaveOut,
                                     WBMasterOutVector, WBSlaveOutVector,
                                     WBIOMasterOut, WBIOSlaveOut)

from soc.experiment.cache_ram import CacheRam
from soc.experiment.plru import PLRU

from nmigen_soc.wishbone.sram import SRAM
from nmigen import Memory
from nmigen.cli import rtlil

from nmigen.back.pysim import Simulator, Delay, Settle
from nmigen.sim.cxxsim import Simulator, Delay, Settle
from nmutil.util import wrap
# BRAM organisation: We never access more than wishbone_data_bits
# at a time so to save resources we make the array only that wide,
# and use consecutive indices to make a cache "line"
#
# ROW_SIZE is the width in bytes of the BRAM (based on WB, so 64-bits)
ROW_SIZE = WB_DATA_BITS // 8
# Number of lines in a set
# L1 ITLB number of entries (direct mapped)
# L1 ITLB log_2(page_size)
# Number of real address bits that we store
# Non-zero to enable log data collection

ROW_SIZE_BITS = ROW_SIZE * 8
# ROW_PER_LINE is the number of row
# (wishbone) transactions in a line
ROW_PER_LINE = LINE_SIZE // ROW_SIZE
# BRAM_ROWS is the number of rows in
# BRAM needed to represent the full icache
BRAM_ROWS = NUM_LINES * ROW_PER_LINE
# INSN_PER_ROW is the number of 32bit
# instructions per BRAM row
INSN_PER_ROW = ROW_SIZE_BITS // 32
# Bit fields counts in the address
#
# INSN_BITS is the number of bits to
# select an instruction in a row
INSN_BITS = log2_int(INSN_PER_ROW)
# ROW_BITS is the number of bits to
# select a row
ROW_BITS = log2_int(BRAM_ROWS)
# ROW_LINE_BITS is the number of bits to
# select a row within a line
ROW_LINE_BITS = log2_int(ROW_PER_LINE)
# LINE_OFF_BITS is the number of bits for
# the offset in a cache line
LINE_OFF_BITS = log2_int(LINE_SIZE)
# ROW_OFF_BITS is the number of bits for
# the offset in a row
ROW_OFF_BITS = log2_int(ROW_SIZE)
# INDEX_BITS is the number of bits to
# select a cache line
INDEX_BITS = log2_int(NUM_LINES)
# SET_SIZE_BITS is the log base 2 of
# the set size
SET_SIZE_BITS = LINE_OFF_BITS + INDEX_BITS
# TAG_BITS is the number of bits of
# the tag part of the address
TAG_BITS = REAL_ADDR_BITS - SET_SIZE_BITS
# WAY_BITS is the number of bits to
# select a way
WAY_BITS = log2_int(NUM_WAYS)
TAG_RAM_WIDTH = TAG_BITS * NUM_WAYS
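# Worked example (not in the original): with the microwatt-style defaults
# used elsewhere in this file (LINE_SIZE=64, NUM_LINES=32, 64-bit wishbone
# data), the constants above come out as:
#   ROW_SIZE      = 8    bytes per BRAM row
#   ROW_SIZE_BITS = 64   bits per BRAM row
#   ROW_PER_LINE  = 8    wishbone transactions per line
#   BRAM_ROWS     = 256  rows of BRAM
#   INSN_PER_ROW  = 2    32-bit instructions per row
#   INSN_BITS     = 1,  ROW_BITS = 8,  ROW_LINE_BITS = 3
#   LINE_OFF_BITS = 6,  ROW_OFF_BITS = 3,  INDEX_BITS = 5
#   SET_SIZE_BITS = 11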
# constant TLB_BITS : natural := log2(TLB_SIZE);
# constant TLB_EA_TAG_BITS : natural := 64 - (TLB_LG_PGSZ + TLB_BITS);
# constant TLB_PTE_BITS : natural := 64;
TLB_BITS        = log2_int(TLB_SIZE)
TLB_EA_TAG_BITS = 64 - (TLB_LG_PGSZ + TLB_BITS)
TLB_PTE_BITS    = 64
# architecture rtl of icache is
#constant ROW_SIZE_BITS : natural := ROW_SIZE*8;
#-- ROW_PER_LINE is the number of row (wishbone
#-- transactions) in a line
#constant ROW_PER_LINE : natural := LINE_SIZE / ROW_SIZE;
#-- BRAM_ROWS is the number of rows in BRAM
#-- needed to represent the full
#constant BRAM_ROWS : natural := NUM_LINES * ROW_PER_LINE;
#-- INSN_PER_ROW is the number of 32bit instructions per BRAM row
#constant INSN_PER_ROW : natural := ROW_SIZE_BITS / 32;
#-- Bit fields counts in the address
#-- INSN_BITS is the number of bits to select
#-- an instruction in a row
#constant INSN_BITS : natural := log2(INSN_PER_ROW);
#-- ROW_BITS is the number of bits to select a row
#constant ROW_BITS : natural := log2(BRAM_ROWS);
#-- ROW_LINEBITS is the number of bits to
#-- select a row within a line
#constant ROW_LINEBITS : natural := log2(ROW_PER_LINE);
#-- LINE_OFF_BITS is the number of bits for the offset
#constant LINE_OFF_BITS : natural := log2(LINE_SIZE);
#-- ROW_OFF_BITS is the number of bits for the offset in a row
#constant ROW_OFF_BITS : natural := log2(ROW_SIZE);
#-- INDEX_BITS is the number of bits to select a cache line
#constant INDEX_BITS : natural := log2(NUM_LINES);
#-- SET_SIZE_BITS is the log base 2 of the set size
#constant SET_SIZE_BITS : natural := LINE_OFF_BITS + INDEX_BITS;
#-- TAG_BITS is the number of bits of the tag part of the address
#constant TAG_BITS : natural := REAL_ADDR_BITS - SET_SIZE_BITS;
#-- WAY_BITS is the number of bits to select a way
#constant WAY_BITS : natural := log2(NUM_WAYS);

#-- Example of layout for 32 lines of 64 bytes:
#-- ..  tag    |index| line  |
#-- ..         |     |   |00| | zero          (2)
#-- ..         |     |   |-|  | INSN_BITS     (1)
#-- ..         |     |---|    | ROW_LINEBITS  (3)
#-- ..         |     |--- - --| LINE_OFF_BITS (6)
#-- ..         |         |- --| ROW_OFF_BITS  (3)
#-- ..         |----- ---|    | ROW_BITS      (8)
#-- ..         |-----|        | INDEX_BITS    (5)
#-- .. --------|              | TAG_BITS      (53)

# Example of layout for 32 lines of 64 bytes:
# ..  tag    |index| line  |
# ..         |     |   |00| | zero          (2)
# ..         |     |   |-|  | INSN_BITS     (1)
# ..         |     |---|    | ROW_LINEBITS  (3)
# ..         |     |--- - --| LINE_OFF_BITS (6)
# ..         |         |- --| ROW_OFF_BITS  (3)
# ..         |----- ---|    | ROW_BITS      (8)
# ..         |-----|        | INDEX_BITS    (5)
# .. --------|              | TAG_BITS      (53)
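# Putting the diagram above in terms of the slice helpers defined further
# down (values for the example geometry; a clarifying note, not in the
# original): for a fetch address `nia`,
#   get_tag(real_addr) -> real_addr[11:REAL_ADDR_BITS]  (the tag)
#   get_index(nia)     -> nia[6:11]                     (the set index)
#   get_row(nia)       -> nia[3:11]                     (the BRAM row)
#   nia[2]              selects one of the two instructions in a row
#   nia[0:2]            is always zero (instructions are 4B aligned)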
# subtype row_t is integer range 0 to BRAM_ROWS-1;
# subtype index_t is integer range 0 to NUM_LINES-1;
# subtype way_t is integer range 0 to NUM_WAYS-1;
# subtype row_in_line_t is unsigned(ROW_LINEBITS-1 downto 0);
#
# -- The cache data BRAM organized as described above for each way
# subtype cache_row_t is std_ulogic_vector(ROW_SIZE_BITS-1 downto 0);
#
# -- The cache tags LUTRAM has a row per set. Vivado is a pain and will
# -- not handle a clean (commented) definition of the cache tags as a 3d
# -- memory. For now, work around it by putting all the tags
# subtype cache_tag_t is std_logic_vector(TAG_BITS-1 downto 0);
# type cache_tags_set_t is array(way_t) of cache_tag_t;
# type cache_tags_array_t is array(index_t) of cache_tags_set_t;
# constant TAG_RAM_WIDTH : natural := TAG_BITS * NUM_WAYS;
# subtype cache_tags_set_t is std_logic_vector(TAG_RAM_WIDTH-1 downto 0);
# type cache_tags_array_t is array(index_t) of cache_tags_set_t;
def CacheTagArray():
    return Array(Signal(TAG_RAM_WIDTH) for x in range(NUM_LINES))

# -- The cache valid bits
# subtype cache_way_valids_t is std_ulogic_vector(NUM_WAYS-1 downto 0);
# type cache_valids_t is array(index_t) of cache_way_valids_t;
# type row_per_line_valid_t is array(0 to ROW_PER_LINE - 1) of std_ulogic;
def CacheValidBitsArray():
    return Array(Signal(NUM_WAYS) for x in range(NUM_LINES))

def RowPerLineValidArray():
    return Array(Signal() for x in range(ROW_PER_LINE))

# attribute ram_style : string;
# attribute ram_style of cache_tags : signal is "distributed";
# TODO to be passed to nmigen as ram attributes
# attribute ram_style : string;
# attribute ram_style of cache_tags : signal is "distributed";

# subtype tlb_index_t is integer range 0 to TLB_SIZE - 1;
# type tlb_valids_t is array(tlb_index_t) of std_ulogic;
# subtype tlb_tag_t is std_ulogic_vector(TLB_EA_TAG_BITS - 1 downto 0);
# type tlb_tags_t is array(tlb_index_t) of tlb_tag_t;
# subtype tlb_pte_t is std_ulogic_vector(TLB_PTE_BITS - 1 downto 0);
# type tlb_ptes_t is array(tlb_index_t) of tlb_pte_t;
def TLBValidBitsArray():
    return Array(Signal() for x in range(TLB_SIZE))

def TLBTagArray():
    return Array(Signal(TLB_EA_TAG_BITS) for x in range(TLB_SIZE))

def TLBPTEArray():
    return Array(Signal(TLB_PTE_BITS) for x in range(TLB_SIZE))

# -- Cache RAM interface
# type cache_ram_out_t is array(way_t) of cache_row_t;
# Cache RAM interface
def CacheRamOut():
    return Array(Signal(ROW_SIZE_BITS) for x in range(NUM_WAYS))

# -- PLRU output interface
# type plru_out_t is array(index_t) of
#     std_ulogic_vector(WAY_BITS-1 downto 0);
# PLRU output interface
def PLRUOut():
    return Array(Signal(WAY_BITS) for x in range(NUM_LINES))
# -- Return the cache line index (tag index) for an address
# function get_index(addr: std_ulogic_vector(63 downto 0))
#     return to_integer(unsigned(
#         addr(SET_SIZE_BITS - 1 downto LINE_OFF_BITS)
# Return the cache line index (tag index) for an address
def get_index(addr):
    return addr[LINE_OFF_BITS:SET_SIZE_BITS]

# -- Return the cache row index (data memory) for an address
# function get_row(addr: std_ulogic_vector(63 downto 0))
#     return to_integer(unsigned(
#         addr(SET_SIZE_BITS - 1 downto ROW_OFF_BITS)
# Return the cache row index (data memory) for an address
def get_row(addr):
    return addr[ROW_OFF_BITS:SET_SIZE_BITS]

# -- Return the index of a row within a line
# function get_row_of_line(row: row_t) return row_in_line_t is
#     variable row_v : unsigned(ROW_BITS-1 downto 0);
#     row_v := to_unsigned(row, ROW_BITS);
#     return row_v(ROW_LINEBITS-1 downto 0);
# Return the index of a row within a line
def get_row_of_line(row):
    return row[:ROW_LINE_BITS]
# -- Returns whether this is the last row of a line
# function is_last_row_addr(addr: wishbone_addr_type;
#     last: row_in_line_t
#     addr(LINE_OFF_BITS-1 downto ROW_OFF_BITS)
# Returns whether this is the last row of a line
def is_last_row_addr(addr, last):
    return addr[ROW_OFF_BITS:LINE_OFF_BITS] == last

# -- Returns whether this is the last row of a line
# function is_last_row(row: row_t;
#     last: row_in_line_t) return boolean is
#     return get_row_of_line(row) = last;
# Returns whether this is the last row of a line
def is_last_row(row, last):
    return get_row_of_line(row) == last
# -- Return the address of the next row in the current cache line
# function next_row_addr(addr: wishbone_addr_type)
#     return std_ulogic_vector is
#     variable row_idx : std_ulogic_vector(ROW_LINEBITS-1 downto 0);
#     variable result : wishbone_addr_type;
#     -- Is there no simpler way in VHDL to generate that 3 bits adder ?
#     row_idx := addr(LINE_OFF_BITS-1 downto ROW_OFF_BITS);
#     row_idx := std_ulogic_vector(unsigned(row_idx) + 1);
#     result(LINE_OFF_BITS-1 downto ROW_OFF_BITS) := row_idx;
# Return the address of the next row in the current cache line
def next_row_addr(addr):
    # TODO no idea what's going on here, looks like double assignments
    # overriding earlier assignments ??? Help please!
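    # A sketch of the intent (not necessarily the original body): the VHDL
    # above takes the row-within-line field of the wishbone address, adds
    # one to it, and splices it back, leaving all other address bits
    # unchanged.  In nmigen terms that is roughly:
    row_idx = addr[ROW_OFF_BITS:LINE_OFF_BITS] + 1
    return Cat(addr[:ROW_OFF_BITS],
               row_idx[:ROW_LINE_BITS],
               addr[LINE_OFF_BITS:])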
# -- Return the next row in the current cache line. We use a dedicated
# -- function in order to limit the size of the generated adder to be
# -- only the bits within a cache line (3 bits with default settings)
# function next_row(row: row_t) return row_t is
#     variable row_v : std_ulogic_vector(ROW_BITS-1 downto 0);
#     variable row_idx : std_ulogic_vector(ROW_LINEBITS-1 downto 0);
#     variable result : std_ulogic_vector(ROW_BITS-1 downto 0);
#     row_v := std_ulogic_vector(to_unsigned(row, ROW_BITS));
#     row_idx := row_v(ROW_LINEBITS-1 downto 0);
#     row_v(ROW_LINEBITS-1 downto 0) :=
#         std_ulogic_vector(unsigned(row_idx) + 1);
#     return to_integer(unsigned(row_v));
# Return the next row in the current cache line. We use a dedicated
# function in order to limit the size of the generated adder to be
# only the bits within a cache line (3 bits with default settings)
def next_row(row):
    # TODO no idea what's going on here, looks like double assignments
    # overriding earlier assignments ??? Help please!
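    # A sketch of the intent (not necessarily the original body): only the
    # low ROW_LINE_BITS of the row number are incremented, so the
    # generated adder is just 3 bits wide with the default geometry.
    row_idx = row[:ROW_LINE_BITS] + 1
    return Cat(row_idx[:ROW_LINE_BITS], row[ROW_LINE_BITS:])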
# -- Read the instruction word for the given address in the
# -- current cache row
# function read_insn_word(addr: std_ulogic_vector(63 downto 0);
#     data: cache_row_t) return std_ulogic_vector is
#     variable word: integer range 0 to INSN_PER_ROW-1;
#     word := to_integer(unsigned(addr(INSN_BITS+2-1 downto 2)));
#     return data(31+word*32 downto word*32);
# Read the instruction word for the given address
# in the current cache row
def read_insn_word(addr, data):
    word = addr[2:INSN_BITS+2]
    return data.word_select(word, 32)
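# For example (a note, not in the original): with INSN_PER_ROW = 2 the
# selector `word` is the single address bit addr[2], and word_select()
# picks either the low or the high 32-bit instruction out of the 64-bit
# BRAM row.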
# -- Get the tag value from the address
#     addr: std_ulogic_vector(REAL_ADDR_BITS - 1 downto 0)
#     return cache_tag_t is
#     return addr(REAL_ADDR_BITS - 1 downto SET_SIZE_BITS);
# Get the tag value from the address
def get_tag(addr):
    return addr[SET_SIZE_BITS:REAL_ADDR_BITS]

# -- Read a tag from a tag memory row
# function read_tag(way: way_t; tagset: cache_tags_set_t)
#     return cache_tag_t is
#     return tagset((way+1) * TAG_BITS - 1 downto way * TAG_BITS);
# Read a tag from a tag memory row
def read_tag(way, tagset):
    return tagset[way * TAG_BITS:(way + 1) * TAG_BITS]

# -- Write a tag to tag memory row
# procedure write_tag(way: in way_t;
#     tagset: inout cache_tags_set_t; tag: cache_tag_t) is
#     tagset((way+1) * TAG_BITS - 1 downto way * TAG_BITS) := tag;
# Write a tag to tag memory row
def write_tag(way, tagset, tag):
    tagset[way * TAG_BITS:(way + 1) * TAG_BITS] = tag
# -- Simple hash for direct-mapped TLB index
# function hash_ea(addr: std_ulogic_vector(63 downto 0))
#     return tlb_index_t is
#     variable hash : std_ulogic_vector(TLB_BITS - 1 downto 0);
#     hash := addr(TLB_LG_PGSZ + TLB_BITS - 1 downto TLB_LG_PGSZ)
#             xor addr(
#              TLB_LG_PGSZ + 2 * TLB_BITS - 1 downto
#              TLB_LG_PGSZ + TLB_BITS)
#             xor addr(
#              TLB_LG_PGSZ + 3 * TLB_BITS - 1 downto
#              TLB_LG_PGSZ + 2 * TLB_BITS);
#     return to_integer(unsigned(hash));
# Simple hash for direct-mapped TLB index
def hash_ea(addr):
    hsh = (addr[TLB_LG_PGSZ:TLB_LG_PGSZ + TLB_BITS] ^
           addr[TLB_LG_PGSZ + TLB_BITS:TLB_LG_PGSZ + 2 * TLB_BITS] ^
           addr[TLB_LG_PGSZ + 2 * TLB_BITS:TLB_LG_PGSZ + 3 * TLB_BITS])
    return hsh
# assert LINE_SIZE mod ROW_SIZE = 0;
# assert ispow2(LINE_SIZE) report "LINE_SIZE not power of 2"
# assert ispow2(NUM_LINES) report "NUM_LINES not power of 2"
# assert ispow2(ROW_PER_LINE) report "ROW_PER_LINE not power of 2"
# assert ispow2(INSN_PER_ROW) report "INSN_PER_ROW not power of 2"
# assert (ROW_BITS = INDEX_BITS + ROW_LINEBITS)
#     report "geometry bits don't add up" severity FAILURE;
# assert (LINE_OFF_BITS = ROW_OFF_BITS + ROW_LINEBITS)
#     report "geometry bits don't add up" severity FAILURE;
# assert (REAL_ADDR_BITS = TAG_BITS + INDEX_BITS + LINE_OFF_BITS)
#     report "geometry bits don't add up" severity FAILURE;
# assert (REAL_ADDR_BITS = TAG_BITS + ROW_BITS + ROW_OFF_BITS)
#     report "geometry bits don't add up" severity FAILURE;
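# A Python translation of the VHDL geometry checks above (a sketch added
# here, not part of the original; it assumes the geometry constants
# LINE_SIZE, NUM_LINES and NUM_WAYS defined earlier in the file and runs
# at import time).
def _ispow2(n):
    return n != 0 and (n & (n - 1)) == 0

assert LINE_SIZE % ROW_SIZE == 0, "LINE_SIZE not multiple of ROW_SIZE"
assert _ispow2(LINE_SIZE), "LINE_SIZE not power of 2"
assert _ispow2(NUM_LINES), "NUM_LINES not power of 2"
assert _ispow2(ROW_PER_LINE), "ROW_PER_LINE not power of 2"
assert _ispow2(INSN_PER_ROW), "INSN_PER_ROW not power of 2"
assert ROW_BITS == INDEX_BITS + ROW_LINE_BITS, "geometry bits don't add up"
assert LINE_OFF_BITS == ROW_OFF_BITS + ROW_LINE_BITS, \
    "geometry bits don't add up"
assert REAL_ADDR_BITS == TAG_BITS + INDEX_BITS + LINE_OFF_BITS, \
    "geometry bits don't add up"
assert REAL_ADDR_BITS == TAG_BITS + ROW_BITS + ROW_OFF_BITS, \
    "geometry bits don't add up"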
# sim_debug: if SIM generate
#     report "ROW_SIZE      = " & natural'image(ROW_SIZE);
#     report "ROW_PER_LINE  = " & natural'image(ROW_PER_LINE);
#     report "BRAM_ROWS     = " & natural'image(BRAM_ROWS);
#     report "INSN_PER_ROW  = " & natural'image(INSN_PER_ROW);
#     report "INSN_BITS     = " & natural'image(INSN_BITS);
#     report "ROW_BITS      = " & natural'image(ROW_BITS);
#     report "ROW_LINEBITS  = " & natural'image(ROW_LINEBITS);
#     report "LINE_OFF_BITS = " & natural'image(LINE_OFF_BITS);
#     report "ROW_OFF_BITS  = " & natural'image(ROW_OFF_BITS);
#     report "INDEX_BITS    = " & natural'image(INDEX_BITS);
#     report "TAG_BITS      = " & natural'image(TAG_BITS);
#     report "WAY_BITS      = " & natural'image(WAY_BITS);

# Cache reload state machine
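# A minimal State enum for the reload machine, consistent with the states
# referenced below (IDLE, CLR_TAG, WAIT_ACK); the exact encodings here are
# an assumption:
@unique
class State(Enum):
    IDLE     = 0
    CLR_TAG  = 1
    WAIT_ACK = 2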
# type reg_internal_t is record
#     -- Cache hit state (Latches for 1 cycle BRAM access)
#     hit_nia : std_ulogic_vector(63 downto 0);
#     hit_smark : std_ulogic;
#     hit_valid : std_ulogic;
#
#     -- Cache miss state (reload state machine)
#     wb : wishbone_master_out;
#     store_index : index_t;
#     store_tag : cache_tag_t;
#     store_valid : std_ulogic;
#     end_row_ix : row_in_line_t;
#     rows_valid : row_per_line_valid_t;
#
#     fetch_failed : std_ulogic;
class RegInternal(RecordObject):
    def __init__(self):
        super().__init__()
        # Cache hit state (Latches for 1 cycle BRAM access)
        self.hit_way      = Signal(NUM_WAYS)
        self.hit_nia      = Signal(64)
        self.hit_smark    = Signal()
        self.hit_valid    = Signal()

        # Cache miss state (reload state machine)
        self.state        = Signal(State)
        self.wb           = WBMasterOut()
        self.store_way    = Signal(NUM_WAYS)
        self.store_index  = Signal(NUM_LINES)
        self.store_row    = Signal(BRAM_ROWS)
        self.store_tag    = Signal(TAG_BITS)
        self.store_valid  = Signal()
        self.end_row_ix   = Signal(ROW_LINE_BITS)
        self.rows_valid   = RowPerLineValidArray()

        self.fetch_failed = Signal()
# -- 64 bit direct mapped icache. All instructions are 4B aligned.
#     SIM : boolean := false;
#     -- Line size in bytes
#     LINE_SIZE : positive := 64;
#     -- BRAM organisation: We never access more
#     -- than wishbone_data_bits
#     -- at a time so to save resources we make the
#     -- array only that wide,
#     -- and use consecutive indices for to make a cache "line"
#     -- ROW_SIZE is the width in bytes of the BRAM (based on WB,
#     ROW_SIZE : positive := wishbone_data_bits / 8;
#     -- Number of lines in a set
#     NUM_LINES : positive := 32;
#     NUM_WAYS : positive := 4;
#     -- L1 ITLB number of entries (direct mapped)
#     TLB_SIZE : positive := 64;
#     -- L1 ITLB log_2(page_size)
#     TLB_LG_PGSZ : positive := 12;
#     -- Number of real address bits that we store
#     REAL_ADDR_BITS : positive := 56;
#     -- Non-zero to enable log data collection
#     LOG_LENGTH : natural := 0
#
#     clk : in std_ulogic;
#     rst : in std_ulogic;
#     i_in : in Fetch1ToIcacheType;
#     i_out : out IcacheToDecode1Type;
#     m_in : in MmuToIcacheType;
#     stall_in : in std_ulogic;
#     stall_out : out std_ulogic;
#     flush_in : in std_ulogic;
#     inval_in : in std_ulogic;
#     wishbone_out : out wishbone_master_out;
#     wishbone_in : in wishbone_slave_out;
#     log_out : out std_ulogic_vector(53 downto 0)
# 64 bit direct mapped icache. All instructions are 4B aligned.
class ICache(Elaboratable):
    """64 bit direct mapped icache. All instructions are 4B aligned."""
    def __init__(self):
        self.i_in  = Fetch1ToICacheType()
        self.i_out = ICacheToDecode1Type()

        self.m_in  = MMUToICacheType()

        self.stall_in  = Signal()
        self.stall_out = Signal()
        self.flush_in  = Signal()
        self.inval_in  = Signal()

        self.wb_out = WBMasterOut()
        self.wb_in  = WBSlaveOut()

        self.log_out = Signal(54)
    # -- Generate a cache RAM for each way
    # rams: for i in 0 to NUM_WAYS-1 generate
    #     signal do_read  : std_ulogic;
    #     signal do_write : std_ulogic;
    #     signal rd_addr  : std_ulogic_vector(ROW_BITS-1 downto 0);
    #     signal wr_addr  : std_ulogic_vector(ROW_BITS-1 downto 0);
    #     signal dout     : cache_row_t;
    #     signal wr_sel   : std_ulogic_vector(ROW_SIZE-1 downto 0);
    #     way: entity work.cache_ram
    #         ROW_BITS => ROW_BITS,
    #         WIDTH => ROW_SIZE_BITS
    #         rd_addr => rd_addr,
    #         wr_addr => wr_addr,
    #         wr_data => wishbone_in.dat
    #     do_read <= not (stall_in or use_previous);
    #     if wishbone_in.ack = '1' and replace_way = i then
    #     cache_out(i) <= dout;
    #     std_ulogic_vector(to_unsigned(req_row, ROW_BITS));
    #     std_ulogic_vector(to_unsigned(r.store_row, ROW_BITS));
    #     for i in 0 to ROW_SIZE-1 loop
    #         wr_sel(i) <= do_write;
    def rams(self, m, r, cache_out, use_previous, replace_way, req_row):
        comb = m.d.comb

        wb_in, stall_in = self.wb_in, self.stall_in

        do_read  = Signal()
        do_write = Signal()
        rd_addr  = Signal(ROW_BITS)
        wr_addr  = Signal(ROW_BITS)
        _d_out   = Signal(ROW_SIZE_BITS)
        wr_sel   = Signal(ROW_SIZE)

        for i in range(NUM_WAYS):
            way = CacheRam(ROW_BITS, ROW_SIZE_BITS)
            comb += way.rd_en.eq(do_read)
            comb += way.rd_addr.eq(rd_addr)
            comb += _d_out.eq(way.rd_data_o)
            comb += way.wr_sel.eq(wr_sel)
            comb += way.wr_addr.eq(wr_addr)
            comb += way.wr_data.eq(wb_in.dat)

            comb += do_read.eq(~(stall_in | use_previous))
            comb += do_write.eq(0)

            with m.If(wb_in.ack & (replace_way == i)):
                comb += do_write.eq(1)

            comb += cache_out[i].eq(_d_out)
            comb += rd_addr.eq(req_row)
            comb += wr_addr.eq(r.store_row)
            for j in range(ROW_SIZE):
                comb += wr_sel[j].eq(do_write)
    # maybe_plrus: if NUM_WAYS > 1 generate
    #     plrus: for i in 0 to NUM_LINES-1 generate
    #         signal plru_acc    : std_ulogic_vector(WAY_BITS-1 downto 0);
    #         signal plru_acc_en : std_ulogic;
    #         signal plru_out    : std_ulogic_vector(WAY_BITS-1 downto 0);
    #         plru : entity work.plru
    #             acc_en => plru_acc_en,
    #         if get_index(r.hit_nia) = i then
    #             plru_acc_en <= r.hit_valid;
    #             plru_acc_en <= '0';
    #         std_ulogic_vector(to_unsigned(r.hit_way, WAY_BITS));
    #         plru_victim(i) <= plru_out;
    def maybe_plrus(self, m, r, plru_victim):
        comb = m.d.comb

        with m.If(NUM_WAYS > 1):
            for i in range(NUM_LINES):
                plru_acc    = Signal(WAY_BITS)
                plru_acc_en = Signal()
                plru_out    = Signal(WAY_BITS)
                plru        = PLRU(WAY_BITS)
                comb += plru.acc.eq(plru_acc)
                comb += plru.acc_en.eq(plru_acc_en)
                comb += plru_out.eq(plru.lru_o)

                with m.If(get_index(r.hit_nia) == i):
                    comb += plru.acc_en.eq(r.hit_valid)
                with m.Else():
                    comb += plru.acc_en.eq(0)

                comb += plru.acc.eq(r.hit_way)
                comb += plru_victim[i].eq(plru.lru_o)
    # -- TLB hit detection and real address generation
    # itlb_lookup : process(all)
    #     variable pte : tlb_pte_t;
    #     variable ttag : tlb_tag_t;
    #     tlb_req_index <= hash_ea(i_in.nia);
    #     pte := itlb_ptes(tlb_req_index);
    #     ttag := itlb_tags(tlb_req_index);
    #     if i_in.virt_mode = '1' then
    #         real_addr <= pte(REAL_ADDR_BITS - 1 downto TLB_LG_PGSZ) &
    #                      i_in.nia(TLB_LG_PGSZ - 1 downto 0);
    #         if ttag = i_in.nia(63 downto TLB_LG_PGSZ + TLB_BITS) then
    #             ra_valid <= itlb_valids(tlb_req_index);
    #         eaa_priv <= pte(3);
    #         real_addr <= i_in.nia(REAL_ADDR_BITS - 1 downto 0);
    #     -- no IAMR, so no KUEP support for now
    #     priv_fault <= eaa_priv and not i_in.priv_mode;
    #     access_ok <= ra_valid and not priv_fault;
    # TLB hit detection and real address generation
    def itlb_lookup(self, m, tlb_req_index, itlb_ptes, itlb_tags,
                    real_addr, itlb_valid_bits, ra_valid, eaa_priv,
                    priv_fault, access_ok):
        comb = m.d.comb

        i_in = self.i_in

        pte  = Signal(TLB_PTE_BITS)
        ttag = Signal(TLB_EA_TAG_BITS)

        comb += tlb_req_index.eq(hash_ea(i_in.nia))
        comb += pte.eq(itlb_ptes[tlb_req_index])
        comb += ttag.eq(itlb_tags[tlb_req_index])

        with m.If(i_in.virt_mode):
            comb += real_addr.eq(Cat(i_in.nia[:TLB_LG_PGSZ],
                                     pte[TLB_LG_PGSZ:REAL_ADDR_BITS]))

            with m.If(ttag == i_in.nia[TLB_LG_PGSZ + TLB_BITS:64]):
                comb += ra_valid.eq(itlb_valid_bits[tlb_req_index])
            with m.Else():
                comb += ra_valid.eq(0)

        with m.Else():
            comb += real_addr.eq(i_in.nia[:REAL_ADDR_BITS])
            comb += ra_valid.eq(1)
            comb += eaa_priv.eq(1)

        # No IAMR, so no KUEP support for now
        comb += priv_fault.eq(eaa_priv & ~i_in.priv_mode)
        comb += access_ok.eq(ra_valid & ~priv_fault)
    # itlb_update: process(clk)
    #     variable wr_index : tlb_index_t;
    #     if rising_edge(clk) then
    #         wr_index := hash_ea(m_in.addr);
    #         (m_in.tlbie = '1' and m_in.doall = '1') then
    #             -- clear all valid bits
    #             for i in tlb_index_t loop
    #                 itlb_valids(i) <= '0';
    #         elsif m_in.tlbie = '1' then
    #             -- clear entry regardless of hit or miss
    #             itlb_valids(wr_index) <= '0';
    #         elsif m_in.tlbld = '1' then
    #             itlb_tags(wr_index) <=
    #                 m_in.addr(63 downto TLB_LG_PGSZ + TLB_BITS);
    #             itlb_ptes(wr_index) <= m_in.pte;
    #             itlb_valids(wr_index) <= '1';
    def itlb_update(self, m, itlb_valid_bits, itlb_tags, itlb_ptes):
        comb = m.d.comb
        sync = m.d.sync

        m_in = self.m_in

        wr_index = Signal(TLB_SIZE)
        comb += wr_index.eq(hash_ea(m_in.addr))

        with m.If(m_in.tlbie & m_in.doall):
            # Clear all valid bits
            for i in range(TLB_SIZE):
                sync += itlb_valid_bits[i].eq(0)

        with m.Elif(m_in.tlbie):
            # Clear entry regardless of hit or miss
            sync += itlb_valid_bits[wr_index].eq(0)

        with m.Elif(m_in.tlbld):
            sync += itlb_tags[wr_index].eq(
                     m_in.addr[TLB_LG_PGSZ + TLB_BITS:64])
            sync += itlb_ptes[wr_index].eq(m_in.pte)
            sync += itlb_valid_bits[wr_index].eq(1)
    # -- Cache hit detection, output to fetch2 and other misc logic
    # icache_comb : process(all)
    # Cache hit detection, output to fetch2 and other misc logic
    def icache_comb(self, m, use_previous, r, req_index, req_row,
                    req_tag, real_addr, req_laddr, cache_valid_bits,
                    cache_tags, access_ok, req_is_hit,
                    req_is_miss, replace_way, plru_victim, cache_out):
        # variable is_hit : std_ulogic;
        # variable hit_way : way_t;
        comb = m.d.comb

        i_in, i_out, wb_out = self.i_in, self.i_out, self.wb_out
        flush_in, stall_out = self.flush_in, self.stall_out

        is_hit  = Signal()
        hit_way = Signal(NUM_WAYS)

        # -- i_in.sequential means that i_in.nia this cycle
        # -- is 4 more than last cycle. If we read more
        # -- than 32 bits at a time, had a cache hit last
        # -- cycle, and we don't want the first 32-bit chunk
        # -- then we can keep the data we read last cycle
        # -- and just use that.
        # if unsigned(i_in.nia(INSN_BITS+2-1 downto 2)) /= 0 then
        #     use_previous <= i_in.sequential and r.hit_valid;
        #     use_previous <= '0';
        # i_in.sequential means that i_in.nia this cycle is 4 more than
        # last cycle. If we read more than 32 bits at a time, had a
        # cache hit last cycle, and we don't want the first 32-bit chunk
        # then we can keep the data we read last cycle and just use that.
        with m.If(i_in.nia[2:INSN_BITS+2] != 0):
            comb += use_previous.eq(i_in.sequential & r.hit_valid)
        with m.Else():
            comb += use_previous.eq(0)

        # -- Extract line, row and tag from request
        # req_index <= get_index(i_in.nia);
        # req_row <= get_row(i_in.nia);
        # req_tag <= get_tag(real_addr);
        # Extract line, row and tag from request
        comb += req_index.eq(get_index(i_in.nia))
        comb += req_row.eq(get_row(i_in.nia))
        comb += req_tag.eq(get_tag(real_addr))
        # -- Calculate address of beginning of cache row, will be
        # -- used for cache miss processing if needed
        #     (63 downto REAL_ADDR_BITS => '0') &
        #     real_addr(REAL_ADDR_BITS - 1 downto ROW_OFF_BITS) &
        #     (ROW_OFF_BITS-1 downto 0 => '0');
        # Calculate address of beginning of cache row, will be
        # used for cache miss processing if needed
        comb += req_laddr.eq(Cat(
                 Const(0b0, ROW_OFF_BITS),
                 real_addr[ROW_OFF_BITS:REAL_ADDR_BITS],
                 Const(0, 64 - REAL_ADDR_BITS)))

        # -- Test if pending request is a hit on any way
        #     for i in way_t loop
        #         if i_in.req = '1' and
        #             (cache_valids(req_index)(i) = '1' or
        #             (r.state = WAIT_ACK and
        #              req_index = r.store_index and
        #              i = r.store_way and
        #              r.rows_valid(req_row mod ROW_PER_LINE) = '1')) then
        #             if read_tag(i, cache_tags(req_index)) = req_tag then
        # Test if pending request is a hit on any way
        for i in range(NUM_WAYS):
            with m.If(i_in.req &
                      (cache_valid_bits[req_index][i] |
                       ((r.state == State.WAIT_ACK)
                        & (req_index == r.store_index)
                        & (i == r.store_way)
                        & r.rows_valid[req_row % ROW_PER_LINE]))):
                with m.If(read_tag(i, cache_tags[req_index]) == req_tag):
                    comb += hit_way.eq(i)
                    comb += is_hit.eq(1)
        # -- Generate the "hit" and "miss" signals
        # -- for the synchronous blocks
        # if i_in.req = '1' and access_ok = '1' and flush_in = '0'
        #     req_is_hit <= is_hit;
        #     req_is_miss <= not is_hit;
        #     req_is_miss <= '0';
        # req_hit_way <= hit_way;
        # Generate the "hit" and "miss" signals
        # for the synchronous blocks
        with m.If(i_in.req & access_ok & ~flush_in):
            comb += req_is_hit.eq(is_hit)
            comb += req_is_miss.eq(~is_hit)
        with m.Else():
            comb += req_is_hit.eq(0)
            comb += req_is_miss.eq(0)

        # -- The way to replace on a miss
        # if r.state = CLR_TAG then
        #     to_integer(unsigned(plru_victim(r.store_index)));
        #     replace_way <= r.store_way;
        # The way to replace on a miss
        with m.If(r.state == State.CLR_TAG):
            comb += replace_way.eq(plru_victim[r.store_index])
        with m.Else():
            comb += replace_way.eq(r.store_way)
        # -- Output instruction from current cache row
        # -- Note: This is a mild violation of our design principle of
        # -- having pipeline stages output from a clean latch. In this
        # -- case we output the result of a mux. The alternative would
        # -- be output an entire row which I prefer not to do just yet
        # -- as it would force fetch2 to know about some of the cache
        # -- geometry information.
        # i_out.insn <= read_insn_word(r.hit_nia, cache_out(r.hit_way));
        # i_out.valid <= r.hit_valid;
        # i_out.nia <= r.hit_nia;
        # i_out.stop_mark <= r.hit_smark;
        # i_out.fetch_failed <= r.fetch_failed;
        # Output instruction from current cache row
        #
        # Note: This is a mild violation of our design principle of
        # having pipeline stages output from a clean latch. In this
        # case we output the result of a mux. The alternative would
        # be output an entire row which I prefer not to do just yet
        # as it would force fetch2 to know about some of the cache
        # geometry information.
        comb += i_out.insn.eq(
                 read_insn_word(r.hit_nia, cache_out[r.hit_way]))
        comb += i_out.valid.eq(r.hit_valid)
        comb += i_out.nia.eq(r.hit_nia)
        comb += i_out.stop_mark.eq(r.hit_smark)
        comb += i_out.fetch_failed.eq(r.fetch_failed)

        # -- Stall fetch1 if we have a miss on cache or TLB
        # -- or a protection fault
        # stall_out <= not (is_hit and access_ok);
        # Stall fetch1 if we have a miss on cache or TLB
        # or a protection fault
        comb += stall_out.eq(~(is_hit & access_ok))

        # -- Wishbone requests output (from the cache miss reload machine)
        # wishbone_out <= r.wb;
        # Wishbone requests output (from the cache miss reload machine)
        comb += wb_out.eq(r.wb)
    # -- Cache hit synchronous machine
    # icache_hit : process(clk)
    # Cache hit synchronous machine
    def icache_hit(self, m, use_previous, r, req_is_hit, req_hit_way,
                   req_index, req_tag, real_addr):
        sync = m.d.sync

        i_in, stall_in = self.i_in, self.stall_in
        flush_in = self.flush_in

        # if rising_edge(clk) then
        #     -- keep outputs to fetch2 unchanged on a stall
        #     -- except that flush or reset sets valid to 0
        #     -- If use_previous, keep the same data as last
        #     -- cycle and use the second half
        #     if stall_in = '1' or use_previous = '1' then
        #         if rst = '1' or flush_in = '1' then
        #             r.hit_valid <= '0';
        # keep outputs to fetch2 unchanged on a stall
        # except that flush or reset sets valid to 0
        # If use_previous, keep the same data as last
        # cycle and use the second half
        with m.If(stall_in | use_previous):
            with m.If(flush_in):
                sync += r.hit_valid.eq(0)
        with m.Else():
            # -- On a hit, latch the request for the next cycle,
            # -- when the BRAM data will be available on the
            # -- cache_out output of the corresponding way
            # r.hit_valid <= req_is_hit;
            # if req_is_hit = '1' then
            #     r.hit_way <= req_hit_way;
            # On a hit, latch the request for the next cycle,
            # when the BRAM data will be available on the
            # cache_out output of the corresponding way
            sync += r.hit_valid.eq(req_is_hit)

            with m.If(req_is_hit):
                sync += r.hit_way.eq(req_hit_way)

                # report "cache hit nia:" & to_hstring(i_in.nia) &
                #     " IR:" & std_ulogic'image(i_in.virt_mode) &
                #     " SM:" & std_ulogic'image(i_in.stop_mark) &
                #     " idx:" & integer'image(req_index) &
                #     " tag:" & to_hstring(req_tag) &
                #     " way:" & integer'image(req_hit_way) &
                #     " RA:" & to_hstring(real_addr);
                print(f"cache hit nia:{i_in.nia}, "
                      f"IR:{i_in.virt_mode}, "
                      f"SM:{i_in.stop_mark}, idx:{req_index}, "
                      f"tag:{req_tag}, way:{req_hit_way}, "
                      f"RA:{real_addr}")

        # if stall_in = '0' then
        #     -- Send stop marks and NIA down regardless of validity
        #     r.hit_smark <= i_in.stop_mark;
        #     r.hit_nia <= i_in.nia;
        with m.If(~stall_in):
            # Send stop marks and NIA down regardless of validity
            sync += r.hit_smark.eq(i_in.stop_mark)
            sync += r.hit_nia.eq(i_in.nia)
    # -- Cache miss/reload synchronous machine
    # icache_miss : process(clk)
    # Cache miss/reload synchronous machine
    def icache_miss(self, m, cache_valid_bits, r, req_is_miss,
                    req_index, req_laddr, req_tag, replace_way,
                    cache_tags, access_ok):
        comb = m.d.comb
        sync = m.d.sync

        i_in, wb_in, m_in  = self.i_in, self.wb_in, self.m_in
        stall_in, flush_in = self.stall_in, self.flush_in
        inval_in           = self.inval_in

        # variable tagset : cache_tags_set_t;
        # variable stbs_done : boolean;
        tagset    = Signal(TAG_RAM_WIDTH)
        stbs_done = Signal()

        # if rising_edge(clk) then
        #     -- On reset, clear all valid bits to force misses
        # On reset, clear all valid bits to force misses
        #     for i in index_t loop
        #         cache_valids(i) <= (others => '0');
        #
        #     -- We only ever do reads on wishbone
        #     r.wb.dat <= (others => '0');
        #     r.wb.sel <= "11111111";
        # We only ever do reads on wishbone
        comb += r.wb.sel.eq(~0)  # set to all 1s

        #     -- Not useful normally but helps avoiding
        #     -- tons of sim warnings
        #     r.wb.adr <= (others => '0');

        #     -- Process cache invalidations
        #     if inval_in = '1' then
        #         for i in index_t loop
        #             cache_valids(i) <= (others => '0');
        #         r.store_valid <= '0';
        # Process cache invalidations
        with m.If(inval_in):
            for i in range(NUM_LINES):
                sync += cache_valid_bits[i].eq(0)
            sync += r.store_valid.eq(0)
        # -- Main state machine
        # Main state machine
        with m.Switch(r.state):

            with m.Case(State.IDLE):
                # -- Reset per-row valid flags,
                # -- only used in WAIT_ACK
                # for i in 0 to ROW_PER_LINE - 1 loop
                #     r.rows_valid(i) <= '0';
                # Reset per-row valid flags,
                # only used in WAIT_ACK
                for i in range(ROW_PER_LINE):
                    sync += r.rows_valid[i].eq(0)

                # -- We need to read a cache line
                # if req_is_miss = '1' then
                #     report "cache miss nia:" & to_hstring(i_in.nia) &
                #         " IR:" & std_ulogic'image(i_in.virt_mode) &
                #         " SM:" & std_ulogic'image(i_in.stop_mark) &
                #         " idx:" & integer'image(req_index) &
                #         " way:" & integer'image(replace_way) &
                #         " tag:" & to_hstring(req_tag) &
                #         " RA:" & to_hstring(real_addr);
                # We need to read a cache line
                with m.If(req_is_miss):
                    print(f"cache miss nia:{i_in.nia} "
                          f"IR:{i_in.virt_mode} "
                          f"SM:{i_in.stop_mark} "
                          f"idx:{req_index} "
                          f"way:{replace_way} tag:{req_tag}")
                    # -- Keep track of our index and way for
                    # -- subsequent stores
                    # r.store_index <= req_index;
                    # r.store_row <= get_row(req_laddr);
                    # r.store_tag <= req_tag;
                    # r.store_valid <= '1';
                    #     get_row_of_line(get_row(req_laddr)) - 1;
                    # Keep track of our index and way
                    # for subsequent stores
                    sync += r.store_index.eq(req_index)
                    sync += r.store_row.eq(get_row(req_laddr))
                    sync += r.store_tag.eq(req_tag)
                    sync += r.store_valid.eq(1)
                    sync += r.end_row_ix.eq(
                             get_row_of_line(get_row(req_laddr)) - 1)

                    # -- Prep for first wishbone read. We calculate the
                    # -- address of the start of the cache line and
                    # -- start the WB cycle.
                    # r.wb.adr <= req_laddr(r.wb.adr'left downto 0);
                    # Prep for first wishbone read.
                    # We calculate the
                    # address of the start of the cache line and
                    # start the WB cycle.
                    sync += r.wb.adr.eq(req_laddr[:r.wb.adr.width])
                    # -- Track that we had one request sent
                    # r.state <= CLR_TAG;
                    # Track that we had one request sent
                    sync += r.state.eq(State.CLR_TAG)

            # when CLR_TAG | WAIT_ACK =>
            with m.Case(State.CLR_TAG, State.WAIT_ACK):
                # if r.state = CLR_TAG then
                with m.If(r.state == State.CLR_TAG):
                    # -- Get victim way from plru
                    # r.store_way <= replace_way;
                    # Get victim way from plru
                    sync += r.store_way.eq(replace_way)

                    # -- Force misses on that way while
                    # -- reloading that line
                    # cache_valids(req_index)(replace_way) <= '0';
                    # Force misses on that way while
                    # reloading that line
                    sync += cache_valid_bits[req_index][replace_way].eq(0)

                    # -- Store new tag in selected way
                    # for i in 0 to NUM_WAYS-1 loop
                    #     if i = replace_way then
                    #         tagset := cache_tags(r.store_index);
                    #         write_tag(i, tagset, r.store_tag);
                    #         cache_tags(r.store_index) <= tagset;
                    for i in range(NUM_WAYS):
                        with m.If(i == replace_way):
                            comb += tagset.eq(cache_tags[r.store_index])
                            write_tag(i, tagset, r.store_tag)
                            sync += cache_tags[r.store_index].eq(tagset)
                    # r.state <= WAIT_ACK;
                    sync += r.state.eq(State.WAIT_ACK)

                # -- Requests are all sent if stb is 0
                # stbs_done := r.wb.stb = '0';
                # Requests are all sent if stb is 0
                comb += stbs_done.eq(r.wb.stb == 0)

                # -- If we are still sending requests,
                # -- was one accepted ?
                # if wishbone_in.stall = '0' and not stbs_done then
                # If we are still sending requests,
                # was one accepted?
                with m.If(~wb_in.stall & ~stbs_done):
                    # -- That was the last word ? We are done sending.
                    # -- Clear stb and set stbs_done so we can handle
                    # -- an eventual last ack on the same cycle.
                    # if is_last_row_addr(r.wb.adr, r.end_row_ix) then
                    #     stbs_done := true;
                    # That was the last word ?
                    # We are done sending.
                    # Clear stb and set stbs_done
                    # so we can handle
                    # an eventual last ack on
                    # the same cycle.
                    with m.If(is_last_row_addr(r.wb.adr, r.end_row_ix)):
                        sync += r.wb.stb.eq(0)
                        comb += stbs_done.eq(1)

                    # -- Calculate the next row address
                    # r.wb.adr <= next_row_addr(r.wb.adr);
                    # Calculate the next row address
                    sync += r.wb.adr.eq(next_row_addr(r.wb.adr))
                # -- Incoming acks processing
                # if wishbone_in.ack = '1' then
                # Incoming acks processing
                with m.If(wb_in.ack):
                    # r.rows_valid(r.store_row mod ROW_PER_LINE)
                    sync += r.rows_valid[
                             r.store_row % ROW_PER_LINE].eq(1)

                    # -- Check for completion
                    #     is_last_row(r.store_row, r.end_row_ix) then
                    # Check for completion
                    with m.If(stbs_done & is_last_row(
                              r.store_row, r.end_row_ix)):
                        # -- Complete wishbone cycle
                        # Complete wishbone cycle
                        sync += r.wb.cyc.eq(0)

                        # -- Cache line is now valid
                        # cache_valids(r.store_index)(replace_way) <=
                        #     r.store_valid and not inval_in;
                        # Cache line is now valid
                        sync += cache_valid_bits[
                                 r.store_index][replace_way].eq(
                                  r.store_valid & ~inval_in)

                        sync += r.state.eq(State.IDLE)

                    # -- Increment store row counter
                    # r.store_row <= next_row(r.store_row);
                    # Increment store row counter
                    sync += r.store_row.eq(next_row(r.store_row))

        # -- TLB miss and protection fault processing
        # if rst = '1' or flush_in = '1' or m_in.tlbld = '1' then
        #     r.fetch_failed <= '0';
        # elsif i_in.req = '1' and access_ok = '0' and
        #     stall_in = '0' then
        #     r.fetch_failed <= '1';
        # TLB miss and protection fault processing
        # TODO: the VHDL 'rst' term is covered by the nmigen sync-domain
        # reset rather than an explicit signal here
        with m.If(flush_in | m_in.tlbld):
            sync += r.fetch_failed.eq(0)
        with m.Elif(i_in.req & ~access_ok & ~stall_in):
            sync += r.fetch_failed.eq(1)
    # icache_log: if LOG_LENGTH > 0 generate
    def icache_log(self, m, req_hit_way, ra_valid, access_ok,
                   req_is_miss, req_is_hit, lway, wstate, r):
        comb = m.d.comb
        sync = m.d.sync

        wb_in, i_out = self.wb_in, self.i_out
        log_out, stall_out = self.log_out, self.stall_out

        # -- Output data to logger
        # signal log_data : std_ulogic_vector(53 downto 0);
        # data_log: process(clk)
        #     variable lway: way_t;
        #     variable wstate: std_ulogic;
        # Output data to logger
        for i in range(LOG_LENGTH):
            # Output data to logger
            log_data = Signal(54)
            lway     = Signal(NUM_WAYS)

            # if rising_edge(clk) then
            #     lway := req_hit_way;
            comb += lway.eq(req_hit_way)
            comb += wstate.eq(0)

            # if r.state /= IDLE then
            with m.If(r.state != State.IDLE):
                comb += wstate.eq(1)

            # log_data <= i_out.valid &
            #     r.wb.adr(5 downto 3) &
            #     r.wb.stb & r.wb.cyc &
            #     wishbone_in.stall &
            #     r.hit_nia(5 downto 2) &
            #     std_ulogic_vector(to_unsigned(lway, 3)) &
            #     req_is_hit & req_is_miss &
            sync += log_data.eq(Cat(
                     ra_valid, access_ok, req_is_miss, req_is_hit,
                     lway, wstate, r.hit_nia[2:6],
                     r.fetch_failed, stall_out, wb_in.stall, r.wb.cyc,
                     r.wb.stb, r.wb.adr[3:6], wb_in.ack, i_out.insn,
                     i_out.valid))

            # log_out <= log_data;
            comb += log_out.eq(log_data)
    def elaborate(self, platform):
        m = Module()

        # Storage. Hopefully "cache_rows" is a BRAM, the rest is LUTs
        cache_tags       = CacheTagArray()
        cache_valid_bits = CacheValidBitsArray()

        # signal itlb_valids : tlb_valids_t;
        # signal itlb_tags : tlb_tags_t;
        # signal itlb_ptes : tlb_ptes_t;
        # attribute ram_style of itlb_tags : signal is "distributed";
        # attribute ram_style of itlb_ptes : signal is "distributed";
        itlb_valid_bits = TLBValidBitsArray()
        itlb_tags       = TLBTagArray()
        itlb_ptes       = TLBPTEArray()
        # TODO to be passed to nmigen as ram attributes
        # attribute ram_style of itlb_tags : signal is "distributed";
        # attribute ram_style of itlb_ptes : signal is "distributed";

        # -- Privilege bit from PTE EAA field
        # signal eaa_priv : std_ulogic;
        # Privilege bit from PTE EAA field
        eaa_priv = Signal()

        # signal r : reg_internal_t;
        r = RegInternal()

        # -- Async signals on incoming request
        # signal req_index : index_t;
        # signal req_row : row_t;
        # signal req_hit_way : way_t;
        # signal req_tag : cache_tag_t;
        # signal req_is_hit : std_ulogic;
        # signal req_is_miss : std_ulogic;
        # signal req_laddr : std_ulogic_vector(63 downto 0);
        # Async signal on incoming request
        req_index   = Signal(NUM_LINES)
        req_row     = Signal(BRAM_ROWS)
        req_hit_way = Signal(NUM_WAYS)
        req_tag     = Signal(TAG_BITS)
        req_is_hit  = Signal()
        req_is_miss = Signal()
        req_laddr   = Signal(64)

        # signal tlb_req_index : tlb_index_t;
        # signal real_addr : std_ulogic_vector(
        #     REAL_ADDR_BITS - 1 downto 0
        # signal ra_valid : std_ulogic;
        # signal priv_fault : std_ulogic;
        # signal access_ok : std_ulogic;
        # signal use_previous : std_ulogic;
        tlb_req_index = Signal(TLB_SIZE)
        real_addr     = Signal(REAL_ADDR_BITS)
        ra_valid      = Signal()
        priv_fault    = Signal()
        access_ok     = Signal()
        use_previous  = Signal()

        # signal cache_out : cache_ram_out_t;
        cache_out = CacheRamOut()

        # signal plru_victim : plru_out_t;
        # signal replace_way : way_t;
        plru_victim = PLRUOut()
        replace_way = Signal(NUM_WAYS)
        # call sub-functions putting everything together, using shared
        # signals established above
        self.rams(m, r, cache_out, use_previous, replace_way, req_row)
        self.maybe_plrus(m, r, plru_victim)
        self.itlb_lookup(m, tlb_req_index, itlb_ptes, itlb_tags,
                         real_addr, itlb_valid_bits, ra_valid, eaa_priv,
                         priv_fault, access_ok)
        self.itlb_update(m, itlb_valid_bits, itlb_tags, itlb_ptes)
        self.icache_comb(m, use_previous, r, req_index, req_row,
                         req_tag, real_addr, req_laddr, cache_valid_bits,
                         cache_tags, access_ok, req_is_hit, req_is_miss,
                         replace_way, plru_victim, cache_out)
        self.icache_hit(m, use_previous, r, req_is_hit, req_hit_way,
                        req_index, req_tag, real_addr)
        self.icache_miss(m, cache_valid_bits, r, req_is_miss, req_index,
                         req_laddr, req_tag, replace_way, cache_tags,
                         access_ok)
        #self.icache_log(m, log_out, req_hit_way, ra_valid, access_ok,
        #                req_is_miss, req_is_hit, lway, wstate, r)

        return m
# use ieee.std_logic_1164.all;
# use work.common.all;
# use work.wishbone_types.all;
#
# entity icache_tb is
# architecture behave of icache_tb is
#     signal clk : std_ulogic;
#     signal rst : std_ulogic;
#     signal i_out : Fetch1ToIcacheType;
#     signal i_in : IcacheToDecode1Type;
#     signal m_out : MmuToIcacheType;
#     signal wb_bram_in : wishbone_master_out;
#     signal wb_bram_out : wishbone_slave_out;
#     constant clk_period : time := 10 ns;
#
#     icache0: entity work.icache
#         wishbone_out => wb_bram_in,
#         wishbone_in => wb_bram_out
#
#     -- BRAM Memory slave
#     bram0: entity work.wishbone_bram_wrapper
#         MEMORY_SIZE => 1024,
#         RAM_INIT_FILE => "icache_test.bin"
#         wishbone_in => wb_bram_in,
#         wishbone_out => wb_bram_out
#
#     clk_process: process
#         wait for clk_period/2;
#         wait for clk_period/2;
#     rst_process: process
#         wait for 2*clk_period;
#
#     i_out.nia <= (others => '0');
#     i_out.stop_mark <= '0';
#     m_out.tlbld <= '0';
#     m_out.tlbie <= '0';
#     m_out.addr <= (others => '0');
#     m_out.pte <= (others => '0');
#     wait until rising_edge(clk);
#     wait until rising_edge(clk);
#     wait until rising_edge(clk);
#     wait until rising_edge(clk);
#
#     i_out.nia <= x"0000000000000004";
#     wait for 30*clk_period;
#     wait until rising_edge(clk);
#     assert i_in.valid = '1' severity failure;
#     assert i_in.insn = x"00000001"
#         report "insn @" & to_hstring(i_out.nia) &
#             "=" & to_hstring(i_in.insn) &
#             " expected 00000001"
#     wait until rising_edge(clk);
#
#     i_out.nia <= x"0000000000000008";
#     wait until rising_edge(clk);
#     wait until rising_edge(clk);
#     assert i_in.valid = '1' severity failure;
#     assert i_in.insn = x"00000002"
#         report "insn @" & to_hstring(i_out.nia) &
#             "=" & to_hstring(i_in.insn) &
#             " expected 00000002"
#     wait until rising_edge(clk);
#
#     i_out.nia <= x"0000000000000040";
#     wait for 30*clk_period;
#     wait until rising_edge(clk);
#     assert i_in.valid = '1' severity failure;
#     assert i_in.insn = x"00000010"
#         report "insn @" & to_hstring(i_out.nia) &
#             "=" & to_hstring(i_in.insn) &
#             " expected 00000010"
#
#     -- test something that aliases
#     i_out.nia <= x"0000000000000100";
#     wait until rising_edge(clk);
#     wait until rising_edge(clk);
#     assert i_in.valid = '0' severity failure;
#     wait until rising_edge(clk);
#     wait for 30*clk_period;
#     wait until rising_edge(clk);
#     assert i_in.valid = '1' severity failure;
#     assert i_in.insn = x"00000040"
#         report "insn @" & to_hstring(i_out.nia) &
#             "=" & to_hstring(i_in.insn) &
#             " expected 00000040"
def icache_sim(dut):
    i_out, i_in, m_out, m_in = dut.i_out, dut.i_in, dut.m_out, dut.m_in

    yield i_out.req.eq(0)
    yield i_out.nia.eq(~1)
    yield i_out.stop_mark.eq(0)
    yield m_out.tlbld.eq(0)
    yield m_out.tlbie.eq(0)
    yield m_out.addr.eq(~1)
    yield m_out.pte.eq(~1)

    yield i_out.req.eq(1)
    yield i_out.nia.eq(Const(0x0000000000000004, 64))
    assert i_in.insn == Const(0x00000001, 32), \
        ("insn @%x=%x expected 00000001" % (i_out.nia, i_in.insn))
    yield i_out.req.eq(0)

    yield i_out.req.eq(1)
    yield i_out.nia.eq(Const(0x0000000000000008, 64))
    assert i_in.insn == Const(0x00000002, 32), \
        ("insn @%x=%x expected 00000002" % (i_out.nia, i_in.insn))

    yield i_out.nia.eq(Const(0x0000000000000040, 64))
    assert i_in.insn == Const(0x00000010, 32), \
        ("insn @%x=%x expected 00000010" % (i_out.nia, i_in.insn))

    # test something that aliases
    yield i_out.req.eq(1)
    yield i_out.nia.eq(Const(0x0000000000000100, 64))
    assert i_in.insn == Const(0x00000040, 32), \
        ("insn @%x=%x expected 00000040" % (i_out.nia, i_in.insn))
    yield i_out.req.eq(0)
def test_icache():
    dut = ICache()

    m = Module()
    m.submodules.icache = dut

    # minimal simulator setup (assumed boilerplate; Simulator and wrap are
    # imported at the top of the file)
    sim = Simulator(m)
    sim.add_clock(1e-6)

    sim.add_sync_process(wrap(icache_sim(dut)))
    with sim.write_vcd('test_icache.vcd'):
        sim.run()
if __name__ == '__main__':
    dut = ICache()
    vl = rtlil.convert(dut, ports=[])
    with open("test_icache.il", "w") as f:
        f.write(vl)