# based on Anton Blanchard microwatt icache.vhdl
#
# TODO (in no specific order):
#
# * Add debug interface to inspect cache content
# * Add snoop/invalidate path
# * Add multi-hit error detection
# * Pipelined bus interface (wb or axi)
# * Maybe add parity? There's a few bits free in each BRAM row on Xilinx
# * Add optimization: service hits on partially loaded lines
# * Add optimization: (maybe) interrupt reload on flush/redirect
# * Check if playing with the geometry of the cache tags allows for more
#   efficient use of distributed RAM and less logic/muxes.  Currently we
#   write TAG_BITS width which may not match full ram blocks and might
#   cause muxes to be inferred for "partial writes".
# * Check if making the read size of PLRU a ROM helps utilization
from enum import Enum, unique
from nmigen import (Module, Signal, Elaboratable, Cat, Array, Const)
from nmigen.cli import main
from nmigen.cli import rtlil
from nmutil.iocontrol import RecordObject
from nmutil.byterev import byte_reverse
from nmutil.mask import Mask
from nmigen.utils import log2_int
from nmutil.util import Display
from soc.experiment.mem_types import (Fetch1ToICacheType,
                                      ICacheToDecode1Type,
                                      MMUToICacheType)

from soc.experiment.wb_types import (WB_ADDR_BITS, WB_DATA_BITS,
                                     WB_SEL_BITS, WBAddrType, WBDataType,
                                     WBSelType, WBMasterOut, WBSlaveOut,
                                     WBMasterOutVector, WBSlaveOutVector,
                                     WBIOMasterOut, WBIOSlaveOut)
# BRAM organisation: We never access more than wishbone_data_bits
# at a time so to save resources we make the array only that wide,
# and use consecutive indices to make a cache "line"
#
# Line size in bytes
LINE_SIZE      = 64
# ROW_SIZE is the width in bytes of the BRAM (based on WB, so 64-bits)
ROW_SIZE       = WB_DATA_BITS // 8
# Number of lines in a set
NUM_LINES      = 32
# Number of ways
NUM_WAYS       = 4
# L1 ITLB number of entries (direct mapped)
TLB_SIZE       = 64
# L1 ITLB log_2(page_size)
TLB_LG_PGSZ    = 12
# Number of real address bits that we store
REAL_ADDR_BITS = 56
# Non-zero to enable log data collection
LOG_LENGTH     = 0
ROW_SIZE_BITS  = ROW_SIZE * 8
# ROW_PER_LINE is the number of row
# (wishbone) transactions in a line
ROW_PER_LINE   = LINE_SIZE // ROW_SIZE
# BRAM_ROWS is the number of rows in
# BRAM needed to represent the full icache
BRAM_ROWS      = NUM_LINES * ROW_PER_LINE
# INSN_PER_ROW is the number of 32bit
# instructions per BRAM row
INSN_PER_ROW   = ROW_SIZE_BITS // 32

# Bit fields counts in the address
#
# INSN_BITS is the number of bits to
# select an instruction in a row
INSN_BITS      = log2_int(INSN_PER_ROW)
# ROW_BITS is the number of bits to
# select a row
ROW_BITS       = log2_int(BRAM_ROWS)
# ROW_LINE_BITS is the number of bits to
# select a row within a line
ROW_LINE_BITS  = log2_int(ROW_PER_LINE)
# LINE_OFF_BITS is the number of bits for
# the offset in a cache line
LINE_OFF_BITS  = log2_int(LINE_SIZE)
# ROW_OFF_BITS is the number of bits for
# the offset in a row
ROW_OFF_BITS   = log2_int(ROW_SIZE)
# INDEX_BITS is the number of bits to
# select a cache line
INDEX_BITS     = log2_int(NUM_LINES)
# SET_SIZE_BITS is the log base 2 of
# the set size
SET_SIZE_BITS  = LINE_OFF_BITS + INDEX_BITS
# TAG_BITS is the number of bits of
# the tag part of the address
TAG_BITS       = REAL_ADDR_BITS - SET_SIZE_BITS
# WAY_BITS is the number of bits to
# select a way
WAY_BITS       = log2_int(NUM_WAYS)
TAG_RAM_WIDTH  = TAG_BITS * NUM_WAYS
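# Worked example (an illustrative sketch, not part of the original file):
# with the default parameters above and a 64-bit wishbone data bus, the
# derived geometry works out as below.  Note that the ASCII layout diagram
# further down quotes TAG_BITS as 53, presumably counted against a full
# 64-bit address (64 - SET_SIZE_BITS); with REAL_ADDR_BITS = 56 the stored
# tag is 45 bits.
if (WB_DATA_BITS, LINE_SIZE, NUM_LINES, NUM_WAYS, REAL_ADDR_BITS) == \
        (64, 64, 32, 4, 56):
    assert (ROW_SIZE, ROW_SIZE_BITS) == (8, 64)   # 8 bytes per BRAM row
    assert (ROW_PER_LINE, BRAM_ROWS) == (8, 256)  # 8 rows/line, 32 lines
    assert INSN_PER_ROW == 2                      # two 32-bit insns per row
    assert (INSN_BITS, ROW_BITS, ROW_LINE_BITS) == (1, 8, 3)
    assert (LINE_OFF_BITS, ROW_OFF_BITS, INDEX_BITS) == (6, 3, 5)
    assert (SET_SIZE_BITS, TAG_BITS, WAY_BITS) == (11, 45, 2)
    assert TAG_RAM_WIDTH == 180                   # 45 * 4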
# constant TLB_BITS : natural := log2(TLB_SIZE);
# constant TLB_EA_TAG_BITS : natural := 64 - (TLB_LG_PGSZ + TLB_BITS);
# constant TLB_PTE_BITS : natural := 64;
TLB_BITS        = log2_int(TLB_SIZE)
TLB_EA_TAG_BITS = 64 - (TLB_LG_PGSZ + TLB_BITS)
TLB_PTE_BITS    = 64
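# Worked numbers for the defaults above (illustrative, not in the original
# file): a 64-entry direct-mapped TLB with 4K pages gives
#   TLB_BITS        = log2(64)      = 6
#   TLB_EA_TAG_BITS = 64 - (12 + 6) = 46
#   TLB_PTE_BITS    =                 64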
# architecture rtl of icache is
# constant ROW_SIZE_BITS : natural := ROW_SIZE*8;
# -- ROW_PER_LINE is the number of row (wishbone
# -- transactions) in a line
# constant ROW_PER_LINE : natural := LINE_SIZE / ROW_SIZE;
# -- BRAM_ROWS is the number of rows in BRAM
# -- needed to represent the full icache
# constant BRAM_ROWS : natural := NUM_LINES * ROW_PER_LINE;
# -- INSN_PER_ROW is the number of 32bit instructions per BRAM row
# constant INSN_PER_ROW : natural := ROW_SIZE_BITS / 32;
# -- Bit fields counts in the address
# -- INSN_BITS is the number of bits to select
# -- an instruction in a row
# constant INSN_BITS : natural := log2(INSN_PER_ROW);
# -- ROW_BITS is the number of bits to select a row
# constant ROW_BITS : natural := log2(BRAM_ROWS);
# -- ROW_LINEBITS is the number of bits to
# -- select a row within a line
# constant ROW_LINEBITS : natural := log2(ROW_PER_LINE);
# -- LINE_OFF_BITS is the number of bits for the offset
# -- in a cache line
# constant LINE_OFF_BITS : natural := log2(LINE_SIZE);
# -- ROW_OFF_BITS is the number of bits for the offset in a row
# constant ROW_OFF_BITS : natural := log2(ROW_SIZE);
# -- INDEX_BITS is the number of bits to select a cache line
# constant INDEX_BITS : natural := log2(NUM_LINES);
# -- SET_SIZE_BITS is the log base 2 of the set size
# constant SET_SIZE_BITS : natural := LINE_OFF_BITS + INDEX_BITS;
# -- TAG_BITS is the number of bits of the tag part of the address
# constant TAG_BITS : natural := REAL_ADDR_BITS - SET_SIZE_BITS;
# -- WAY_BITS is the number of bits to select a way
# constant WAY_BITS : natural := log2(NUM_WAYS);
#
# -- Example of layout for 32 lines of 64 bytes:
# --
# --  ..  tag    |index|  line  |
# --  ..         |   row   |    |
# --  ..         |     |   | |00| zero          (2)
# --  ..         |     |   |-|  | INSN_BITS     (1)
# --  ..         |     |---|    | ROW_LINEBITS  (3)
# --  ..         |     |--- - --| LINE_OFF_BITS (6)
# --  ..         |         |- --| ROW_OFF_BITS  (3)
# --  ..         |----- ---|    | ROW_BITS      (8)
# --  ..         |-----|        | INDEX_BITS    (5)
# --  .. --------|              | TAG_BITS      (53)
#
# Example of layout for 32 lines of 64 bytes:
#
#  ..  tag    |index|  line  |
#  ..         |   row   |    |
#  ..         |     |   | |00| zero          (2)
#  ..         |     |   |-|  | INSN_BITS     (1)
#  ..         |     |---|    | ROW_LINEBITS  (3)
#  ..         |     |--- - --| LINE_OFF_BITS (6)
#  ..         |         |- --| ROW_OFF_BITS  (3)
#  ..         |----- ---|    | ROW_BITS      (8)
#  ..         |-----|        | INDEX_BITS    (5)
#  .. --------|              | TAG_BITS      (53)
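# A small self-contained sketch (not part of the original module) showing
# how an effective address splits into the fields drawn above, using plain
# integers and the default geometry.  The function name and the sample
# value are made up purely for illustration.
def _demo_address_split(nia=0x1F44):
    insn  = (nia >> 2) & ((1 << INSN_BITS) - 1)               # insn in row
    row   = (nia >> ROW_OFF_BITS) & ((1 << ROW_BITS) - 1)     # BRAM row
    index = (nia >> LINE_OFF_BITS) & ((1 << INDEX_BITS) - 1)  # cache line
    tag   = (nia >> SET_SIZE_BITS) & ((1 << TAG_BITS) - 1)    # stored tag
    return insn, row, index, tag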
# subtype row_t is integer range 0 to BRAM_ROWS-1;
# subtype index_t is integer range 0 to NUM_LINES-1;
# subtype way_t is integer range 0 to NUM_WAYS-1;
# subtype row_in_line_t is unsigned(ROW_LINEBITS-1 downto 0);
#
# -- The cache data BRAM organized as described above for each way
# subtype cache_row_t is std_ulogic_vector(ROW_SIZE_BITS-1 downto 0);
#
# -- The cache tags LUTRAM has a row per set. Vivado is a pain and will
# -- not handle a clean (commented) definition of the cache tags as a 3d
# -- memory. For now, work around it by putting all the tags
# subtype cache_tag_t is std_logic_vector(TAG_BITS-1 downto 0);
# type cache_tags_set_t is array(way_t) of cache_tag_t;
# type cache_tags_array_t is array(index_t) of cache_tags_set_t;
# constant TAG_RAM_WIDTH : natural := TAG_BITS * NUM_WAYS;
# subtype cache_tags_set_t is std_logic_vector(TAG_RAM_WIDTH-1 downto 0);
# type cache_tags_array_t is array(index_t) of cache_tags_set_t;
def CacheTagArray():
    return Array(Signal(TAG_RAM_WIDTH) for x in range(NUM_LINES))

# -- The cache valid bits
# subtype cache_way_valids_t is std_ulogic_vector(NUM_WAYS-1 downto 0);
# type cache_valids_t is array(index_t) of cache_way_valids_t;
# type row_per_line_valid_t is array(0 to ROW_PER_LINE - 1) of std_ulogic;
def CacheValidBitsArray():
    return Array(Signal(NUM_WAYS) for x in range(NUM_LINES))

def RowPerLineValidArray():
    return Array(Signal() for x in range(ROW_PER_LINE))
# attribute ram_style : string;
# attribute ram_style of cache_tags : signal is "distributed";
# TODO: to be passed to nmigen as ram attributes
# attribute ram_style : string;
# attribute ram_style of cache_tags : signal is "distributed";

# subtype tlb_index_t is integer range 0 to TLB_SIZE - 1;
# type tlb_valids_t is array(tlb_index_t) of std_ulogic;
# subtype tlb_tag_t is std_ulogic_vector(TLB_EA_TAG_BITS - 1 downto 0);
# type tlb_tags_t is array(tlb_index_t) of tlb_tag_t;
# subtype tlb_pte_t is std_ulogic_vector(TLB_PTE_BITS - 1 downto 0);
# type tlb_ptes_t is array(tlb_index_t) of tlb_pte_t;
def TLBValidBitsArray():
    return Array(Signal() for x in range(TLB_SIZE))

def TLBTagArray():
    return Array(Signal(TLB_EA_TAG_BITS) for x in range(TLB_SIZE))

def TLBPTEArray():
    return Array(Signal(TLB_PTE_BITS) for x in range(TLB_SIZE))

# -- Cache RAM interface
# type cache_ram_out_t is array(way_t) of cache_row_t;
# Cache RAM interface
def CacheRamOut():
    return Array(Signal(ROW_SIZE_BITS) for x in range(NUM_WAYS))

# -- PLRU output interface
# type plru_out_t is array(index_t) of
#     std_ulogic_vector(WAY_BITS-1 downto 0);
# PLRU output interface
def PLRUOut():
    return Array(Signal(WAY_BITS) for x in range(NUM_LINES))
# assert LINE_SIZE mod ROW_SIZE = 0;
# assert ispow2(LINE_SIZE) report "LINE_SIZE not power of 2"
# assert ispow2(NUM_LINES) report "NUM_LINES not power of 2"
# assert ispow2(ROW_PER_LINE) report "ROW_PER_LINE not power of 2"
# assert ispow2(INSN_PER_ROW) report "INSN_PER_ROW not power of 2"
# assert (ROW_BITS = INDEX_BITS + ROW_LINEBITS)
#     report "geometry bits don't add up" severity FAILURE;
# assert (LINE_OFF_BITS = ROW_OFF_BITS + ROW_LINEBITS)
#     report "geometry bits don't add up" severity FAILURE;
# assert (REAL_ADDR_BITS = TAG_BITS + INDEX_BITS + LINE_OFF_BITS)
#     report "geometry bits don't add up" severity FAILURE;
# assert (REAL_ADDR_BITS = TAG_BITS + ROW_BITS + ROW_OFF_BITS)
#     report "geometry bits don't add up" severity FAILURE;
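# A Python rendering of the VHDL geometry assertions above (a sketch, not
# in the original file); the _ispow2() helper is made up for illustration.
# These run at import time and mirror the "geometry bits don't add up"
# checks for whatever parameters are chosen.
def _ispow2(n):
    return n != 0 and (n & (n - 1)) == 0

assert LINE_SIZE % ROW_SIZE == 0, "LINE_SIZE not multiple of ROW_SIZE"
assert _ispow2(LINE_SIZE), "LINE_SIZE not power of 2"
assert _ispow2(NUM_LINES), "NUM_LINES not power of 2"
assert _ispow2(ROW_PER_LINE), "ROW_PER_LINE not power of 2"
assert _ispow2(INSN_PER_ROW), "INSN_PER_ROW not power of 2"
assert ROW_BITS == INDEX_BITS + ROW_LINE_BITS, "geometry bits don't add up"
assert LINE_OFF_BITS == ROW_OFF_BITS + ROW_LINE_BITS, \
       "geometry bits don't add up"
assert REAL_ADDR_BITS == TAG_BITS + INDEX_BITS + LINE_OFF_BITS, \
       "geometry bits don't add up"
assert REAL_ADDR_BITS == TAG_BITS + ROW_BITS + ROW_OFF_BITS, \
       "geometry bits don't add up"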
# sim_debug: if SIM generate
# report "ROW_SIZE      = " & natural'image(ROW_SIZE);
# report "ROW_PER_LINE  = " & natural'image(ROW_PER_LINE);
# report "BRAM_ROWS     = " & natural'image(BRAM_ROWS);
# report "INSN_PER_ROW  = " & natural'image(INSN_PER_ROW);
# report "INSN_BITS     = " & natural'image(INSN_BITS);
# report "ROW_BITS      = " & natural'image(ROW_BITS);
# report "ROW_LINEBITS  = " & natural'image(ROW_LINEBITS);
# report "LINE_OFF_BITS = " & natural'image(LINE_OFF_BITS);
# report "ROW_OFF_BITS  = " & natural'image(ROW_OFF_BITS);
# report "INDEX_BITS    = " & natural'image(INDEX_BITS);
# report "TAG_BITS      = " & natural'image(TAG_BITS);
# report "WAY_BITS      = " & natural'image(WAY_BITS);
# Cache reload state machine
@unique
class State(Enum):
    IDLE     = 0
    CLR_TAG  = 1
    WAIT_ACK = 2

# type reg_internal_t is record
#     -- Cache hit state (Latches for 1 cycle BRAM access)
#     hit_nia   : std_ulogic_vector(63 downto 0);
#     hit_smark : std_ulogic;
#     hit_valid : std_ulogic;
#
#     -- Cache miss state (reload state machine)
#     wb          : wishbone_master_out;
#     store_index : index_t;
#     store_tag   : cache_tag_t;
#     store_valid : std_ulogic;
#     end_row_ix  : row_in_line_t;
#     rows_valid  : row_per_line_valid_t;
#
#     fetch_failed : std_ulogic;
class RegInternal(RecordObject):
    def __init__(self):
        super().__init__()
        # Cache hit state (Latches for 1 cycle BRAM access)
        self.hit_way      = Signal(NUM_WAYS)
        self.hit_nia      = Signal(64)
        self.hit_smark    = Signal()
        self.hit_valid    = Signal()

        # Cache miss state (reload state machine)
        self.state        = Signal(State)
        self.wb           = WBMasterOut()
        self.store_way    = Signal(NUM_WAYS)
        self.store_index  = Signal(NUM_LINES)
        self.store_row    = Signal(BRAM_ROWS)
        self.store_tag    = Signal(TAG_BITS)
        self.store_valid  = Signal()
        self.end_row_ix   = Signal(ROW_LINE_BITS)
        self.rows_valid   = RowPerLineValidArray()

        # TLB miss state
        self.fetch_failed = Signal()
# -- 64 bit direct mapped icache. All instructions are 4B aligned.
#
#     SIM : boolean := false;
#     -- Line size in bytes
#     LINE_SIZE : positive := 64;
#     -- BRAM organisation: We never access more
#     -- than wishbone_data_bits
#     -- at a time so to save resources we make the
#     -- array only that wide,
#     -- and use consecutive indices for to make a cache "line"
#
#     -- ROW_SIZE is the width in bytes of the BRAM (based on WB,
#     ROW_SIZE : positive := wishbone_data_bits / 8;
#     -- Number of lines in a set
#     NUM_LINES : positive := 32;
#     NUM_WAYS : positive := 4;
#     -- L1 ITLB number of entries (direct mapped)
#     TLB_SIZE : positive := 64;
#     -- L1 ITLB log_2(page_size)
#     TLB_LG_PGSZ : positive := 12;
#     -- Number of real address bits that we store
#     REAL_ADDR_BITS : positive := 56;
#     -- Non-zero to enable log data collection
#     LOG_LENGTH : natural := 0
#
#     clk : in std_ulogic;
#     rst : in std_ulogic;
#
#     i_in  : in Fetch1ToIcacheType;
#     i_out : out IcacheToDecode1Type;
#
#     m_in : in MmuToIcacheType;
#
#     stall_in  : in std_ulogic;
#     stall_out : out std_ulogic;
#     flush_in  : in std_ulogic;
#     inval_in  : in std_ulogic;
#
#     wishbone_out : out wishbone_master_out;
#     wishbone_in  : in wishbone_slave_out;
#
#     log_out : out std_ulogic_vector(53 downto 0)
# 64 bit direct mapped icache. All instructions are 4B aligned.
class ICache(Elaboratable):
    """64 bit direct mapped icache. All instructions are 4B aligned."""
    def __init__(self):
        self.i_in      = Fetch1ToICacheType()
        self.i_out     = ICacheToDecode1Type()

        self.m_in      = MMUToICacheType()

        self.stall_in  = Signal()
        self.stall_out = Signal()
        self.flush_in  = Signal()
        self.inval_in  = Signal()

        self.wb_out    = WBMasterOut()
        self.wb_in     = WBSlaveOut()

        self.log_out   = Signal(54)
    # -- Return the cache line index (tag index) for an address
    # function get_index(addr: std_ulogic_vector(63 downto 0))
    #     return to_integer(unsigned(
    #         addr(SET_SIZE_BITS - 1 downto LINE_OFF_BITS)
    # Return the cache line index (tag index) for an address
    def get_index(addr):
        return addr[LINE_OFF_BITS:SET_SIZE_BITS]

    # -- Return the cache row index (data memory) for an address
    # function get_row(addr: std_ulogic_vector(63 downto 0))
    #     return to_integer(unsigned(
    #         addr(SET_SIZE_BITS - 1 downto ROW_OFF_BITS)
    # Return the cache row index (data memory) for an address
    def get_row(addr):
        return addr[ROW_OFF_BITS:SET_SIZE_BITS]

    # -- Return the index of a row within a line
    # function get_row_of_line(row: row_t) return row_in_line_t is
    #     variable row_v : unsigned(ROW_BITS-1 downto 0);
    #     row_v := to_unsigned(row, ROW_BITS);
    #     return row_v(ROW_LINEBITS-1 downto 0);
    # Return the index of a row within a line
    def get_row_of_line(row):
        return row[:ROW_LINE_BITS]
    # -- Returns whether this is the last row of a line
    # function is_last_row_addr(addr: wishbone_addr_type;
    #     last: row_in_line_t)
    #     return unsigned(addr(LINE_OFF_BITS-1 downto ROW_OFF_BITS)) = last;
    # Returns whether this is the last row of a line
    def is_last_row_addr(addr, last):
        return addr[ROW_OFF_BITS:LINE_OFF_BITS] == last

    # -- Returns whether this is the last row of a line
    # function is_last_row(row: row_t;
    #     last: row_in_line_t) return boolean is
    #     return get_row_of_line(row) = last;
    # Returns whether this is the last row of a line
    def is_last_row(row, last):
        return get_row_of_line(row) == last
    # -- Return the address of the next row in the current cache line
    # function next_row_addr(addr: wishbone_addr_type)
    #     return std_ulogic_vector is
    #     variable row_idx : std_ulogic_vector(ROW_LINEBITS-1 downto 0);
    #     variable result  : wishbone_addr_type;
    #     -- Is there no simpler way in VHDL to generate that 3 bits adder ?
    #     row_idx := addr(LINE_OFF_BITS-1 downto ROW_OFF_BITS);
    #     row_idx := std_ulogic_vector(unsigned(row_idx) + 1);
    #     result(LINE_OFF_BITS-1 downto ROW_OFF_BITS) := row_idx;
    # Return the address of the next row in the current cache line
    def next_row_addr(addr):
        # TODO no idea what's going on here, looks like double assignments
        # overriding earlier assignments ??? Help please!
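        # A sketch (an assumption, not the original author's code) of what
        # the VHDL above does: only the row-within-line field of the
        # address is incremented, so only a ROW_LINE_BITS-wide adder is
        # generated; the byte offset and upper address bits pass through.
        row_idx = addr[ROW_OFF_BITS:LINE_OFF_BITS] + 1
        return Cat(addr[:ROW_OFF_BITS], row_idx[:ROW_LINE_BITS],
                   addr[LINE_OFF_BITS:])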
    # -- Return the next row in the current cache line. We use a dedicated
    # -- function in order to limit the size of the generated adder to be
    # -- only the bits within a cache line (3 bits with default settings)
    # function next_row(row: row_t) return row_t is
    #     variable row_v   : std_ulogic_vector(ROW_BITS-1 downto 0);
    #     variable row_idx : std_ulogic_vector(ROW_LINEBITS-1 downto 0);
    #     variable result  : std_ulogic_vector(ROW_BITS-1 downto 0);
    #     row_v := std_ulogic_vector(to_unsigned(row, ROW_BITS));
    #     row_idx := row_v(ROW_LINEBITS-1 downto 0);
    #     row_v(ROW_LINEBITS-1 downto 0) :=
    #         std_ulogic_vector(unsigned(row_idx) + 1);
    #     return to_integer(unsigned(row_v));
    # Return the next row in the current cache line. We use a dedicated
    # function in order to limit the size of the generated adder to be
    # only the bits within a cache line (3 bits with default settings)
    def next_row(row):
        # TODO no idea what's going on here, looks like double assignments
        # overriding earlier assignments ??? Help please!
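        # A sketch (an assumption, not the original author's code)
        # mirroring the VHDL above: increment the row-within-line bits of
        # the row number and leave the upper (line-select) bits untouched.
        row_idx = row[:ROW_LINE_BITS] + 1
        return Cat(row_idx[:ROW_LINE_BITS], row[ROW_LINE_BITS:])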
    # -- Read the instruction word for the given address in the
    # -- current cache row
    # function read_insn_word(addr: std_ulogic_vector(63 downto 0);
    #     data: cache_row_t) return std_ulogic_vector is
    #     variable word: integer range 0 to INSN_PER_ROW-1;
    #     word := to_integer(unsigned(addr(INSN_BITS+2-1 downto 2)));
    #     return data(31+word*32 downto word*32);
    # Read the instruction word for the given address
    # in the current cache row
    def read_insn_word(addr, data):
        word = addr[2:INSN_BITS+2]
        return data.word_select(word, 32)
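    # Example (illustrative, not in the original): with the default geometry
    # INSN_PER_ROW == 2 and INSN_BITS == 1, so address bit 2 selects which
    # 32-bit half of the 64-bit BRAM row is returned; an nia ending in
    # 0b100 reads data[32:64], one ending in 0b000 reads data[0:32].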
    # -- Get the tag value from the address
    # function get_tag(
    #     addr: std_ulogic_vector(REAL_ADDR_BITS - 1 downto 0)
    # ) return cache_tag_t is
    #     return addr(REAL_ADDR_BITS - 1 downto SET_SIZE_BITS);
    # Get the tag value from the address
    def get_tag(addr):
        return addr[SET_SIZE_BITS:REAL_ADDR_BITS]

    # -- Read a tag from a tag memory row
    # function read_tag(way: way_t; tagset: cache_tags_set_t)
    #     return cache_tag_t is
    #     return tagset((way+1) * TAG_BITS - 1 downto way * TAG_BITS);
    # Read a tag from a tag memory row
    def read_tag(way, tagset):
        return tagset[way * TAG_BITS:(way + 1) * TAG_BITS]
    # -- Write a tag to tag memory row
    # procedure write_tag(way: in way_t;
    #     tagset: inout cache_tags_set_t; tag: cache_tag_t) is
    #     tagset((way+1) * TAG_BITS - 1 downto way * TAG_BITS) := tag;
    # Write a tag to tag memory row
    def write_tag(way, tagset, tag):
        tagset[way * TAG_BITS:(way + 1) * TAG_BITS] = tag
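    # Illustrative note (not in the original): the per-set tag row is one
    # flat TAG_RAM_WIDTH-wide vector with way 0's tag in the lowest
    # TAG_BITS, way 1's tag in the next TAG_BITS, and so on; read_tag() and
    # write_tag() above simply slice that vector.  In plain integers
    # (pack_tags is a hypothetical helper, shown only for illustration):
    #
    #   def pack_tags(tags):           # tags: one integer per way
    #       row = 0
    #       for way, t in enumerate(tags):
    #           row |= (t & ((1 << TAG_BITS) - 1)) << (way * TAG_BITS)
    #       return row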
    # -- Simple hash for direct-mapped TLB index
    # function hash_ea(addr: std_ulogic_vector(63 downto 0))
    #     return tlb_index_t is
    #     variable hash : std_ulogic_vector(TLB_BITS - 1 downto 0);
    #     hash := addr(TLB_LG_PGSZ + TLB_BITS - 1 downto TLB_LG_PGSZ)
    #             xor addr(TLB_LG_PGSZ + 2 * TLB_BITS - 1 downto
    #                      TLB_LG_PGSZ + TLB_BITS)
    #             xor addr(TLB_LG_PGSZ + 3 * TLB_BITS - 1 downto
    #                      TLB_LG_PGSZ + 2 * TLB_BITS);
    #     return to_integer(unsigned(hash));
    # Simple hash for direct-mapped TLB index
    def hash_ea(addr):
        hsh = (addr[TLB_LG_PGSZ:TLB_LG_PGSZ + TLB_BITS] ^
               addr[TLB_LG_PGSZ + TLB_BITS:TLB_LG_PGSZ + 2 * TLB_BITS] ^
               addr[TLB_LG_PGSZ + 2 * TLB_BITS:TLB_LG_PGSZ + 3 * TLB_BITS])
        return hsh
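    # Equivalent plain-Python sketch (illustrative, not in the original):
    # the address above the page offset is folded down to TLB_BITS by
    # XOR-ing three consecutive TLB_BITS-wide fields (hash_ea_int is a
    # hypothetical helper, shown only for illustration):
    #
    #   def hash_ea_int(ea):
    #       mask = (1 << TLB_BITS) - 1
    #       return (((ea >> TLB_LG_PGSZ) ^
    #                (ea >> (TLB_LG_PGSZ + TLB_BITS)) ^
    #                (ea >> (TLB_LG_PGSZ + 2 * TLB_BITS))) & mask)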
    # -- Generate a cache RAM for each way
    # rams: for i in 0 to NUM_WAYS-1 generate
    #     signal do_read  : std_ulogic;
    #     signal do_write : std_ulogic;
    #     signal rd_addr  : std_ulogic_vector(ROW_BITS-1 downto 0);
    #     signal wr_addr  : std_ulogic_vector(ROW_BITS-1 downto 0);
    #     signal dout     : cache_row_t;
    #     signal wr_sel   : std_ulogic_vector(ROW_SIZE-1 downto 0);
    #     way: entity work.cache_ram
    #         ROW_BITS => ROW_BITS,
    #         WIDTH => ROW_SIZE_BITS
    #         rd_addr => rd_addr,
    #         wr_addr => wr_addr,
    #         wr_data => wishbone_in.dat
    #     do_read <= not (stall_in or use_previous);
    #     if wishbone_in.ack = '1' and replace_way = i then
    #     cache_out(i) <= dout;
    #         std_ulogic_vector(to_unsigned(req_row, ROW_BITS));
    #         std_ulogic_vector(to_unsigned(r.store_row, ROW_BITS));
    #     for i in 0 to ROW_SIZE-1 loop
    #         wr_sel(i) <= do_write;
    # Generate a cache RAM for each way
    def rams(self, m):
        comb = m.d.comb

        do_read  = Signal()
        do_write = Signal()
        rd_addr  = Signal(ROW_BITS)
        wr_addr  = Signal(ROW_BITS)
        _d_out   = Signal(ROW_SIZE_BITS)
        wr_sel   = Signal(ROW_SIZE)

        for i in range(NUM_WAYS):
            way = CacheRam(ROW_BITS, ROW_SIZE_BITS)
            comb += way.rd_en.eq(do_read)
            comb += way.rd_addr.eq(rd_addr)
            comb += _d_out.eq(way.rd_data)
            comb += way.wr_sel.eq(wr_sel)
            comb += way.wr_addr.eq(wr_addr)
            comb += way.wr_data.eq(wb_in.dat)

            comb += do_read.eq(~(stall_in | use_previous))
            comb += do_write.eq(0)

            with m.If(wb_in.ack & (replace_way == i)):
                comb += do_write.eq(1)

            comb += cache_out[i].eq(_d_out)
            comb += rd_addr.eq(req_row)
            comb += wr_addr.eq(r.store_row)
            for j in range(ROW_SIZE):
                comb += wr_sel[j].eq(do_write)
    # maybe_plrus: if NUM_WAYS > 1 generate
    #     plrus: for i in 0 to NUM_LINES-1 generate
    #         signal plru_acc    : std_ulogic_vector(WAY_BITS-1 downto 0);
    #         signal plru_acc_en : std_ulogic;
    #         signal plru_out    : std_ulogic_vector(WAY_BITS-1 downto 0);
    #         plru : entity work.plru
    #             acc_en => plru_acc_en,
    #         if get_index(r.hit_nia) = i then
    #             plru_acc_en <= r.hit_valid;
    #             plru_acc_en <= '0';
    #             std_ulogic_vector(to_unsigned(r.hit_way, WAY_BITS));
    #         plru_victim(i) <= plru_out;
    def maybe_plrus(self, m):
        comb = m.d.comb

        with m.If(NUM_WAYS > 1):
            for i in range(NUM_LINES):
                plru_acc    = Signal(WAY_BITS)
                plru_acc_en = Signal()
                plru_out    = Signal(WAY_BITS)
                plru        = PLRU(WAY_BITS)
                comb += plru.acc.eq(plru_acc)
                comb += plru.acc_en.eq(plru_acc_en)
                comb += plru.lru.eq(plru_out)

                with m.If(get_index(r.hit_nia) == i):
                    comb += plru.acc_en.eq(r.hit_valid)
                with m.Else():
                    comb += plru.acc_en.eq(0)

                comb += plru.acc.eq(r.hit_way)
                comb += plru_victim[i].eq(plru.lru)
    # -- TLB hit detection and real address generation
    # itlb_lookup : process(all)
    #     variable pte  : tlb_pte_t;
    #     variable ttag : tlb_tag_t;
    #     tlb_req_index <= hash_ea(i_in.nia);
    #     pte := itlb_ptes(tlb_req_index);
    #     ttag := itlb_tags(tlb_req_index);
    #     if i_in.virt_mode = '1' then
    #         real_addr <= pte(REAL_ADDR_BITS - 1 downto TLB_LG_PGSZ) &
    #                      i_in.nia(TLB_LG_PGSZ - 1 downto 0);
    #         if ttag = i_in.nia(63 downto TLB_LG_PGSZ + TLB_BITS) then
    #             ra_valid <= itlb_valids(tlb_req_index);
    #         eaa_priv <= pte(3);
    #         real_addr <= i_in.nia(REAL_ADDR_BITS - 1 downto 0);
    #     -- no IAMR, so no KUEP support for now
    #     priv_fault <= eaa_priv and not i_in.priv_mode;
    #     access_ok <= ra_valid and not priv_fault;
    # TLB hit detection and real address generation
    def itlb_lookup(self, m):
        comb = m.d.comb

        comb += tlb_req_index.eq(hash_ea(i_in.nia))
        comb += pte.eq(itlb_ptes[tlb_req_index])
        comb += ttag.eq(itlb_tags[tlb_req_index])

        with m.If(i_in.virt_mode):
            comb += real_addr.eq(Cat(i_in.nia[:TLB_LG_PGSZ],
                                     pte[TLB_LG_PGSZ:REAL_ADDR_BITS]))

            with m.If(ttag == i_in.nia[TLB_LG_PGSZ + TLB_BITS:64]):
                comb += ra_valid.eq(itlb_valid_bits[tlb_req_index])
            with m.Else():
                comb += ra_valid.eq(0)

        with m.Else():
            comb += real_addr.eq(i_in.nia[:REAL_ADDR_BITS])
            comb += ra_valid.eq(1)
            comb += eaa_priv.eq(1)

        # No IAMR, so no KUEP support for now
        comb += priv_fault.eq(eaa_priv & ~i_in.priv_mode)
        comb += access_ok.eq(ra_valid & ~priv_fault)
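        # Illustrative note (not in the original): with 4K pages
        # (TLB_LG_PGSZ = 12) the low 12 bits of the nia pass straight
        # through as the page offset, while bits 12..REAL_ADDR_BITS-1 of
        # the real address come from the cached PTE; in real mode the nia
        # is simply truncated to REAL_ADDR_BITS.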
    # itlb_update: process(clk)
    #     variable wr_index : tlb_index_t;
    #     if rising_edge(clk) then
    #         wr_index := hash_ea(m_in.addr);
    #         (m_in.tlbie = '1' and m_in.doall = '1') then
    #             -- clear all valid bits
    #             for i in tlb_index_t loop
    #                 itlb_valids(i) <= '0';
    #         elsif m_in.tlbie = '1' then
    #             -- clear entry regardless of hit or miss
    #             itlb_valids(wr_index) <= '0';
    #         elsif m_in.tlbld = '1' then
    #             itlb_tags(wr_index) <=
    #                 m_in.addr(63 downto TLB_LG_PGSZ + TLB_BITS);
    #             itlb_ptes(wr_index) <= m_in.pte;
    #             itlb_valids(wr_index) <= '1';
    def itlb_update(self, m):
        sync = m.d.sync

        wr_index = Signal(TLB_SIZE)
        sync += wr_index.eq(hash_ea(m_in.addr))

        with m.If('''TODO rst in nmigen''' | (m_in.tlbie & m_in.doall)):
            # Clear all valid bits
            for i in range(TLB_SIZE):
                sync += itlb_valid_bits[i].eq(0)

        with m.Elif(m_in.tlbie):
            # Clear entry regardless of hit or miss
            sync += itlb_valid_bits[wr_index].eq(0)

        with m.Elif(m_in.tlbld):
            sync += itlb_tags[wr_index].eq(
                     m_in.addr[TLB_LG_PGSZ + TLB_BITS:64])
            sync += itlb_ptes[wr_index].eq(m_in.pte)
            sync += itlb_valid_bits[wr_index].eq(1)
    # -- Cache hit detection, output to fetch2 and other misc logic
    # icache_comb : process(all)
    # Cache hit detection, output to fetch2 and other misc logic
    def icache_comb(self, m):
        # variable is_hit  : std_ulogic;
        # variable hit_way : way_t;
        comb = m.d.comb

        is_hit  = Signal()
        hit_way = Signal(NUM_WAYS)

        # -- i_in.sequential means that i_in.nia this cycle
        # -- is 4 more than last cycle. If we read more
        # -- than 32 bits at a time, had a cache hit last
        # -- cycle, and we don't want the first 32-bit chunk
        # -- then we can keep the data we read last cycle
        # -- and just use that.
        # if unsigned(i_in.nia(INSN_BITS+2-1 downto 2)) /= 0 then
        #     use_previous <= i_in.sequential and r.hit_valid;
        #     use_previous <= '0';
        # i_in.sequential means that i_in.nia this cycle is 4 more than
        # last cycle.  If we read more than 32 bits at a time, had a
        # cache hit last cycle, and we don't want the first 32-bit chunk
        # then we can keep the data we read last cycle and just use that.
        with m.If(i_in.nia[2:INSN_BITS+2] != 0):
            comb += use_previous.eq(i_in.sequential & r.hit_valid)
        with m.Else():
            comb += use_previous.eq(0)
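        # Illustrative note (not in the original): with INSN_PER_ROW == 2
        # the test above is simply "nia bit 2 is set", i.e. the requested
        # instruction is the second word of a row; on a sequential fetch
        # that row was already read last cycle, so its BRAM output can be
        # reused instead of issuing another read.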
        # -- Extract line, row and tag from request
        # req_index <= get_index(i_in.nia);
        # req_row <= get_row(i_in.nia);
        # req_tag <= get_tag(real_addr);
        # Extract line, row and tag from request
        comb += req_index.eq(get_index(i_in.nia))
        comb += req_row.eq(get_row(i_in.nia))
        comb += req_tag.eq(get_tag(real_addr))

        # -- Calculate address of beginning of cache row, will be
        # -- used for cache miss processing if needed
        #     (63 downto REAL_ADDR_BITS => '0') &
        #     real_addr(REAL_ADDR_BITS - 1 downto ROW_OFF_BITS) &
        #     (ROW_OFF_BITS-1 downto 0 => '0');
        # Calculate address of beginning of cache row, will be
        # used for cache miss processing if needed
        comb += req_laddr.eq(Cat(
                 Const(0, ROW_OFF_BITS),
                 real_addr[ROW_OFF_BITS:REAL_ADDR_BITS],
                 Const(0, 64 - REAL_ADDR_BITS)))
861 # for i in way_t loop
862 # if i_in.req = '1' and
863 # (cache_valids(req_index)(i) = '1' or
864 # (r.state = WAIT_ACK and
865 # req_index = r.store_index and
866 # i = r.store_way and
867 # r.rows_valid(req_row mod ROW_PER_LINE) = '1')) then
868 # if read_tag(i, cache_tags(req_index)) = req_tag then
874 # Test if pending request is a hit on any way
875 for i
in range(NUM_WAYS
):
877 (cache_valid_bits
[req_index
][i
] |
878 ((r
.state
== State
.WAIT_ACK
)
879 & (req_index
== r
.store_index
)
881 & r
.rows_valid
[req_row
% ROW_PER_LINE
]))):
882 with m
.If(read_tag(i
, cahce_tags
[req_index
]) == req_tag
):
883 comb
+= hit_way
.eq(i
)
886 # -- Generate the "hit" and "miss" signals
887 # -- for the synchronous blocks
888 # if i_in.req = '1' and access_ok = '1' and flush_in = '0'
890 # req_is_hit <= is_hit;
891 # req_is_miss <= not is_hit;
894 # req_is_miss <= '0';
896 # req_hit_way <= hit_way;
897 # Generate the "hit" and "miss" signals
898 # for the synchronous blocks
899 with m
.If(i_in
.rq
& access_ok
& ~flush_in
):
900 comb
+= req_is_hit
.eq(is_hit
)
901 comb
+= req_is_miss
.eq(~is_hit
)
904 comb
+= req_is_hit
.eq(0)
905 comb
+= req_is_miss
.eq(0)
907 # -- The way to replace on a miss
908 # if r.state = CLR_TAG then
910 # to_integer(unsigned(plru_victim(r.store_index)));
912 # replace_way <= r.store_way;
914 # The way to replace on a miss
915 with m
.If(r
.state
== State
.CLR_TAG
):
916 comb
+= replace_way
.eq(plru_victim
[r
.store_index
])
919 comb
+= replace_way
.eq(r
.store_way
)
        # -- Output instruction from current cache row
        #
        # -- Note: This is a mild violation of our design principle of
        # -- having pipeline stages output from a clean latch. In this
        # -- case we output the result of a mux. The alternative would
        # -- be output an entire row which I prefer not to do just yet
        # -- as it would force fetch2 to know about some of the cache
        # -- geometry information.
        # i_out.insn <= read_insn_word(r.hit_nia, cache_out(r.hit_way));
        # i_out.valid <= r.hit_valid;
        # i_out.nia <= r.hit_nia;
        # i_out.stop_mark <= r.hit_smark;
        # i_out.fetch_failed <= r.fetch_failed;
        # Output instruction from current cache row
        #
        # Note: This is a mild violation of our design principle of
        # having pipeline stages output from a clean latch.  In this
        # case we output the result of a mux.  The alternative would be
        # to output an entire row, which I prefer not to do just yet
        # as it would force fetch2 to know about some of the cache
        # geometry information.
        comb += i_out.insn.eq(
                 read_insn_word(r.hit_nia, cache_out[r.hit_way]))
        comb += i_out.valid.eq(r.hit_valid)
        comb += i_out.nia.eq(r.hit_nia)
        comb += i_out.stop_mark.eq(r.hit_smark)
        comb += i_out.fetch_failed.eq(r.fetch_failed)

        # -- Stall fetch1 if we have a miss on cache or TLB
        # -- or a protection fault
        # stall_out <= not (is_hit and access_ok);
        # Stall fetch1 if we have a miss on cache or TLB
        # or a protection fault
        comb += stall_out.eq(~(is_hit & access_ok))

        # -- Wishbone requests output (from the cache miss reload machine)
        # wishbone_out <= r.wb;
        # Wishbone requests output (from the cache miss reload machine)
        comb += wb_out.eq(r.wb)
    # -- Cache hit synchronous machine
    # icache_hit : process(clk)
    # Cache hit synchronous machine
    def icache_hit(self, m):
        sync = m.d.sync

        # if rising_edge(clk) then
        #     -- keep outputs to fetch2 unchanged on a stall
        #     -- except that flush or reset sets valid to 0
        #     -- If use_previous, keep the same data as last
        #     -- cycle and use the second half
        #     if stall_in = '1' or use_previous = '1' then
        #         if rst = '1' or flush_in = '1' then
        #             r.hit_valid <= '0';
        # keep outputs to fetch2 unchanged on a stall
        # except that flush or reset sets valid to 0
        # If use_previous, keep the same data as last
        # cycle and use the second half
        with m.If(stall_in | use_previous):
            with m.If('''TODO rst nmigen''' | flush_in):
                sync += r.hit_valid.eq(0)

        with m.Else():
            # -- On a hit, latch the request for the next cycle,
            # -- when the BRAM data will be available on the
            # -- cache_out output of the corresponding way
            # r.hit_valid <= req_is_hit;
            # if req_is_hit = '1' then
            #     r.hit_way <= req_hit_way;
            # On a hit, latch the request for the next cycle,
            # when the BRAM data will be available on the
            # cache_out output of the corresponding way
            sync += r.hit_valid.eq(req_is_hit)

            with m.If(req_is_hit):
                sync += r.hit_way.eq(req_hit_way)

                # report "cache hit nia:" & to_hstring(i_in.nia) &
                #     " IR:" & std_ulogic'image(i_in.virt_mode) &
                #     " SM:" & std_ulogic'image(i_in.stop_mark) &
                #     " idx:" & integer'image(req_index) &
                #     " tag:" & to_hstring(req_tag) &
                #     " way:" & integer'image(req_hit_way) &
                #     " RA:" & to_hstring(real_addr);
                print(f"cache hit nia:{i_in.nia}, "
                      f"IR:{i_in.virt_mode}, "
                      f"SM:{i_in.stop_mark}, idx:{req_index}, "
                      f"tag:{req_tag}, way:{req_hit_way}, "
                      f"RA:{real_addr}")

        # if stall_in = '0' then
        #     -- Send stop marks and NIA down regardless of validity
        #     r.hit_smark <= i_in.stop_mark;
        #     r.hit_nia <= i_in.nia;
        with m.If(~stall_in):
            # Send stop marks and NIA down regardless of validity
            sync += r.hit_smark.eq(i_in.stop_mark)
            sync += r.hit_nia.eq(i_in.nia)
    # -- Cache miss/reload synchronous machine
    # icache_miss : process(clk)
    # Cache miss/reload synchronous machine
    def icache_miss(self, m):
        comb = m.d.comb
        sync = m.d.sync

        # variable tagset    : cache_tags_set_t;
        # variable stbs_done : boolean;
        tagset    = Signal(TAG_RAM_WIDTH)
        stbs_done = Signal()

        # if rising_edge(clk) then
        #     -- On reset, clear all valid bits to force misses
        # On reset, clear all valid bits to force misses
        with m.If('''TODO rst nmigen'''):
            # for i in index_t loop
            #     cache_valids(i) <= (others => '0');
            for i in range(NUM_LINES):
                sync += cache_valid_bits[i].eq(0)

            sync += r.state.eq(State.IDLE)
            sync += r.wb.cyc.eq(0)
            sync += r.wb.stb.eq(0)

            # -- We only ever do reads on wishbone
            # r.wb.dat <= (others => '0');
            # r.wb.sel <= "11111111";
            # We only ever do reads on wishbone
            sync += r.wb.dat.eq(0)
            sync += r.wb.sel.eq(Const(0b11111111, 8))
            sync += r.wb.we.eq(0)

            # -- Not useful normally but helps avoiding
            # -- tons of sim warnings
            # r.wb.adr <= (others => '0');
            # Not useful normally but helps avoiding tons of sim warnings
            sync += r.wb.adr.eq(0)

        # -- Process cache invalidations
        # if inval_in = '1' then
        #     for i in index_t loop
        #         cache_valids(i) <= (others => '0');
        #     r.store_valid <= '0';
        # Process cache invalidations
        with m.If(inval_in):
            for i in range(NUM_LINES):
                sync += cache_valid_bits[i].eq(0)
            sync += r.store_valid.eq(0)

        # -- Main state machine
        #
        # Main state machine
        with m.Switch(r.state):
            # when IDLE =>
            with m.Case(State.IDLE):
                # -- Reset per-row valid flags,
                # -- only used in WAIT_ACK
                # for i in 0 to ROW_PER_LINE - 1 loop
                #     r.rows_valid(i) <= '0';
                # Reset per-row valid flags,
                # only used in WAIT_ACK
                for i in range(ROW_PER_LINE):
                    sync += r.rows_valid[i].eq(0)

                # -- We need to read a cache line
                # if req_is_miss = '1' then
                #     report "cache miss nia:" & to_hstring(i_in.nia) &
                #         " IR:" & std_ulogic'image(i_in.virt_mode) &
                #         " SM:" & std_ulogic'image(i_in.stop_mark) &
                #         " idx:" & integer'image(req_index) &
                #         " way:" & integer'image(replace_way) &
                #         " tag:" & to_hstring(req_tag) &
                #         " RA:" & to_hstring(real_addr);
                # We need to read a cache line
                with m.If(req_is_miss):
                    print(f"cache miss nia:{i_in.nia} "
                          f"IR:{i_in.virt_mode} "
                          f"SM:{i_in.stop_mark} "
                          f"idx:{req_index} "
                          f"way:{replace_way} tag:{req_tag} "
                          f"RA:{real_addr}")

                    # -- Keep track of our index and way for
                    # -- subsequent stores
                    # r.store_index <= req_index;
                    # r.store_row <= get_row(req_laddr);
                    # r.store_tag <= req_tag;
                    # r.store_valid <= '1';
                    #     get_row_of_line(get_row(req_laddr)) - 1;
                    # Keep track of our index and way
                    # for subsequent stores
                    sync += r.store_index.eq(req_index)
                    sync += r.store_row.eq(get_row(req_laddr))
                    sync += r.store_tag.eq(req_tag)
                    sync += r.store_valid.eq(1)
                    sync += r.end_row_ix.eq(
                             get_row_of_line(get_row(req_laddr)) - 1)

                    # -- Prep for first wishbone read. We calculate the
                    # -- address of the start of the cache line and
                    # -- start the WB cycle.
                    # r.wb.adr <= req_laddr(r.wb.adr'left downto 0);
                    # Prep for first wishbone read.  We calculate the
                    # address of the start of the cache line and
                    # start the WB cycle.
                    sync += r.wb.adr.eq(req_laddr[:r.wb.adr.width])
                    sync += r.wb.cyc.eq(1)
                    sync += r.wb.stb.eq(1)

                    # -- Track that we had one request sent
                    # r.state <= CLR_TAG;
                    # Track that we had one request sent
                    sync += r.state.eq(State.CLR_TAG)
            # when CLR_TAG | WAIT_ACK =>
            with m.Case(State.CLR_TAG, State.WAIT_ACK):
                # if r.state = CLR_TAG then
                with m.If(r.state == State.CLR_TAG):
                    # -- Get victim way from plru
                    # r.store_way <= replace_way;
                    # Get victim way from plru
                    sync += r.store_way.eq(replace_way)

                    # -- Force misses on that way while
                    # -- reloading that line
                    # cache_valids(req_index)(replace_way) <= '0';
                    # Force misses on that way while
                    # reloading that line
                    sync += cache_valid_bits[req_index][replace_way].eq(0)

                    # -- Store new tag in selected way
                    # for i in 0 to NUM_WAYS-1 loop
                    #     if i = replace_way then
                    #         tagset := cache_tags(r.store_index);
                    #         write_tag(i, tagset, r.store_tag);
                    #         cache_tags(r.store_index) <= tagset;
                    # Store new tag in selected way
                    for i in range(NUM_WAYS):
                        with m.If(i == replace_way):
                            comb += tagset.eq(cache_tags[r.store_index])
                            write_tag(i, tagset, r.store_tag)
                            sync += cache_tags[r.store_index].eq(tagset)

                    # r.state <= WAIT_ACK;
                    sync += r.state.eq(State.WAIT_ACK)
                # -- Requests are all sent if stb is 0
                # stbs_done := r.wb.stb = '0';
                # Requests are all sent if stb is 0
                comb += stbs_done.eq(r.wb.stb == 0)

                # -- If we are still sending requests,
                # -- was one accepted ?
                # if wishbone_in.stall = '0' and not stbs_done then
                # If we are still sending requests,
                # was one accepted?
                with m.If(~wb_in.stall & ~stbs_done):
                    # -- That was the last word ? We are done sending.
                    # -- Clear stb and set stbs_done so we can handle
                    # -- an eventual last ack on the same cycle.
                    # if is_last_row_addr(r.wb.adr, r.end_row_ix) then
                    #     stbs_done := true;
                    # That was the last word?  We are done sending.
                    # Clear stb and set stbs_done so we can handle
                    # an eventual last ack on the same cycle.
                    with m.If(is_last_row_addr(r.wb.adr, r.end_row_ix)):
                        sync += r.wb.stb.eq(0)
                        comb += stbs_done.eq(1)

                    # -- Calculate the next row address
                    # r.wb.adr <= next_row_addr(r.wb.adr);
                    # Calculate the next row address
                    sync += r.wb.adr.eq(next_row_addr(r.wb.adr))

                # -- Incoming acks processing
                # if wishbone_in.ack = '1' then
                # Incoming acks processing
                with m.If(wb_in.ack):
                    # r.rows_valid(r.store_row mod ROW_PER_LINE)
                    sync += r.rows_valid[r.store_row % ROW_PER_LINE].eq(1)

                    # -- Check for completion
                    #     is_last_row(r.store_row, r.end_row_ix) then
                    # Check for completion
                    with m.If(stbs_done &
                              is_last_row(r.store_row, r.end_row_ix)):
                        # -- Complete wishbone cycle
                        # Complete wishbone cycle
                        sync += r.wb.cyc.eq(0)

                        # -- Cache line is now valid
                        # cache_valids(r.store_index)(replace_way) <=
                        #     r.store_valid and not inval_in;
                        # Cache line is now valid
                        sync += cache_valid_bits[r.store_index][replace_way].eq(
                                 r.store_valid & ~inval_in)

                        sync += r.state.eq(State.IDLE)

                    # -- Increment store row counter
                    # r.store_row <= next_row(r.store_row);
                    # Increment store row counter
                    sync += r.store_row.eq(next_row(r.store_row))
        # -- TLB miss and protection fault processing
        # if rst = '1' or flush_in = '1' or m_in.tlbld = '1' then
        #     r.fetch_failed <= '0';
        # elsif i_in.req = '1' and access_ok = '0' and
        #     stall_in = '0' then
        #     r.fetch_failed <= '1';
        # TLB miss and protection fault processing
        with m.If('''TODO nmigen rst''' | flush_in | m_in.tlbld):
            sync += r.fetch_failed.eq(0)
        with m.Elif(i_in.req & ~access_ok & ~stall_in):
            sync += r.fetch_failed.eq(1)
    # icache_log: if LOG_LENGTH > 0 generate
    def icache_log(self, m, log_out):
        comb = m.d.comb
        sync = m.d.sync

        # -- Output data to logger
        # signal log_data : std_ulogic_vector(53 downto 0);
        # data_log: process(clk)
        #     variable lway: way_t;
        #     variable wstate: std_ulogic;
        # Output data to logger
        for i in range(LOG_LENGTH):
            # Output data to logger
            log_data = Signal(54)
            lway     = Signal(NUM_WAYS)
            wstate   = Signal()

            # if rising_edge(clk) then
            #     lway := req_hit_way;
            comb += lway.eq(req_hit_way)
            comb += wstate.eq(0)

            # if r.state /= IDLE then
            with m.If(r.state != State.IDLE):
                comb += wstate.eq(1)

            # log_data <= i_out.valid &
            #     r.wb.adr(5 downto 3) &
            #     r.wb.stb & r.wb.cyc &
            #     wishbone_in.stall &
            #     r.hit_nia(5 downto 2) &
            #     std_ulogic_vector(to_unsigned(lway, 3)) &
            #     req_is_hit & req_is_miss &
            sync += log_data.eq(Cat(
                     ra_valid, access_ok, req_is_miss, req_is_hit,
                     lway, wstate, r.hit_nia[2:6],
                     r.fetch_failed, stall_out, wb_in.stall, r.wb.cyc,
                     r.wb.stb, r.wb.adr[3:6], wb_in.ack, i_out.insn,
                     i_out.valid))

            # log_out <= log_data;
            comb += log_out.eq(log_data)
    def elaborate(self, platform):
        m = Module()
        comb = m.d.comb
        sync = m.d.sync

        # -- Storage. Hopefully "cache_rows" is a BRAM, the rest is LUTs
        # signal cache_tags   : cache_tags_array_t;
        # signal cache_valids : cache_valids_t;
        # Storage. Hopefully "cache_rows" is a BRAM, the rest is LUTs
        cache_tags       = CacheTagArray()
        cache_valid_bits = CacheValidBitsArray()

        # signal itlb_valids : tlb_valids_t;
        # signal itlb_tags : tlb_tags_t;
        # signal itlb_ptes : tlb_ptes_t;
        # attribute ram_style of itlb_tags : signal is "distributed";
        # attribute ram_style of itlb_ptes : signal is "distributed";
        itlb_valid_bits = TLBValidBitsArray()
        itlb_tags       = TLBTagArray()
        itlb_ptes       = TLBPTEArray()
        # TODO to be passed to nmigen as ram attributes
        # attribute ram_style of itlb_tags : signal is "distributed";
        # attribute ram_style of itlb_ptes : signal is "distributed";

        # -- Privilege bit from PTE EAA field
        # signal eaa_priv : std_ulogic;
        # Privilege bit from PTE EAA field
        eaa_priv = Signal()

        # signal r : reg_internal_t;
        r = RegInternal()

        # -- Async signals on incoming request
        # signal req_index   : index_t;
        # signal req_row     : row_t;
        # signal req_hit_way : way_t;
        # signal req_tag     : cache_tag_t;
        # signal req_is_hit  : std_ulogic;
        # signal req_is_miss : std_ulogic;
        # signal req_laddr   : std_ulogic_vector(63 downto 0);
        # Async signals on incoming request
        req_index   = Signal(NUM_LINES)
        req_row     = Signal(BRAM_ROWS)
        req_hit_way = Signal(NUM_WAYS)
        req_tag     = Signal(TAG_BITS)
        req_is_hit  = Signal()
        req_is_miss = Signal()
        req_laddr   = Signal(64)

        # signal tlb_req_index : tlb_index_t;
        # signal real_addr : std_ulogic_vector(REAL_ADDR_BITS - 1 downto 0);
        # signal ra_valid : std_ulogic;
        # signal priv_fault : std_ulogic;
        # signal access_ok : std_ulogic;
        # signal use_previous : std_ulogic;
        tlb_req_index = Signal(TLB_SIZE)
        real_addr     = Signal(REAL_ADDR_BITS)
        ra_valid      = Signal()
        priv_fault    = Signal()
        access_ok     = Signal()
        use_previous  = Signal()

        # signal cache_out : cache_ram_out_t;
        cache_out = CacheRamOut()

        # signal plru_victim : plru_out_t;
        # signal replace_way : way_t;
        plru_victim = PLRUOut()
        replace_way = Signal(NUM_WAYS)
# use ieee.std_logic_1164.all;
# use work.common.all;
# use work.wishbone_types.all;
#
# entity icache_tb is
# architecture behave of icache_tb is
#     signal clk : std_ulogic;
#     signal rst : std_ulogic;
#
#     signal i_out : Fetch1ToIcacheType;
#     signal i_in  : IcacheToDecode1Type;
#
#     signal m_out : MmuToIcacheType;
#
#     signal wb_bram_in  : wishbone_master_out;
#     signal wb_bram_out : wishbone_slave_out;
#
#     constant clk_period : time := 10 ns;
#
#     icache0: entity work.icache
#         wishbone_out => wb_bram_in,
#         wishbone_in  => wb_bram_out
#
#     -- BRAM Memory slave
#     bram0: entity work.wishbone_bram_wrapper
#         MEMORY_SIZE   => 1024,
#         RAM_INIT_FILE => "icache_test.bin"
#         wishbone_in  => wb_bram_in,
#         wishbone_out => wb_bram_out
#
#     clk_process: process
#         wait for clk_period/2;
#         wait for clk_period/2;
#
#     rst_process: process
#         wait for 2*clk_period;
#
#         i_out.nia <= (others => '0');
#         i_out.stop_mark <= '0';
#         m_out.tlbld <= '0';
#         m_out.tlbie <= '0';
#         m_out.addr <= (others => '0');
#         m_out.pte <= (others => '0');
#         wait until rising_edge(clk);
#         wait until rising_edge(clk);
#         wait until rising_edge(clk);
#         wait until rising_edge(clk);
#
#         i_out.nia <= x"0000000000000004";
#         wait for 30*clk_period;
#         wait until rising_edge(clk);
#         assert i_in.valid = '1' severity failure;
#         assert i_in.insn = x"00000001"
#             report "insn @" & to_hstring(i_out.nia) &
#                 "=" & to_hstring(i_in.insn) &
#                 " expected 00000001"
#         wait until rising_edge(clk);
#
#         i_out.nia <= x"0000000000000008";
#         wait until rising_edge(clk);
#         wait until rising_edge(clk);
#         assert i_in.valid = '1' severity failure;
#         assert i_in.insn = x"00000002"
#             report "insn @" & to_hstring(i_out.nia) &
#                 "=" & to_hstring(i_in.insn) &
#                 " expected 00000002"
#         wait until rising_edge(clk);
#
#         i_out.nia <= x"0000000000000040";
#         wait for 30*clk_period;
#         wait until rising_edge(clk);
#         assert i_in.valid = '1' severity failure;
#         assert i_in.insn = x"00000010"
#             report "insn @" & to_hstring(i_out.nia) &
#                 "=" & to_hstring(i_in.insn) &
#                 " expected 00000010"
#
#         -- test something that aliases
#         i_out.nia <= x"0000000000000100";
#         wait until rising_edge(clk);
#         wait until rising_edge(clk);
#         assert i_in.valid = '0' severity failure;
#         wait until rising_edge(clk);
#         wait for 30*clk_period;
#         wait until rising_edge(clk);
#         assert i_in.valid = '1' severity failure;
#         assert i_in.insn = x"00000040"
#             report "insn @" & to_hstring(i_out.nia) &
#                 "=" & to_hstring(i_in.insn) &
#                 " expected 00000040"
def icache_sim(dut):
    i_out, i_in, m_out, m_in = dut.i_out, dut.i_in, dut.m_out, dut.m_in

    yield i_out.req.eq(0)
    yield i_out.nia.eq(0)
    yield i_out.stop_mark.eq(0)
    yield m_out.tlbld.eq(0)
    yield m_out.tlbie.eq(0)
    yield m_out.addr.eq(0)
    yield m_out.pte.eq(0)

    yield i_out.req.eq(1)
    yield i_out.nia.eq(Const(0x0000000000000004, 64))

    assert i_in.insn == Const(0x00000001, 32), \
        ("insn @%x=%x expected 00000001" % (i_out.nia, i_in.insn))
    yield i_out.req.eq(0)

    yield i_out.req.eq(1)
    yield i_out.nia.eq(Const(0x0000000000000008, 64))

    assert i_in.insn == Const(0x00000002, 32), \
        ("insn @%x=%x expected 00000002" % (i_out.nia, i_in.insn))

    yield i_out.nia.eq(Const(0x0000000000000040, 64))

    assert i_in.insn == Const(0x00000010, 32), \
        ("insn @%x=%x expected 00000010" % (i_out.nia, i_in.insn))

    # test something that aliases
    yield i_out.req.eq(1)
    yield i_out.nia.eq(Const(0x0000000000000100, 64))

    assert i_in.insn == Const(0x00000040, 32), \
        ("insn @%x=%x expected 00000040" % (i_out.nia, i_in.insn))
    yield i_out.req.eq(0)


def test_icache():
    dut = ICache()

    vl = rtlil.convert(dut, ports=[])
    with open("test_icache.il", "w") as f:
        f.write(vl)

    #run_simulation(dut, icache_sim(), vcd_name='test_icache.vcd')


if __name__ == '__main__':