dcache: Ease timing on calculation of acks remaining
1 --
2 -- Set associative dcache write-through
3 --
4 -- TODO (in no specific order):
5 --
6 -- * See list in icache.vhdl
7 -- * Complete load misses on the cycle when WB data comes instead of
8 -- at the end of line (this requires dealing with requests coming in
9 -- while not idle...)
10 --
11 library ieee;
12 use ieee.std_logic_1164.all;
13 use ieee.numeric_std.all;
14
15 library work;
16 use work.utils.all;
17 use work.common.all;
18 use work.helpers.all;
19 use work.wishbone_types.all;
20
21 entity dcache is
22 generic (
23 -- Line size in bytes
24 LINE_SIZE : positive := 64;
25 -- Number of lines in a set
26 NUM_LINES : positive := 32;
27 -- Number of ways
28 NUM_WAYS : positive := 4;
29 -- L1 DTLB entries per set
30 TLB_SET_SIZE : positive := 64;
31 -- L1 DTLB number of sets
32 TLB_NUM_WAYS : positive := 2;
33 -- L1 DTLB log_2(page_size)
34 TLB_LG_PGSZ : positive := 12;
35 -- Non-zero to enable log data collection
36 LOG_LENGTH : natural := 0
37 );
38 port (
39 clk : in std_ulogic;
40 rst : in std_ulogic;
41
42 d_in : in Loadstore1ToDcacheType;
43 d_out : out DcacheToLoadstore1Type;
44
45 m_in : in MmuToDcacheType;
46 m_out : out DcacheToMmuType;
47
48 stall_out : out std_ulogic;
49
50 wishbone_out : out wishbone_master_out;
51 wishbone_in : in wishbone_slave_out;
52
53 log_out : out std_ulogic_vector(19 downto 0)
54 );
55 end entity dcache;
56
57 architecture rtl of dcache is
58 -- BRAM organisation: We never access more than wishbone_data_bits at
59 -- a time so to save resources we make the array only that wide, and
60 -- use consecutive indices to make a cache "line"
61 --
62 -- ROW_SIZE is the width in bytes of the BRAM (based on WB, so 64-bits)
63 constant ROW_SIZE : natural := wishbone_data_bits / 8;
64 -- ROW_PER_LINE is the number of rows (wishbone transactions) in a line
65 constant ROW_PER_LINE : natural := LINE_SIZE / ROW_SIZE;
66 -- BRAM_ROWS is the number of rows in BRAM needed to represent the full
67 -- dcache
68 constant BRAM_ROWS : natural := NUM_LINES * ROW_PER_LINE;
69
70 -- Bit fields counts in the address
71
72 -- REAL_ADDR_BITS is the number of real address bits that we store
73 constant REAL_ADDR_BITS : positive := 56;
74 -- ROW_BITS is the number of bits to select a row
75 constant ROW_BITS : natural := log2(BRAM_ROWS);
76 -- ROW_LINEBITS is the number of bits to select a row within a line
77 constant ROW_LINEBITS : natural := log2(ROW_PER_LINE);
78 -- LINE_OFF_BITS is the number of bits for the offset in a cache line
79 constant LINE_OFF_BITS : natural := log2(LINE_SIZE);
80 -- ROW_OFF_BITS is the number of bits for the offset in a row
81 constant ROW_OFF_BITS : natural := log2(ROW_SIZE);
82 -- INDEX_BITS is the number of bits to select a cache line
83 constant INDEX_BITS : natural := log2(NUM_LINES);
84 -- SET_SIZE_BITS is the log base 2 of the set size
85 constant SET_SIZE_BITS : natural := LINE_OFF_BITS + INDEX_BITS;
86 -- TAG_BITS is the number of bits of the tag part of the address
87 constant TAG_BITS : natural := REAL_ADDR_BITS - SET_SIZE_BITS;
88 -- TAG_WIDTH is the width in bits of each way of the tag RAM (TAG_BITS rounded up to a multiple of 8)
89 constant TAG_WIDTH : natural := TAG_BITS + 7 - ((TAG_BITS + 7) mod 8);
90 -- WAY_BITS is the number of bits to select a way
91 constant WAY_BITS : natural := log2(NUM_WAYS);
92
93 -- Example of layout for 32 lines of 64 bytes:
94 --
95 -- .. tag |index| line |
96 -- .. | row | |
97 -- .. | |---| | ROW_LINEBITS (3)
98 -- .. | |--- - --| LINE_OFF_BITS (6)
99 -- .. | |- --| ROW_OFF_BITS (3)
100 -- .. |----- ---| | ROW_BITS (8)
101 -- .. |-----| | INDEX_BITS (5)
102 -- .. --------| | TAG_BITS (45)
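-- Worked example for the default generics above (LINE_SIZE = 64, NUM_LINES = 32,
-- NUM_WAYS = 4, 64-bit wishbone), as a quick check of the formulas:
--   ROW_SIZE = 8 bytes, ROW_PER_LINE = 8, BRAM_ROWS = 256
--   ROW_OFF_BITS = 3, ROW_LINEBITS = 3, ROW_BITS = 8
--   LINE_OFF_BITS = 6, INDEX_BITS = 5, SET_SIZE_BITS = 11
--   TAG_BITS = 56 - 11 = 45, TAG_WIDTH = 48, WAY_BITS = 2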
103
104 subtype row_t is integer range 0 to BRAM_ROWS-1;
105 subtype index_t is integer range 0 to NUM_LINES-1;
106 subtype way_t is integer range 0 to NUM_WAYS-1;
107 subtype row_in_line_t is unsigned(ROW_LINEBITS-1 downto 0);
108
109 -- The cache data BRAM organized as described above for each way
110 subtype cache_row_t is std_ulogic_vector(wishbone_data_bits-1 downto 0);
111
112 -- The cache tags LUTRAM has a row per set. Vivado is a pain and will
113 -- not handle a clean (commented) definition of the cache tags as a 3d
114 -- memory. For now, work around it by packing all the tags of a set into a single word.
115 subtype cache_tag_t is std_logic_vector(TAG_BITS-1 downto 0);
116 -- type cache_tags_set_t is array(way_t) of cache_tag_t;
117 -- type cache_tags_array_t is array(index_t) of cache_tags_set_t;
118 constant TAG_RAM_WIDTH : natural := TAG_WIDTH * NUM_WAYS;
119 subtype cache_tags_set_t is std_logic_vector(TAG_RAM_WIDTH-1 downto 0);
120 type cache_tags_array_t is array(index_t) of cache_tags_set_t;
121
122 -- The cache valid bits
123 subtype cache_way_valids_t is std_ulogic_vector(NUM_WAYS-1 downto 0);
124 type cache_valids_t is array(index_t) of cache_way_valids_t;
125 type row_per_line_valid_t is array(0 to ROW_PER_LINE - 1) of std_ulogic;
126
127 -- Storage. The data rows live in the per-way cache_ram BRAMs below; these tag/valid arrays should map to LUTs
128 signal cache_tags : cache_tags_array_t;
129 signal cache_tag_set : cache_tags_set_t;
130 signal cache_valids : cache_valids_t;
131
132 attribute ram_style : string;
133 attribute ram_style of cache_tags : signal is "distributed";
134
135 -- L1 TLB.
136 constant TLB_SET_BITS : natural := log2(TLB_SET_SIZE);
137 constant TLB_WAY_BITS : natural := log2(TLB_NUM_WAYS);
138 constant TLB_EA_TAG_BITS : natural := 64 - (TLB_LG_PGSZ + TLB_SET_BITS);
139 constant TLB_TAG_WAY_BITS : natural := TLB_NUM_WAYS * TLB_EA_TAG_BITS;
140 constant TLB_PTE_BITS : natural := 64;
141 constant TLB_PTE_WAY_BITS : natural := TLB_NUM_WAYS * TLB_PTE_BITS;
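-- With the default generics (TLB_SET_SIZE = 64, TLB_NUM_WAYS = 2, TLB_LG_PGSZ = 12)
-- these work out to: TLB_SET_BITS = 6, TLB_WAY_BITS = 1,
-- TLB_EA_TAG_BITS = 64 - 18 = 46, TLB_TAG_WAY_BITS = 92, TLB_PTE_WAY_BITS = 128.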
142
143 subtype tlb_way_t is integer range 0 to TLB_NUM_WAYS - 1;
144 subtype tlb_index_t is integer range 0 to TLB_SET_SIZE - 1;
145 subtype tlb_way_valids_t is std_ulogic_vector(TLB_NUM_WAYS-1 downto 0);
146 type tlb_valids_t is array(tlb_index_t) of tlb_way_valids_t;
147 subtype tlb_tag_t is std_ulogic_vector(TLB_EA_TAG_BITS - 1 downto 0);
148 subtype tlb_way_tags_t is std_ulogic_vector(TLB_TAG_WAY_BITS-1 downto 0);
149 type tlb_tags_t is array(tlb_index_t) of tlb_way_tags_t;
150 subtype tlb_pte_t is std_ulogic_vector(TLB_PTE_BITS - 1 downto 0);
151 subtype tlb_way_ptes_t is std_ulogic_vector(TLB_PTE_WAY_BITS-1 downto 0);
152 type tlb_ptes_t is array(tlb_index_t) of tlb_way_ptes_t;
153 type hit_way_set_t is array(tlb_way_t) of way_t;
154
155 signal dtlb_valids : tlb_valids_t;
156 signal dtlb_tags : tlb_tags_t;
157 signal dtlb_ptes : tlb_ptes_t;
158 attribute ram_style of dtlb_tags : signal is "distributed";
159 attribute ram_style of dtlb_ptes : signal is "distributed";
160
161 -- Record for storing permission, attribute, etc. bits from a PTE
162 type perm_attr_t is record
163 reference : std_ulogic;
164 changed : std_ulogic;
165 nocache : std_ulogic;
166 priv : std_ulogic;
167 rd_perm : std_ulogic;
168 wr_perm : std_ulogic;
169 end record;
170
171 function extract_perm_attr(pte : std_ulogic_vector(TLB_PTE_BITS - 1 downto 0)) return perm_attr_t is
172 variable pa : perm_attr_t;
173 begin
174 pa.reference := pte(8);
175 pa.changed := pte(7);
176 pa.nocache := pte(5);
177 pa.priv := pte(3);
178 pa.rd_perm := pte(2);
179 pa.wr_perm := pte(1);
180 return pa;
181 end;
182
183 constant real_mode_perm_attr : perm_attr_t := (nocache => '0', others => '1');
184
185 -- Type of operation on a "valid" input
186 type op_t is (OP_NONE,
187 OP_BAD, -- NC cache hit, TLB miss, prot/RC failure
188 OP_STCX_FAIL, -- conditional store w/o reservation
189 OP_LOAD_HIT, -- Cache hit on load
190 OP_LOAD_MISS, -- Load missing cache
191 OP_LOAD_NC, -- Non-cachable load
192 OP_STORE_HIT, -- Store hitting cache
193 OP_STORE_MISS); -- Store missing cache
194
195 -- Cache state machine
196 type state_t is (IDLE, -- Normal load hit processing
197 RELOAD_WAIT_ACK, -- Cache reload wait ack
198 STORE_WAIT_ACK, -- Store wait ack
199 NC_LOAD_WAIT_ACK);-- Non-cachable load wait ack
200
201
202 --
203 -- Dcache operations:
204 --
205 -- In order to make timing, we use the BRAMs with an output buffer,
206 -- which means that the BRAM output is delayed by an extra cycle.
207 --
208 -- Thus, the dcache has a 2-stage internal pipeline for cache hits
209 -- with no stalls.
210 --
211 -- All other operations are handled via stalling in the first stage.
212 --
213 -- The second stage can thus complete a hit at the same time as the
214 -- first stage emits a stall for a complex op.
215 --
216
217 -- Stage 0 register, basically contains just the latched request
218 type reg_stage_0_t is record
219 req : Loadstore1ToDcacheType;
220 tlbie : std_ulogic;
221 doall : std_ulogic;
222 tlbld : std_ulogic;
223 mmu_req : std_ulogic; -- indicates source of request
224 end record;
225
226 signal r0 : reg_stage_0_t;
227 signal r0_full : std_ulogic;
228
229 type mem_access_request_t is record
230 op : op_t;
231 valid : std_ulogic;
232 dcbz : std_ulogic;
233 real_addr : std_ulogic_vector(REAL_ADDR_BITS - 1 downto 0);
234 data : std_ulogic_vector(63 downto 0);
235 byte_sel : std_ulogic_vector(7 downto 0);
236 hit_way : way_t;
237 same_tag : std_ulogic;
238 mmu_req : std_ulogic;
239 end record;
240
241 -- First stage register, contains state for stage 1 of load hits
242 -- and for the state machine used by all other operations
243 --
244 type reg_stage_1_t is record
245 -- Info about the request
246 full : std_ulogic; -- have uncompleted request
247 mmu_req : std_ulogic; -- request is from MMU
248 req : mem_access_request_t;
249
250 -- Cache hit state
251 hit_way : way_t;
252 hit_load_valid : std_ulogic;
253 hit_index : index_t;
254 cache_hit : std_ulogic;
255
256 -- TLB hit state
257 tlb_hit : std_ulogic;
258 tlb_hit_way : tlb_way_t;
259 tlb_hit_index : tlb_index_t;
260
261 -- 2-stage data buffer for data forwarded from writes to reads
262 forward_data1 : std_ulogic_vector(63 downto 0);
263 forward_data2 : std_ulogic_vector(63 downto 0);
264 forward_sel1 : std_ulogic_vector(7 downto 0);
265 forward_valid1 : std_ulogic;
266 forward_way1 : way_t;
267 forward_row1 : row_t;
268 use_forward1 : std_ulogic;
269 forward_sel : std_ulogic_vector(7 downto 0);
270
271 -- Cache miss state (reload state machine)
272 state : state_t;
273 dcbz : std_ulogic;
274 write_bram : std_ulogic;
275 write_tag : std_ulogic;
276 slow_valid : std_ulogic;
277 wb : wishbone_master_out;
278 reload_tag : cache_tag_t;
279 store_way : way_t;
280 store_row : row_t;
281 store_index : index_t;
282 end_row_ix : row_in_line_t;
283 rows_valid : row_per_line_valid_t;
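-- Outstanding wishbone acks for the current store burst: acks_pending is
-- the count still expected; inc_acks/dec_acks are registered for a cycle
-- and applied to the count on the following cycle to ease timing.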
284 acks_pending : unsigned(2 downto 0);
285 inc_acks : std_ulogic;
286 dec_acks : std_ulogic;
287
288 -- Signals to complete (possibly with error)
289 ls_valid : std_ulogic;
290 mmu_done : std_ulogic;
291 error_done : std_ulogic;
292 cache_paradox : std_ulogic;
293
294 -- Signal to complete a failed stcx.
295 stcx_fail : std_ulogic;
296 end record;
297
298 signal r1 : reg_stage_1_t;
299
300 -- Reservation information
301 --
302 type reservation_t is record
303 valid : std_ulogic;
304 addr : std_ulogic_vector(63 downto LINE_OFF_BITS);
305 end record;
306
307 signal reservation : reservation_t;
308
309 -- Async signals on incoming request
310 signal req_index : index_t;
311 signal req_row : row_t;
312 signal req_hit_way : way_t;
313 signal req_tag : cache_tag_t;
314 signal req_op : op_t;
315 signal req_data : std_ulogic_vector(63 downto 0);
316 signal req_same_tag : std_ulogic;
317 signal req_go : std_ulogic;
318
319 signal early_req_row : row_t;
320
321 signal cancel_store : std_ulogic;
322 signal set_rsrv : std_ulogic;
323 signal clear_rsrv : std_ulogic;
324
325 signal r0_valid : std_ulogic;
326 signal r0_stall : std_ulogic;
327
328 signal use_forward1_next : std_ulogic;
329 signal use_forward2_next : std_ulogic;
330
331 -- Cache RAM interface
332 type cache_ram_out_t is array(way_t) of cache_row_t;
333 signal cache_out : cache_ram_out_t;
334
335 -- PLRU output interface
336 type plru_out_t is array(index_t) of std_ulogic_vector(WAY_BITS-1 downto 0);
337 signal plru_victim : plru_out_t;
338 signal replace_way : way_t;
339
340 -- Wishbone read/write/cache write formatting signals
341 signal bus_sel : std_ulogic_vector(7 downto 0);
342
343 -- TLB signals
344 signal tlb_tag_way : tlb_way_tags_t;
345 signal tlb_pte_way : tlb_way_ptes_t;
346 signal tlb_valid_way : tlb_way_valids_t;
347 signal tlb_req_index : tlb_index_t;
348 signal tlb_hit : std_ulogic;
349 signal tlb_hit_way : tlb_way_t;
350 signal pte : tlb_pte_t;
351 signal ra : std_ulogic_vector(REAL_ADDR_BITS - 1 downto 0);
352 signal valid_ra : std_ulogic;
353 signal perm_attr : perm_attr_t;
354 signal rc_ok : std_ulogic;
355 signal perm_ok : std_ulogic;
356 signal access_ok : std_ulogic;
357
358 -- TLB PLRU output interface
359 type tlb_plru_out_t is array(tlb_index_t) of std_ulogic_vector(TLB_WAY_BITS-1 downto 0);
360 signal tlb_plru_victim : tlb_plru_out_t;
361
362 --
363 -- Helper functions to decode incoming requests
364 --
365
366 -- Return the cache line index (tag index) for an address
367 function get_index(addr: std_ulogic_vector) return index_t is
368 begin
369 return to_integer(unsigned(addr(SET_SIZE_BITS - 1 downto LINE_OFF_BITS)));
370 end;
371
372 -- Return the cache row index (data memory) for an address
373 function get_row(addr: std_ulogic_vector) return row_t is
374 begin
375 return to_integer(unsigned(addr(SET_SIZE_BITS - 1 downto ROW_OFF_BITS)));
376 end;
377
378 -- Return the index of a row within a line
379 function get_row_of_line(row: row_t) return row_in_line_t is
380 variable row_v : unsigned(ROW_BITS-1 downto 0);
381 begin
382 row_v := to_unsigned(row, ROW_BITS);
383 return row_v(ROW_LINEBITS-1 downto 0);
384 end;
385
386 -- Returns whether the given address is in the last row of a line
387 function is_last_row_addr(addr: wishbone_addr_type; last: row_in_line_t) return boolean is
388 begin
389 return unsigned(addr(LINE_OFF_BITS-1 downto ROW_OFF_BITS)) = last;
390 end;
391
392 -- Returns whether this is the last row of a line
393 function is_last_row(row: row_t; last: row_in_line_t) return boolean is
394 begin
395 return get_row_of_line(row) = last;
396 end;
397
398 -- Return the address of the next row in the current cache line
399 function next_row_addr(addr: wishbone_addr_type) return std_ulogic_vector is
400 variable row_idx : std_ulogic_vector(ROW_LINEBITS-1 downto 0);
401 variable result : wishbone_addr_type;
402 begin
403 -- Is there no simpler way in VHDL to generate this 3-bit adder?
404 row_idx := addr(LINE_OFF_BITS-1 downto ROW_OFF_BITS);
405 row_idx := std_ulogic_vector(unsigned(row_idx) + 1);
406 result := addr;
407 result(LINE_OFF_BITS-1 downto ROW_OFF_BITS) := row_idx;
408 return result;
409 end;
410
411 -- Return the next row in the current cache line. We use a dedicated
412 -- function in order to limit the size of the generated adder to be
413 -- only the bits within a cache line (3 bits with default settings)
414 --
415 function next_row(row: row_t) return row_t is
416 variable row_v : std_ulogic_vector(ROW_BITS-1 downto 0);
417 variable row_idx : std_ulogic_vector(ROW_LINEBITS-1 downto 0);
418 variable result : std_ulogic_vector(ROW_BITS-1 downto 0);
419 begin
420 row_v := std_ulogic_vector(to_unsigned(row, ROW_BITS));
421 row_idx := row_v(ROW_LINEBITS-1 downto 0);
422 row_v(ROW_LINEBITS-1 downto 0) := std_ulogic_vector(unsigned(row_idx) + 1);
423 return to_integer(unsigned(row_v));
424 end;
425
426 -- Get the tag value from the address
427 function get_tag(addr: std_ulogic_vector) return cache_tag_t is
428 begin
429 return addr(REAL_ADDR_BITS - 1 downto SET_SIZE_BITS);
430 end;
431
432 -- Read a tag from a tag memory row
433 function read_tag(way: way_t; tagset: cache_tags_set_t) return cache_tag_t is
434 begin
435 return tagset(way * TAG_WIDTH + TAG_BITS - 1 downto way * TAG_WIDTH);
436 end;
437
438 -- Read a TLB tag from a TLB tag memory row
439 function read_tlb_tag(way: tlb_way_t; tags: tlb_way_tags_t) return tlb_tag_t is
440 variable j : integer;
441 begin
442 j := way * TLB_EA_TAG_BITS;
443 return tags(j + TLB_EA_TAG_BITS - 1 downto j);
444 end;
445
446 -- Write a TLB tag to a TLB tag memory row
447 procedure write_tlb_tag(way: tlb_way_t; tags: inout tlb_way_tags_t;
448 tag: tlb_tag_t) is
449 variable j : integer;
450 begin
451 j := way * TLB_EA_TAG_BITS;
452 tags(j + TLB_EA_TAG_BITS - 1 downto j) := tag;
453 end;
454
455 -- Read a PTE from a TLB PTE memory row
456 function read_tlb_pte(way: tlb_way_t; ptes: tlb_way_ptes_t) return tlb_pte_t is
457 variable j : integer;
458 begin
459 j := way * TLB_PTE_BITS;
460 return ptes(j + TLB_PTE_BITS - 1 downto j);
461 end;
462
463 procedure write_tlb_pte(way: tlb_way_t; ptes: inout tlb_way_ptes_t; newpte: tlb_pte_t) is
464 variable j : integer;
465 begin
466 j := way * TLB_PTE_BITS;
467 ptes(j + TLB_PTE_BITS - 1 downto j) := newpte;
468 end;
469
470 begin
471
472 assert LINE_SIZE mod ROW_SIZE = 0 report "LINE_SIZE not multiple of ROW_SIZE" severity FAILURE;
473 assert ispow2(LINE_SIZE) report "LINE_SIZE not power of 2" severity FAILURE;
474 assert ispow2(NUM_LINES) report "NUM_LINES not power of 2" severity FAILURE;
475 assert ispow2(ROW_PER_LINE) report "ROW_PER_LINE not power of 2" severity FAILURE;
476 assert (ROW_BITS = INDEX_BITS + ROW_LINEBITS)
477 report "geometry bits don't add up" severity FAILURE;
478 assert (LINE_OFF_BITS = ROW_OFF_BITS + ROW_LINEBITS)
479 report "geometry bits don't add up" severity FAILURE;
480 assert (REAL_ADDR_BITS = TAG_BITS + INDEX_BITS + LINE_OFF_BITS)
481 report "geometry bits don't add up" severity FAILURE;
482 assert (REAL_ADDR_BITS = TAG_BITS + ROW_BITS + ROW_OFF_BITS)
483 report "geometry bits don't add up" severity FAILURE;
484 assert (64 = wishbone_data_bits)
485 report "Can't yet handle a wishbone width that isn't 64-bits" severity FAILURE;
486 assert SET_SIZE_BITS <= TLB_LG_PGSZ report "Set indexed by virtual address" severity FAILURE;
487
488 -- Latch the request in r0.req as long as we're not stalling
489 stage_0 : process(clk)
490 variable r : reg_stage_0_t;
491 begin
492 if rising_edge(clk) then
493 assert (d_in.valid and m_in.valid) = '0' report
494 "request collision loadstore vs MMU";
495 if m_in.valid = '1' then
496 r.req.valid := '1';
497 r.req.load := not (m_in.tlbie or m_in.tlbld);
498 r.req.dcbz := '0';
499 r.req.nc := '0';
500 r.req.reserve := '0';
501 r.req.virt_mode := '0';
502 r.req.priv_mode := '1';
503 r.req.addr := m_in.addr;
504 r.req.data := m_in.pte;
505 r.req.byte_sel := (others => '1');
506 r.tlbie := m_in.tlbie;
507 r.doall := m_in.doall;
508 r.tlbld := m_in.tlbld;
509 r.mmu_req := '1';
510 else
511 r.req := d_in;
512 r.tlbie := '0';
513 r.doall := '0';
514 r.tlbld := '0';
515 r.mmu_req := '0';
516 end if;
517 if rst = '1' then
518 r0_full <= '0';
519 elsif r1.full = '0' or r0_full = '0' then
520 r0 <= r;
521 r0_full <= r.req.valid;
522 end if;
523 end if;
524 end process;
525
526 -- we don't yet handle collisions between loadstore1 requests and MMU requests
527 m_out.stall <= '0';
528
529 -- Hold off the request in r0 when r1 has an uncompleted request
530 r0_stall <= r0_full and r1.full;
531 r0_valid <= r0_full and not r1.full;
532 stall_out <= r0_stall;
533
534 -- TLB
535 -- Operates in the second cycle on the request latched in r0.req.
536 -- TLB updates write the entry at the end of the second cycle.
537 tlb_read : process(clk)
538 variable index : tlb_index_t;
539 variable addrbits : std_ulogic_vector(TLB_SET_BITS - 1 downto 0);
540 begin
541 if rising_edge(clk) then
542 if m_in.valid = '1' then
543 addrbits := m_in.addr(TLB_LG_PGSZ + TLB_SET_BITS - 1 downto TLB_LG_PGSZ);
544 else
545 addrbits := d_in.addr(TLB_LG_PGSZ + TLB_SET_BITS - 1 downto TLB_LG_PGSZ);
546 end if;
547 index := to_integer(unsigned(addrbits));
548 -- If we have any op and the previous op isn't finished,
549 -- then keep the same output for next cycle.
550 if r0_stall = '0' then
551 tlb_valid_way <= dtlb_valids(index);
552 tlb_tag_way <= dtlb_tags(index);
553 tlb_pte_way <= dtlb_ptes(index);
554 end if;
555 end if;
556 end process;
557
558 -- Generate TLB PLRUs
559 maybe_tlb_plrus: if TLB_NUM_WAYS > 1 generate
560 begin
561 tlb_plrus: for i in 0 to TLB_SET_SIZE - 1 generate
562 -- TLB PLRU interface
563 signal tlb_plru_acc : std_ulogic_vector(TLB_WAY_BITS-1 downto 0);
564 signal tlb_plru_acc_en : std_ulogic;
565 signal tlb_plru_out : std_ulogic_vector(TLB_WAY_BITS-1 downto 0);
566 begin
567 tlb_plru : entity work.plru
568 generic map (
569 BITS => TLB_WAY_BITS
570 )
571 port map (
572 clk => clk,
573 rst => rst,
574 acc => tlb_plru_acc,
575 acc_en => tlb_plru_acc_en,
576 lru => tlb_plru_out
577 );
578
579 process(all)
580 begin
581 -- PLRU interface
582 if r1.tlb_hit_index = i then
583 tlb_plru_acc_en <= r1.tlb_hit;
584 else
585 tlb_plru_acc_en <= '0';
586 end if;
587 tlb_plru_acc <= std_ulogic_vector(to_unsigned(r1.tlb_hit_way, TLB_WAY_BITS));
588 tlb_plru_victim(i) <= tlb_plru_out;
589 end process;
590 end generate;
591 end generate;
592
593 tlb_search : process(all)
594 variable hitway : tlb_way_t;
595 variable hit : std_ulogic;
596 variable eatag : tlb_tag_t;
597 begin
598 tlb_req_index <= to_integer(unsigned(r0.req.addr(TLB_LG_PGSZ + TLB_SET_BITS - 1
599 downto TLB_LG_PGSZ)));
600 hitway := 0;
601 hit := '0';
602 eatag := r0.req.addr(63 downto TLB_LG_PGSZ + TLB_SET_BITS);
603 for i in tlb_way_t loop
604 if tlb_valid_way(i) = '1' and
605 read_tlb_tag(i, tlb_tag_way) = eatag then
606 hitway := i;
607 hit := '1';
608 end if;
609 end loop;
610 tlb_hit <= hit and r0_valid;
611 tlb_hit_way <= hitway;
612 if tlb_hit = '1' then
613 pte <= read_tlb_pte(hitway, tlb_pte_way);
614 else
615 pte <= (others => '0');
616 end if;
617 valid_ra <= tlb_hit or not r0.req.virt_mode;
618 if r0.req.virt_mode = '1' then
619 ra <= pte(REAL_ADDR_BITS - 1 downto TLB_LG_PGSZ) &
620 r0.req.addr(TLB_LG_PGSZ - 1 downto ROW_OFF_BITS) &
621 (ROW_OFF_BITS-1 downto 0 => '0');
622 perm_attr <= extract_perm_attr(pte);
623 else
624 ra <= r0.req.addr(REAL_ADDR_BITS - 1 downto ROW_OFF_BITS) &
625 (ROW_OFF_BITS-1 downto 0 => '0');
626 perm_attr <= real_mode_perm_attr;
627 end if;
628 end process;
629
630 tlb_update : process(clk)
631 variable tlbie : std_ulogic;
632 variable tlbwe : std_ulogic;
633 variable repl_way : tlb_way_t;
634 variable eatag : tlb_tag_t;
635 variable tagset : tlb_way_tags_t;
636 variable pteset : tlb_way_ptes_t;
637 begin
638 if rising_edge(clk) then
639 tlbie := r0_valid and r0.tlbie;
640 tlbwe := r0_valid and r0.tlbld;
641 if rst = '1' or (tlbie = '1' and r0.doall = '1') then
642 -- clear all valid bits at once
643 for i in tlb_index_t loop
644 dtlb_valids(i) <= (others => '0');
645 end loop;
646 elsif tlbie = '1' then
647 if tlb_hit = '1' then
648 dtlb_valids(tlb_req_index)(tlb_hit_way) <= '0';
649 end if;
650 elsif tlbwe = '1' then
651 if tlb_hit = '1' then
652 repl_way := tlb_hit_way;
653 else
654 repl_way := to_integer(unsigned(tlb_plru_victim(tlb_req_index)));
655 end if;
656 eatag := r0.req.addr(63 downto TLB_LG_PGSZ + TLB_SET_BITS);
657 tagset := tlb_tag_way;
658 write_tlb_tag(repl_way, tagset, eatag);
659 dtlb_tags(tlb_req_index) <= tagset;
660 pteset := tlb_pte_way;
661 write_tlb_pte(repl_way, pteset, r0.req.data);
662 dtlb_ptes(tlb_req_index) <= pteset;
663 dtlb_valids(tlb_req_index)(repl_way) <= '1';
664 end if;
665 end if;
666 end process;
667
668 -- Generate PLRUs
669 maybe_plrus: if NUM_WAYS > 1 generate
670 begin
671 plrus: for i in 0 to NUM_LINES-1 generate
672 -- PLRU interface
673 signal plru_acc : std_ulogic_vector(WAY_BITS-1 downto 0);
674 signal plru_acc_en : std_ulogic;
675 signal plru_out : std_ulogic_vector(WAY_BITS-1 downto 0);
676
677 begin
678 plru : entity work.plru
679 generic map (
680 BITS => WAY_BITS
681 )
682 port map (
683 clk => clk,
684 rst => rst,
685 acc => plru_acc,
686 acc_en => plru_acc_en,
687 lru => plru_out
688 );
689
690 process(all)
691 begin
692 -- PLRU interface
693 if r1.hit_index = i then
694 plru_acc_en <= r1.cache_hit;
695 else
696 plru_acc_en <= '0';
697 end if;
698 plru_acc <= std_ulogic_vector(to_unsigned(r1.hit_way, WAY_BITS));
699 plru_victim(i) <= plru_out;
700 end process;
701 end generate;
702 end generate;
703
704 -- Cache tag RAM read port
705 cache_tag_read : process(clk)
706 variable index : index_t;
707 begin
708 if rising_edge(clk) then
709 if r0_stall = '1' then
710 index := req_index;
711 elsif m_in.valid = '1' then
712 index := get_index(m_in.addr);
713 else
714 index := get_index(d_in.addr);
715 end if;
716 cache_tag_set <= cache_tags(index);
717 end if;
718 end process;
719
720 -- Cache request parsing and hit detection
721 dcache_request : process(all)
722 variable is_hit : std_ulogic;
723 variable hit_way : way_t;
724 variable op : op_t;
725 variable opsel : std_ulogic_vector(2 downto 0);
726 variable go : std_ulogic;
727 variable nc : std_ulogic;
728 variable s_hit : std_ulogic;
729 variable s_tag : cache_tag_t;
730 variable s_pte : tlb_pte_t;
731 variable s_ra : std_ulogic_vector(REAL_ADDR_BITS - 1 downto 0);
732 variable hit_set : std_ulogic_vector(TLB_NUM_WAYS - 1 downto 0);
733 variable hit_way_set : hit_way_set_t;
734 variable rel_matches : std_ulogic_vector(TLB_NUM_WAYS - 1 downto 0);
735 variable rel_match : std_ulogic;
736 begin
737 -- Extract line, row and tag from request
738 req_index <= get_index(r0.req.addr);
739 req_row <= get_row(r0.req.addr);
740 req_tag <= get_tag(ra);
741
742 go := r0_valid and not (r0.tlbie or r0.tlbld) and not r1.error_done;
743
744 -- Test if pending request is a hit on any way
745 -- In order to make timing in virtual mode, when we are using the TLB,
746 -- we compare each way with each of the real addresses from each way of
747 -- the TLB, and then decide later which match to use.
748 hit_way := 0;
749 is_hit := '0';
750 rel_match := '0';
751 if r0.req.virt_mode = '1' then
752 rel_matches := (others => '0');
753 for j in tlb_way_t loop
754 hit_way_set(j) := 0;
755 s_hit := '0';
756 s_pte := read_tlb_pte(j, tlb_pte_way);
757 s_ra := s_pte(REAL_ADDR_BITS - 1 downto TLB_LG_PGSZ) &
758 r0.req.addr(TLB_LG_PGSZ - 1 downto 0);
759 s_tag := get_tag(s_ra);
760 for i in way_t loop
761 if go = '1' and cache_valids(req_index)(i) = '1' and
762 read_tag(i, cache_tag_set) = s_tag and
763 tlb_valid_way(j) = '1' then
764 hit_way_set(j) := i;
765 s_hit := '1';
766 end if;
767 end loop;
768 hit_set(j) := s_hit;
769 if s_tag = r1.reload_tag then
770 rel_matches(j) := '1';
771 end if;
772 end loop;
773 if tlb_hit = '1' then
774 is_hit := hit_set(tlb_hit_way);
775 hit_way := hit_way_set(tlb_hit_way);
776 rel_match := rel_matches(tlb_hit_way);
777 end if;
778 else
779 s_tag := get_tag(r0.req.addr);
780 for i in way_t loop
781 if go = '1' and cache_valids(req_index)(i) = '1' and
782 read_tag(i, cache_tag_set) = s_tag then
783 hit_way := i;
784 is_hit := '1';
785 end if;
786 end loop;
787 if s_tag = r1.reload_tag then
788 rel_match := '1';
789 end if;
790 end if;
791 req_same_tag <= rel_match;
792
793 -- See if the request matches the line currently being reloaded
794 if r1.state = RELOAD_WAIT_ACK and req_index = r1.store_index and
795 rel_match = '1' then
796 -- For a store, consider this a hit even if the row isn't valid
797 -- since it will be by the time we perform the store.
798 -- For a load, check the appropriate row valid bit.
799 is_hit := not r0.req.load or r1.rows_valid(req_row mod ROW_PER_LINE);
800 hit_way := replace_way;
801 end if;
802
803 -- Whether to use forwarded data for a load or not
804 use_forward1_next <= '0';
805 if get_row(r1.req.real_addr) = req_row and r1.req.hit_way = hit_way then
806 -- Only need to consider r1.write_bram here, since if we are
807 -- writing refill data here, then we don't have a cache hit this
808 -- cycle on the line being refilled. (There is the possibility
809 -- that the load following the load miss that started the refill
810 -- could be to the old contents of the victim line, since it is a
811 -- couple of cycles after the refill starts before we see the
812 -- updated cache tag. In that case we don't use the bypass.)
813 use_forward1_next <= r1.write_bram;
814 end if;
815 use_forward2_next <= '0';
816 if r1.forward_row1 = req_row and r1.forward_way1 = hit_way then
817 use_forward2_next <= r1.forward_valid1;
818 end if;
819
820 -- The way that matched on a hit
821 req_hit_way <= hit_way;
822
823 -- The way to replace on a miss
824 if r1.write_tag = '1' then
825 replace_way <= to_integer(unsigned(plru_victim(r1.store_index)));
826 else
827 replace_way <= r1.store_way;
828 end if;
829
830 -- work out whether we have permission for this access
831 -- NB we don't yet implement AMR, thus no KUAP
832 rc_ok <= perm_attr.reference and (r0.req.load or perm_attr.changed);
833 perm_ok <= (r0.req.priv_mode or not perm_attr.priv) and
834 (perm_attr.wr_perm or (r0.req.load and perm_attr.rd_perm));
835 access_ok <= valid_ra and perm_ok and rc_ok;
836
837 -- Combine the request and cache hit status to decide what
838 -- operation needs to be done
839 --
840 nc := r0.req.nc or perm_attr.nocache;
841 op := OP_NONE;
842 if go = '1' then
843 if access_ok = '0' then
844 op := OP_BAD;
845 elsif cancel_store = '1' then
846 op := OP_STCX_FAIL;
847 else
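-- opsel bits, MSB to LSB: load, non-cachable, cache hit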
848 opsel := r0.req.load & nc & is_hit;
849 case opsel is
850 when "101" => op := OP_LOAD_HIT;
851 when "100" => op := OP_LOAD_MISS;
852 when "110" => op := OP_LOAD_NC;
853 when "001" => op := OP_STORE_HIT;
854 when "000" => op := OP_STORE_MISS;
855 when "010" => op := OP_STORE_MISS;
856 when "011" => op := OP_BAD;
857 when "111" => op := OP_BAD;
858 when others => op := OP_NONE;
859 end case;
860 end if;
861 end if;
862 req_op <= op;
863 req_go <= go;
864
865 -- Version of the row number that is valid one cycle earlier
866 -- in the cases where we need to read the cache data BRAM.
867 -- If we're stalling then we need to keep reading the last
868 -- row requested.
869 if r0_stall = '0' then
870 if m_in.valid = '1' then
871 early_req_row <= get_row(m_in.addr);
872 else
873 early_req_row <= get_row(d_in.addr);
874 end if;
875 else
876 early_req_row <= req_row;
877 end if;
878 end process;
879
880 -- Wire up wishbone request latch out of stage 1
881 wishbone_out <= r1.wb;
882
883 -- Handle load-with-reservation and store-conditional instructions
884 reservation_comb: process(all)
885 begin
886 cancel_store <= '0';
887 set_rsrv <= '0';
888 clear_rsrv <= '0';
889 if r0_valid = '1' and r0.req.reserve = '1' then
890 -- XXX generate alignment interrupt if address is not aligned
891 -- XXX or if r0.req.nc = '1'
892 if r0.req.load = '1' then
893 -- load with reservation
894 set_rsrv <= '1';
895 else
896 -- store conditional
897 clear_rsrv <= '1';
898 if reservation.valid = '0' or
899 r0.req.addr(63 downto LINE_OFF_BITS) /= reservation.addr then
900 cancel_store <= '1';
901 end if;
902 end if;
903 end if;
904 end process;
905
906 reservation_reg: process(clk)
907 begin
908 if rising_edge(clk) then
909 if rst = '1' then
910 reservation.valid <= '0';
911 elsif r0_valid = '1' and access_ok = '1' then
912 if clear_rsrv = '1' then
913 reservation.valid <= '0';
914 elsif set_rsrv = '1' then
915 reservation.valid <= '1';
916 reservation.addr <= r0.req.addr(63 downto LINE_OFF_BITS);
917 end if;
918 end if;
919 end if;
920 end process;
921
922 -- Return data for loads & completion control logic
923 --
924 writeback_control: process(all)
925 variable data_out : std_ulogic_vector(63 downto 0);
926 variable data_fwd : std_ulogic_vector(63 downto 0);
927 variable j : integer;
928 begin
929 -- Use the bypass if we are reading the row that was written 1 or 2 cycles
930 -- ago, including for the slow_valid = 1 case (i.e. completing a load
931 -- miss or a non-cacheable load).
932 if r1.use_forward1 = '1' then
933 data_fwd := r1.forward_data1;
934 else
935 data_fwd := r1.forward_data2;
936 end if;
937 data_out := cache_out(r1.hit_way);
938 for i in 0 to 7 loop
939 j := i * 8;
940 if r1.forward_sel(i) = '1' then
941 data_out(j + 7 downto j) := data_fwd(j + 7 downto j);
942 end if;
943 end loop;
944
945 d_out.valid <= r1.ls_valid;
946 d_out.data <= data_out;
947 d_out.store_done <= not r1.stcx_fail;
948 d_out.error <= r1.error_done;
949 d_out.cache_paradox <= r1.cache_paradox;
950
951 -- Outputs to MMU
952 m_out.done <= r1.mmu_done;
953 m_out.err <= r1.error_done;
954 m_out.data <= data_out;
955
956 -- We have a valid load or store hit or we just completed a slow
957 -- op such as a load miss, a NC load or a store
958 --
959 -- Note: the load hit is delayed by one cycle. However it cannot
960 -- collide with r1.slow_valid (well, unless I miscalculated) because
961 -- slow_valid can only be set on a subsequent request and not on its
962 -- first cycle (the state machine must have advanced), which makes
963 -- slow_valid at least 2 cycles from the previous hit_load_valid.
964 --
965
966 -- Sanity: Only one of these must be set in any given cycle
967 assert (r1.slow_valid and r1.stcx_fail) /= '1' report
968 "unexpected slow_valid collision with stcx_fail"
969 severity FAILURE;
970 assert ((r1.slow_valid or r1.stcx_fail) and r1.hit_load_valid) /= '1' report
971 "unexpected hit_load_delayed collision with slow_valid"
972 severity FAILURE;
973
974 if r1.mmu_req = '0' then
975 -- Request came from loadstore1...
976 -- Load hit case is the standard path
977 if r1.hit_load_valid = '1' then
978 report "completing load hit data=" & to_hstring(data_out);
979 end if;
980
981 -- error cases complete without stalling
982 if r1.error_done = '1' then
983 report "completing ld/st with error";
984 end if;
985
986 -- Slow ops (load miss, NC, stores)
987 if r1.slow_valid = '1' then
988 report "completing store or load miss data=" & to_hstring(data_out);
989 end if;
990
991 else
992 -- Request came from MMU
993 if r1.hit_load_valid = '1' then
994 report "completing load hit to MMU, data=" & to_hstring(m_out.data);
995 end if;
996
997 -- error cases complete without stalling
998 if r1.error_done = '1' then
999 report "completing MMU ld with error";
1000 end if;
1001
1002 -- Slow ops (i.e. load miss)
1003 if r1.slow_valid = '1' then
1004 report "completing MMU load miss, data=" & to_hstring(m_out.data);
1005 end if;
1006 end if;
1007
1008 end process;
1009
1010 --
1011 -- Generate a cache RAM for each way. This handles the normal
1012 -- reads, writes from reloads and the special store-hit update
1013 -- path as well.
1014 --
1015 -- Note: the BRAMs have an extra read buffer, meaning the output
1016 -- is pipelined an extra cycle. This differs from the
1017 -- icache. The writeback logic needs to take that into
1018 -- account by using 1-cycle delayed signals for load hits.
1019 --
1020 rams: for i in 0 to NUM_WAYS-1 generate
1021 signal do_read : std_ulogic;
1022 signal rd_addr : std_ulogic_vector(ROW_BITS-1 downto 0);
1023 signal do_write : std_ulogic;
1024 signal wr_addr : std_ulogic_vector(ROW_BITS-1 downto 0);
1025 signal wr_data : std_ulogic_vector(wishbone_data_bits-1 downto 0);
1026 signal wr_sel : std_ulogic_vector(ROW_SIZE-1 downto 0);
1027 signal wr_sel_m : std_ulogic_vector(ROW_SIZE-1 downto 0);
1028 signal dout : cache_row_t;
1029 begin
1030 way: entity work.cache_ram
1031 generic map (
1032 ROW_BITS => ROW_BITS,
1033 WIDTH => wishbone_data_bits,
1034 ADD_BUF => true
1035 )
1036 port map (
1037 clk => clk,
1038 rd_en => do_read,
1039 rd_addr => rd_addr,
1040 rd_data => dout,
1041 wr_sel => wr_sel_m,
1042 wr_addr => wr_addr,
1043 wr_data => wr_data
1044 );
1045 process(all)
1046 begin
1047 -- Cache hit reads
1048 do_read <= '1';
1049 rd_addr <= std_ulogic_vector(to_unsigned(early_req_row, ROW_BITS));
1050 cache_out(i) <= dout;
1051
1052 -- Write mux:
1053 --
1054 -- Defaults to wishbone read responses (cache refill),
1055 --
1056 -- For timing, the mux on wr_data/sel/addr is not dependent on anything
1057 -- other than the current state.
1058 --
1059 wr_sel_m <= (others => '0');
1060
1061 do_write <= '0';
1062 if r1.write_bram = '1' then
1063 -- Write store data to BRAM. This happens one cycle after the
1064 -- store is in r0.
1065 wr_data <= r1.req.data;
1066 wr_sel <= r1.req.byte_sel;
1067 wr_addr <= std_ulogic_vector(to_unsigned(get_row(r1.req.real_addr), ROW_BITS));
1068 if i = r1.req.hit_way then
1069 do_write <= '1';
1070 end if;
1071 else
1072 -- Otherwise, we might be doing a reload or a DCBZ
1073 if r1.dcbz = '1' then
1074 wr_data <= (others => '0');
1075 else
1076 wr_data <= wishbone_in.dat;
1077 end if;
1078 wr_addr <= std_ulogic_vector(to_unsigned(r1.store_row, ROW_BITS));
1079 wr_sel <= (others => '1');
1080
1081 if r1.state = RELOAD_WAIT_ACK and wishbone_in.ack = '1' and replace_way = i then
1082 do_write <= '1';
1083 end if;
1084 end if;
1085
1086 -- Mask write selects with do_write since BRAM doesn't
1087 -- have a global write-enable
1088 if do_write = '1' then
1089 wr_sel_m <= wr_sel;
1090 end if;
1091
1092 end process;
1093 end generate;
1094
1095 --
1096 -- Cache hit synchronous machine for the easy case. This handles load hits.
1097 -- It also handles error cases (TLB miss, cache paradox)
1098 --
1099 dcache_fast_hit : process(clk)
1100 begin
1101 if rising_edge(clk) then
1102 if req_op /= OP_NONE then
1103 report "op:" & op_t'image(req_op) &
1104 " addr:" & to_hstring(r0.req.addr) &
1105 " nc:" & std_ulogic'image(r0.req.nc) &
1106 " idx:" & integer'image(req_index) &
1107 " tag:" & to_hstring(req_tag) &
1108 " way: " & integer'image(req_hit_way);
1109 end if;
1110 if r0_valid = '1' then
1111 r1.mmu_req <= r0.mmu_req;
1112 end if;
1113
1114 -- Fast path for load/store hits. Set signals for the writeback controls.
1115 r1.hit_way <= req_hit_way;
1116 r1.hit_index <= req_index;
1117 if req_op = OP_LOAD_HIT then
1118 r1.hit_load_valid <= '1';
1119 else
1120 r1.hit_load_valid <= '0';
1121 end if;
1122 if req_op = OP_LOAD_HIT or req_op = OP_STORE_HIT then
1123 r1.cache_hit <= '1';
1124 else
1125 r1.cache_hit <= '0';
1126 end if;
1127
1128 if req_op = OP_BAD then
1129 report "Signalling ld/st error valid_ra=" & std_ulogic'image(valid_ra) &
1130 " rc_ok=" & std_ulogic'image(rc_ok) & " perm_ok=" & std_ulogic'image(perm_ok);
1131 r1.error_done <= '1';
1132 r1.cache_paradox <= access_ok;
1133 else
1134 r1.error_done <= '0';
1135 r1.cache_paradox <= '0';
1136 end if;
1137
1138 if req_op = OP_STCX_FAIL then
1139 r1.stcx_fail <= '1';
1140 else
1141 r1.stcx_fail <= '0';
1142 end if;
1143
1144 -- Record TLB hit information for updating TLB PLRU
1145 r1.tlb_hit <= tlb_hit;
1146 r1.tlb_hit_way <= tlb_hit_way;
1147 r1.tlb_hit_index <= tlb_req_index;
1148
1149 end if;
1150 end process;
1151
1152 --
1153 -- Memory accesses are handled by this state machine:
1154 --
1155 -- * Cache load miss/reload (in conjunction with "rams")
1156 -- * Load hits for non-cachable forms
1157 -- * Stores (the collision case is handled in "rams")
1158 --
1159 -- All wishbone requests generation is done here. This machine
1160 -- operates at stage 1.
1161 --
1162 dcache_slow : process(clk)
1163 variable stbs_done : boolean;
1164 variable req : mem_access_request_t;
1165 variable acks : unsigned(2 downto 0);
1166 begin
1167 if rising_edge(clk) then
1168 r1.use_forward1 <= use_forward1_next;
1169 r1.forward_sel <= (others => '0');
1170 if use_forward1_next = '1' then
1171 r1.forward_sel <= r1.req.byte_sel;
1172 elsif use_forward2_next = '1' then
1173 r1.forward_sel <= r1.forward_sel1;
1174 end if;
1175
1176 r1.forward_data2 <= r1.forward_data1;
1177 if r1.write_bram = '1' then
1178 r1.forward_data1 <= r1.req.data;
1179 r1.forward_sel1 <= r1.req.byte_sel;
1180 r1.forward_way1 <= r1.req.hit_way;
1181 r1.forward_row1 <= get_row(r1.req.real_addr);
1182 r1.forward_valid1 <= '1';
1183 else
1184 if r1.dcbz = '1' then
1185 r1.forward_data1 <= (others => '0');
1186 else
1187 r1.forward_data1 <= wishbone_in.dat;
1188 end if;
1189 r1.forward_sel1 <= (others => '1');
1190 r1.forward_way1 <= replace_way;
1191 r1.forward_row1 <= r1.store_row;
1192 r1.forward_valid1 <= '0';
1193 end if;
1194
1195 -- On reset, clear all valid bits to force misses
1196 if rst = '1' then
1197 for i in index_t loop
1198 cache_valids(i) <= (others => '0');
1199 end loop;
1200 r1.state <= IDLE;
1201 r1.full <= '0';
1202 r1.slow_valid <= '0';
1203 r1.wb.cyc <= '0';
1204 r1.wb.stb <= '0';
1205 r1.ls_valid <= '0';
1206 r1.mmu_done <= '0';
1207
1208 -- Not useful normally but helps avoid tons of sim warnings
1209 r1.wb.adr <= (others => '0');
1210 else
1211 -- Reset one-cycle pulse signals
1212 r1.slow_valid <= '0';
1213 r1.write_bram <= '0';
1214 r1.inc_acks <= '0';
1215 r1.dec_acks <= '0';
1216
1217 r1.ls_valid <= '0';
1218 -- complete tlbies and TLB loads in the third cycle
1219 r1.mmu_done <= r0_valid and (r0.tlbie or r0.tlbld);
1220 if req_op = OP_LOAD_HIT or req_op = OP_BAD or req_op = OP_STCX_FAIL then
1221 if r0.mmu_req = '0' then
1222 r1.ls_valid <= '1';
1223 else
1224 r1.mmu_done <= '1';
1225 end if;
1226 end if;
1227
1228 if r1.write_tag = '1' then
1229 -- Store new tag in selected way
1230 for i in 0 to NUM_WAYS-1 loop
1231 if i = replace_way then
1232 cache_tags(r1.store_index)((i + 1) * TAG_WIDTH - 1 downto i * TAG_WIDTH) <=
1233 (TAG_WIDTH - 1 downto TAG_BITS => '0') & r1.reload_tag;
1234 end if;
1235 end loop;
1236 r1.store_way <= replace_way;
1237 r1.write_tag <= '0';
1238 end if;
1239
1240 -- Take request from r1.req if there is one there,
1241 -- else from req_op, ra, etc.
1242 if r1.full = '1' then
1243 req := r1.req;
1244 else
1245 req.op := req_op;
1246 req.valid := req_go;
1247 req.mmu_req := r0.mmu_req;
1248 req.dcbz := r0.req.dcbz;
1249 req.real_addr := ra;
1250 req.data := r0.req.data;
1251 req.byte_sel := r0.req.byte_sel;
1252 req.hit_way := req_hit_way;
1253 req.same_tag := req_same_tag;
1254
1255 -- Store the incoming request from r0, if it is a slow request
1256 -- Note that r1.full = 1 implies req_op = OP_NONE
1257 if req_op = OP_LOAD_MISS or req_op = OP_LOAD_NC or
1258 req_op = OP_STORE_MISS or req_op = OP_STORE_HIT then
1259 r1.req <= req;
1260 r1.full <= '1';
1261 end if;
1262 end if;
1263
1264 -- Main state machine
1265 case r1.state is
1266 when IDLE =>
1267 r1.wb.adr <= req.real_addr(r1.wb.adr'left downto 0);
1268 r1.dcbz <= '0';
1269
1270 -- Keep track of our index and way for subsequent stores.
1271 r1.store_index <= get_index(req.real_addr);
1272 r1.store_row <= get_row(req.real_addr);
1273 r1.end_row_ix <= get_row_of_line(get_row(req.real_addr)) - 1;
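-- The reload starts at the requested row and wraps around the line,
-- so the last row to arrive is the one just before the starting row.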
1274 r1.reload_tag <= get_tag(req.real_addr);
1275 r1.req.same_tag <= '1';
1276
1277 if req.op = OP_STORE_HIT then
1278 r1.store_way <= req.hit_way;
1279 end if;
1280
1281 -- Reset per-row valid bits, ready for handling OP_LOAD_MISS
1282 for i in 0 to ROW_PER_LINE - 1 loop
1283 r1.rows_valid(i) <= '0';
1284 end loop;
1285
1286 case req.op is
1287 when OP_LOAD_HIT =>
1288 -- stay in IDLE state
1289
1290 when OP_LOAD_MISS =>
1291 -- Normal load cache miss, start the reload machine
1292 --
1293 report "cache miss real addr:" & to_hstring(req.real_addr) &
1294 " idx:" & integer'image(get_index(req.real_addr)) &
1295 " tag:" & to_hstring(get_tag(req.real_addr));
1296
1297 -- Start the wishbone cycle
1298 r1.wb.sel <= (others => '1');
1299 r1.wb.we <= '0';
1300 r1.wb.cyc <= '1';
1301 r1.wb.stb <= '1';
1302
1303 -- Track that we had one request sent
1304 r1.state <= RELOAD_WAIT_ACK;
1305 r1.write_tag <= '1';
1306
1307 when OP_LOAD_NC =>
1308 r1.wb.sel <= req.byte_sel;
1309 r1.wb.cyc <= '1';
1310 r1.wb.stb <= '1';
1311 r1.wb.we <= '0';
1312 r1.state <= NC_LOAD_WAIT_ACK;
1313
1314 when OP_STORE_HIT | OP_STORE_MISS =>
1315 if req.dcbz = '0' then
1316 r1.wb.sel <= req.byte_sel;
1317 r1.wb.dat <= req.data;
1318 r1.state <= STORE_WAIT_ACK;
1319 r1.acks_pending <= to_unsigned(1, 3);
1320 r1.full <= '0';
1321 r1.slow_valid <= '1';
1322 if req.mmu_req = '0' then
1323 r1.ls_valid <= '1';
1324 else
1325 r1.mmu_done <= '1';
1326 end if;
1327 if req.op = OP_STORE_HIT then
1328 r1.write_bram <= '1';
1329 end if;
1330 else
1331 -- dcbz is handled much like a load miss except
1332 -- that we are writing to memory instead of reading
1333
1334 -- Start the wishbone writes
1335 r1.wb.sel <= (others => '1');
1336 r1.wb.dat <= (others => '0');
1337
1338 -- Handle the rest like a load miss
1339 r1.state <= RELOAD_WAIT_ACK;
1340 if req.op = OP_STORE_MISS then
1341 r1.write_tag <= '1';
1342 end if;
1343 r1.dcbz <= '1';
1344 end if;
1345 r1.wb.we <= '1';
1346 r1.wb.cyc <= '1';
1347 r1.wb.stb <= '1';
1348
1349 -- OP_NONE does nothing here
1350 -- OP_BAD and OP_STCX_FAIL were already handled above
1351 when OP_NONE =>
1352 when OP_BAD =>
1353 when OP_STCX_FAIL =>
1354 end case;
1355
1356 when RELOAD_WAIT_ACK =>
1357 -- Requests are all sent if stb is 0
1358 stbs_done := r1.wb.stb = '0';
1359
1360 -- If we are still sending requests, was one accepted ?
1361 if wishbone_in.stall = '0' and not stbs_done then
1362 -- That was the last word ? We are done sending. Clear
1363 -- stb and set stbs_done so we can handle an eventual last
1364 -- ack on the same cycle.
1365 --
1366 if is_last_row_addr(r1.wb.adr, r1.end_row_ix) then
1367 r1.wb.stb <= '0';
1368 stbs_done := true;
1369 end if;
1370
1371 -- Calculate the next row address
1372 r1.wb.adr <= next_row_addr(r1.wb.adr);
1373 end if;
1374
1375 -- Incoming acks processing
1376 r1.forward_valid1 <= wishbone_in.ack;
1377 if wishbone_in.ack = '1' then
1378 r1.rows_valid(r1.store_row mod ROW_PER_LINE) <= '1';
1379 -- If this is the data we were looking for, we can
1380 -- complete the request next cycle.
1381 -- Compare the whole address in case the request in
1382 -- r1.req is not the one that started this refill.
1383 if r1.full = '1' and r1.req.same_tag = '1' and
1384 ((r1.dcbz = '1' and r1.req.dcbz = '1') or
1385 (r1.dcbz = '0' and r1.req.op = OP_LOAD_MISS)) and
1386 r1.store_row = get_row(r1.req.real_addr) then
1387 r1.full <= '0';
1388 r1.slow_valid <= '1';
1389 if r1.mmu_req = '0' then
1390 r1.ls_valid <= '1';
1391 else
1392 r1.mmu_done <= '1';
1393 end if;
1394 r1.forward_sel <= (others => '1');
1395 r1.use_forward1 <= '1';
1396 end if;
1397
1398 -- Check for completion
1399 if stbs_done and is_last_row(r1.store_row, r1.end_row_ix) then
1400 -- Complete wishbone cycle
1401 r1.wb.cyc <= '0';
1402
1403 -- Cache line is now valid
1404 cache_valids(r1.store_index)(r1.store_way) <= '1';
1405
1406 r1.state <= IDLE;
1407 end if;
1408
1409 -- Increment store row counter
1410 r1.store_row <= next_row(r1.store_row);
1411 end if;
1412
1413 when STORE_WAIT_ACK =>
1414 stbs_done := r1.wb.stb = '0';
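-- Update the count of acks still outstanding from the inc_acks/dec_acks
-- flags registered in the previous cycle, rather than directly from the
-- wishbone handshake, to ease timing on this calculation.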
1415 acks := r1.acks_pending;
1416 if r1.inc_acks /= r1.dec_acks then
1417 if r1.inc_acks = '1' then
1418 acks := acks + 1;
1419 else
1420 acks := acks - 1;
1421 end if;
1422 end if;
1423 r1.acks_pending <= acks;
1424 -- Clear stb when slave accepted request
1425 if wishbone_in.stall = '0' then
1426 -- See if there is another store waiting to be done
1427 -- which is in the same real page.
1428 if req.valid = '1' then
1429 r1.wb.adr(SET_SIZE_BITS - 1 downto 0) <=
1430 req.real_addr(SET_SIZE_BITS - 1 downto 0);
1431 r1.wb.dat <= req.data;
1432 r1.wb.sel <= req.byte_sel;
1433 end if;
1434 if acks < 7 and req.same_tag = '1' and
1435 (req.op = OP_STORE_MISS or req.op = OP_STORE_HIT) then
1436 r1.wb.stb <= '1';
1437 stbs_done := false;
1438 if req.op = OP_STORE_HIT then
1439 r1.write_bram <= '1';
1440 end if;
1441 r1.full <= '0';
1442 r1.slow_valid <= '1';
1443 -- Store requests never come from the MMU
1444 r1.ls_valid <= '1';
1446 r1.inc_acks <= '1';
1447 else
1448 r1.wb.stb <= '0';
1449 stbs_done := true;
1450 end if;
1451 end if;
1452
1453 -- Got ack ? See if complete.
1454 if wishbone_in.ack = '1' then
1455 if stbs_done and acks = 1 then
1456 r1.state <= IDLE;
1457 r1.wb.cyc <= '0';
1458 r1.wb.stb <= '0';
1459 end if;
1460 r1.dec_acks <= '1';
1461 end if;
1462
1463 when NC_LOAD_WAIT_ACK =>
1464 -- Clear stb when slave accepted request
1465 if wishbone_in.stall = '0' then
1466 r1.wb.stb <= '0';
1467 end if;
1468
1469 -- Got ack ? complete.
1470 if wishbone_in.ack = '1' then
1471 r1.state <= IDLE;
1472 r1.full <= '0';
1473 r1.slow_valid <= '1';
1474 if r1.mmu_req = '0' then
1475 r1.ls_valid <= '1';
1476 else
1477 r1.mmu_done <= '1';
1478 end if;
1479 r1.forward_sel <= (others => '1');
1480 r1.use_forward1 <= '1';
1481 r1.wb.cyc <= '0';
1482 r1.wb.stb <= '0';
1483 end if;
1484 end case;
1485 end if;
1486 end if;
1487 end process;
1488
1489 dc_log: if LOG_LENGTH > 0 generate
1490 signal log_data : std_ulogic_vector(19 downto 0);
1491 begin
1492 dcache_log: process(clk)
1493 begin
1494 if rising_edge(clk) then
1495 log_data <= r1.wb.adr(5 downto 3) &
1496 wishbone_in.stall &
1497 wishbone_in.ack &
1498 r1.wb.stb & r1.wb.cyc &
1499 d_out.error &
1500 d_out.valid &
1501 std_ulogic_vector(to_unsigned(op_t'pos(req_op), 3)) &
1502 stall_out &
1503 std_ulogic_vector(to_unsigned(tlb_hit_way, 3)) &
1504 valid_ra &
1505 std_ulogic_vector(to_unsigned(state_t'pos(r1.state), 3));
1506 end if;
1507 end process;
1508 log_out <= log_data;
1509 end generate;
1510 end;