1 #!/usr/bin/env python3
2 #
3 # Copyright (C) 2020,2021 Luke Kenneth Casson Leighton <lkcl@lkcl.net>
4 # Copyright (C) 2020 Cole Poirier
5 # Copyright (C) 2020,2021 Cesar Strauss
6 # Copyright (C) 2021 Tobias Platen
7 #
8 # Original dcache.vhdl Copyright of its authors and licensed
9 # by IBM under CC-BY 4.0
10 # https://github.com/antonblanchard/microwatt
11 #
12 # Conversion to nmigen funded by NLnet and NGI POINTER under EU Grants
13 # 871528 and 957073, under the LGPL-v3+ License
14
15 """DCache
16
17 based on Anton Blanchard microwatt dcache.vhdl
18
19 note that the microwatt dcache wishbone interface expects "stall".
20 for simplicity at the moment this is hard-coded to cyc & ~ack.
21 see WB4 spec, p84, section 5.2.1
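in nmigen terms the hard-coding amounts to roughly
    comb += bus.stall.eq(bus.cyc & ~bus.ack)
(a sketch only, restating the statement above in code form)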
22
23 IMPORTANT: for store, the data is sampled the cycle AFTER the "valid"
24 is raised. sigh
25
26 Links:
27
28 * https://libre-soc.org/3d_gpu/architecture/set_associative_cache.jpg
29 * https://bugs.libre-soc.org/show_bug.cgi?id=469
30 * https://libre-soc.org/irclog-microwatt/%23microwatt.2021-12-07.log.html
31 (discussion about brams for ECP5)
32
33 """
34
35 import sys
36
37 from nmutil.gtkw import write_gtkw
38
39 sys.setrecursionlimit(1000000)
40
41 from enum import Enum, unique
42
43 from nmigen import (Module, Signal, Elaboratable, Cat, Repl, Array, Const,
44 Record, Memory)
45 from nmutil.util import Display
46 from nmigen.lib.coding import Decoder
47
48 from copy import deepcopy
49 from random import randint, seed
50
51 from nmigen_soc.wishbone.bus import Interface
52
53 from nmigen.cli import main
54 from nmutil.iocontrol import RecordObject
55 from nmigen.utils import log2_int
56 from soc.experiment.mem_types import (LoadStore1ToDCacheType,
57 DCacheToLoadStore1Type,
58 MMUToDCacheType,
59 DCacheToMMUType)
60
61 from soc.experiment.wb_types import (WB_ADDR_BITS, WB_DATA_BITS, WB_SEL_BITS,
62 WBAddrType, WBDataType, WBSelType,
63 WBMasterOut, WBSlaveOut,
64 WBMasterOutVector, WBSlaveOutVector,
65 WBIOMasterOut, WBIOSlaveOut)
66
67 from soc.experiment.cache_ram import CacheRam
68 from soc.experiment.plru import PLRU, PLRUs
69 #from nmutil.plru import PLRU, PLRUs
70
71 # for test
72 from soc.bus.sram import SRAM
73 from nmigen import Memory
74 from nmigen.cli import rtlil
75
76 # NOTE: to use cxxsim, export NMIGEN_SIM_MODE=cxxsim from the shell
77 # Also, check out the cxxsim nmigen branch, and latest yosys from git
78 from nmutil.sim_tmp_alternative import Simulator
79
80 from nmutil.util import wrap
81
82 LOG_LENGTH = 0 # Non-zero to enable log data collection
83
84 def ispow2(x):
85 return (1<<log2_int(x, False)) == x
86
87
88 class DCacheConfig:
89 def __init__(self, LINE_SIZE = 64, # Line size in bytes
90 NUM_LINES = 64, # Number of lines in a set
91 NUM_WAYS = 2, # Number of ways
92 TLB_SET_SIZE = 64, # L1 DTLB entries per set
93 TLB_NUM_WAYS = 2, # L1 DTLB number of sets
94 TLB_LG_PGSZ = 12): # L1 DTLB log_2(page_size)
95 self.LINE_SIZE = LINE_SIZE
96 self.NUM_LINES = NUM_LINES
97 self.NUM_WAYS = NUM_WAYS
98 self.TLB_SET_SIZE = TLB_SET_SIZE
99 self.TLB_NUM_WAYS = TLB_NUM_WAYS
100 self.TLB_LG_PGSZ = TLB_LG_PGSZ
101
102 # BRAM organisation: We never access more than
103 # WB_DATA_BITS at a time so to save
104 # resources we make the array only that wide, and
105 # use consecutive indices to make a cache "line"
106 #
107 # ROW_SIZE is the width in bytes of the BRAM
108 # (based on WB, so 64-bits)
109 self.ROW_SIZE = WB_DATA_BITS // 8;
110
111 # ROW_PER_LINE is the number of rows (wishbone
112 # transactions) in a line
113 self.ROW_PER_LINE = self.LINE_SIZE // self.ROW_SIZE
114
115 # BRAM_ROWS is the number of rows in BRAM needed
116 # to represent the full dcache
117 self.BRAM_ROWS = self.NUM_LINES * self.ROW_PER_LINE
118
119 print ("ROW_SIZE", self.ROW_SIZE)
120 print ("ROW_PER_LINE", self.ROW_PER_LINE)
121 print ("BRAM_ROWS", self.BRAM_ROWS)
122 print ("NUM_WAYS", self.NUM_WAYS)
123
124 # Bit fields counts in the address
125
126 # REAL_ADDR_BITS is the number of real address
127 # bits that we store
128 self.REAL_ADDR_BITS = 56
129
130 # ROW_BITS is the number of bits to select a row
131 self.ROW_BITS = log2_int(self.BRAM_ROWS)
132
133 # ROW_LINE_BITS is the number of bits to select
134 # a row within a line
135 self.ROW_LINE_BITS = log2_int(self.ROW_PER_LINE)
136
137 # LINE_OFF_BITS is the number of bits for
138 # the offset in a cache line
139 self.LINE_OFF_BITS = log2_int(self.LINE_SIZE)
140
141 # ROW_OFF_BITS is the number of bits for
142 # the offset in a row
143 self.ROW_OFF_BITS = log2_int(self.ROW_SIZE)
144
145 # INDEX_BITS is the number of bits to
146 # select a cache line
147 self.INDEX_BITS = log2_int(self.NUM_LINES)
148
149 # SET_SIZE_BITS is the log base 2 of the set size
150 self.SET_SIZE_BITS = self.LINE_OFF_BITS + self.INDEX_BITS
151
152 # TAG_BITS is the number of bits of
153 # the tag part of the address
154 self.TAG_BITS = self.REAL_ADDR_BITS - self.SET_SIZE_BITS
155
156 # TAG_WIDTH is the width in bits of each way of the tag RAM
157 self.TAG_WIDTH = self.TAG_BITS + 7 - ((self.TAG_BITS + 7) % 8)
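# (equivalently: TAG_BITS rounded up to the next multiple of 8)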
158
159 # WAY_BITS is the number of bits to select a way
160 self.WAY_BITS = log2_int(self.NUM_WAYS)
161
162 # Example of the address bit-field layout (printed for the configured geometry):
163 layout = f"""\
164 DCache Layout:
165 |.. -----------------------| REAL_ADDR_BITS ({self.REAL_ADDR_BITS})
166 .. |--------------| SET_SIZE_BITS ({self.SET_SIZE_BITS})
167 .. tag |index| line |
168 .. | row | |
169 .. | |---| | ROW_LINE_BITS ({self.ROW_LINE_BITS})
170 .. | |--- - --| LINE_OFF_BITS ({self.LINE_OFF_BITS})
171 .. | |- --| ROW_OFF_BITS ({self.ROW_OFF_BITS})
172 .. |----- ---| | ROW_BITS ({self.ROW_BITS})
173 .. |-----| | INDEX_BITS ({self.INDEX_BITS})
174 .. --------| | TAG_BITS ({self.TAG_BITS})
175 """
176 print (layout)
177 print ("Dcache TAG %d IDX %d ROW_BITS %d ROFF %d LOFF %d RLB %d" % \
178 (self.TAG_BITS, self.INDEX_BITS, self.ROW_BITS,
179 self.ROW_OFF_BITS, self.LINE_OFF_BITS, self.ROW_LINE_BITS))
180 print ("index @: %d-%d" % (self.LINE_OFF_BITS, self.SET_SIZE_BITS))
181 print ("row @: %d-%d" % (self.LINE_OFF_BITS, self.ROW_OFF_BITS))
182 print ("tag @: %d-%d width %d" % (self.SET_SIZE_BITS,
183 self.REAL_ADDR_BITS, self.TAG_WIDTH))
184
185 self.TAG_RAM_WIDTH = self.TAG_WIDTH * self.NUM_WAYS
186
187 print ("TAG_RAM_WIDTH", self.TAG_RAM_WIDTH)
188 print (" TAG_WIDTH", self.TAG_WIDTH)
189 print (" NUM_WAYS", self.NUM_WAYS)
190 print (" NUM_LINES", self.NUM_LINES)
191
192 # L1 TLB
193 self.TLB_SET_BITS = log2_int(self.TLB_SET_SIZE)
194 self.TLB_WAY_BITS = log2_int(self.TLB_NUM_WAYS)
195 self.TLB_EA_TAG_BITS = 64 - (self.TLB_LG_PGSZ + self.TLB_SET_BITS)
196 self.TLB_TAG_WAY_BITS = self.TLB_NUM_WAYS * self.TLB_EA_TAG_BITS
197 self.TLB_PTE_BITS = 64
198 self.TLB_PTE_WAY_BITS = self.TLB_NUM_WAYS * self.TLB_PTE_BITS;
199
200 assert (self.LINE_SIZE % self.ROW_SIZE) == 0, \
201 "LINE_SIZE not multiple of ROW_SIZE"
202 assert ispow2(self.LINE_SIZE), "LINE_SIZE not power of 2"
203 assert ispow2(self.NUM_LINES), "NUM_LINES not power of 2"
204 assert ispow2(self.ROW_PER_LINE), "ROW_PER_LINE not power of 2"
205 assert self.ROW_BITS == \
206 (self.INDEX_BITS + self.ROW_LINE_BITS), \
207 "geometry bits don't add up"
208 assert (self.LINE_OFF_BITS == \
209 self.ROW_OFF_BITS + self.ROW_LINE_BITS), \
210 "geometry bits don't add up"
211 assert self.REAL_ADDR_BITS == \
212 (self.TAG_BITS + self.INDEX_BITS + self.LINE_OFF_BITS), \
213 "geometry bits don't add up"
214 assert self.REAL_ADDR_BITS == \
215 (self.TAG_BITS + self.ROW_BITS + self.ROW_OFF_BITS), \
216 "geometry bits don't add up"
217 assert 64 == WB_DATA_BITS, \
218 "Can't yet handle wb width that isn't 64-bits"
219 assert self.SET_SIZE_BITS <= self.TLB_LG_PGSZ, \
220 "Set indexed by virtual address"
221
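# Illustrative worked example (not used by the code): with the DCacheConfig
# defaults (LINE_SIZE=64, NUM_LINES=64, NUM_WAYS=2, TLB_SET_SIZE=64,
# TLB_NUM_WAYS=2) and WB_DATA_BITS=64, the geometry above works out as:
#   ROW_SIZE=8, ROW_PER_LINE=8, BRAM_ROWS=512
#   ROW_OFF_BITS=3, ROW_LINE_BITS=3, LINE_OFF_BITS=6
#   INDEX_BITS=6, ROW_BITS=9, SET_SIZE_BITS=12
#   TAG_BITS=56-12=44, TAG_WIDTH=48, WAY_BITS=1, TAG_RAM_WIDTH=96
#   TLB_SET_BITS=6, TLB_WAY_BITS=1, TLB_EA_TAG_BITS=64-(12+6)=46
# which satisfies all of the assertions above.  (Note that the DCache class
# below overrides several of these - e.g. NUM_LINES=16, TLB_SET_SIZE=16 -
# but the arithmetic is the same.)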
222 def CacheTagArray(self):
223 return Array(Signal(self.TAG_RAM_WIDTH, name="tag%d" % x) \
224 for x in range(self.NUM_LINES))
225
226 def CacheValidsArray(self):
227 return Array(Signal(self.NUM_WAYS, name="tag_valids%d" % x)
228 for x in range(self.NUM_LINES))
229
230 def RowPerLineValidArray(self):
231 return Array(Signal(name="rows_valid%d" % x) \
232 for x in range(self.ROW_PER_LINE))
233
234 def TLBHit(self, name):
235 return Record([('valid', 1),
236 ('way', self.TLB_WAY_BITS)], name=name)
237
238 def TLBTagEAArray(self):
239 return Array(Signal(self.TLB_EA_TAG_BITS, name="tlbtagea%d" % x) \
240 for x in range (self.TLB_NUM_WAYS))
241
242 def TLBRecord(self, name):
243 tlb_layout = [('valid', self.TLB_NUM_WAYS),
244 ('tag', self.TLB_TAG_WAY_BITS),
245 ('pte', self.TLB_PTE_WAY_BITS)
246 ]
247 return Record(tlb_layout, name=name)
248
249 def TLBValidArray(self):
250 return Array(Signal(self.TLB_NUM_WAYS, name="tlb_valid%d" % x)
251 for x in range(self.TLB_SET_SIZE))
252
253 def HitWaySet(self):
254 return Array(Signal(self.WAY_BITS, name="hitway_%d" % x) \
255 for x in range(self.TLB_NUM_WAYS))
256
257 # Cache RAM interface
258 def CacheRamOut(self):
259 return Array(Signal(WB_DATA_BITS, name="cache_out%d" % x) \
260 for x in range(self.NUM_WAYS))
261
262 # PLRU output interface
263 def PLRUOut(self):
264 return Array(Signal(self.WAY_BITS, name="plru_out%d" % x) \
265 for x in range(self.NUM_LINES))
266
267 # TLB PLRU output interface
268 def TLBPLRUOut(self):
269 return Array(Signal(self.TLB_WAY_BITS, name="tlbplru_out%d" % x) \
270 for x in range(self.TLB_SET_SIZE))
271
272 # Helper functions to decode incoming requests
273 #
274 # Return the cache line index (tag index) for an address
275 def get_index(self, addr):
276 return addr[self.LINE_OFF_BITS:self.SET_SIZE_BITS]
277
278 # Return the cache row index (data memory) for an address
279 def get_row(self, addr):
280 return addr[self.ROW_OFF_BITS:self.SET_SIZE_BITS]
281
282 # Return the index of a row within a line
283 def get_row_of_line(self, row):
284 return row[:self.ROW_BITS][:self.ROW_LINE_BITS]
285
286 # Returns whether this is the last row of a line
287 def is_last_row_addr(self, addr, last):
288 return addr[self.ROW_OFF_BITS:self.LINE_OFF_BITS] == last
289
290 # Returns whether this is the last row of a line
291 def is_last_row(self, row, last):
292 return self.get_row_of_line(row) == last
293
294 # Return the next row in the current cache line. We use a
295 # dedicated function in order to limit the size of the
296 # generated adder to be only the bits within a cache line
297 # (3 bits with default settings)
298 def next_row(self, row):
299 row_v = row[0:self.ROW_LINE_BITS] + 1
300 return Cat(row_v[:self.ROW_LINE_BITS], row[self.ROW_LINE_BITS:])
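# e.g. with ROW_LINE_BITS=3 the low 3 bits wrap 7 -> 0 while the upper
# (line-selecting) bits of the row are passed through unchanged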
301
302 # Get the tag value from the address
303 def get_tag(self, addr):
304 return addr[self.SET_SIZE_BITS:self.REAL_ADDR_BITS]
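# Illustrative example only, assuming the DCacheConfig defaults
# (LINE_OFF_BITS=6, SET_SIZE_BITS=12): for real address 0x12345678 the
# helpers above give
#   get_index -> addr[6:12]  = 0x19
#   get_row   -> addr[3:12]  = 0xcf
#   get_tag   -> addr[12:56] = 0x12345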
305
306 # Read a tag from a tag memory row
307 def read_tag(self, way, tagset):
308 return tagset.word_select(way, self.TAG_WIDTH)[:self.TAG_BITS]
309
310 # Read a TLB tag from a TLB tag memory row
311 def read_tlb_tag(self, way, tags):
312 return tags.word_select(way, self.TLB_EA_TAG_BITS)
313
314 # Write a TLB tag to a TLB tag memory row
315 def write_tlb_tag(self, way, tags, tag):
316 return self.read_tlb_tag(way, tags).eq(tag)
317
318 # Read a PTE from a TLB PTE memory row
319 def read_tlb_pte(self, way, ptes):
320 return ptes.word_select(way, self.TLB_PTE_BITS)
321
322 def write_tlb_pte(self, way, ptes, newpte):
323 return self.read_tlb_pte(way, ptes).eq(newpte)
324
325
326 # Record for storing permission, attribute, etc. bits from a PTE
327 class PermAttr(RecordObject):
328 def __init__(self, name=None):
329 super().__init__(name=name)
330 self.reference = Signal()
331 self.changed = Signal()
332 self.nocache = Signal()
333 self.priv = Signal()
334 self.rd_perm = Signal()
335 self.wr_perm = Signal()
336
337
338 def extract_perm_attr(pte):
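# NOTE: this appears to be a left-over stub from the VHDL conversion: pte is
# currently ignored, and the permission/attribute bits are instead taken
# straight from the PTE in tlb_search() below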
339 pa = PermAttr()
340 return pa;
341
342
343 # Type of operation on a "valid" input
344 @unique
345 class Op(Enum):
346 OP_NONE = 0
347 OP_BAD = 1 # NC cache hit, TLB miss, prot/RC failure
348 OP_STCX_FAIL = 2 # conditional store w/o reservation
349 OP_LOAD_HIT = 3 # Cache hit on load
350 OP_LOAD_MISS = 4 # Load missing cache
351 OP_LOAD_NC = 5 # Non-cachable load
352 OP_STORE_HIT = 6 # Store hitting cache
353 OP_STORE_MISS = 7 # Store missing cache
354
355
356 # Cache state machine
357 @unique
358 class State(Enum):
359 IDLE = 0 # Normal load hit processing
360 RELOAD_WAIT_ACK = 1 # Cache reload wait ack
361 STORE_WAIT_ACK = 2 # Store wait ack
362 NC_LOAD_WAIT_ACK = 3 # Non-cachable load wait ack
363
364
365 # Dcache operations:
366 #
367 # In order to make timing, we use the BRAMs with
368 # an output buffer, which means that the BRAM
369 # output is delayed by an extra cycle.
370 #
371 # Thus, the dcache has a 2-stage internal pipeline
372 # for cache hits with no stalls.
373 #
374 # All other operations are handled via stalling
375 # in the first stage.
376 #
377 # The second stage can thus complete a hit at the same
378 # time as the first stage emits a stall for a complex op.
379 #
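# Rough load-hit timing (an illustrative sketch, not normative):
#   cycle 0: request presented on d_in, latched into r0 (stage 0)
#   cycle 1: TLB and tag compare, hit detection, r1 updated (stage 1)
#   cycle 2: buffered BRAM data available, d_out.valid/d_out.data driven
#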
380 # Stage 0 register, basically contains just the latched request
381
382 class RegStage0(RecordObject):
383 def __init__(self, name=None):
384 super().__init__(name=name)
385 self.req = LoadStore1ToDCacheType(name="lsmem")
386 self.tlbie = Signal() # indicates a tlbie request (from MMU)
387 self.doall = Signal() # with tlbie, indicates flush whole TLB
388 self.tlbld = Signal() # indicates a TLB load request (from MMU)
389 self.mmu_req = Signal() # indicates source of request
390 self.d_valid = Signal() # indicates req.data is valid now
391
392
393 class MemAccessRequest(RecordObject):
394 def __init__(self, cfg, name=None):
395 super().__init__(name=name)
396 self.op = Signal(Op)
397 self.valid = Signal()
398 self.dcbz = Signal()
399 self.real_addr = Signal(cfg.REAL_ADDR_BITS)
400 self.data = Signal(64)
401 self.byte_sel = Signal(8)
402 self.hit_way = Signal(cfg.WAY_BITS)
403 self.same_tag = Signal()
404 self.mmu_req = Signal()
405
406
407 # First stage register, contains state for stage 1 of load hits
408 # and for the state machine used by all other operations
409 class RegStage1(RecordObject):
410 def __init__(self, cfg, name=None):
411 super().__init__(name=name)
412 # Info about the request
413 self.full = Signal() # have uncompleted request
414 self.mmu_req = Signal() # request is from MMU
415 self.req = MemAccessRequest(cfg, name="reqmem")
416
417 # Cache hit state
418 self.hit_way = Signal(cfg.WAY_BITS)
419 self.hit_load_valid = Signal()
420 self.hit_index = Signal(cfg.INDEX_BITS)
421 self.cache_hit = Signal()
422
423 # TLB hit state
424 self.tlb_hit = cfg.TLBHit("tlb_hit")
425 self.tlb_hit_index = Signal(cfg.TLB_SET_BITS)
426
427 # 2-stage data buffer for data forwarded from writes to reads
428 self.forward_data1 = Signal(64)
429 self.forward_data2 = Signal(64)
430 self.forward_sel1 = Signal(8)
431 self.forward_valid1 = Signal()
432 self.forward_way1 = Signal(cfg.WAY_BITS)
433 self.forward_row1 = Signal(cfg.ROW_BITS)
434 self.use_forward1 = Signal()
435 self.forward_sel = Signal(8)
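# (the two stages presumably mirror the 2-cycle BRAM read path noted in
# rams() below, so a write can be bypassed to a read issued either one or
# two cycles later)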
436
437 # Cache miss state (reload state machine)
438 self.state = Signal(State)
439 self.dcbz = Signal()
440 self.write_bram = Signal()
441 self.write_tag = Signal()
442 self.slow_valid = Signal()
443 self.wb = WBMasterOut("wb")
444 self.reload_tag = Signal(cfg.TAG_BITS)
445 self.store_way = Signal(cfg.WAY_BITS)
446 self.store_row = Signal(cfg.ROW_BITS)
447 self.store_index = Signal(cfg.INDEX_BITS)
448 self.end_row_ix = Signal(cfg.ROW_LINE_BITS)
449 self.rows_valid = cfg.RowPerLineValidArray()
450 self.acks_pending = Signal(3)
451 self.inc_acks = Signal()
452 self.dec_acks = Signal()
453
454 # Signals to complete (possibly with error)
455 self.ls_valid = Signal()
456 self.ls_error = Signal()
457 self.mmu_done = Signal()
458 self.mmu_error = Signal()
459 self.cache_paradox = Signal()
460
461 # Signal to complete a failed stcx.
462 self.stcx_fail = Signal()
463
464
465 # Reservation information
466 class Reservation(RecordObject):
467 def __init__(self, cfg, name=None):
468 super().__init__(name=name)
469 self.valid = Signal()
470 self.addr = Signal(64-cfg.LINE_OFF_BITS)
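# reservations are tracked at cache-line granularity, so the low
# LINE_OFF_BITS of the address are not stored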
471
472
473 class DTLBUpdate(Elaboratable):
474 def __init__(self, cfg):
475 self.cfg = cfg
476 self.tlbie = Signal()
477 self.tlbwe = Signal()
478 self.doall = Signal()
479 self.tlb_hit = cfg.TLBHit("tlb_hit")
480 self.tlb_req_index = Signal(cfg.TLB_SET_BITS)
481
482 self.repl_way = Signal(cfg.TLB_WAY_BITS)
483 self.eatag = Signal(cfg.TLB_EA_TAG_BITS)
484 self.pte_data = Signal(cfg.TLB_PTE_BITS)
485
486 # read from dtlb array
487 self.tlb_read = Signal()
488 self.tlb_read_index = Signal(cfg.TLB_SET_BITS)
489 self.tlb_way = cfg.TLBRecord("o_tlb_way")
490
491 def elaborate(self, platform):
492 m = Module()
493 comb = m.d.comb
494 sync = m.d.sync
495 cfg = self.cfg
496
497 # there are 3 parts to this:
498 # QTY TLB_NUM_WAYs TAGs - of width (say) 46 bits of Effective Address
499 # QTY TLB_NUM_WAYs PTEs - of width (say) 64 bits
500 # "Valid" bits, one per "way", of QTY TLB_NUM_WAYs. these cannot
501 # be a Memory because they can all be cleared (tlbie, doall), i mean,
502 # we _could_, in theory, by overriding the Reset Signal of the Memory,
503 # hmmm....
504
505 dtlb_valid = cfg.TLBValidArray()
506 tlb_req_index = self.tlb_req_index
507
508 print ("TLB_TAG_WAY_BITS", cfg.TLB_TAG_WAY_BITS)
509 print (" TLB_EA_TAG_BITS", cfg.TLB_EA_TAG_BITS)
510 print (" TLB_NUM_WAYS", cfg.TLB_NUM_WAYS)
511 print ("TLB_PTE_WAY_BITS", cfg.TLB_PTE_WAY_BITS)
512 print (" TLB_PTE_BITS", cfg.TLB_PTE_BITS)
513 print (" TLB_NUM_WAYS", cfg.TLB_NUM_WAYS)
514
515 # TAG and PTE Memory SRAMs. transparent, write-enables are TLB_NUM_WAYS
516 tagway = Memory(depth=cfg.TLB_SET_SIZE, width=cfg.TLB_TAG_WAY_BITS,
517 attrs={'syn_ramstyle': "block_ram"})
518 m.submodules.rd_tagway = rd_tagway = tagway.read_port()
519 m.submodules.wr_tagway = wr_tagway = tagway.write_port(
520 granularity=cfg.TLB_EA_TAG_BITS)
521
522 pteway = Memory(depth=cfg.TLB_SET_SIZE, width=cfg.TLB_PTE_WAY_BITS,
523 attrs={'syn_ramstyle': "block_ram"})
524 m.submodules.rd_pteway = rd_pteway = pteway.read_port()
525 m.submodules.wr_pteway = wr_pteway = pteway.write_port(
526 granularity=cfg.TLB_PTE_BITS)
527
528 # commented out for now, can be put in if Memory.reset can be
529 # used for tlbie&doall to reset the entire Memory to zero in 1 cycle
530 #validm = Memory(depth=TLB_SET_SIZE, width=TLB_NUM_WAYS)
531 #m.submodules.rd_valid = rd_valid = validm.read_port()
532 #m.submodules.wr_valid = wr_valid = validm.write_port(
533 #granularity=1)
534
535 # connect up read and write addresses to Valid/PTE/TAG SRAMs
536 m.d.comb += rd_pteway.addr.eq(self.tlb_read_index)
537 m.d.comb += rd_tagway.addr.eq(self.tlb_read_index)
538 #m.d.comb += rd_valid.addr.eq(self.tlb_read_index)
539 m.d.comb += wr_tagway.addr.eq(tlb_req_index)
540 m.d.comb += wr_pteway.addr.eq(tlb_req_index)
541 #m.d.comb += wr_valid.addr.eq(tlb_req_index)
542
543 updated = Signal()
544 v_updated = Signal()
545 tb_out = Signal(cfg.TLB_TAG_WAY_BITS) # tlb_way_tags_t
546 db_out = Signal(cfg.TLB_NUM_WAYS) # tlb_way_valids_t
547 pb_out = Signal(cfg.TLB_PTE_WAY_BITS) # tlb_way_ptes_t
548 dv = Signal(cfg.TLB_NUM_WAYS) # tlb_way_valids_t
549
550 comb += dv.eq(dtlb_valid[tlb_req_index])
551 comb += db_out.eq(dv)
552
553 with m.If(self.tlbie & self.doall):
554 # clear all valid bits at once
555 # XXX hmmm, validm _could_ use Memory reset here...
556 for i in range(cfg.TLB_SET_SIZE):
557 sync += dtlb_valid[i].eq(0)
558 with m.Elif(self.tlbie):
559 # invalidate just the hit_way
560 with m.If(self.tlb_hit.valid):
561 comb += db_out.bit_select(self.tlb_hit.way, 1).eq(0)
562 comb += v_updated.eq(1)
563 with m.Elif(self.tlbwe):
564 # write to the requested tag and PTE
565 comb += cfg.write_tlb_tag(self.repl_way, tb_out, self.eatag)
566 comb += cfg.write_tlb_pte(self.repl_way, pb_out, self.pte_data)
567 # set valid bit
568 comb += db_out.bit_select(self.repl_way, 1).eq(1)
569
570 comb += updated.eq(1)
571 comb += v_updated.eq(1)
572
573 # above, sometimes valid is requested to be updated but data not
574 # therefore split them out, here. note the granularity thing matches
575 # with the shift-up of the eatag/pte_data into the correct TLB way.
576 # thus it is not necessary to write the entire lot, just the portion
577 # being altered: hence writing the *old* copy of the row is not needed
578 with m.If(updated): # PTE and TAG to be written
579 comb += wr_pteway.data.eq(pb_out)
580 comb += wr_pteway.en.eq(1<<self.repl_way)
581 comb += wr_tagway.data.eq(tb_out)
582 comb += wr_tagway.en.eq(1<<self.repl_way)
583 with m.If(v_updated): # Valid to be written
584 sync += dtlb_valid[tlb_req_index].eq(db_out)
585 #comb += wr_valid.data.eq(db_out)
586 #comb += wr_valid.en.eq(1<<self.repl_way)
587
588 # select one TLB way, use a register here
589 r_delay = Signal()
590 sync += r_delay.eq(self.tlb_read)
591 # first deal with the valids, which are not in a Memory.
592 # tlb way valid is output on a 1 clock delay with sync,
593 # but have to explicitly deal with "forwarding" here
594 with m.If(self.tlb_read):
595 with m.If(v_updated): # write *and* read in same cycle: forward
596 sync += self.tlb_way.valid.eq(db_out)
597 with m.Else():
598 sync += self.tlb_way.valid.eq(dtlb_valid[self.tlb_read_index])
599 # now deal with the Memory-read case. the output must remain
600 # valid (stable) even when a read-request is not made, but stable
601 # on a one-clock delay, hence the register
602 r_tlb_way = cfg.TLBRecord("r_tlb_way")
603 with m.If(r_delay):
604 # on one clock delay, capture the contents of the read port(s)
605 comb += self.tlb_way.tag.eq(rd_tagway.data)
606 comb += self.tlb_way.pte.eq(rd_pteway.data)
607 sync += r_tlb_way.tag.eq(rd_tagway.data)
608 sync += r_tlb_way.pte.eq(rd_pteway.data)
609 with m.Else():
610 # ... so that the register can output it when no read is requested
611 # it's rather overkill but better to be safe than sorry
612 comb += self.tlb_way.tag.eq(r_tlb_way.tag)
613 comb += self.tlb_way.pte.eq(r_tlb_way.pte)
614 #comb += self.tlb_way.eq(r_tlb_way)
615
616 return m
617
618
619 class DCachePendingHit(Elaboratable):
620
621 def __init__(self, cfg, tlb_way,
622 cache_i_validdx, cache_tag_set,
623 req_addr):
624
625 self.go = Signal()
626 self.virt_mode = Signal()
627 self.is_hit = Signal()
628 self.tlb_hit = cfg.TLBHit("tlb_hit")
629 self.hit_way = Signal(cfg.WAY_BITS)
630 self.rel_match = Signal()
631 self.req_index = Signal(cfg.INDEX_BITS)
632 self.reload_tag = Signal(cfg.TAG_BITS)
633
634 self.tlb_way = tlb_way
635 self.cache_i_validdx = cache_i_validdx
636 self.cache_tag_set = cache_tag_set
637 self.req_addr = req_addr
638 self.cfg = cfg
639
640 def elaborate(self, platform):
641 m = Module()
642 comb = m.d.comb
643 sync = m.d.sync
644
645 go = self.go
646 virt_mode = self.virt_mode
647 is_hit = self.is_hit
648 tlb_way = self.tlb_way
649 cache_i_validdx = self.cache_i_validdx
650 cache_tag_set = self.cache_tag_set
651 req_addr = self.req_addr
652 tlb_hit = self.tlb_hit
653 hit_way = self.hit_way
654 rel_match = self.rel_match
655 req_index = self.req_index
656 reload_tag = self.reload_tag
657 cfg = self.cfg
658
659 hit_set = Array(Signal(name="hit_set_%d" % i) \
660 for i in range(cfg.TLB_NUM_WAYS))
661 rel_matches = Array(Signal(name="rel_matches_%d" % i) \
662 for i in range(cfg.TLB_NUM_WAYS))
663 hit_way_set = cfg.HitWaySet()
664
665 # Test if pending request is a hit on any way
666 # In order to make timing in virtual mode,
667 # when we are using the TLB, we compare each
668 # way with each of the real addresses from each way of
669 # the TLB, and then decide later which match to use.
670
671 with m.If(virt_mode):
672 for j in range(cfg.TLB_NUM_WAYS): # tlb_num_way_t
673 s_tag = Signal(cfg.TAG_BITS, name="s_tag%d" % j)
674 s_hit = Signal(name="s_hit%d" % j)
675 s_pte = Signal(cfg.TLB_PTE_BITS, name="s_pte%d" % j)
676 s_ra = Signal(cfg.REAL_ADDR_BITS, name="s_ra%d" % j)
677 # read the PTE, calc the Real Address, get the tag
678 comb += s_pte.eq(cfg.read_tlb_pte(j, tlb_way.pte))
679 comb += s_ra.eq(Cat(req_addr[0:cfg.TLB_LG_PGSZ],
680 s_pte[cfg.TLB_LG_PGSZ:cfg.REAL_ADDR_BITS]))
681 comb += s_tag.eq(cfg.get_tag(s_ra))
682 # for each way check the tag against the cache tag set
683 for i in range(cfg.NUM_WAYS): # way_t
684 is_tag_hit = Signal(name="is_tag_hit_%d_%d" % (j, i))
685 comb += is_tag_hit.eq(go & cache_i_validdx[i] &
686 (cfg.read_tag(i, cache_tag_set) == s_tag)
687 & (tlb_way.valid[j]))
688 with m.If(is_tag_hit):
689 comb += hit_way_set[j].eq(i)
690 comb += s_hit.eq(1)
691 comb += hit_set[j].eq(s_hit)
692 comb += rel_matches[j].eq(s_tag == reload_tag)
693 with m.If(tlb_hit.valid):
694 comb += is_hit.eq(hit_set[tlb_hit.way])
695 comb += hit_way.eq(hit_way_set[tlb_hit.way])
696 comb += rel_match.eq(rel_matches[tlb_hit.way])
697 with m.Else():
698 s_tag = Signal(cfg.TAG_BITS)
699 comb += s_tag.eq(cfg.get_tag(req_addr))
700 for i in range(cfg.NUM_WAYS): # way_t
701 is_tag_hit = Signal(name="is_tag_hit_%d" % i)
702 comb += is_tag_hit.eq(go & cache_i_validdx[i] &
703 (cfg.read_tag(i, cache_tag_set) == s_tag))
704 with m.If(is_tag_hit):
705 comb += hit_way.eq(i)
706 comb += is_hit.eq(1)
707 with m.If(s_tag == reload_tag):
708 comb += rel_match.eq(1)
709
710 return m
711
712
713 class DCache(Elaboratable, DCacheConfig):
714 """Set associative dcache write-through
715
716 TODO (in no specific order):
717 * See list in icache.vhdl
718 * Complete load misses on the cycle when WB data comes instead of
719 at the end of line (this requires dealing with requests coming in
720 while not idle...)
721 """
722 def __init__(self, pspec=None):
723 self.d_in = LoadStore1ToDCacheType("d_in")
724 self.d_out = DCacheToLoadStore1Type("d_out")
725
726 self.m_in = MMUToDCacheType("m_in")
727 self.m_out = DCacheToMMUType("m_out")
728
729 self.stall_out = Signal()
730 self.any_stall_out = Signal()
731 self.dreq_when_stall = Signal()
732 self.mreq_when_stall = Signal()
733
734 # standard naming (wired to non-standard for compatibility)
735 self.bus = Interface(addr_width=32,
736 data_width=64,
737 granularity=8,
738 features={'stall'},
739 #alignment=0,
740 name="dcache")
741
742 self.log_out = Signal(20)
743
744 # test if small cache is to be enabled
745 self.small_cache = (hasattr(pspec, "small_cache") and
746 (pspec.small_cache == True))
747 # test if microwatt compatibility is to be enabled
748 self.microwatt_compat = (hasattr(pspec, "microwatt_compat") and
749 (pspec.microwatt_compat == True))
750
751 XLEN = pspec.XLEN
752 TLB_SET_SIZE = 16
753 TLB_NUM_WAYS = 2
754 NUM_LINES = 16
755 NUM_WAYS = 2
756
757 if self.small_cache:
758 # reduce way sizes and num lines to ridiculously small
759 TLB_SET_SIZE = 2
760 TLB_NUM_WAYS = 1
761 NUM_LINES = 2
762 NUM_WAYS = 1
763 if self.microwatt_compat:
764 # reduce way sizes
765 NUM_WAYS = 1
766 TLB_NUM_WAYS = 1
767
768 super().__init__(TLB_SET_SIZE=TLB_SET_SIZE,
769 # XLEN=XLEN, # TODO
770 TLB_NUM_WAYS = TLB_NUM_WAYS,
771 NUM_LINES = NUM_LINES,
772 NUM_WAYS = NUM_WAYS
773 )
774
775 def stage_0(self, m, r0, r1, r0_full):
776 """Latch the request in r0.req as long as we're not stalling
777 """
778 comb = m.d.comb
779 sync = m.d.sync
780 d_in, d_out, m_in = self.d_in, self.d_out, self.m_in
781
782 r = RegStage0("stage0")
783
784 # TODO, this goes in unit tests and formal proofs
785 with m.If(d_in.valid & m_in.valid):
786 sync += Display("request collision loadstore vs MMU")
787
788 with m.If(m_in.valid):
789 comb += r.req.valid.eq(1)
790 comb += r.req.load.eq(~(m_in.tlbie | m_in.tlbld))# no invalidate
791 comb += r.req.dcbz.eq(0)
792 comb += r.req.nc.eq(0)
793 comb += r.req.reserve.eq(0)
794 comb += r.req.virt_mode.eq(0)
795 comb += r.req.priv_mode.eq(1)
796 comb += r.req.addr.eq(m_in.addr)
797 comb += r.req.data.eq(m_in.pte)
798 comb += r.req.byte_sel.eq(~0) # Const -1 sets all to 0b111....
799 comb += r.tlbie.eq(m_in.tlbie)
800 comb += r.doall.eq(m_in.doall)
801 comb += r.tlbld.eq(m_in.tlbld)
802 comb += r.mmu_req.eq(1)
803 comb += r.d_valid.eq(1)
804 m.d.sync += Display(" DCACHE req mmu addr %x pte %x ld %d",
805 m_in.addr, m_in.pte, r.req.load)
806
807 with m.Else():
808 comb += r.req.eq(d_in)
809 comb += r.req.data.eq(0)
810 comb += r.tlbie.eq(0)
811 comb += r.doall.eq(0)
812 comb += r.tlbld.eq(0)
813 comb += r.mmu_req.eq(0)
814 comb += r.d_valid.eq(0)
815
816 sync += r0_full.eq(0)
817 with m.If((~r1.full & ~d_in.hold) | ~r0_full):
818 sync += r0.eq(r)
819 sync += r0_full.eq(r.req.valid)
820 with m.Elif(~r0.d_valid):
821 # Sample data the cycle after a request comes in from loadstore1.
822 # If another request has come in already then the data will get
823 # put directly into req.data below.
824 sync += r0.req.data.eq(d_in.data)
825 sync += r0.d_valid.eq(1)
826 with m.If(d_in.valid):
827 m.d.sync += Display(" DCACHE req cache "
828 "virt %d addr %x data %x ld %d",
829 r.req.virt_mode, r.req.addr,
830 r.req.data, r.req.load)
831
832 def tlb_read(self, m, r0_stall, tlb_way):
833 """TLB
834 Operates in the second cycle on the request latched in r0.req.
835 TLB updates write the entry at the end of the second cycle.
836 """
837 comb = m.d.comb
838 sync = m.d.sync
839 m_in, d_in = self.m_in, self.d_in
840
841 addrbits = Signal(self.TLB_SET_BITS)
842
843 amin = self.TLB_LG_PGSZ
844 amax = self.TLB_LG_PGSZ + self.TLB_SET_BITS
845
846 with m.If(m_in.valid):
847 comb += addrbits.eq(m_in.addr[amin : amax])
848 with m.Else():
849 comb += addrbits.eq(d_in.addr[amin : amax])
850
851 # If we have any op and the previous op isn't finished,
852 # then keep the same output for next cycle.
853 d = self.dtlb_update
854 comb += d.tlb_read_index.eq(addrbits)
855 comb += d.tlb_read.eq(~r0_stall)
856 comb += tlb_way.eq(d.tlb_way)
857
858 def maybe_tlb_plrus(self, m, r1, tlb_plru_victim, tlb_req_index):
859 """Generate TLB PLRUs
860 """
861 comb = m.d.comb
862 sync = m.d.sync
863
864 if self.TLB_NUM_WAYS == 0:
865 return
866
867 # suite of PLRUs with a selection and output mechanism
868 tlb_plrus = PLRUs("d_tlb", self.TLB_SET_SIZE, self.TLB_WAY_BITS)
869 m.submodules.tlb_plrus = tlb_plrus
870 comb += tlb_plrus.way.eq(r1.tlb_hit.way)
871 comb += tlb_plrus.valid.eq(r1.tlb_hit.valid)
872 comb += tlb_plrus.index.eq(r1.tlb_hit_index)
873 comb += tlb_plrus.isel.eq(tlb_req_index) # select victim
874 comb += tlb_plru_victim.eq(tlb_plrus.o_index) # selected victim
875
876 def tlb_search(self, m, tlb_req_index, r0, r0_valid,
877 tlb_way,
878 pte, tlb_hit, valid_ra, perm_attr, ra):
879
880 comb = m.d.comb
881
882 hitway = Signal(self.TLB_WAY_BITS)
883 hit = Signal()
884 eatag = Signal(self.TLB_EA_TAG_BITS)
885
886 self.TLB_LG_END = self.TLB_LG_PGSZ + self.TLB_SET_BITS
887 r0_req_addr = r0.req.addr[self.TLB_LG_PGSZ : self.TLB_LG_END]
888 comb += tlb_req_index.eq(r0_req_addr)
889 comb += eatag.eq(r0.req.addr[self.TLB_LG_END : 64 ])
890
891 for i in range(self.TLB_NUM_WAYS):
892 is_tag_hit = Signal(name="is_tag_hit%d" % i)
893 tlb_tag = Signal(self.TLB_EA_TAG_BITS, name="tlb_tag%d" % i)
894 comb += tlb_tag.eq(self.read_tlb_tag(i, tlb_way.tag))
895 comb += is_tag_hit.eq((tlb_way.valid[i]) & (tlb_tag == eatag))
896 with m.If(is_tag_hit):
897 comb += hitway.eq(i)
898 comb += hit.eq(1)
899
900 comb += tlb_hit.valid.eq(hit & r0_valid)
901 comb += tlb_hit.way.eq(hitway)
902
903 with m.If(tlb_hit.valid):
904 comb += pte.eq(self.read_tlb_pte(hitway, tlb_way.pte))
905 comb += valid_ra.eq(tlb_hit.valid | ~r0.req.virt_mode)
906
907 with m.If(r0.req.virt_mode):
908 comb += ra.eq(Cat(Const(0, self.ROW_OFF_BITS),
909 r0.req.addr[self.ROW_OFF_BITS:self.TLB_LG_PGSZ],
910 pte[self.TLB_LG_PGSZ:self.REAL_ADDR_BITS]))
911 comb += perm_attr.reference.eq(pte[8])
912 comb += perm_attr.changed.eq(pte[7])
913 comb += perm_attr.nocache.eq(pte[5])
914 comb += perm_attr.priv.eq(pte[3])
915 comb += perm_attr.rd_perm.eq(pte[2])
916 comb += perm_attr.wr_perm.eq(pte[1])
917 with m.Else():
918 comb += ra.eq(Cat(Const(0, self.ROW_OFF_BITS),
919 r0.req.addr[self.ROW_OFF_BITS:self.REAL_ADDR_BITS]))
920 comb += perm_attr.reference.eq(1)
921 comb += perm_attr.changed.eq(1)
922 comb += perm_attr.nocache.eq(0)
923 comb += perm_attr.priv.eq(1)
924 comb += perm_attr.rd_perm.eq(1)
925 comb += perm_attr.wr_perm.eq(1)
926
927 with m.If(valid_ra):
928 m.d.sync += Display("DCACHE virt mode %d hit %d ra %x pte %x",
929 r0.req.virt_mode, tlb_hit.valid, ra, pte)
930 m.d.sync += Display(" perm ref=%d", perm_attr.reference)
931 m.d.sync += Display(" perm chg=%d", perm_attr.changed)
932 m.d.sync += Display(" perm noc=%d", perm_attr.nocache)
933 m.d.sync += Display(" perm prv=%d", perm_attr.priv)
934 m.d.sync += Display(" perm rdp=%d", perm_attr.rd_perm)
935 m.d.sync += Display(" perm wrp=%d", perm_attr.wr_perm)
936
937 def tlb_update(self, m, r0_valid, r0, tlb_req_index,
938 tlb_hit, tlb_plru_victim):
939
940 comb = m.d.comb
941 sync = m.d.sync
942
943 tlbie = Signal()
944 tlbwe = Signal()
945
946 comb += tlbie.eq(r0_valid & r0.tlbie)
947 comb += tlbwe.eq(r0_valid & r0.tlbld)
948
949 d = self.dtlb_update
950
951 comb += d.tlbie.eq(tlbie)
952 comb += d.tlbwe.eq(tlbwe)
953 comb += d.doall.eq(r0.doall)
954 comb += d.tlb_hit.eq(tlb_hit)
955 comb += d.tlb_req_index.eq(tlb_req_index)
956
957 with m.If(tlb_hit.valid):
958 comb += d.repl_way.eq(tlb_hit.way)
959 with m.Else():
960 comb += d.repl_way.eq(tlb_plru_victim)
961 comb += d.eatag.eq(r0.req.addr[self.TLB_LG_PGSZ + self.TLB_SET_BITS:64])
962 comb += d.pte_data.eq(r0.req.data)
963
964 def maybe_plrus(self, m, r1, plru_victim):
965 """Generate PLRUs
966 """
967 comb = m.d.comb
968 sync = m.d.sync
969
970 if self.TLB_NUM_WAYS == 0:
971 return
972
973 # suite of PLRUs with a selection and output mechanism
974 m.submodules.plrus = plrus = PLRUs("dtag", self.NUM_LINES,
975 self.WAY_BITS)
976 comb += plrus.way.eq(r1.hit_way)
977 comb += plrus.valid.eq(r1.cache_hit)
978 comb += plrus.index.eq(r1.hit_index)
979 comb += plrus.isel.eq(r1.store_index) # select victim
980 comb += plru_victim.eq(plrus.o_index) # selected victim
981
982 def cache_tag_read(self, m, r0_stall, req_index, cache_tag_set):
983 """Cache tag RAM read port
984 """
985 comb = m.d.comb
986 sync = m.d.sync
987
988 m_in, d_in = self.m_in, self.d_in
989
990 # synchronous tag read-port: NOT TRANSPARENT (cannot pass through
991 # write-to-a-read at the same time), seems to pass tests ok
992 m.submodules.rd_tag = rd_tag = self.tagmem.read_port(transparent=False)
993
994 index = Signal(self.INDEX_BITS)
995
996 with m.If(r0_stall):
997 comb += index.eq(req_index)
998 with m.Elif(m_in.valid):
999 comb += index.eq(self.get_index(m_in.addr))
1000 with m.Else():
1001 comb += index.eq(self.get_index(d_in.addr))
1002 comb += rd_tag.addr.eq(index)
1003 comb += cache_tag_set.eq(rd_tag.data) # read-port is a 1-clock delay
1004
1005 def dcache_request(self, m, r0, ra, req_index, req_row, req_tag,
1006 r0_valid, r1, cache_valids, replace_way,
1007 use_forward1_next, use_forward2_next,
1008 req_hit_way, plru_victim, rc_ok, perm_attr,
1009 valid_ra, perm_ok, access_ok, req_op, req_go,
1010 tlb_hit, tlb_way, cache_tag_set,
1011 cancel_store, req_same_tag, r0_stall, early_req_row):
1012 """Cache request parsing and hit detection
1013 """
1014
1015 comb = m.d.comb
1016 m_in, d_in = self.m_in, self.d_in
1017
1018 is_hit = Signal()
1019 hit_way = Signal(self.WAY_BITS)
1020 op = Signal(Op)
1021 opsel = Signal(3)
1022 go = Signal()
1023 nc = Signal()
1024 cache_i_validdx = Signal(self.NUM_WAYS)
1025
1026 # Extract line, row and tag from request
1027 comb += req_index.eq(self.get_index(r0.req.addr))
1028 comb += req_row.eq(self.get_row(r0.req.addr))
1029 comb += req_tag.eq(self.get_tag(ra))
1030
1031 if False: # display on comb is a bit... busy.
1032 comb += Display("dcache_req addr:%x ra: %x idx: %x tag: %x row: %x",
1033 r0.req.addr, ra, req_index, req_tag, req_row)
1034
1035 comb += go.eq(r0_valid & ~(r0.tlbie | r0.tlbld) & ~r1.ls_error)
1036 comb += cache_i_validdx.eq(cache_valids[req_index])
1037
1038 m.submodules.dcache_pend = dc = DCachePendingHit(self, tlb_way,
1039 cache_i_validdx, cache_tag_set,
1040 r0.req.addr)
1041 comb += dc.tlb_hit.eq(tlb_hit)
1042 comb += dc.reload_tag.eq(r1.reload_tag)
1043 comb += dc.virt_mode.eq(r0.req.virt_mode)
1044 comb += dc.go.eq(go)
1045 comb += dc.req_index.eq(req_index)
1046
1047 comb += is_hit.eq(dc.is_hit)
1048 comb += hit_way.eq(dc.hit_way)
1049 comb += req_same_tag.eq(dc.rel_match)
1050
1051 # See if the request matches the line currently being reloaded
1052 with m.If((r1.state == State.RELOAD_WAIT_ACK) &
1053 (req_index == r1.store_index) & req_same_tag):
1054 # For a store, consider this a hit even if the row isn't
1055 # valid since it will be by the time we perform the store.
1056 # For a load, check the appropriate row valid bit.
1057 rrow = Signal(self.ROW_LINE_BITS)
1058 comb += rrow.eq(req_row)
1059 valid = r1.rows_valid[rrow]
1060 comb += is_hit.eq((~r0.req.load) | valid)
1061 comb += hit_way.eq(replace_way)
1062
1063 # Whether to use forwarded data for a load or not
1064 with m.If((self.get_row(r1.req.real_addr) == req_row) &
1065 (r1.req.hit_way == hit_way)):
1066 # Only need to consider r1.write_bram here, since if we
1067 # are writing refill data here, then we don't have a
1068 # cache hit this cycle on the line being refilled.
1069 # (There is the possibility that the load following the
1070 # load miss that started the refill could be to the old
1071 # contents of the victim line, since it is a couple of
1072 # cycles after the refill starts before we see the updated
1073 # cache tag. In that case we don't use the bypass.)
1074 comb += use_forward1_next.eq(r1.write_bram)
1075 with m.If((r1.forward_row1 == req_row) & (r1.forward_way1 == hit_way)):
1076 comb += use_forward2_next.eq(r1.forward_valid1)
1077
1078 # The way that matched on a hit
1079 comb += req_hit_way.eq(hit_way)
1080
1081 # The way to replace on a miss
1082 with m.If(r1.write_tag):
1083 comb += replace_way.eq(plru_victim)
1084 with m.Else():
1085 comb += replace_way.eq(r1.store_way)
1086
1087 # work out whether we have permission for this access
1088 # NB we don't yet implement AMR, thus no KUAP
1089 comb += rc_ok.eq(perm_attr.reference
1090 & (r0.req.load | perm_attr.changed))
1091 comb += perm_ok.eq((r0.req.priv_mode | (~perm_attr.priv)) &
1092 (perm_attr.wr_perm |
1093 (r0.req.load & perm_attr.rd_perm)))
1094 comb += access_ok.eq(valid_ra & perm_ok & rc_ok)
1095
1096 # Combine the request and cache hit status to decide what
1097 # operation needs to be done
1098 comb += nc.eq(r0.req.nc | perm_attr.nocache)
1099 comb += op.eq(Op.OP_NONE)
1100 with m.If(go):
1101 with m.If(~access_ok):
1102 m.d.sync += Display("DCACHE access fail valid_ra=%d p=%d rc=%d",
1103 valid_ra, perm_ok, rc_ok)
1104 comb += op.eq(Op.OP_BAD)
1105 with m.Elif(cancel_store):
1106 m.d.sync += Display("DCACHE cancel store")
1107 comb += op.eq(Op.OP_STCX_FAIL)
1108 with m.Else():
1109 m.d.sync += Display("DCACHE valid_ra=%d nc=%d ld=%d",
1110 valid_ra, nc, r0.req.load)
1111 comb += opsel.eq(Cat(is_hit, nc, r0.req.load))
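# Cat() packs LSB-first: opsel[0]=is_hit, opsel[1]=nc, opsel[2]=load,
# so the case literals below read as 0b<load><nc><is_hit>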
1112 with m.Switch(opsel):
1113 with m.Case(0b101): comb += op.eq(Op.OP_LOAD_HIT)
1114 with m.Case(0b100): comb += op.eq(Op.OP_LOAD_MISS)
1115 with m.Case(0b110): comb += op.eq(Op.OP_LOAD_NC)
1116 with m.Case(0b001): comb += op.eq(Op.OP_STORE_HIT)
1117 with m.Case(0b000): comb += op.eq(Op.OP_STORE_MISS)
1118 with m.Case(0b010): comb += op.eq(Op.OP_STORE_MISS)
1119 with m.Case(0b011): comb += op.eq(Op.OP_BAD)
1120 with m.Case(0b111): comb += op.eq(Op.OP_BAD)
1121 comb += req_op.eq(op)
1122 comb += req_go.eq(go)
1123
1124 # Version of the row number that is valid one cycle earlier
1125 # in the cases where we need to read the cache data BRAM.
1126 # If we're stalling then we need to keep reading the last
1127 # row requested.
1128 with m.If(~r0_stall):
1129 with m.If(m_in.valid):
1130 comb += early_req_row.eq(self.get_row(m_in.addr))
1131 with m.Else():
1132 comb += early_req_row.eq(self.get_row(d_in.addr))
1133 with m.Else():
1134 comb += early_req_row.eq(req_row)
1135
1136 def reservation_comb(self, m, cancel_store, set_rsrv, clear_rsrv,
1137 r0_valid, r0, reservation):
1138 """Handle load-with-reservation and store-conditional instructions
1139 """
1140 comb = m.d.comb
1141
1142 with m.If(r0_valid & r0.req.reserve):
1143 # XXX generate alignment interrupt if address
1144 # is not aligned XXX or if r0.req.nc = '1'
1145 with m.If(r0.req.load):
1146 comb += set_rsrv.eq(r0.req.atomic_last) # load with reservation
1147 with m.Else():
1148 comb += clear_rsrv.eq(r0.req.atomic_last) # store conditional
1149 with m.If((~reservation.valid) |
1150 (r0.req.addr[self.LINE_OFF_BITS:64] !=
1151 reservation.addr)):
1152 comb += cancel_store.eq(1)
1153
1154 def reservation_reg(self, m, r0_valid, access_ok, set_rsrv, clear_rsrv,
1155 reservation, r0):
1156 comb = m.d.comb
1157 sync = m.d.sync
1158
1159 with m.If(r0_valid & access_ok):
1160 with m.If(clear_rsrv):
1161 sync += reservation.valid.eq(0)
1162 with m.Elif(set_rsrv):
1163 sync += reservation.valid.eq(1)
1164 sync += reservation.addr.eq(r0.req.addr[self.LINE_OFF_BITS:64])
1165
1166 def writeback_control(self, m, r1, cache_out_row):
1167 """Return data for loads & completion control logic
1168 """
1169 comb = m.d.comb
1170 sync = m.d.sync
1171 d_out, m_out = self.d_out, self.m_out
1172
1173 data_out = Signal(64)
1174 data_fwd = Signal(64)
1175
1176 # Use the bypass if we are reading the row that was
1177 # written 1 or 2 cycles ago, including for the
1178 # slow_valid = 1 case (i.e. completing a load
1179 # miss or a non-cacheable load).
1180 with m.If(r1.use_forward1):
1181 comb += data_fwd.eq(r1.forward_data1)
1182 with m.Else():
1183 comb += data_fwd.eq(r1.forward_data2)
1184
1185 comb += data_out.eq(cache_out_row)
1186
1187 for i in range(8):
1188 with m.If(r1.forward_sel[i]):
1189 dsel = data_fwd.word_select(i, 8)
1190 comb += data_out.word_select(i, 8).eq(dsel)
1191
1192 # DCache output to LoadStore
1193 comb += d_out.valid.eq(r1.ls_valid)
1194 comb += d_out.data.eq(data_out)
1195 comb += d_out.store_done.eq(~r1.stcx_fail)
1196 comb += d_out.error.eq(r1.ls_error)
1197 comb += d_out.cache_paradox.eq(r1.cache_paradox)
1198
1199 # Outputs to MMU
1200 comb += m_out.done.eq(r1.mmu_done)
1201 comb += m_out.err.eq(r1.mmu_error)
1202 comb += m_out.data.eq(data_out)
1203
1204 # We have a valid load or store hit or we just completed
1205 # a slow op such as a load miss, a NC load or a store
1206 #
1207 # Note: the load hit is delayed by one cycle. However it
1208 # can still not collide with r.slow_valid (well unless I
1209 # miscalculated) because slow_valid can only be set on a
1210 # subsequent request and not on its first cycle (the state
1211 # machine must have advanced), which makes slow_valid
1212 # at least 2 cycles from the previous hit_load_valid.
1213
1214 # Sanity: Only one of these must be set in any given cycle
1215
1216 if False: # TODO: need Display to get this to work
1217 assert (r1.slow_valid & r1.stcx_fail) != 1, \
1218 "unexpected slow_valid collision with stcx_fail"
1219
1220 assert ((r1.slow_valid | r1.stcx_fail) | r1.hit_load_valid) != 1, \
1221 "unexpected hit_load_delayed collision with slow_valid"
1222
1223 with m.If(~r1.mmu_req):
1224 # Request came from loadstore1...
1225 # Load hit case is the standard path
1226 with m.If(r1.hit_load_valid):
1227 sync += Display("completing load hit data=%x", data_out)
1228
1229 # error cases complete without stalling
1230 with m.If(r1.ls_error):
1231 with m.If(r1.dcbz):
1232 sync += Display("completing dcbz with error")
1233 with m.Else():
1234 sync += Display("completing ld/st with error")
1235
1236 # Slow ops (load miss, NC, stores)
1237 with m.If(r1.slow_valid):
1238 sync += Display("completing store or load miss adr=%x data=%x",
1239 r1.req.real_addr, data_out)
1240
1241 with m.Else():
1242 # Request came from MMU
1243 with m.If(r1.hit_load_valid):
1244 sync += Display("completing load hit to MMU, data=%x",
1245 m_out.data)
1246 # error cases complete without stalling
1247 with m.If(r1.mmu_error):
1248 sync += Display("combpleting MMU ld with error")
1249
1250 # Slow ops (i.e. load miss)
1251 with m.If(r1.slow_valid):
1252 sync += Display("completing MMU load miss, adr=%x data=%x",
1253 r1.req.real_addr, m_out.data)
1254
1255 def rams(self, m, r1, early_req_row, cache_out_row, replace_way):
1256 """rams
1257 Generate a cache RAM for each way. This handles the normal
1258 reads, writes from reloads and the special store-hit update
1259 path as well.
1260
1261 Note: the BRAMs have an extra read buffer, meaning the output
1262 is pipelined an extra cycle. This differs from the
1263 icache. The writeback logic needs to take that into
1264 account by using 1-cycle delayed signals for load hits.
1265 """
1266 comb = m.d.comb
1267 bus = self.bus
1268
1269 # Binary-to-Unary one-hot decoders here. replace-way one-hot is gated
1270 # (enabled) by bus.ack, not-write-bram, and state RELOAD_WAIT_ACK
1271 m.submodules.rams_replace_way_e = rwe = Decoder(self.NUM_WAYS)
1272 comb += rwe.n.eq(~((r1.state == State.RELOAD_WAIT_ACK) & bus.ack &
1273 ~r1.write_bram))
1274 comb += rwe.i.eq(replace_way)
1275
1276 m.submodules.rams_hit_way_e = hwe = Decoder(self.NUM_WAYS)
1277 comb += hwe.i.eq(r1.hit_way)
1278
1279 # this one is gated with write_bram, and replace_way_e can never be
1280 # set at the same time. that means that do_write can OR the outputs
1281 m.submodules.rams_hit_req_way_e = hre = Decoder(self.NUM_WAYS)
1282 comb += hre.n.eq(~r1.write_bram) # Decoder.n is inverted
1283 comb += hre.i.eq(r1.req.hit_way)
1284
1285 # common Signals
1286 do_read = Signal()
1287 wr_addr = Signal(self.ROW_BITS)
1288 wr_data = Signal(WB_DATA_BITS)
1289 wr_sel = Signal(self.ROW_SIZE)
1290 rd_addr = Signal(self.ROW_BITS)
1291
1292 comb += do_read.eq(1) # always enable
1293 comb += rd_addr.eq(early_req_row)
1294
1295 # Write mux:
1296 #
1297 # Defaults to wishbone read responses (cache refill)
1298 #
1299 # For timing, the mux on wr_data/sel/addr is not
1300 # dependent on anything other than the current state.
1301
1302 with m.If(r1.write_bram):
1303 # Write store data to BRAM. This happens one
1304 # cycle after the store is in r0.
1305 comb += wr_data.eq(r1.req.data)
1306 comb += wr_sel.eq(r1.req.byte_sel)
1307 comb += wr_addr.eq(self.get_row(r1.req.real_addr))
1308
1309 with m.Else():
1310 # Otherwise, we might be doing a reload or a DCBZ
1311 with m.If(r1.dcbz):
1312 comb += wr_data.eq(0)
1313 with m.Else():
1314 comb += wr_data.eq(bus.dat_r)
1315 comb += wr_addr.eq(r1.store_row)
1316 comb += wr_sel.eq(~0) # all 1s
1317
1318 # set up Cache Rams
1319 for i in range(self.NUM_WAYS):
1320 do_write = Signal(name="do_wr%d" % i)
1321 wr_sel_m = Signal(self.ROW_SIZE, name="wr_sel_m_%d" % i)
1322 d_out= Signal(WB_DATA_BITS, name="dout_%d" % i) # cache_row_t
1323
1324 way = CacheRam(self.ROW_BITS, WB_DATA_BITS, ADD_BUF=True, ram_num=i)
1325 m.submodules["cacheram_%d" % i] = way
1326
1327 comb += way.rd_en.eq(do_read)
1328 comb += way.rd_addr.eq(rd_addr)
1329 comb += d_out.eq(way.rd_data_o)
1330 comb += way.wr_sel.eq(wr_sel_m)
1331 comb += way.wr_addr.eq(wr_addr)
1332 comb += way.wr_data.eq(wr_data)
1333
1334 # Cache hit reads
1335 with m.If(hwe.o[i]):
1336 comb += cache_out_row.eq(d_out)
1337
1338 # these are mutually-exclusive via their Decoder-enablers
1339 # (note: Decoder-enable is inverted)
1340 comb += do_write.eq(hre.o[i] | rwe.o[i])
1341
1342 # Mask write selects with do_write since BRAM
1343 # doesn't have a global write-enable
1344 with m.If(do_write):
1345 comb += wr_sel_m.eq(wr_sel)
1346
1347 # Cache hit synchronous machine for the easy case.
1348 # This handles load hits.
1349 # It also handles error cases (TLB miss, cache paradox)
1350 def dcache_fast_hit(self, m, req_op, r0_valid, r0, r1,
1351 req_hit_way, req_index, req_tag, access_ok,
1352 tlb_hit, tlb_req_index):
1353 comb = m.d.comb
1354 sync = m.d.sync
1355
1356 with m.If(req_op != Op.OP_NONE):
1357 sync += Display("op:%d addr:%x nc: %d idx: %x tag: %x way: %x",
1358 req_op, r0.req.addr, r0.req.nc,
1359 req_index, req_tag, req_hit_way)
1360
1361 with m.If(r0_valid):
1362 sync += r1.mmu_req.eq(r0.mmu_req)
1363
1364 # Fast path for load/store hits.
1365 # Set signals for the writeback controls.
1366 sync += r1.hit_way.eq(req_hit_way)
1367 sync += r1.hit_index.eq(req_index)
1368
1369 sync += r1.hit_load_valid.eq(req_op == Op.OP_LOAD_HIT)
1370 sync += r1.cache_hit.eq((req_op == Op.OP_LOAD_HIT) |
1371 (req_op == Op.OP_STORE_HIT))
1372
1373 with m.If(req_op == Op.OP_BAD):
1374 sync += Display("Signalling ld/st error "
1375 "ls_error=%i mmu_error=%i cache_paradox=%i",
1376 ~r0.mmu_req,r0.mmu_req,access_ok)
1377 sync += r1.ls_error.eq(~r0.mmu_req)
1378 sync += r1.mmu_error.eq(r0.mmu_req)
1379 sync += r1.cache_paradox.eq(access_ok)
1380 with m.Else():
1381 sync += r1.ls_error.eq(0)
1382 sync += r1.mmu_error.eq(0)
1383 sync += r1.cache_paradox.eq(0)
1384
1385 sync += r1.stcx_fail.eq(req_op == Op.OP_STCX_FAIL)
1386
1387 # Record TLB hit information for updating TLB PLRU
1388 sync += r1.tlb_hit.eq(tlb_hit)
1389 sync += r1.tlb_hit_index.eq(tlb_req_index)
1390
1391 # Memory accesses are handled by this state machine:
1392 #
1393 # * Cache load miss/reload (in conjunction with "rams")
1394 # * Load hits for non-cachable forms
1395 # * Stores (the collision case is handled in "rams")
1396 #
1397 # All wishbone request generation is done here.
1398 # This machine operates at stage 1.
1399 def dcache_slow(self, m, r1, use_forward1_next, use_forward2_next,
1400 r0, replace_way,
1401 req_hit_way, req_same_tag,
1402 r0_valid, req_op, cache_valids, req_go, ra):
1403
1404 comb = m.d.comb
1405 sync = m.d.sync
1406 bus = self.bus
1407 d_in = self.d_in
1408
1409 m.submodules.wr_tag = wr_tag = self.tagmem.write_port(
1410 granularity=self.TAG_WIDTH)
1411
1412 req = MemAccessRequest(self, "mreq_ds")
1413
1414 r1_next_cycle = Signal()
1415 req_row = Signal(self.ROW_BITS)
1416 req_idx = Signal(self.INDEX_BITS)
1417 req_tag = Signal(self.TAG_BITS)
1418 comb += req_idx.eq(self.get_index(req.real_addr))
1419 comb += req_row.eq(self.get_row(req.real_addr))
1420 comb += req_tag.eq(self.get_tag(req.real_addr))
1421
1422 sync += r1.use_forward1.eq(use_forward1_next)
1423 sync += r1.forward_sel.eq(0)
1424
1425 with m.If(use_forward1_next):
1426 sync += r1.forward_sel.eq(r1.req.byte_sel)
1427 with m.Elif(use_forward2_next):
1428 sync += r1.forward_sel.eq(r1.forward_sel1)
1429
1430 sync += r1.forward_data2.eq(r1.forward_data1)
1431 with m.If(r1.write_bram):
1432 sync += r1.forward_data1.eq(r1.req.data)
1433 sync += r1.forward_sel1.eq(r1.req.byte_sel)
1434 sync += r1.forward_way1.eq(r1.req.hit_way)
1435 sync += r1.forward_row1.eq(self.get_row(r1.req.real_addr))
1436 sync += r1.forward_valid1.eq(1)
1437 with m.Else():
1438 with m.If(r1.dcbz):
1439 sync += r1.forward_data1.eq(0)
1440 with m.Else():
1441 sync += r1.forward_data1.eq(bus.dat_r)
1442 sync += r1.forward_sel1.eq(~0) # all 1s
1443 sync += r1.forward_way1.eq(replace_way)
1444 sync += r1.forward_row1.eq(r1.store_row)
1445 sync += r1.forward_valid1.eq(0)
1446
1447 # Reset the one-cycle pulses
1448 sync += r1.slow_valid.eq(0)
1449 sync += r1.write_bram.eq(0)
1450 sync += r1.inc_acks.eq(0)
1451 sync += r1.dec_acks.eq(0)
1452
1453 sync += r1.ls_valid.eq(0)
1454 # complete tlbies and TLB loads in the third cycle
1455 sync += r1.mmu_done.eq(r0_valid & (r0.tlbie | r0.tlbld))
1456
1457 with m.If((req_op == Op.OP_LOAD_HIT) | (req_op == Op.OP_STCX_FAIL)):
1458 with m.If(r0.mmu_req):
1459 sync += r1.mmu_done.eq(1)
1460 with m.Else():
1461 sync += r1.ls_valid.eq(1)
1462
1463 with m.If(r1.write_tag):
1464 # Store new tag in selected way
1465 replace_way_onehot = Signal(self.NUM_WAYS)
1466 comb += replace_way_onehot.eq(1<<replace_way)
1467 ct = Signal(self.TAG_RAM_WIDTH)
1468 comb += ct.eq(r1.reload_tag << (replace_way*self.TAG_WIDTH))
1469 comb += wr_tag.en.eq(replace_way_onehot)
1470 comb += wr_tag.addr.eq(r1.store_index)
1471 comb += wr_tag.data.eq(ct)
1472
1473 sync += r1.store_way.eq(replace_way)
1474 sync += r1.write_tag.eq(0)
1475
1476 # Take request from r1.req if there is one there,
1477 # else from req_op, ra, etc.
1478 with m.If(r1.full):
1479 comb += req.eq(r1.req)
1480 with m.Else():
1481 comb += req.op.eq(req_op)
1482 comb += req.valid.eq(req_go)
1483 comb += req.mmu_req.eq(r0.mmu_req)
1484 comb += req.dcbz.eq(r0.req.dcbz)
1485 comb += req.real_addr.eq(ra)
1486
1487 with m.If(r0.req.dcbz):
1488 # force data to 0 for dcbz
1489 comb += req.data.eq(0)
1490 with m.Elif(r0.d_valid):
1491 comb += req.data.eq(r0.req.data)
1492 with m.Else():
1493 comb += req.data.eq(d_in.data)
1494
1495 # Select all bytes for dcbz
1496 # and for cacheable loads
1497 with m.If(r0.req.dcbz | (r0.req.load & ~r0.req.nc)):
1498 comb += req.byte_sel.eq(~0) # all 1s
1499 with m.Else():
1500 comb += req.byte_sel.eq(r0.req.byte_sel)
1501 comb += req.hit_way.eq(req_hit_way)
1502 comb += req.same_tag.eq(req_same_tag)
1503
1504 # Store the incoming request from r0,
1505 # if it is a slow request
1506 # Note that r1.full = 1 implies req_op = OP_NONE
1507 with m.If((req_op == Op.OP_LOAD_MISS)
1508 | (req_op == Op.OP_LOAD_NC)
1509 | (req_op == Op.OP_STORE_MISS)
1510 | (req_op == Op.OP_STORE_HIT)):
1511 sync += r1.req.eq(req)
1512 sync += r1.full.eq(1)
1513 # do not let r1.state RELOAD_WAIT_ACK or STORE_WAIT_ACK
1514 # destroy r1.req by overwriting r1.full back to zero
1515 comb += r1_next_cycle.eq(1)
1516
1517 # Main state machine
1518 with m.Switch(r1.state):
1519
1520 with m.Case(State.IDLE):
1521 sync += r1.wb.adr.eq(req.real_addr[self.ROW_OFF_BITS:])
1522 sync += r1.wb.sel.eq(req.byte_sel)
1523 sync += r1.wb.dat.eq(req.data)
1524 sync += r1.dcbz.eq(req.dcbz)
1525
1526 # Keep track of our index and way
1527 # for subsequent stores.
1528 sync += r1.store_index.eq(req_idx)
1529 sync += r1.store_row.eq(req_row)
1530 sync += r1.end_row_ix.eq(self.get_row_of_line(req_row)-1)
1531 sync += r1.reload_tag.eq(req_tag)
1532 sync += r1.req.same_tag.eq(1)
1533
1534 with m.If(req.op == Op.OP_STORE_HIT):
1535 sync += r1.store_way.eq(req.hit_way)
1536
1537 #with m.If(r1.dec_acks):
1538 # sync += r1.acks_pending.eq(r1.acks_pending - 1)
1539
1540 # Reset per-row valid bits,
1541 # ready for handling OP_LOAD_MISS
1542 for i in range(self.ROW_PER_LINE):
1543 sync += r1.rows_valid[i].eq(0)
1544
1545 with m.If(req_op != Op.OP_NONE):
1546 sync += Display("cache op %d", req.op)
1547
1548 with m.Switch(req.op):
1549 with m.Case(Op.OP_LOAD_HIT):
1550 # stay in IDLE state
1551 pass
1552
1553 with m.Case(Op.OP_LOAD_MISS):
1554 sync += Display("cache miss real addr: %x " \
1555 "idx: %x tag: %x",
1556 req.real_addr, req_row, req_tag)
1557
1558 # Start the wishbone cycle
1559 sync += r1.wb.we.eq(0)
1560 sync += r1.wb.cyc.eq(1)
1561 sync += r1.wb.stb.eq(1)
1562
1563 # Track that we had one request sent
1564 sync += r1.state.eq(State.RELOAD_WAIT_ACK)
1565 sync += r1.write_tag.eq(1)
1566
1567 with m.Case(Op.OP_LOAD_NC):
1568 sync += r1.wb.cyc.eq(1)
1569 sync += r1.wb.stb.eq(1)
1570 sync += r1.wb.we.eq(0)
1571 sync += r1.state.eq(State.NC_LOAD_WAIT_ACK)
1572
1573 with m.Case(Op.OP_STORE_HIT, Op.OP_STORE_MISS):
1574 with m.If(~req.dcbz):
1575 sync += r1.state.eq(State.STORE_WAIT_ACK)
1576 sync += r1.acks_pending.eq(1)
1577 sync += r1.full.eq(0)
1578 comb += r1_next_cycle.eq(0)
1579 sync += r1.slow_valid.eq(1)
1580
1581 with m.If(req.mmu_req):
1582 sync += r1.mmu_done.eq(1)
1583 with m.Else():
1584 sync += r1.ls_valid.eq(1)
1585
1586 with m.If(req.op == Op.OP_STORE_HIT):
1587 sync += r1.write_bram.eq(1)
1588 with m.Else():
1589 # dcbz is handled much like a load miss except
1590 # that we are writing to memory instead of reading
1591 sync += r1.state.eq(State.RELOAD_WAIT_ACK)
1592
1593 with m.If(req.op == Op.OP_STORE_MISS):
1594 sync += r1.write_tag.eq(1)
1595
1596 sync += r1.wb.we.eq(1)
1597 sync += r1.wb.cyc.eq(1)
1598 sync += r1.wb.stb.eq(1)
1599
1600 # OP_NONE and OP_BAD do nothing
1601 # OP_BAD & OP_STCX_FAIL were
1602 # handled above already
1603 with m.Case(Op.OP_NONE):
1604 pass
1605 with m.Case(Op.OP_BAD):
1606 pass
1607 with m.Case(Op.OP_STCX_FAIL):
1608 pass
1609
1610 with m.Case(State.RELOAD_WAIT_ACK):
1611
1612 # If we are still sending requests, was one accepted?
1613 with m.If((~bus.stall) & r1.wb.stb):
1614 # That was the last word? We are done sending. Clear stb
1615 # sigh - reconstruct wb adr with 3 extra 0s at front
1616 wb_adr = Cat(Const(0, self.ROW_OFF_BITS), r1.wb.adr)
1617 with m.If(self.is_last_row_addr(wb_adr, r1.end_row_ix)):
1618 sync += r1.wb.stb.eq(0)
1619
1620 # Calculate the next row address in the current cache line
1621 rlen = self.LINE_OFF_BITS-self.ROW_OFF_BITS
1622 row = Signal(rlen)
1623 comb += row.eq(r1.wb.adr)
1624 sync += r1.wb.adr[:rlen].eq(row+1)
1625
1626 # Incoming acks processing
1627 sync += r1.forward_valid1.eq(bus.ack)
1628 with m.If(bus.ack):
1629 srow = Signal(self.ROW_LINE_BITS)
1630 comb += srow.eq(r1.store_row)
1631 sync += r1.rows_valid[srow].eq(1)
1632
1633 # If this is the data we were looking for,
1634 # we can complete the request next cycle.
1635 # Compare the whole address in case the
1636 # request in r1.req is not the one that
1637 # started this refill.
1638 rowmatch = Signal()
1639 lastrow = Signal()
1640 comb += rowmatch.eq(r1.store_row ==
1641 self.get_row(r1.req.real_addr))
1642 comb += lastrow.eq(self.is_last_row(r1.store_row,
1643 r1.end_row_ix))
1644 with m.If(r1.full & r1.req.same_tag &
1645 ((r1.dcbz & req.dcbz) |
1646 (r1.req.op == Op.OP_LOAD_MISS)) & rowmatch):
1647 sync += r1.full.eq(r1_next_cycle)
1648 sync += r1.slow_valid.eq(1)
1649 with m.If(r1.mmu_req):
1650 sync += r1.mmu_done.eq(1)
1651 with m.Else():
1652 sync += r1.ls_valid.eq(1)
1653 sync += r1.forward_sel.eq(~0) # all 1s
1654 sync += r1.use_forward1.eq(1)
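                             # all byte lanes come from the forwarding buffer
                             # rather than the cache BRAM on the completing cycle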
1655
1656 # Check for completion
1657 with m.If(lastrow):
1658 # Complete wishbone cycle
1659 sync += r1.wb.cyc.eq(0)
1660
1661 # Cache line is now valid
1662 cv = Signal(self.INDEX_BITS)
1663 comb += cv.eq(cache_valids[r1.store_index])
1664 comb += cv.bit_select(r1.store_way, 1).eq(1)
1665 sync += cache_valids[r1.store_index].eq(cv)
1666
1667 sync += r1.state.eq(State.IDLE)
1668 sync += Display("cache valid set %x "
1669 "idx %d way %d",
1670 cv, r1.store_index, r1.store_way)
1671
1672 # Increment store row counter
1673 sync += r1.store_row.eq(self.next_row(r1.store_row))
1674
1675 with m.Case(State.STORE_WAIT_ACK):
1676 st_stbs_done = Signal()
1677 adjust_acks = Signal(3)
1678
1679 comb += st_stbs_done.eq(~r1.wb.stb)
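                 # default: strobes are done when stb is already low; the branches
                 # below override this while another store is being chained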
1680
1681 with m.If(r1.inc_acks != r1.dec_acks):
1682 with m.If(r1.inc_acks):
1683 comb += adjust_acks.eq(r1.acks_pending + 1)
1684 with m.Else():
1685 comb += adjust_acks.eq(r1.acks_pending - 1)
1686 with m.Else():
1687 comb += adjust_acks.eq(r1.acks_pending)
1688
1689 sync += r1.acks_pending.eq(adjust_acks)
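                 # acks_pending counts wishbone acks still owed for this store burst:
                 # inc_acks is set when a new stb is issued, dec_acks when an ack
                 # arrives, and the two cancel out if both happen in the same cycle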
1690
1691 # Clear stb when slave accepted request
1692 with m.If(~bus.stall):
1693 # See if there is another store waiting
1694 # to be done which is in the same real page.
1695                     # (this is when same_tag is true)
1696 with m.If(req.valid):
1697 _ra = req.real_addr[self.ROW_OFF_BITS:
1698 self.SET_SIZE_BITS]
1699 alen = self.SET_SIZE_BITS-self.ROW_OFF_BITS
1700 sync += r1.wb.adr[0:alen].eq(_ra)
1701 sync += r1.wb.dat.eq(req.data)
1702 sync += r1.wb.sel.eq(req.byte_sel)
1703
1704 with m.If((adjust_acks < 7) & req.same_tag &
1705 ((req.op == Op.OP_STORE_MISS) |
1706 (req.op == Op.OP_STORE_HIT))):
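                                 # only chain another store while fewer than 7 acks are
                                 # outstanding, so the 3-bit acks_pending counter cannot wrap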
1707 sync += r1.wb.stb.eq(1)
1708 comb += st_stbs_done.eq(0)
1709 sync += r1.store_way.eq(req.hit_way)
1710 sync += r1.store_row.eq(self.get_row(req.real_addr))
1711
1712 with m.If(req.op == Op.OP_STORE_HIT):
1713 sync += r1.write_bram.eq(1)
1714 sync += r1.full.eq(r1_next_cycle)
1715 sync += r1.slow_valid.eq(1)
1716
1717 # Store requests never come from the MMU
1718 sync += r1.ls_valid.eq(1)
1719 comb += st_stbs_done.eq(0)
1720 sync += r1.inc_acks.eq(1)
1721 with m.Else():
1722 sync += r1.wb.stb.eq(0)
1723 comb += st_stbs_done.eq(1)
1724
1725 # Got ack ? See if complete.
1726                 sync += Display("got ack %d stbs %d adjust_acks %d",
1727                                 bus.ack, st_stbs_done, adjust_acks)
1728 with m.If(bus.ack):
1729 with m.If(st_stbs_done & (adjust_acks == 1)):
1730 sync += r1.state.eq(State.IDLE)
1731 sync += r1.wb.cyc.eq(0)
1732 sync += r1.wb.stb.eq(0)
1733 sync += r1.dec_acks.eq(1)
1734
1735 with m.Case(State.NC_LOAD_WAIT_ACK):
1736 # Clear stb when slave accepted request
1737 with m.If(~bus.stall):
1738 sync += r1.wb.stb.eq(0)
1739
1740 # Got ack ? complete.
1741 with m.If(bus.ack):
1742 sync += r1.state.eq(State.IDLE)
1743 sync += r1.full.eq(r1_next_cycle)
1744 sync += r1.slow_valid.eq(1)
1745
1746 with m.If(r1.mmu_req):
1747 sync += r1.mmu_done.eq(1)
1748 with m.Else():
1749 sync += r1.ls_valid.eq(1)
1750
1751 sync += r1.forward_sel.eq(~0) # all 1s
1752 sync += r1.use_forward1.eq(1)
1753 sync += r1.wb.cyc.eq(0)
1754 sync += r1.wb.stb.eq(0)
1755
1756     def dcache_log(self, m, r1, valid_ra, tlb_hit, req_op, stall_out):
1757
1758 sync = m.d.sync
1759 d_out, bus, log_out = self.d_out, self.bus, self.log_out
1760
1761 sync += log_out.eq(Cat(r1.state[:3], valid_ra, tlb_hit.way[:3],
1762 stall_out, req_op[:3], d_out.valid, d_out.error,
1763 r1.wb.cyc, r1.wb.stb, bus.ack, bus.stall,
1764 r1.real_adr[3:6]))
1765
1766 def elaborate(self, platform):
1767
1768 m = Module()
1769 comb, sync = m.d.comb, m.d.sync
1770 m_in, d_in = self.m_in, self.d_in
1771
1772 # Storage. Hopefully "cache_rows" is a BRAM, the rest is LUTs
1773 cache_valids = self.CacheValidsArray()
1774 cache_tag_set = Signal(self.TAG_RAM_WIDTH)
1775
1776 self.tagmem = Memory(depth=self.NUM_LINES, width=self.TAG_RAM_WIDTH,
1777 attrs={'syn_ramstyle': "block_ram"})
1778
1779         """note: the attrs dict is attached to the nmigen Memory and emitted
1780             into the netlist; synthesis tools read 'syn_ramstyle' as a hint
1781             to infer block RAM for the tag store.
1782         """
1782 # TODO attribute ram_style of
1783 # dtlb_tags : signal is "distributed";
1784 # TODO attribute ram_style of
1785 # dtlb_ptes : signal is "distributed";
1786
1787 r0 = RegStage0("r0")
1788 r0_full = Signal()
1789
1790 r1 = RegStage1(self, "r1")
1791
1792 reservation = Reservation(self, "rsrv")
1793
1794 # Async signals on incoming request
1795 req_index = Signal(self.INDEX_BITS)
1796 req_row = Signal(self.ROW_BITS)
1797 req_hit_way = Signal(self.WAY_BITS)
1798 req_tag = Signal(self.TAG_BITS)
1799 req_op = Signal(Op)
1800 req_data = Signal(64)
1801 req_same_tag = Signal()
1802 req_go = Signal()
1803
1804 early_req_row = Signal(self.ROW_BITS)
1805
1806 cancel_store = Signal()
1807 set_rsrv = Signal()
1808 clear_rsrv = Signal()
1809
1810 r0_valid = Signal()
1811 r0_stall = Signal()
1812
1813 use_forward1_next = Signal()
1814 use_forward2_next = Signal()
1815
1816 cache_out_row = Signal(WB_DATA_BITS)
1817
1818 plru_victim = Signal(self.WAY_BITS)
1819 replace_way = Signal(self.WAY_BITS)
1820
1821 # Wishbone read/write/cache write formatting signals
1822 bus_sel = Signal(8)
1823
1824 # TLB signals
1825 tlb_way = self.TLBRecord("tlb_way")
1826 tlb_req_index = Signal(self.TLB_SET_BITS)
1827 tlb_hit = self.TLBHit("tlb_hit")
1828 pte = Signal(self.TLB_PTE_BITS)
1829 ra = Signal(self.REAL_ADDR_BITS)
1830 valid_ra = Signal()
1831 perm_attr = PermAttr("dc_perms")
1832 rc_ok = Signal()
1833 perm_ok = Signal()
1834 access_ok = Signal()
1835
1836 tlb_plru_victim = Signal(self.TLB_WAY_BITS)
1837
1838 # we don't yet handle collisions between loadstore1 requests
1839 # and MMU requests
1840 comb += self.m_out.stall.eq(0)
1841
1842 # Hold off the request in r0 when r1 has an uncompleted request
1843 comb += r0_stall.eq(r0_full & (r1.full | d_in.hold))
1844 comb += r0_valid.eq(r0_full & ~r1.full & ~d_in.hold)
1845 comb += self.stall_out.eq(r0_stall)
1846 # debugging: detect if any stall ever requested, which is fine,
1847 # but if a request comes in when stall requested, that's bad.
1848 with m.If(r0_stall):
1849 sync += self.any_stall_out.eq(1)
1850 with m.If(d_in.valid):
1851 sync += self.dreq_when_stall.eq(1)
1852 with m.If(m_in.valid):
1853 sync += self.mreq_when_stall.eq(1)
1854
1855 # deal with litex not doing wishbone pipeline mode
1856         # XXX this is done the wrong way: FIFOs are needed in the SRAM test
1857         # so that stb/ack match up.  the same workaround is used in icache.py
1858 if not self.microwatt_compat:
1859 comb += self.bus.stall.eq(self.bus.cyc & ~self.bus.ack)
1860
1861 # Wire up wishbone request latch out of stage 1
1862 comb += self.bus.we.eq(r1.wb.we)
1863 comb += self.bus.adr.eq(r1.wb.adr)
1864 comb += self.bus.sel.eq(r1.wb.sel)
1865 comb += self.bus.stb.eq(r1.wb.stb)
1866 comb += self.bus.dat_w.eq(r1.wb.dat)
1867 comb += self.bus.cyc.eq(r1.wb.cyc)
1868
1869 # create submodule TLBUpdate
1870 m.submodules.dtlb_update = self.dtlb_update = DTLBUpdate(self)
1871
1872 # call sub-functions putting everything together, using shared
1873 # signals established above
1874 self.stage_0(m, r0, r1, r0_full)
1875 self.tlb_read(m, r0_stall, tlb_way)
1876 self.tlb_search(m, tlb_req_index, r0, r0_valid,
1877 tlb_way,
1878 pte, tlb_hit, valid_ra, perm_attr, ra)
1879 self.tlb_update(m, r0_valid, r0, tlb_req_index,
1880 tlb_hit, tlb_plru_victim)
1881 self.maybe_plrus(m, r1, plru_victim)
1882 self.maybe_tlb_plrus(m, r1, tlb_plru_victim, tlb_req_index)
1883 self.cache_tag_read(m, r0_stall, req_index, cache_tag_set)
1884 self.dcache_request(m, r0, ra, req_index, req_row, req_tag,
1885 r0_valid, r1, cache_valids, replace_way,
1886 use_forward1_next, use_forward2_next,
1887 req_hit_way, plru_victim, rc_ok, perm_attr,
1888 valid_ra, perm_ok, access_ok, req_op, req_go,
1889 tlb_hit, tlb_way, cache_tag_set,
1890 cancel_store, req_same_tag, r0_stall, early_req_row)
1891 self.reservation_comb(m, cancel_store, set_rsrv, clear_rsrv,
1892 r0_valid, r0, reservation)
1893 self.reservation_reg(m, r0_valid, access_ok, set_rsrv, clear_rsrv,
1894 reservation, r0)
1895 self.writeback_control(m, r1, cache_out_row)
1896 self.rams(m, r1, early_req_row, cache_out_row, replace_way)
1897 self.dcache_fast_hit(m, req_op, r0_valid, r0, r1,
1898 req_hit_way, req_index, req_tag, access_ok,
1899 tlb_hit, tlb_req_index)
1900 self.dcache_slow(m, r1, use_forward1_next, use_forward2_next,
1901 r0, replace_way,
1902 req_hit_way, req_same_tag,
1903 r0_valid, req_op, cache_valids, req_go, ra)
1904         #self.dcache_log(m, r1, valid_ra, tlb_hit, req_op, self.stall_out)
1905
1906 return m
1907
1908
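     # standalone conversion: running this file directly emits RTLIL which yosys
     # can read back in (e.g. with 'read_rtlil test_dcache.il'); DCache() here is
     # presumably built with the default DCacheConfig parameters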
1909 if __name__ == '__main__':
1910 dut = DCache()
1911 vl = rtlil.convert(dut, ports=[])
1912 with open("test_dcache.il", "w") as f:
1913 f.write(vl)