reduce dcache/icache number of ways, to fit into ECP5 45k resource
1 #!/usr/bin/env python3
2 #
3 # Copyright (C) 2020,2021 Luke Kenneth Casson Leighton <lkcl@lkcl.net>
4 # Copyright (C) 2020 Cole Poirier
5 # Copyright (C) 2020,2021 Cesar Strauss
6 # Copyright (C) 2021 Tobias Platen
7 #
8 # Original dcache.vhdl Copyright of its authors and licensed
9 # by IBM under CC-BY 4.0
10 # https://github.com/antonblanchard/microwatt
11 #
12 # Conversion to nmigen funded by NLnet and NGI POINTER under EU Grants
13 # 871528 and 957073, under the LGPL-v3+ License
14
15 """DCache
16
17 based on Anton Blanchard microwatt dcache.vhdl
18
19 note that the microwatt dcache wishbone interface expects "stall".
20 for simplicity at the moment this is hard-coded to cyc & ~ack.
21 see WB4 spec, p84, section 5.2.1
22
23 IMPORTANT: for store, the data is sampled the cycle AFTER the "valid"
24 is raised. sigh
25
26 Links:
27
28 * https://libre-soc.org/3d_gpu/architecture/set_associative_cache.jpg
29 * https://bugs.libre-soc.org/show_bug.cgi?id=469
30 * https://libre-soc.org/irclog-microwatt/%23microwatt.2021-12-07.log.html
31 (discussion about brams for ECP5)
32
33 """
34
35 import sys
36
37 from nmutil.gtkw import write_gtkw
38
39 sys.setrecursionlimit(1000000)
40
41 from enum import Enum, unique
42
43 from nmigen import (Module, Signal, Elaboratable, Cat, Repl, Array, Const,
44 Record, Memory)
45 from nmutil.util import Display
46 from nmigen.lib.coding import Decoder
47
48 from copy import deepcopy
49 from random import randint, seed
50
51 from nmigen_soc.wishbone.bus import Interface
52
53 from nmigen.cli import main
54 from nmutil.iocontrol import RecordObject
55 from nmigen.utils import log2_int
56 from soc.experiment.mem_types import (LoadStore1ToDCacheType,
57 DCacheToLoadStore1Type,
58 MMUToDCacheType,
59 DCacheToMMUType)
60
61 from soc.experiment.wb_types import (WB_ADDR_BITS, WB_DATA_BITS, WB_SEL_BITS,
62 WBAddrType, WBDataType, WBSelType,
63 WBMasterOut, WBSlaveOut,
64 WBMasterOutVector, WBSlaveOutVector,
65 WBIOMasterOut, WBIOSlaveOut)
66
67 from soc.experiment.cache_ram import CacheRam
68 from soc.experiment.plru import PLRU, PLRUs
69 #from nmutil.plru import PLRU, PLRUs
70
71 # for test
72 from soc.bus.sram import SRAM
73 from nmigen import Memory
74 from nmigen.cli import rtlil
75
76 # NOTE: to use cxxsim, export NMIGEN_SIM_MODE=cxxsim from the shell
77 # Also, check out the cxxsim nmigen branch, and latest yosys from git
78 from nmutil.sim_tmp_alternative import Simulator
79
80 from nmutil.util import wrap
81
82 LOG_LENGTH = 0 # Non-zero to enable log data collection
83
84 def ispow2(x):
85 return (1<<log2_int(x, False)) == x
86
87
88 class DCacheConfig:
89 def __init__(self, LINE_SIZE = 64, # Line size in bytes
90                        NUM_LINES = 64,    # Number of lines (sets) per way
91 NUM_WAYS = 2, # Number of ways
92 TLB_SET_SIZE = 64, # L1 DTLB entries per set
93                        TLB_NUM_WAYS = 2,  # L1 DTLB number of ways
94 TLB_LG_PGSZ = 12): # L1 DTLB log_2(page_size)
95 self.LINE_SIZE = LINE_SIZE
96 self.NUM_LINES = NUM_LINES
97 self.NUM_WAYS = NUM_WAYS
98 self.TLB_SET_SIZE = TLB_SET_SIZE
99 self.TLB_NUM_WAYS = TLB_NUM_WAYS
100 self.TLB_LG_PGSZ = TLB_LG_PGSZ
101
102 # BRAM organisation: We never access more than
103         # WB_DATA_BITS at a time so, to save resources,
104         # we make the array only that wide, and
105         # use consecutive indices to make a cache "line"
106         #
107         # ROW_SIZE is the width in bytes of the BRAM
108         # (based on WB, so 64-bits)
109         self.ROW_SIZE = WB_DATA_BITS // 8
110
111         # ROW_PER_LINE is the number of rows (wishbone
112 # transactions) in a line
113 self.ROW_PER_LINE = self.LINE_SIZE // self.ROW_SIZE
114
115 # BRAM_ROWS is the number of rows in BRAM needed
116 # to represent the full dcache
117 self.BRAM_ROWS = self.NUM_LINES * self.ROW_PER_LINE
118
119 print ("ROW_SIZE", self.ROW_SIZE)
120 print ("ROW_PER_LINE", self.ROW_PER_LINE)
121 print ("BRAM_ROWS", self.BRAM_ROWS)
122 print ("NUM_WAYS", self.NUM_WAYS)
123
124 # Bit fields counts in the address
125
126 # REAL_ADDR_BITS is the number of real address
127 # bits that we store
128 self.REAL_ADDR_BITS = 56
129
130 # ROW_BITS is the number of bits to select a row
131 self.ROW_BITS = log2_int(self.BRAM_ROWS)
132
133 # ROW_LINE_BITS is the number of bits to select
134 # a row within a line
135 self.ROW_LINE_BITS = log2_int(self.ROW_PER_LINE)
136
137 # LINE_OFF_BITS is the number of bits for
138 # the offset in a cache line
139 self.LINE_OFF_BITS = log2_int(self.LINE_SIZE)
140
141 # ROW_OFF_BITS is the number of bits for
142 # the offset in a row
143 self.ROW_OFF_BITS = log2_int(self.ROW_SIZE)
144
145         # INDEX_BITS is the number of bits to
146 # select a cache line
147 self.INDEX_BITS = log2_int(self.NUM_LINES)
148
149 # SET_SIZE_BITS is the log base 2 of the set size
150 self.SET_SIZE_BITS = self.LINE_OFF_BITS + self.INDEX_BITS
151
152 # TAG_BITS is the number of bits of
153 # the tag part of the address
154 self.TAG_BITS = self.REAL_ADDR_BITS - self.SET_SIZE_BITS
155
156 # TAG_WIDTH is the width in bits of each way of the tag RAM
157 self.TAG_WIDTH = self.TAG_BITS + 7 - ((self.TAG_BITS + 7) % 8)
158
159 # WAY_BITS is the number of bits to select a way
160 self.WAY_BITS = log2_int(self.NUM_WAYS)
161
162         # Example of layout, printed with the actually-configured values:
163 layout = f"""\
164 DCache Layout:
165 |.. -----------------------| REAL_ADDR_BITS ({self.REAL_ADDR_BITS})
166 .. |--------------| SET_SIZE_BITS ({self.SET_SIZE_BITS})
167 .. tag |index| line |
168 .. | row | |
169 .. | |---| | ROW_LINE_BITS ({self.ROW_LINE_BITS})
170 .. | |--- - --| LINE_OFF_BITS ({self.LINE_OFF_BITS})
171 .. | |- --| ROW_OFF_BITS ({self.ROW_OFF_BITS})
172 .. |----- ---| | ROW_BITS ({self.ROW_BITS})
173 .. |-----| | INDEX_BITS ({self.INDEX_BITS})
174 .. --------| | TAG_BITS ({self.TAG_BITS})
175 """
176 print (layout)
177 print ("Dcache TAG %d IDX %d ROW_BITS %d ROFF %d LOFF %d RLB %d" % \
178 (self.TAG_BITS, self.INDEX_BITS, self.ROW_BITS,
179 self.ROW_OFF_BITS, self.LINE_OFF_BITS, self.ROW_LINE_BITS))
180 print ("index @: %d-%d" % (self.LINE_OFF_BITS, self.SET_SIZE_BITS))
181 print ("row @: %d-%d" % (self.LINE_OFF_BITS, self.ROW_OFF_BITS))
182 print ("tag @: %d-%d width %d" % (self.SET_SIZE_BITS,
183 self.REAL_ADDR_BITS, self.TAG_WIDTH))
184
185 self.TAG_RAM_WIDTH = self.TAG_WIDTH * self.NUM_WAYS
186
187 print ("TAG_RAM_WIDTH", self.TAG_RAM_WIDTH)
188 print (" TAG_WIDTH", self.TAG_WIDTH)
189 print (" NUM_WAYS", self.NUM_WAYS)
190 print (" NUM_LINES", self.NUM_LINES)
191
192 # L1 TLB
193 self.TLB_SET_BITS = log2_int(self.TLB_SET_SIZE)
194 self.TLB_WAY_BITS = log2_int(self.TLB_NUM_WAYS)
195 self.TLB_EA_TAG_BITS = 64 - (self.TLB_LG_PGSZ + self.TLB_SET_BITS)
196 self.TLB_TAG_WAY_BITS = self.TLB_NUM_WAYS * self.TLB_EA_TAG_BITS
197 self.TLB_PTE_BITS = 64
198         self.TLB_PTE_WAY_BITS = self.TLB_NUM_WAYS * self.TLB_PTE_BITS
199
200 assert (self.LINE_SIZE % self.ROW_SIZE) == 0, \
201 "LINE_SIZE not multiple of ROW_SIZE"
202 assert ispow2(self.LINE_SIZE), "LINE_SIZE not power of 2"
203 assert ispow2(self.NUM_LINES), "NUM_LINES not power of 2"
204 assert ispow2(self.ROW_PER_LINE), "ROW_PER_LINE not power of 2"
205 assert self.ROW_BITS == \
206 (self.INDEX_BITS + self.ROW_LINE_BITS), \
207 "geometry bits don't add up"
208 assert (self.LINE_OFF_BITS == \
209 self.ROW_OFF_BITS + self.ROW_LINE_BITS), \
210 "geometry bits don't add up"
211 assert self.REAL_ADDR_BITS == \
212 (self.TAG_BITS + self.INDEX_BITS + self.LINE_OFF_BITS), \
213 "geometry bits don't add up"
214 assert self.REAL_ADDR_BITS == \
215 (self.TAG_BITS + self.ROW_BITS + self.ROW_OFF_BITS), \
216 "geometry bits don't add up"
217 assert 64 == WB_DATA_BITS, \
218 "Can't yet handle wb width that isn't 64-bits"
219 assert self.SET_SIZE_BITS <= self.TLB_LG_PGSZ, \
220 "Set indexed by virtual address"
221
222 def CacheTagArray(self):
223 return Array(Signal(self.TAG_RAM_WIDTH, name="tag%d" % x) \
224 for x in range(self.NUM_LINES))
225
226 def CacheValidsArray(self):
227 return Array(Signal(self.NUM_WAYS, name="tag_valids%d" % x)
228 for x in range(self.NUM_LINES))
229
230 def RowPerLineValidArray(self):
231 return Array(Signal(name="rows_valid%d" % x) \
232 for x in range(self.ROW_PER_LINE))
233
234 def TLBHit(self, name):
235 return Record([('valid', 1),
236 ('way', self.TLB_WAY_BITS)], name=name)
237
238 def TLBTagEAArray(self):
239 return Array(Signal(self.TLB_EA_TAG_BITS, name="tlbtagea%d" % x) \
240 for x in range (self.TLB_NUM_WAYS))
241
242 def TLBRecord(self, name):
243 tlb_layout = [('valid', self.TLB_NUM_WAYS),
244 ('tag', self.TLB_TAG_WAY_BITS),
245 ('pte', self.TLB_PTE_WAY_BITS)
246 ]
247 return Record(tlb_layout, name=name)
248
249 def TLBValidArray(self):
250 return Array(Signal(self.TLB_NUM_WAYS, name="tlb_valid%d" % x)
251 for x in range(self.TLB_SET_SIZE))
252
253 def HitWaySet(self):
254 return Array(Signal(self.WAY_BITS, name="hitway_%d" % x) \
255 for x in range(self.TLB_NUM_WAYS))
256
257 # Cache RAM interface
258 def CacheRamOut(self):
259         return Array(Signal(WB_DATA_BITS, name="cache_out%d" % x) \
260 for x in range(self.NUM_WAYS))
261
262 # PLRU output interface
263 def PLRUOut(self):
264 return Array(Signal(self.WAY_BITS, name="plru_out%d" % x) \
265 for x in range(self.NUM_LINES))
266
267 # TLB PLRU output interface
268 def TLBPLRUOut(self):
269 return Array(Signal(self.TLB_WAY_BITS, name="tlbplru_out%d" % x) \
270 for x in range(self.TLB_SET_SIZE))
271
272 # Helper functions to decode incoming requests
273 #
274 # Return the cache line index (tag index) for an address
275 def get_index(self, addr):
276 return addr[self.LINE_OFF_BITS:self.SET_SIZE_BITS]
277
278 # Return the cache row index (data memory) for an address
279 def get_row(self, addr):
280 return addr[self.ROW_OFF_BITS:self.SET_SIZE_BITS]
281
282 # Return the index of a row within a line
283 def get_row_of_line(self, row):
284 return row[:self.ROW_BITS][:self.ROW_LINE_BITS]
285
286 # Returns whether this is the last row of a line
287 def is_last_row_addr(self, addr, last):
288 return addr[self.ROW_OFF_BITS:self.LINE_OFF_BITS] == last
289
290 # Returns whether this is the last row of a line
291 def is_last_row(self, row, last):
292 return self.get_row_of_line(row) == last
293
294 # Return the next row in the current cache line. We use a
295 # dedicated function in order to limit the size of the
296 # generated adder to be only the bits within a cache line
297 # (3 bits with default settings)
298 def next_row(self, row):
299 row_v = row[0:self.ROW_LINE_BITS] + 1
300 return Cat(row_v[:self.ROW_LINE_BITS], row[self.ROW_LINE_BITS:])
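    # Illustrative only: with ROW_LINE_BITS=3, next_row(0b000101_111) returns
    # 0b000101_000 -- the low 3 bits increment and wrap within the line while
    # the upper ROW_BITS-3 bits (the line index) pass through unchanged.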
301
302 # Get the tag value from the address
303 def get_tag(self, addr):
304 return addr[self.SET_SIZE_BITS:self.REAL_ADDR_BITS]
305
306 # Read a tag from a tag memory row
307 def read_tag(self, way, tagset):
308 return tagset.word_select(way, self.TAG_WIDTH)[:self.TAG_BITS]
309
310 # Read a TLB tag from a TLB tag memory row
311 def read_tlb_tag(self, way, tags):
312 return tags.word_select(way, self.TLB_EA_TAG_BITS)
313
314 # Write a TLB tag to a TLB tag memory row
315 def write_tlb_tag(self, way, tags, tag):
316 return self.read_tlb_tag(way, tags).eq(tag)
317
318 # Read a PTE from a TLB PTE memory row
319 def read_tlb_pte(self, way, ptes):
320 return ptes.word_select(way, self.TLB_PTE_BITS)
321
322 def write_tlb_pte(self, way, ptes, newpte):
323 return self.read_tlb_pte(way, ptes).eq(newpte)
324
325
326 # Record for storing permission, attribute, etc. bits from a PTE
327 class PermAttr(RecordObject):
328 def __init__(self, name=None):
329 super().__init__(name=name)
330 self.reference = Signal()
331 self.changed = Signal()
332 self.nocache = Signal()
333 self.priv = Signal()
334 self.rd_perm = Signal()
335 self.wr_perm = Signal()
336
337
338 def extract_perm_attr(pte):
339 pa = PermAttr()
340     return pa
341
342
343 # Type of operation on a "valid" input
344 @unique
345 class Op(Enum):
346 OP_NONE = 0
347 OP_BAD = 1 # NC cache hit, TLB miss, prot/RC failure
348 OP_STCX_FAIL = 2 # conditional store w/o reservation
349 OP_LOAD_HIT = 3 # Cache hit on load
350 OP_LOAD_MISS = 4 # Load missing cache
351 OP_LOAD_NC = 5 # Non-cachable load
352 OP_STORE_HIT = 6 # Store hitting cache
353 OP_STORE_MISS = 7 # Store missing cache
354
355
356 # Cache state machine
357 @unique
358 class State(Enum):
359 IDLE = 0 # Normal load hit processing
360 RELOAD_WAIT_ACK = 1 # Cache reload wait ack
361 STORE_WAIT_ACK = 2 # Store wait ack
362 NC_LOAD_WAIT_ACK = 3 # Non-cachable load wait ack
363
364
365 # Dcache operations:
366 #
367 # In order to make timing, we use the BRAMs with
368 # an output buffer, which means that the BRAM
369 # output is delayed by an extra cycle.
370 #
371 # Thus, the dcache has a 2-stage internal pipeline
372 # for cache hits with no stalls.
373 #
374 # All other operations are handled via stalling
375 # in the first stage.
376 #
377 # The second stage can thus complete a hit at the same
378 # time as the first stage emits a stall for a complex op.
379 #
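# A rough load-hit timeline (illustrative sketch -- see the state machine and
# writeback logic below for the exact signal timing):
#   cycle 0: request accepted and latched into r0 (stage 0)
#   cycle 1: TLB and tag lookup, hit detection (stage 1), BRAM read in flight
#   cycle 2: buffered BRAM data available, load data returned on d_out
#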
380 # Stage 0 register, basically contains just the latched request
381
382 class RegStage0(RecordObject):
383 def __init__(self, name=None):
384 super().__init__(name=name)
385 self.req = LoadStore1ToDCacheType(name="lsmem")
386 self.tlbie = Signal() # indicates a tlbie request (from MMU)
387 self.doall = Signal() # with tlbie, indicates flush whole TLB
388 self.tlbld = Signal() # indicates a TLB load request (from MMU)
389 self.mmu_req = Signal() # indicates source of request
390 self.d_valid = Signal() # indicates req.data is valid now
391
392
393 class MemAccessRequest(RecordObject):
394 def __init__(self, cfg, name=None):
395 super().__init__(name=name)
396 self.op = Signal(Op)
397 self.valid = Signal()
398 self.dcbz = Signal()
399 self.real_addr = Signal(cfg.REAL_ADDR_BITS)
400 self.data = Signal(64)
401 self.byte_sel = Signal(8)
402 self.hit_way = Signal(cfg.WAY_BITS)
403 self.same_tag = Signal()
404 self.mmu_req = Signal()
405
406
407 # First stage register, contains state for stage 1 of load hits
408 # and for the state machine used by all other operations
409 class RegStage1(RecordObject):
410 def __init__(self, cfg, name=None):
411 super().__init__(name=name)
412 # Info about the request
413 self.full = Signal() # have uncompleted request
414 self.mmu_req = Signal() # request is from MMU
415 self.req = MemAccessRequest(cfg, name="reqmem")
416
417 # Cache hit state
418 self.hit_way = Signal(cfg.WAY_BITS)
419 self.hit_load_valid = Signal()
420 self.hit_index = Signal(cfg.INDEX_BITS)
421 self.cache_hit = Signal()
422
423 # TLB hit state
424 self.tlb_hit = cfg.TLBHit("tlb_hit")
425 self.tlb_hit_index = Signal(cfg.TLB_SET_BITS)
426
427 # 2-stage data buffer for data forwarded from writes to reads
428 self.forward_data1 = Signal(64)
429 self.forward_data2 = Signal(64)
430 self.forward_sel1 = Signal(8)
431 self.forward_valid1 = Signal()
432 self.forward_way1 = Signal(cfg.WAY_BITS)
433 self.forward_row1 = Signal(cfg.ROW_BITS)
434 self.use_forward1 = Signal()
435 self.forward_sel = Signal(8)
436
437 # Cache miss state (reload state machine)
438 self.state = Signal(State)
439 self.dcbz = Signal()
440 self.write_bram = Signal()
441 self.write_tag = Signal()
442 self.slow_valid = Signal()
443 self.wb = WBMasterOut("wb")
444 self.reload_tag = Signal(cfg.TAG_BITS)
445 self.store_way = Signal(cfg.WAY_BITS)
446 self.store_row = Signal(cfg.ROW_BITS)
447 self.store_index = Signal(cfg.INDEX_BITS)
448 self.end_row_ix = Signal(cfg.ROW_LINE_BITS)
449 self.rows_valid = cfg.RowPerLineValidArray()
450 self.acks_pending = Signal(3)
451 self.inc_acks = Signal()
452 self.dec_acks = Signal()
453
454 # Signals to complete (possibly with error)
455 self.ls_valid = Signal()
456 self.ls_error = Signal()
457 self.mmu_done = Signal()
458 self.mmu_error = Signal()
459 self.cache_paradox = Signal()
460
461 # Signal to complete a failed stcx.
462 self.stcx_fail = Signal()
463
464
465 # Reservation information
466 class Reservation(RecordObject):
467 def __init__(self, cfg, name=None):
468 super().__init__(name=name)
469 self.valid = Signal()
470 self.addr = Signal(64-cfg.LINE_OFF_BITS)
471
472
473 class DTLBUpdate(Elaboratable):
474 def __init__(self, cfg):
475 self.cfg = cfg
476 self.tlbie = Signal()
477 self.tlbwe = Signal()
478 self.doall = Signal()
479 self.tlb_hit = cfg.TLBHit("tlb_hit")
480 self.tlb_req_index = Signal(cfg.TLB_SET_BITS)
481
482 self.repl_way = Signal(cfg.TLB_WAY_BITS)
483 self.eatag = Signal(cfg.TLB_EA_TAG_BITS)
484 self.pte_data = Signal(cfg.TLB_PTE_BITS)
485
486 # read from dtlb array
487 self.tlb_read = Signal()
488 self.tlb_read_index = Signal(cfg.TLB_SET_BITS)
489 self.tlb_way = cfg.TLBRecord("o_tlb_way")
490
491 def elaborate(self, platform):
492 m = Module()
493 comb = m.d.comb
494 sync = m.d.sync
495 cfg = self.cfg
496
497 # there are 3 parts to this:
498 # QTY TLB_NUM_WAYs TAGs - of width (say) 46 bits of Effective Address
499 # QTY TLB_NUM_WAYs PTEs - of width (say) 64 bits
500 # "Valid" bits, one per "way", of QTY TLB_NUM_WAYs. these cannot
501 # be a Memory because they can all be cleared (tlbie, doall), i mean,
502 # we _could_, in theory, by overriding the Reset Signal of the Memory,
503 # hmmm....
504
505 dtlb_valid = cfg.TLBValidArray()
506 tlb_req_index = self.tlb_req_index
507
508 print ("TLB_TAG_WAY_BITS", cfg.TLB_TAG_WAY_BITS)
509 print (" TLB_EA_TAG_BITS", cfg.TLB_EA_TAG_BITS)
510 print (" TLB_NUM_WAYS", cfg.TLB_NUM_WAYS)
511 print ("TLB_PTE_WAY_BITS", cfg.TLB_PTE_WAY_BITS)
512 print (" TLB_PTE_BITS", cfg.TLB_PTE_BITS)
513 print (" TLB_NUM_WAYS", cfg.TLB_NUM_WAYS)
514
515 # TAG and PTE Memory SRAMs. transparent, write-enables are TLB_NUM_WAYS
516 tagway = Memory(depth=cfg.TLB_SET_SIZE, width=cfg.TLB_TAG_WAY_BITS,
517 attrs={'syn_ramstyle': "block_ram"})
518 m.submodules.rd_tagway = rd_tagway = tagway.read_port()
519 m.submodules.wr_tagway = wr_tagway = tagway.write_port(
520 granularity=cfg.TLB_EA_TAG_BITS)
521
522 pteway = Memory(depth=cfg.TLB_SET_SIZE, width=cfg.TLB_PTE_WAY_BITS,
523 attrs={'syn_ramstyle': "block_ram"})
524 m.submodules.rd_pteway = rd_pteway = pteway.read_port()
525 m.submodules.wr_pteway = wr_pteway = pteway.write_port(
526 granularity=cfg.TLB_PTE_BITS)
527
528 # commented out for now, can be put in if Memory.reset can be
529 # used for tlbie&doall to reset the entire Memory to zero in 1 cycle
530 #validm = Memory(depth=TLB_SET_SIZE, width=TLB_NUM_WAYS)
531 #m.submodules.rd_valid = rd_valid = validm.read_port()
532 #m.submodules.wr_valid = wr_valid = validm.write_port(
533 #granularity=1)
534
535 # connect up read and write addresses to Valid/PTE/TAG SRAMs
536 m.d.comb += rd_pteway.addr.eq(self.tlb_read_index)
537 m.d.comb += rd_tagway.addr.eq(self.tlb_read_index)
538 #m.d.comb += rd_valid.addr.eq(self.tlb_read_index)
539 m.d.comb += wr_tagway.addr.eq(tlb_req_index)
540 m.d.comb += wr_pteway.addr.eq(tlb_req_index)
541 #m.d.comb += wr_valid.addr.eq(tlb_req_index)
542
543 updated = Signal()
544 v_updated = Signal()
545 tb_out = Signal(cfg.TLB_TAG_WAY_BITS) # tlb_way_tags_t
546 db_out = Signal(cfg.TLB_NUM_WAYS) # tlb_way_valids_t
547 pb_out = Signal(cfg.TLB_PTE_WAY_BITS) # tlb_way_ptes_t
548 dv = Signal(cfg.TLB_NUM_WAYS) # tlb_way_valids_t
549
550 comb += dv.eq(dtlb_valid[tlb_req_index])
551 comb += db_out.eq(dv)
552
553 with m.If(self.tlbie & self.doall):
554 # clear all valid bits at once
555 # XXX hmmm, validm _could_ use Memory reset here...
556 for i in range(cfg.TLB_SET_SIZE):
557 sync += dtlb_valid[i].eq(0)
558 with m.Elif(self.tlbie):
559 # invalidate just the hit_way
560 with m.If(self.tlb_hit.valid):
561 comb += db_out.bit_select(self.tlb_hit.way, 1).eq(0)
562 comb += v_updated.eq(1)
563 with m.Elif(self.tlbwe):
564 # write to the requested tag and PTE
565 comb += cfg.write_tlb_tag(self.repl_way, tb_out, self.eatag)
566 comb += cfg.write_tlb_pte(self.repl_way, pb_out, self.pte_data)
567 # set valid bit
568 comb += db_out.bit_select(self.repl_way, 1).eq(1)
569
570 comb += updated.eq(1)
571 comb += v_updated.eq(1)
572
573 # above, sometimes valid is requested to be updated but data not
574 # therefore split them out, here. note the granularity thing matches
575 # with the shift-up of the eatag/pte_data into the correct TLB way.
576         # thus it is not necessary to write the entire lot, just the portion
577 # being altered: hence writing the *old* copy of the row is not needed
578 with m.If(updated): # PTE and TAG to be written
579 comb += wr_pteway.data.eq(pb_out)
580 comb += wr_pteway.en.eq(1<<self.repl_way)
581 comb += wr_tagway.data.eq(tb_out)
582 comb += wr_tagway.en.eq(1<<self.repl_way)
583 with m.If(v_updated): # Valid to be written
584 sync += dtlb_valid[tlb_req_index].eq(db_out)
585 #comb += wr_valid.data.eq(db_out)
586 #comb += wr_valid.en.eq(1<<self.repl_way)
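        # Illustrative note on the granularity trick above (a sketch, assuming
        # the default TLB_NUM_WAYS=2 and TLB_PTE_BITS=64): the PTE write port
        # is then 128 bits wide with granularity 64, so wr_pteway.en is 2 bits
        # and en == 0b10 writes only bits [64:128], i.e. only way 1's PTE slice.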
587
588 # select one TLB way, use a register here
589 r_delay = Signal()
590 sync += r_delay.eq(self.tlb_read)
591 # first deal with the valids, which are not in a Memory.
592 # tlb way valid is output on a 1 clock delay with sync,
593 # but have to explicitly deal with "forwarding" here
594 with m.If(self.tlb_read):
595 with m.If(v_updated): # write *and* read in same cycle: forward
596 sync += self.tlb_way.valid.eq(db_out)
597 with m.Else():
598 sync += self.tlb_way.valid.eq(dtlb_valid[self.tlb_read_index])
599 # now deal with the Memory-read case. the output must remain
600 # valid (stable) even when a read-request is not made, but stable
601 # on a one-clock delay, hence the register
602 r_tlb_way = cfg.TLBRecord("r_tlb_way")
603 with m.If(r_delay):
604 # on one clock delay, capture the contents of the read port(s)
605 comb += self.tlb_way.tag.eq(rd_tagway.data)
606 comb += self.tlb_way.pte.eq(rd_pteway.data)
607 sync += r_tlb_way.tag.eq(rd_tagway.data)
608 sync += r_tlb_way.pte.eq(rd_pteway.data)
609 with m.Else():
610 # ... so that the register can output it when no read is requested
611 # it's rather overkill but better to be safe than sorry
612 comb += self.tlb_way.tag.eq(r_tlb_way.tag)
613 comb += self.tlb_way.pte.eq(r_tlb_way.pte)
614 #comb += self.tlb_way.eq(r_tlb_way)
615
616 return m
617
618
619 class DCachePendingHit(Elaboratable):
620
621 def __init__(self, cfg, tlb_way,
622 cache_i_validdx, cache_tag_set,
623 req_addr):
624
625 self.go = Signal()
626 self.virt_mode = Signal()
627 self.is_hit = Signal()
628 self.tlb_hit = cfg.TLBHit("tlb_hit")
629 self.hit_way = Signal(cfg.WAY_BITS)
630 self.rel_match = Signal()
631 self.req_index = Signal(cfg.INDEX_BITS)
632 self.reload_tag = Signal(cfg.TAG_BITS)
633
634 self.tlb_way = tlb_way
635 self.cache_i_validdx = cache_i_validdx
636 self.cache_tag_set = cache_tag_set
637 self.req_addr = req_addr
638 self.cfg = cfg
639
640 def elaborate(self, platform):
641 m = Module()
642 comb = m.d.comb
643 sync = m.d.sync
644
645 go = self.go
646 virt_mode = self.virt_mode
647 is_hit = self.is_hit
648 tlb_way = self.tlb_way
649 cache_i_validdx = self.cache_i_validdx
650 cache_tag_set = self.cache_tag_set
651 req_addr = self.req_addr
652 tlb_hit = self.tlb_hit
653 hit_way = self.hit_way
654 rel_match = self.rel_match
655 req_index = self.req_index
656 reload_tag = self.reload_tag
657 cfg = self.cfg
658
659 hit_set = Array(Signal(name="hit_set_%d" % i) \
660 for i in range(cfg.TLB_NUM_WAYS))
661 rel_matches = Array(Signal(name="rel_matches_%d" % i) \
662 for i in range(cfg.TLB_NUM_WAYS))
663 hit_way_set = cfg.HitWaySet()
664
665 # Test if pending request is a hit on any way
666 # In order to make timing in virtual mode,
667 # when we are using the TLB, we compare each
668 # way with each of the real addresses from each way of
669 # the TLB, and then decide later which match to use.
670
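        # Illustrative cost note (not from the original source): with the
        # defaults (TLB_NUM_WAYS=2, NUM_WAYS=2) one real address is formed per
        # TLB way and each is compared against every cache way's tag, i.e.
        # 2*2 = 4 tag comparators; the final choice is then muxed by
        # tlb_hit.way in the m.If(tlb_hit.valid) block below.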
671 with m.If(virt_mode):
672 for j in range(cfg.TLB_NUM_WAYS): # tlb_num_way_t
673 s_tag = Signal(cfg.TAG_BITS, name="s_tag%d" % j)
674 s_hit = Signal(name="s_hit%d" % j)
675 s_pte = Signal(cfg.TLB_PTE_BITS, name="s_pte%d" % j)
676 s_ra = Signal(cfg.REAL_ADDR_BITS, name="s_ra%d" % j)
677                 # read the PTE, calc the Real Address, get the tag
678 comb += s_pte.eq(cfg.read_tlb_pte(j, tlb_way.pte))
679 comb += s_ra.eq(Cat(req_addr[0:cfg.TLB_LG_PGSZ],
680 s_pte[cfg.TLB_LG_PGSZ:cfg.REAL_ADDR_BITS]))
681 comb += s_tag.eq(cfg.get_tag(s_ra))
682                 # for each way check the tag against the cache tag set
683 for i in range(cfg.NUM_WAYS): # way_t
684 is_tag_hit = Signal(name="is_tag_hit_%d_%d" % (j, i))
685 comb += is_tag_hit.eq(go & cache_i_validdx[i] &
686 (cfg.read_tag(i, cache_tag_set) == s_tag)
687 & (tlb_way.valid[j]))
688 with m.If(is_tag_hit):
689 comb += hit_way_set[j].eq(i)
690 comb += s_hit.eq(1)
691 comb += hit_set[j].eq(s_hit)
692 comb += rel_matches[j].eq(s_tag == reload_tag)
693 with m.If(tlb_hit.valid):
694 comb += is_hit.eq(hit_set[tlb_hit.way])
695 comb += hit_way.eq(hit_way_set[tlb_hit.way])
696 comb += rel_match.eq(rel_matches[tlb_hit.way])
697 with m.Else():
698 s_tag = Signal(cfg.TAG_BITS)
699 comb += s_tag.eq(cfg.get_tag(req_addr))
700 for i in range(cfg.NUM_WAYS): # way_t
701 is_tag_hit = Signal(name="is_tag_hit_%d" % i)
702 comb += is_tag_hit.eq(go & cache_i_validdx[i] &
703 (cfg.read_tag(i, cache_tag_set) == s_tag))
704 with m.If(is_tag_hit):
705 comb += hit_way.eq(i)
706 comb += is_hit.eq(1)
707 with m.If(s_tag == reload_tag):
708 comb += rel_match.eq(1)
709
710 return m
711
712
713 class DCache(Elaboratable, DCacheConfig):
714 """Set associative dcache write-through
715
716 TODO (in no specific order):
717 * See list in icache.vhdl
718 * Complete load misses on the cycle when WB data comes instead of
719 at the end of line (this requires dealing with requests coming in
720 while not idle...)
721 """
722 def __init__(self, pspec=None):
723 self.d_in = LoadStore1ToDCacheType("d_in")
724 self.d_out = DCacheToLoadStore1Type("d_out")
725
726 self.m_in = MMUToDCacheType("m_in")
727 self.m_out = DCacheToMMUType("m_out")
728
729 self.stall_out = Signal()
730 self.any_stall_out = Signal()
731 self.dreq_when_stall = Signal()
732 self.mreq_when_stall = Signal()
733
734 # standard naming (wired to non-standard for compatibility)
735 self.bus = Interface(addr_width=32,
736 data_width=64,
737 granularity=8,
738 features={'stall'},
739 #alignment=0,
740 name="dcache")
741
742 self.log_out = Signal(20)
743
744 # test if microwatt compatibility is to be enabled
745 self.microwatt_compat = (hasattr(pspec, "microwatt_compat") and
746 (pspec.microwatt_compat == True))
747
748 if self.microwatt_compat:
749 # reduce way sizes and num lines
750 super().__init__(NUM_LINES = 2,
751 NUM_WAYS = 1,
752 TLB_NUM_WAYS = 1,
753 TLB_SET_SIZE=2) # XXX needs device-tree entry
754 else:
755 super().__init__()
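        # Back-of-the-envelope note (illustrative): the microwatt_compat
        # geometry shrinks the data RAM from 64 lines x 64 bytes x 2 ways
        # (8 KiB) to 2 lines x 64 bytes x 1 way (128 bytes), and the DTLB
        # from 64 sets x 2 ways to 2 sets x 1 way, which is what lets this
        # fit the ECP5 45k parts.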
756
757 def stage_0(self, m, r0, r1, r0_full):
758 """Latch the request in r0.req as long as we're not stalling
759 """
760 comb = m.d.comb
761 sync = m.d.sync
762 d_in, d_out, m_in = self.d_in, self.d_out, self.m_in
763
764 r = RegStage0("stage0")
765
766 # TODO, this goes in unit tests and formal proofs
767 with m.If(d_in.valid & m_in.valid):
768 sync += Display("request collision loadstore vs MMU")
769
770 with m.If(m_in.valid):
771 comb += r.req.valid.eq(1)
772 comb += r.req.load.eq(~(m_in.tlbie | m_in.tlbld))# no invalidate
773 comb += r.req.dcbz.eq(0)
774 comb += r.req.nc.eq(0)
775 comb += r.req.reserve.eq(0)
776 comb += r.req.virt_mode.eq(0)
777 comb += r.req.priv_mode.eq(1)
778 comb += r.req.addr.eq(m_in.addr)
779 comb += r.req.data.eq(m_in.pte)
780 comb += r.req.byte_sel.eq(~0) # Const -1 sets all to 0b111....
781 comb += r.tlbie.eq(m_in.tlbie)
782 comb += r.doall.eq(m_in.doall)
783 comb += r.tlbld.eq(m_in.tlbld)
784 comb += r.mmu_req.eq(1)
785 comb += r.d_valid.eq(1)
786 m.d.sync += Display(" DCACHE req mmu addr %x pte %x ld %d",
787 m_in.addr, m_in.pte, r.req.load)
788
789 with m.Else():
790 comb += r.req.eq(d_in)
791 comb += r.req.data.eq(0)
792 comb += r.tlbie.eq(0)
793 comb += r.doall.eq(0)
794 comb += r.tlbld.eq(0)
795 comb += r.mmu_req.eq(0)
796 comb += r.d_valid.eq(0)
797
798 sync += r0_full.eq(0)
799 with m.If((~r1.full & ~d_in.hold) | ~r0_full):
800 sync += r0.eq(r)
801 sync += r0_full.eq(r.req.valid)
802 with m.Elif(~r0.d_valid):
803 # Sample data the cycle after a request comes in from loadstore1.
804 # If another request has come in already then the data will get
805 # put directly into req.data below.
806 sync += r0.req.data.eq(d_in.data)
807 sync += r0.d_valid.eq(1)
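            # Illustrative timeline (sketch): d_in.valid is raised in cycle N,
            # the request is latched into r0 at the end of cycle N, and the
            # store data is sampled here in cycle N+1 (hence r0.d_valid) --
            # matching the "data is sampled the cycle AFTER valid" note in
            # the module docstring.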
808 with m.If(d_in.valid):
809 m.d.sync += Display(" DCACHE req cache "
810 "virt %d addr %x data %x ld %d",
811 r.req.virt_mode, r.req.addr,
812 r.req.data, r.req.load)
813
814 def tlb_read(self, m, r0_stall, tlb_way):
815 """TLB
816 Operates in the second cycle on the request latched in r0.req.
817 TLB updates write the entry at the end of the second cycle.
818 """
819 comb = m.d.comb
820 sync = m.d.sync
821 m_in, d_in = self.m_in, self.d_in
822
823 addrbits = Signal(self.TLB_SET_BITS)
824
825 amin = self.TLB_LG_PGSZ
826 amax = self.TLB_LG_PGSZ + self.TLB_SET_BITS
827
828 with m.If(m_in.valid):
829 comb += addrbits.eq(m_in.addr[amin : amax])
830 with m.Else():
831 comb += addrbits.eq(d_in.addr[amin : amax])
832
833 # If we have any op and the previous op isn't finished,
834 # then keep the same output for next cycle.
835 d = self.dtlb_update
836 comb += d.tlb_read_index.eq(addrbits)
837 comb += d.tlb_read.eq(~r0_stall)
838 comb += tlb_way.eq(d.tlb_way)
839
840 def maybe_tlb_plrus(self, m, r1, tlb_plru_victim, tlb_req_index):
841 """Generate TLB PLRUs
842 """
843 comb = m.d.comb
844 sync = m.d.sync
845
846 if self.TLB_NUM_WAYS == 0:
847 return
848
849 # suite of PLRUs with a selection and output mechanism
850 tlb_plrus = PLRUs("d_tlb", self.TLB_SET_SIZE, self.TLB_WAY_BITS)
851 m.submodules.tlb_plrus = tlb_plrus
852 comb += tlb_plrus.way.eq(r1.tlb_hit.way)
853 comb += tlb_plrus.valid.eq(r1.tlb_hit.valid)
854 comb += tlb_plrus.index.eq(r1.tlb_hit_index)
855 comb += tlb_plrus.isel.eq(tlb_req_index) # select victim
856 comb += tlb_plru_victim.eq(tlb_plrus.o_index) # selected victim
857
858 def tlb_search(self, m, tlb_req_index, r0, r0_valid,
859 tlb_way,
860 pte, tlb_hit, valid_ra, perm_attr, ra):
861
862 comb = m.d.comb
863
864 hitway = Signal(self.TLB_WAY_BITS)
865 hit = Signal()
866 eatag = Signal(self.TLB_EA_TAG_BITS)
867
868 self.TLB_LG_END = self.TLB_LG_PGSZ + self.TLB_SET_BITS
869 r0_req_addr = r0.req.addr[self.TLB_LG_PGSZ : self.TLB_LG_END]
870 comb += tlb_req_index.eq(r0_req_addr)
871 comb += eatag.eq(r0.req.addr[self.TLB_LG_END : 64 ])
872
873 for i in range(self.TLB_NUM_WAYS):
874 is_tag_hit = Signal(name="is_tag_hit%d" % i)
875 tlb_tag = Signal(self.TLB_EA_TAG_BITS, name="tlb_tag%d" % i)
876 comb += tlb_tag.eq(self.read_tlb_tag(i, tlb_way.tag))
877 comb += is_tag_hit.eq((tlb_way.valid[i]) & (tlb_tag == eatag))
878 with m.If(is_tag_hit):
879 comb += hitway.eq(i)
880 comb += hit.eq(1)
881
882 comb += tlb_hit.valid.eq(hit & r0_valid)
883 comb += tlb_hit.way.eq(hitway)
884
885 with m.If(tlb_hit.valid):
886 comb += pte.eq(self.read_tlb_pte(hitway, tlb_way.pte))
887 comb += valid_ra.eq(tlb_hit.valid | ~r0.req.virt_mode)
888
889 with m.If(r0.req.virt_mode):
890 comb += ra.eq(Cat(Const(0, self.ROW_OFF_BITS),
891 r0.req.addr[self.ROW_OFF_BITS:self.TLB_LG_PGSZ],
892 pte[self.TLB_LG_PGSZ:self.REAL_ADDR_BITS]))
893 comb += perm_attr.reference.eq(pte[8])
894 comb += perm_attr.changed.eq(pte[7])
895 comb += perm_attr.nocache.eq(pte[5])
896 comb += perm_attr.priv.eq(pte[3])
897 comb += perm_attr.rd_perm.eq(pte[2])
898 comb += perm_attr.wr_perm.eq(pte[1])
899 with m.Else():
900 comb += ra.eq(Cat(Const(0, self.ROW_OFF_BITS),
901 r0.req.addr[self.ROW_OFF_BITS:self.REAL_ADDR_BITS]))
902 comb += perm_attr.reference.eq(1)
903 comb += perm_attr.changed.eq(1)
904 comb += perm_attr.nocache.eq(0)
905 comb += perm_attr.priv.eq(1)
906 comb += perm_attr.rd_perm.eq(1)
907 comb += perm_attr.wr_perm.eq(1)
908
909 with m.If(valid_ra):
910 m.d.sync += Display("DCACHE virt mode %d hit %d ra %x pte %x",
911 r0.req.virt_mode, tlb_hit.valid, ra, pte)
912 m.d.sync += Display(" perm ref=%d", perm_attr.reference)
913 m.d.sync += Display(" perm chg=%d", perm_attr.changed)
914 m.d.sync += Display(" perm noc=%d", perm_attr.nocache)
915 m.d.sync += Display(" perm prv=%d", perm_attr.priv)
916 m.d.sync += Display(" perm rdp=%d", perm_attr.rd_perm)
917 m.d.sync += Display(" perm wrp=%d", perm_attr.wr_perm)
918
919 def tlb_update(self, m, r0_valid, r0, tlb_req_index,
920 tlb_hit, tlb_plru_victim):
921
922 comb = m.d.comb
923 sync = m.d.sync
924
925 tlbie = Signal()
926 tlbwe = Signal()
927
928 comb += tlbie.eq(r0_valid & r0.tlbie)
929 comb += tlbwe.eq(r0_valid & r0.tlbld)
930
931 d = self.dtlb_update
932
933 comb += d.tlbie.eq(tlbie)
934 comb += d.tlbwe.eq(tlbwe)
935 comb += d.doall.eq(r0.doall)
936 comb += d.tlb_hit.eq(tlb_hit)
937 comb += d.tlb_req_index.eq(tlb_req_index)
938
939 with m.If(tlb_hit.valid):
940 comb += d.repl_way.eq(tlb_hit.way)
941 with m.Else():
942 comb += d.repl_way.eq(tlb_plru_victim)
943 comb += d.eatag.eq(r0.req.addr[self.TLB_LG_PGSZ + self.TLB_SET_BITS:64])
944 comb += d.pte_data.eq(r0.req.data)
945
946 def maybe_plrus(self, m, r1, plru_victim):
947 """Generate PLRUs
948 """
949 comb = m.d.comb
950 sync = m.d.sync
951
952 if self.TLB_NUM_WAYS == 0:
953 return
954
955 # suite of PLRUs with a selection and output mechanism
956 m.submodules.plrus = plrus = PLRUs("dtag", self.NUM_LINES,
957 self.WAY_BITS)
958 comb += plrus.way.eq(r1.hit_way)
959 comb += plrus.valid.eq(r1.cache_hit)
960 comb += plrus.index.eq(r1.hit_index)
961 comb += plrus.isel.eq(r1.store_index) # select victim
962 comb += plru_victim.eq(plrus.o_index) # selected victim
963
964 def cache_tag_read(self, m, r0_stall, req_index, cache_tag_set):
965 """Cache tag RAM read port
966 """
967 comb = m.d.comb
968 sync = m.d.sync
969
970 m_in, d_in = self.m_in, self.d_in
971
972 # synchronous tag read-port: NOT TRANSPARENT (cannot pass through
973 # write-to-a-read at the same time), seems to pass tests ok
974 m.submodules.rd_tag = rd_tag = self.tagmem.read_port(transparent=False)
975
976 index = Signal(self.INDEX_BITS)
977
978 with m.If(r0_stall):
979 comb += index.eq(req_index)
980 with m.Elif(m_in.valid):
981 comb += index.eq(self.get_index(m_in.addr))
982 with m.Else():
983 comb += index.eq(self.get_index(d_in.addr))
984 comb += rd_tag.addr.eq(index)
985 comb += cache_tag_set.eq(rd_tag.data) # read-port is a 1-clock delay
986
987 def dcache_request(self, m, r0, ra, req_index, req_row, req_tag,
988 r0_valid, r1, cache_valids, replace_way,
989 use_forward1_next, use_forward2_next,
990 req_hit_way, plru_victim, rc_ok, perm_attr,
991 valid_ra, perm_ok, access_ok, req_op, req_go,
992 tlb_hit, tlb_way, cache_tag_set,
993 cancel_store, req_same_tag, r0_stall, early_req_row):
994 """Cache request parsing and hit detection
995 """
996
997 comb = m.d.comb
998 m_in, d_in = self.m_in, self.d_in
999
1000 is_hit = Signal()
1001 hit_way = Signal(self.WAY_BITS)
1002 op = Signal(Op)
1003 opsel = Signal(3)
1004 go = Signal()
1005 nc = Signal()
1006 cache_i_validdx = Signal(self.NUM_WAYS)
1007
1008 # Extract line, row and tag from request
1009 comb += req_index.eq(self.get_index(r0.req.addr))
1010 comb += req_row.eq(self.get_row(r0.req.addr))
1011 comb += req_tag.eq(self.get_tag(ra))
1012
1013 if False: # display on comb is a bit... busy.
1014 comb += Display("dcache_req addr:%x ra: %x idx: %x tag: %x row: %x",
1015 r0.req.addr, ra, req_index, req_tag, req_row)
1016
1017 comb += go.eq(r0_valid & ~(r0.tlbie | r0.tlbld) & ~r1.ls_error)
1018 comb += cache_i_validdx.eq(cache_valids[req_index])
1019
1020 m.submodules.dcache_pend = dc = DCachePendingHit(self, tlb_way,
1021 cache_i_validdx, cache_tag_set,
1022 r0.req.addr)
1023 comb += dc.tlb_hit.eq(tlb_hit)
1024 comb += dc.reload_tag.eq(r1.reload_tag)
1025 comb += dc.virt_mode.eq(r0.req.virt_mode)
1026 comb += dc.go.eq(go)
1027 comb += dc.req_index.eq(req_index)
1028
1029 comb += is_hit.eq(dc.is_hit)
1030 comb += hit_way.eq(dc.hit_way)
1031 comb += req_same_tag.eq(dc.rel_match)
1032
1033 # See if the request matches the line currently being reloaded
1034 with m.If((r1.state == State.RELOAD_WAIT_ACK) &
1035 (req_index == r1.store_index) & req_same_tag):
1036 # For a store, consider this a hit even if the row isn't
1037 # valid since it will be by the time we perform the store.
1038 # For a load, check the appropriate row valid bit.
1039 rrow = Signal(self.ROW_LINE_BITS)
1040 comb += rrow.eq(req_row)
1041 valid = r1.rows_valid[rrow]
1042 comb += is_hit.eq((~r0.req.load) | valid)
1043 comb += hit_way.eq(replace_way)
1044
1045 # Whether to use forwarded data for a load or not
1046 with m.If((self.get_row(r1.req.real_addr) == req_row) &
1047 (r1.req.hit_way == hit_way)):
1048 # Only need to consider r1.write_bram here, since if we
1049 # are writing refill data here, then we don't have a
1050 # cache hit this cycle on the line being refilled.
1051 # (There is the possibility that the load following the
1052 # load miss that started the refill could be to the old
1053 # contents of the victim line, since it is a couple of
1054 # cycles after the refill starts before we see the updated
1055 # cache tag. In that case we don't use the bypass.)
1056 comb += use_forward1_next.eq(r1.write_bram)
1057 with m.If((r1.forward_row1 == req_row) & (r1.forward_way1 == hit_way)):
1058 comb += use_forward2_next.eq(r1.forward_valid1)
1059
1060 # The way that matched on a hit
1061 comb += req_hit_way.eq(hit_way)
1062
1063 # The way to replace on a miss
1064 with m.If(r1.write_tag):
1065 comb += replace_way.eq(plru_victim)
1066 with m.Else():
1067 comb += replace_way.eq(r1.store_way)
1068
1069 # work out whether we have permission for this access
1070 # NB we don't yet implement AMR, thus no KUAP
1071 comb += rc_ok.eq(perm_attr.reference
1072 & (r0.req.load | perm_attr.changed))
1073 comb += perm_ok.eq((r0.req.priv_mode | (~perm_attr.priv)) &
1074 (perm_attr.wr_perm |
1075 (r0.req.load & perm_attr.rd_perm)))
1076 comb += access_ok.eq(valid_ra & perm_ok & rc_ok)
1077
1078 # Combine the request and cache hit status to decide what
1079 # operation needs to be done
1080 comb += nc.eq(r0.req.nc | perm_attr.nocache)
1081 comb += op.eq(Op.OP_NONE)
1082 with m.If(go):
1083 with m.If(~access_ok):
1084 m.d.sync += Display("DCACHE access fail valid_ra=%d p=%d rc=%d",
1085 valid_ra, perm_ok, rc_ok)
1086 comb += op.eq(Op.OP_BAD)
1087 with m.Elif(cancel_store):
1088 m.d.sync += Display("DCACHE cancel store")
1089 comb += op.eq(Op.OP_STCX_FAIL)
1090 with m.Else():
1091 m.d.sync += Display("DCACHE valid_ra=%d nc=%d ld=%d",
1092 valid_ra, nc, r0.req.load)
1093 comb += opsel.eq(Cat(is_hit, nc, r0.req.load))
1094 with m.Switch(opsel):
1095 with m.Case(0b101): comb += op.eq(Op.OP_LOAD_HIT)
1096 with m.Case(0b100): comb += op.eq(Op.OP_LOAD_MISS)
1097 with m.Case(0b110): comb += op.eq(Op.OP_LOAD_NC)
1098 with m.Case(0b001): comb += op.eq(Op.OP_STORE_HIT)
1099 with m.Case(0b000): comb += op.eq(Op.OP_STORE_MISS)
1100 with m.Case(0b010): comb += op.eq(Op.OP_STORE_MISS)
1101 with m.Case(0b011): comb += op.eq(Op.OP_BAD)
1102 with m.Case(0b111): comb += op.eq(Op.OP_BAD)
1103 comb += req_op.eq(op)
1104 comb += req_go.eq(go)
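        # Illustrative decode of opsel above (not from the original source):
        # opsel = Cat(is_hit, nc, load), so bit 0 is the hit flag, bit 1 the
        # no-cache attribute and bit 2 the load/store flag.  For example
        # 0b101 is a cacheable load that hit (OP_LOAD_HIT), while 0b111 --
        # a cache hit on a no-cache load -- is a paradox and maps to OP_BAD.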
1105
1106 # Version of the row number that is valid one cycle earlier
1107 # in the cases where we need to read the cache data BRAM.
1108 # If we're stalling then we need to keep reading the last
1109 # row requested.
1110 with m.If(~r0_stall):
1111 with m.If(m_in.valid):
1112 comb += early_req_row.eq(self.get_row(m_in.addr))
1113 with m.Else():
1114 comb += early_req_row.eq(self.get_row(d_in.addr))
1115 with m.Else():
1116 comb += early_req_row.eq(req_row)
1117
1118 def reservation_comb(self, m, cancel_store, set_rsrv, clear_rsrv,
1119 r0_valid, r0, reservation):
1120 """Handle load-with-reservation and store-conditional instructions
1121 """
1122 comb = m.d.comb
1123
1124 with m.If(r0_valid & r0.req.reserve):
1125 # XXX generate alignment interrupt if address
1126 # is not aligned XXX or if r0.req.nc = '1'
1127 with m.If(r0.req.load):
1128 comb += set_rsrv.eq(r0.req.atomic_last) # load with reservation
1129 with m.Else():
1130 comb += clear_rsrv.eq(r0.req.atomic_last) # store conditional
1131 with m.If((~reservation.valid) |
1132 (r0.req.addr[self.LINE_OFF_BITS:64] !=
1133 reservation.addr)):
1134 comb += cancel_store.eq(1)
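        # Illustrative sequence (sketch): a larx with atomic_last=1 sets the
        # reservation; a later stcx to a different cache line, or with no
        # valid reservation, drives cancel_store, which dcache_request turns
        # into OP_STCX_FAIL and the writeback logic reports as
        # d_out.store_done=0.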
1135
1136 def reservation_reg(self, m, r0_valid, access_ok, set_rsrv, clear_rsrv,
1137 reservation, r0):
1138 comb = m.d.comb
1139 sync = m.d.sync
1140
1141 with m.If(r0_valid & access_ok):
1142 with m.If(clear_rsrv):
1143 sync += reservation.valid.eq(0)
1144 with m.Elif(set_rsrv):
1145 sync += reservation.valid.eq(1)
1146 sync += reservation.addr.eq(r0.req.addr[self.LINE_OFF_BITS:64])
1147
1148 def writeback_control(self, m, r1, cache_out_row):
1149 """Return data for loads & completion control logic
1150 """
1151 comb = m.d.comb
1152 sync = m.d.sync
1153 d_out, m_out = self.d_out, self.m_out
1154
1155 data_out = Signal(64)
1156 data_fwd = Signal(64)
1157
1158         # Use the bypass if we are reading the row that was
1159 # written 1 or 2 cycles ago, including for the
1160 # slow_valid = 1 case (i.e. completing a load
1161 # miss or a non-cacheable load).
1162 with m.If(r1.use_forward1):
1163 comb += data_fwd.eq(r1.forward_data1)
1164 with m.Else():
1165 comb += data_fwd.eq(r1.forward_data2)
1166
1167 comb += data_out.eq(cache_out_row)
1168
1169 for i in range(8):
1170 with m.If(r1.forward_sel[i]):
1171 dsel = data_fwd.word_select(i, 8)
1172 comb += data_out.word_select(i, 8).eq(dsel)
1173
1174 # DCache output to LoadStore
1175 comb += d_out.valid.eq(r1.ls_valid)
1176 comb += d_out.data.eq(data_out)
1177 comb += d_out.store_done.eq(~r1.stcx_fail)
1178 comb += d_out.error.eq(r1.ls_error)
1179 comb += d_out.cache_paradox.eq(r1.cache_paradox)
1180
1181 # Outputs to MMU
1182 comb += m_out.done.eq(r1.mmu_done)
1183 comb += m_out.err.eq(r1.mmu_error)
1184 comb += m_out.data.eq(data_out)
1185
1186 # We have a valid load or store hit or we just completed
1187 # a slow op such as a load miss, a NC load or a store
1188 #
1189 # Note: the load hit is delayed by one cycle. However it
1190 # can still not collide with r.slow_valid (well unless I
1191 # miscalculated) because slow_valid can only be set on a
1192 # subsequent request and not on its first cycle (the state
1193 # machine must have advanced), which makes slow_valid
1194 # at least 2 cycles from the previous hit_load_valid.
1195
1196 # Sanity: Only one of these must be set in any given cycle
1197
1198 if False: # TODO: need Display to get this to work
1199 assert (r1.slow_valid & r1.stcx_fail) != 1, \
1200 "unexpected slow_valid collision with stcx_fail"
1201
1202 assert ((r1.slow_valid | r1.stcx_fail) | r1.hit_load_valid) != 1, \
1203 "unexpected hit_load_delayed collision with slow_valid"
1204
1205 with m.If(~r1.mmu_req):
1206 # Request came from loadstore1...
1207 # Load hit case is the standard path
1208 with m.If(r1.hit_load_valid):
1209 sync += Display("completing load hit data=%x", data_out)
1210
1211 # error cases complete without stalling
1212 with m.If(r1.ls_error):
1213 with m.If(r1.dcbz):
1214 sync += Display("completing dcbz with error")
1215 with m.Else():
1216 sync += Display("completing ld/st with error")
1217
1218 # Slow ops (load miss, NC, stores)
1219 with m.If(r1.slow_valid):
1220 sync += Display("completing store or load miss adr=%x data=%x",
1221 r1.req.real_addr, data_out)
1222
1223 with m.Else():
1224 # Request came from MMU
1225 with m.If(r1.hit_load_valid):
1226 sync += Display("completing load hit to MMU, data=%x",
1227 m_out.data)
1228 # error cases complete without stalling
1229 with m.If(r1.mmu_error):
1230 sync += Display("combpleting MMU ld with error")
1231
1232 # Slow ops (i.e. load miss)
1233 with m.If(r1.slow_valid):
1234 sync += Display("completing MMU load miss, adr=%x data=%x",
1235 r1.req.real_addr, m_out.data)
1236
1237 def rams(self, m, r1, early_req_row, cache_out_row, replace_way):
1238 """rams
1239 Generate a cache RAM for each way. This handles the normal
1240 reads, writes from reloads and the special store-hit update
1241 path as well.
1242
1243 Note: the BRAMs have an extra read buffer, meaning the output
1244 is pipelined an extra cycle. This differs from the
1245 icache. The writeback logic needs to take that into
1246 account by using 1-cycle delayed signals for load hits.
1247 """
1248 comb = m.d.comb
1249 bus = self.bus
1250
1251         # Binary-to-Unary one-hot decoders here. The replace-way one-hot is gated
1252 # (enabled) by bus.ack, not-write-bram, and state RELOAD_WAIT_ACK
1253 m.submodules.rams_replace_way_e = rwe = Decoder(self.NUM_WAYS)
1254 comb += rwe.n.eq(~((r1.state == State.RELOAD_WAIT_ACK) & bus.ack &
1255 ~r1.write_bram))
1256 comb += rwe.i.eq(replace_way)
1257
1258 m.submodules.rams_hit_way_e = hwe = Decoder(self.NUM_WAYS)
1259 comb += hwe.i.eq(r1.hit_way)
1260
1261 # this one is gated with write_bram, and replace_way_e can never be
1262 # set at the same time. that means that do_write can OR the outputs
1263 m.submodules.rams_hit_req_way_e = hre = Decoder(self.NUM_WAYS)
1264 comb += hre.n.eq(~r1.write_bram) # Decoder.n is inverted
1265 comb += hre.i.eq(r1.req.hit_way)
1266
1267 # common Signals
1268 do_read = Signal()
1269 wr_addr = Signal(self.ROW_BITS)
1270 wr_data = Signal(WB_DATA_BITS)
1271 wr_sel = Signal(self.ROW_SIZE)
1272 rd_addr = Signal(self.ROW_BITS)
1273
1274 comb += do_read.eq(1) # always enable
1275 comb += rd_addr.eq(early_req_row)
1276
1277 # Write mux:
1278 #
1279 # Defaults to wishbone read responses (cache refill)
1280 #
1281 # For timing, the mux on wr_data/sel/addr is not
1282 # dependent on anything other than the current state.
1283
1284 with m.If(r1.write_bram):
1285 # Write store data to BRAM. This happens one
1286 # cycle after the store is in r0.
1287 comb += wr_data.eq(r1.req.data)
1288 comb += wr_sel.eq(r1.req.byte_sel)
1289 comb += wr_addr.eq(self.get_row(r1.req.real_addr))
1290
1291 with m.Else():
1292 # Otherwise, we might be doing a reload or a DCBZ
1293 with m.If(r1.dcbz):
1294 comb += wr_data.eq(0)
1295 with m.Else():
1296 comb += wr_data.eq(bus.dat_r)
1297 comb += wr_addr.eq(r1.store_row)
1298 comb += wr_sel.eq(~0) # all 1s
1299
1300 # set up Cache Rams
1301 for i in range(self.NUM_WAYS):
1302 do_write = Signal(name="do_wr%d" % i)
1303 wr_sel_m = Signal(self.ROW_SIZE, name="wr_sel_m_%d" % i)
1304 d_out= Signal(WB_DATA_BITS, name="dout_%d" % i) # cache_row_t
1305
1306 way = CacheRam(self.ROW_BITS, WB_DATA_BITS, ADD_BUF=True, ram_num=i)
1307 m.submodules["cacheram_%d" % i] = way
1308
1309 comb += way.rd_en.eq(do_read)
1310 comb += way.rd_addr.eq(rd_addr)
1311 comb += d_out.eq(way.rd_data_o)
1312 comb += way.wr_sel.eq(wr_sel_m)
1313 comb += way.wr_addr.eq(wr_addr)
1314 comb += way.wr_data.eq(wr_data)
1315
1316 # Cache hit reads
1317 with m.If(hwe.o[i]):
1318 comb += cache_out_row.eq(d_out)
1319
1320 # these are mutually-exclusive via their Decoder-enablers
1321 # (note: Decoder-enable is inverted)
1322 comb += do_write.eq(hre.o[i] | rwe.o[i])
1323
1324 # Mask write selects with do_write since BRAM
1325 # doesn't have a global write-enable
1326 with m.If(do_write):
1327 comb += wr_sel_m.eq(wr_sel)
1328
1329 # Cache hit synchronous machine for the easy case.
1330 # This handles load hits.
1331 # It also handles error cases (TLB miss, cache paradox)
1332 def dcache_fast_hit(self, m, req_op, r0_valid, r0, r1,
1333 req_hit_way, req_index, req_tag, access_ok,
1334 tlb_hit, tlb_req_index):
1335 comb = m.d.comb
1336 sync = m.d.sync
1337
1338 with m.If(req_op != Op.OP_NONE):
1339 sync += Display("op:%d addr:%x nc: %d idx: %x tag: %x way: %x",
1340 req_op, r0.req.addr, r0.req.nc,
1341 req_index, req_tag, req_hit_way)
1342
1343 with m.If(r0_valid):
1344 sync += r1.mmu_req.eq(r0.mmu_req)
1345
1346 # Fast path for load/store hits.
1347 # Set signals for the writeback controls.
1348 sync += r1.hit_way.eq(req_hit_way)
1349 sync += r1.hit_index.eq(req_index)
1350
1351 sync += r1.hit_load_valid.eq(req_op == Op.OP_LOAD_HIT)
1352 sync += r1.cache_hit.eq((req_op == Op.OP_LOAD_HIT) |
1353 (req_op == Op.OP_STORE_HIT))
1354
1355 with m.If(req_op == Op.OP_BAD):
1356 sync += Display("Signalling ld/st error "
1357 "ls_error=%i mmu_error=%i cache_paradox=%i",
1358 ~r0.mmu_req,r0.mmu_req,access_ok)
1359 sync += r1.ls_error.eq(~r0.mmu_req)
1360 sync += r1.mmu_error.eq(r0.mmu_req)
1361 sync += r1.cache_paradox.eq(access_ok)
1362 with m.Else():
1363 sync += r1.ls_error.eq(0)
1364 sync += r1.mmu_error.eq(0)
1365 sync += r1.cache_paradox.eq(0)
1366
1367 sync += r1.stcx_fail.eq(req_op == Op.OP_STCX_FAIL)
1368
1369 # Record TLB hit information for updating TLB PLRU
1370 sync += r1.tlb_hit.eq(tlb_hit)
1371 sync += r1.tlb_hit_index.eq(tlb_req_index)
1372
1373 # Memory accesses are handled by this state machine:
1374 #
1375 # * Cache load miss/reload (in conjunction with "rams")
1376 # * Load hits for non-cachable forms
1377 # * Stores (the collision case is handled in "rams")
1378 #
1379 # All wishbone requests generation is done here.
1380 # This machine operates at stage 1.
1381 def dcache_slow(self, m, r1, use_forward1_next, use_forward2_next,
1382 r0, replace_way,
1383 req_hit_way, req_same_tag,
1384 r0_valid, req_op, cache_valids, req_go, ra):
1385
1386 comb = m.d.comb
1387 sync = m.d.sync
1388 bus = self.bus
1389 d_in = self.d_in
1390
1391 m.submodules.wr_tag = wr_tag = self.tagmem.write_port(
1392 granularity=self.TAG_WIDTH)
1393
1394 req = MemAccessRequest(self, "mreq_ds")
1395
1396 r1_next_cycle = Signal()
1397 req_row = Signal(self.ROW_BITS)
1398 req_idx = Signal(self.INDEX_BITS)
1399 req_tag = Signal(self.TAG_BITS)
1400 comb += req_idx.eq(self.get_index(req.real_addr))
1401 comb += req_row.eq(self.get_row(req.real_addr))
1402 comb += req_tag.eq(self.get_tag(req.real_addr))
1403
1404 sync += r1.use_forward1.eq(use_forward1_next)
1405 sync += r1.forward_sel.eq(0)
1406
1407 with m.If(use_forward1_next):
1408 sync += r1.forward_sel.eq(r1.req.byte_sel)
1409 with m.Elif(use_forward2_next):
1410 sync += r1.forward_sel.eq(r1.forward_sel1)
1411
1412 sync += r1.forward_data2.eq(r1.forward_data1)
1413 with m.If(r1.write_bram):
1414 sync += r1.forward_data1.eq(r1.req.data)
1415 sync += r1.forward_sel1.eq(r1.req.byte_sel)
1416 sync += r1.forward_way1.eq(r1.req.hit_way)
1417 sync += r1.forward_row1.eq(self.get_row(r1.req.real_addr))
1418 sync += r1.forward_valid1.eq(1)
1419 with m.Else():
1420 with m.If(r1.dcbz):
1421 sync += r1.forward_data1.eq(0)
1422 with m.Else():
1423 sync += r1.forward_data1.eq(bus.dat_r)
1424 sync += r1.forward_sel1.eq(~0) # all 1s
1425 sync += r1.forward_way1.eq(replace_way)
1426 sync += r1.forward_row1.eq(r1.store_row)
1427 sync += r1.forward_valid1.eq(0)
1428
1429 # One cycle pulses reset
1430 sync += r1.slow_valid.eq(0)
1431 sync += r1.write_bram.eq(0)
1432 sync += r1.inc_acks.eq(0)
1433 sync += r1.dec_acks.eq(0)
1434
1435 sync += r1.ls_valid.eq(0)
1436 # complete tlbies and TLB loads in the third cycle
1437 sync += r1.mmu_done.eq(r0_valid & (r0.tlbie | r0.tlbld))
1438
1439 with m.If((req_op == Op.OP_LOAD_HIT) | (req_op == Op.OP_STCX_FAIL)):
1440 with m.If(r0.mmu_req):
1441 sync += r1.mmu_done.eq(1)
1442 with m.Else():
1443 sync += r1.ls_valid.eq(1)
1444
1445 with m.If(r1.write_tag):
1446 # Store new tag in selected way
1447 replace_way_onehot = Signal(self.NUM_WAYS)
1448 comb += replace_way_onehot.eq(1<<replace_way)
1449 ct = Signal(self.TAG_RAM_WIDTH)
1450 comb += ct.eq(r1.reload_tag << (replace_way*self.TAG_WIDTH))
1451 comb += wr_tag.en.eq(replace_way_onehot)
1452 comb += wr_tag.addr.eq(r1.store_index)
1453 comb += wr_tag.data.eq(ct)
1454
1455 sync += r1.store_way.eq(replace_way)
1456 sync += r1.write_tag.eq(0)
1457
1458 # Take request from r1.req if there is one there,
1459 # else from req_op, ra, etc.
1460 with m.If(r1.full):
1461 comb += req.eq(r1.req)
1462 with m.Else():
1463 comb += req.op.eq(req_op)
1464 comb += req.valid.eq(req_go)
1465 comb += req.mmu_req.eq(r0.mmu_req)
1466 comb += req.dcbz.eq(r0.req.dcbz)
1467 comb += req.real_addr.eq(ra)
1468
1469 with m.If(r0.req.dcbz):
1470 # force data to 0 for dcbz
1471 comb += req.data.eq(0)
1472 with m.Elif(r0.d_valid):
1473 comb += req.data.eq(r0.req.data)
1474 with m.Else():
1475 comb += req.data.eq(d_in.data)
1476
1477 # Select all bytes for dcbz
1478 # and for cacheable loads
1479 with m.If(r0.req.dcbz | (r0.req.load & ~r0.req.nc)):
1480 comb += req.byte_sel.eq(~0) # all 1s
1481 with m.Else():
1482 comb += req.byte_sel.eq(r0.req.byte_sel)
1483 comb += req.hit_way.eq(req_hit_way)
1484 comb += req.same_tag.eq(req_same_tag)
1485
1486 # Store the incoming request from r0,
1487 # if it is a slow request
1488 # Note that r1.full = 1 implies req_op = OP_NONE
1489 with m.If((req_op == Op.OP_LOAD_MISS)
1490 | (req_op == Op.OP_LOAD_NC)
1491 | (req_op == Op.OP_STORE_MISS)
1492 | (req_op == Op.OP_STORE_HIT)):
1493 sync += r1.req.eq(req)
1494 sync += r1.full.eq(1)
1495 # do not let r1.state RELOAD_WAIT_ACK or STORE_WAIT_ACK
1496 # destroy r1.req by overwriting r1.full back to zero
1497 comb += r1_next_cycle.eq(1)
1498
1499 # Main state machine
1500 with m.Switch(r1.state):
1501
1502 with m.Case(State.IDLE):
1503 sync += r1.wb.adr.eq(req.real_addr[self.ROW_OFF_BITS:])
1504 sync += r1.wb.sel.eq(req.byte_sel)
1505 sync += r1.wb.dat.eq(req.data)
1506 sync += r1.dcbz.eq(req.dcbz)
1507
1508 # Keep track of our index and way
1509 # for subsequent stores.
1510 sync += r1.store_index.eq(req_idx)
1511 sync += r1.store_row.eq(req_row)
1512 sync += r1.end_row_ix.eq(self.get_row_of_line(req_row)-1)
1513 sync += r1.reload_tag.eq(req_tag)
1514 sync += r1.req.same_tag.eq(1)
1515
1516 with m.If(req.op == Op.OP_STORE_HIT):
1517 sync += r1.store_way.eq(req.hit_way)
1518
1519 #with m.If(r1.dec_acks):
1520 # sync += r1.acks_pending.eq(r1.acks_pending - 1)
1521
1522 # Reset per-row valid bits,
1523 # ready for handling OP_LOAD_MISS
1524 for i in range(self.ROW_PER_LINE):
1525 sync += r1.rows_valid[i].eq(0)
1526
1527 with m.If(req_op != Op.OP_NONE):
1528 sync += Display("cache op %d", req.op)
1529
1530 with m.Switch(req.op):
1531 with m.Case(Op.OP_LOAD_HIT):
1532 # stay in IDLE state
1533 pass
1534
1535 with m.Case(Op.OP_LOAD_MISS):
1536 sync += Display("cache miss real addr: %x " \
1537 "idx: %x tag: %x",
1538 req.real_addr, req_row, req_tag)
1539
1540 # Start the wishbone cycle
1541 sync += r1.wb.we.eq(0)
1542 sync += r1.wb.cyc.eq(1)
1543 sync += r1.wb.stb.eq(1)
1544
1545 # Track that we had one request sent
1546 sync += r1.state.eq(State.RELOAD_WAIT_ACK)
1547 sync += r1.write_tag.eq(1)
1548
1549 with m.Case(Op.OP_LOAD_NC):
1550 sync += r1.wb.cyc.eq(1)
1551 sync += r1.wb.stb.eq(1)
1552 sync += r1.wb.we.eq(0)
1553 sync += r1.state.eq(State.NC_LOAD_WAIT_ACK)
1554
1555 with m.Case(Op.OP_STORE_HIT, Op.OP_STORE_MISS):
1556 with m.If(~req.dcbz):
1557 sync += r1.state.eq(State.STORE_WAIT_ACK)
1558 sync += r1.acks_pending.eq(1)
1559 sync += r1.full.eq(0)
1560 comb += r1_next_cycle.eq(0)
1561 sync += r1.slow_valid.eq(1)
1562
1563 with m.If(req.mmu_req):
1564 sync += r1.mmu_done.eq(1)
1565 with m.Else():
1566 sync += r1.ls_valid.eq(1)
1567
1568 with m.If(req.op == Op.OP_STORE_HIT):
1569 sync += r1.write_bram.eq(1)
1570 with m.Else():
1571 # dcbz is handled much like a load miss except
1572 # that we are writing to memory instead of reading
1573 sync += r1.state.eq(State.RELOAD_WAIT_ACK)
1574
1575 with m.If(req.op == Op.OP_STORE_MISS):
1576 sync += r1.write_tag.eq(1)
1577
1578 sync += r1.wb.we.eq(1)
1579 sync += r1.wb.cyc.eq(1)
1580 sync += r1.wb.stb.eq(1)
1581
1582 # OP_NONE and OP_BAD do nothing
1583 # OP_BAD & OP_STCX_FAIL were
1584 # handled above already
1585 with m.Case(Op.OP_NONE):
1586 pass
1587 with m.Case(Op.OP_BAD):
1588 pass
1589 with m.Case(Op.OP_STCX_FAIL):
1590 pass
1591
1592 with m.Case(State.RELOAD_WAIT_ACK):
1593
1594 # If we are still sending requests, was one accepted?
1595 with m.If((~bus.stall) & r1.wb.stb):
1596 # That was the last word? We are done sending. Clear stb
1597                     # sigh - reconstruct wb adr with ROW_OFF_BITS (3) extra 0s at front
1598 wb_adr = Cat(Const(0, self.ROW_OFF_BITS), r1.wb.adr)
1599 with m.If(self.is_last_row_addr(wb_adr, r1.end_row_ix)):
1600 sync += r1.wb.stb.eq(0)
1601
1602 # Calculate the next row address in the current cache line
1603 rlen = self.LINE_OFF_BITS-self.ROW_OFF_BITS
1604 row = Signal(rlen)
1605 comb += row.eq(r1.wb.adr)
1606 sync += r1.wb.adr[:rlen].eq(row+1)
1607
1608 # Incoming acks processing
1609 sync += r1.forward_valid1.eq(bus.ack)
1610 with m.If(bus.ack):
1611 srow = Signal(self.ROW_LINE_BITS)
1612 comb += srow.eq(r1.store_row)
1613 sync += r1.rows_valid[srow].eq(1)
1614
1615 # If this is the data we were looking for,
1616 # we can complete the request next cycle.
1617 # Compare the whole address in case the
1618 # request in r1.req is not the one that
1619 # started this refill.
1620 rowmatch = Signal()
1621 lastrow = Signal()
1622 comb += rowmatch.eq(r1.store_row ==
1623 self.get_row(r1.req.real_addr))
1624 comb += lastrow.eq(self.is_last_row(r1.store_row,
1625 r1.end_row_ix))
1626 with m.If(r1.full & r1.req.same_tag &
1627 ((r1.dcbz & req.dcbz) |
1628 (r1.req.op == Op.OP_LOAD_MISS)) & rowmatch):
1629 sync += r1.full.eq(r1_next_cycle)
1630 sync += r1.slow_valid.eq(1)
1631 with m.If(r1.mmu_req):
1632 sync += r1.mmu_done.eq(1)
1633 with m.Else():
1634 sync += r1.ls_valid.eq(1)
1635 sync += r1.forward_sel.eq(~0) # all 1s
1636 sync += r1.use_forward1.eq(1)
1637
1638 # Check for completion
1639 with m.If(lastrow):
1640 # Complete wishbone cycle
1641 sync += r1.wb.cyc.eq(0)
1642
1643 # Cache line is now valid
1644 cv = Signal(self.INDEX_BITS)
1645 comb += cv.eq(cache_valids[r1.store_index])
1646 comb += cv.bit_select(r1.store_way, 1).eq(1)
1647 sync += cache_valids[r1.store_index].eq(cv)
1648
1649 sync += r1.state.eq(State.IDLE)
1650 sync += Display("cache valid set %x "
1651 "idx %d way %d",
1652 cv, r1.store_index, r1.store_way)
1653
1654 # Increment store row counter
1655 sync += r1.store_row.eq(self.next_row(r1.store_row))
1656
1657 with m.Case(State.STORE_WAIT_ACK):
1658 st_stbs_done = Signal()
1659 adjust_acks = Signal(3)
1660
1661 comb += st_stbs_done.eq(~r1.wb.stb)
1662
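# net adjustment of the outstanding-ack counter: inc_acks / dec_acks
# record that a new stb went out or an ack came back; if both (or
# neither) happened, the count stays the same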
1663 with m.If(r1.inc_acks != r1.dec_acks):
1664 with m.If(r1.inc_acks):
1665 comb += adjust_acks.eq(r1.acks_pending + 1)
1666 with m.Else():
1667 comb += adjust_acks.eq(r1.acks_pending - 1)
1668 with m.Else():
1669 comb += adjust_acks.eq(r1.acks_pending)
1670
1671 sync += r1.acks_pending.eq(adjust_acks)
1672
1673 # Clear stb when slave accepted request
1674 with m.If(~bus.stall):
1675 # See if there is another store waiting
1676 # to be done which is in the same real page.
1677                         # (this is when req.same_tag is true)
1678 with m.If(req.valid):
1679 _ra = req.real_addr[self.ROW_OFF_BITS:
1680 self.SET_SIZE_BITS]
1681 alen = self.SET_SIZE_BITS-self.ROW_OFF_BITS
1682 sync += r1.wb.adr[0:alen].eq(_ra)
1683 sync += r1.wb.dat.eq(req.data)
1684 sync += r1.wb.sel.eq(req.byte_sel)
1685
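# the ack counter is only 3 bits wide (see adjust_acks above), so stop
# issuing further stbs once it would reach 7 to avoid overflowing the
# count of outstanding acks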
1686 with m.If((adjust_acks < 7) & req.same_tag &
1687 ((req.op == Op.OP_STORE_MISS) |
1688 (req.op == Op.OP_STORE_HIT))):
1689 sync += r1.wb.stb.eq(1)
1690 comb += st_stbs_done.eq(0)
1691 sync += r1.store_way.eq(req.hit_way)
1692 sync += r1.store_row.eq(self.get_row(req.real_addr))
1693
1694 with m.If(req.op == Op.OP_STORE_HIT):
1695 sync += r1.write_bram.eq(1)
1696 sync += r1.full.eq(r1_next_cycle)
1697 sync += r1.slow_valid.eq(1)
1698
1699 # Store requests never come from the MMU
1700 sync += r1.ls_valid.eq(1)
1701 comb += st_stbs_done.eq(0)
1702 sync += r1.inc_acks.eq(1)
1703 with m.Else():
1704 sync += r1.wb.stb.eq(0)
1705 comb += st_stbs_done.eq(1)
1706
1707 # Got ack ? See if complete.
1708 sync += Display("got ack %d %d stbs %d adjust_acks %d",
1709 bus.ack, bus.ack, st_stbs_done, adjust_acks)
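# the store burst is finished when no stb is left outstanding and
# this ack is the last one we were waiting for (adjust_acks == 1)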
1710 with m.If(bus.ack):
1711 with m.If(st_stbs_done & (adjust_acks == 1)):
1712 sync += r1.state.eq(State.IDLE)
1713 sync += r1.wb.cyc.eq(0)
1714 sync += r1.wb.stb.eq(0)
1715 sync += r1.dec_acks.eq(1)
1716
1717 with m.Case(State.NC_LOAD_WAIT_ACK):
1718 # Clear stb when slave accepted request
1719 with m.If(~bus.stall):
1720 sync += r1.wb.stb.eq(0)
1721
1722 # Got ack ? complete.
1723 with m.If(bus.ack):
1724 sync += r1.state.eq(State.IDLE)
1725 sync += r1.full.eq(r1_next_cycle)
1726 sync += r1.slow_valid.eq(1)
1727
1728 with m.If(r1.mmu_req):
1729 sync += r1.mmu_done.eq(1)
1730 with m.Else():
1731 sync += r1.ls_valid.eq(1)
1732
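# return the non-cacheable data via the forwarding path (all byte
# lanes selected) rather than reading it back out of the cache RAM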
1733 sync += r1.forward_sel.eq(~0) # all 1s
1734 sync += r1.use_forward1.eq(1)
1735 sync += r1.wb.cyc.eq(0)
1736 sync += r1.wb.stb.eq(0)
1737
1738 def dcache_log(self, m, r1, valid_ra, tlb_hit, req_op, stall_out):
1739
1740 sync = m.d.sync
1741 d_out, bus, log_out = self.d_out, self.bus, self.log_out
1742
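# pack a snapshot of FSM, TLB and wishbone state into log_out,
# intended for the LOG_LENGTH-based log data collection; note this
# helper is currently unused - the call in elaborate() is commented out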
1743 sync += log_out.eq(Cat(r1.state[:3], valid_ra, tlb_hit.way[:3],
1744 stall_out, req_op[:3], d_out.valid, d_out.error,
1745 r1.wb.cyc, r1.wb.stb, bus.ack, bus.stall,
1746 r1.real_adr[3:6]))
1747
1748 def elaborate(self, platform):
1749
1750 m = Module()
1751 comb, sync = m.d.comb, m.d.sync
1752 m_in, d_in = self.m_in, self.d_in
1753
1754 # Storage. Hopefully "cache_rows" is a BRAM, the rest is LUTs
1755 cache_valids = self.CacheValidsArray()
1756 cache_tag_set = Signal(self.TAG_RAM_WIDTH)
1757
1758 self.tagmem = Memory(depth=self.NUM_LINES, width=self.TAG_RAM_WIDTH,
1759 attrs={'syn_ramstyle': "block_ram"})
1760
1761 """note: the attrs dict above is passed through to nmigen.hdl.Memory as
1762    synthesis attributes; syn_ramstyle "block_ram" hints the synthesis
1763    tool to place the tag memory in a block RAM."""
1764 # TODO attribute ram_style of
1765 # dtlb_tags : signal is "distributed";
1766 # TODO attribute ram_style of
1767 # dtlb_ptes : signal is "distributed";
1768
1769 r0 = RegStage0("r0")
1770 r0_full = Signal()
1771
1772 r1 = RegStage1(self, "r1")
1773
1774 reservation = Reservation(self, "rsrv")
1775
1776 # Async signals on incoming request
1777 req_index = Signal(self.INDEX_BITS)
1778 req_row = Signal(self.ROW_BITS)
1779 req_hit_way = Signal(self.WAY_BITS)
1780 req_tag = Signal(self.TAG_BITS)
1781 req_op = Signal(Op)
1782 req_data = Signal(64)
1783 req_same_tag = Signal()
1784 req_go = Signal()
1785
1786 early_req_row = Signal(self.ROW_BITS)
1787
1788 cancel_store = Signal()
1789 set_rsrv = Signal()
1790 clear_rsrv = Signal()
1791
1792 r0_valid = Signal()
1793 r0_stall = Signal()
1794
1795 use_forward1_next = Signal()
1796 use_forward2_next = Signal()
1797
1798 cache_out_row = Signal(WB_DATA_BITS)
1799
1800 plru_victim = Signal(self.WAY_BITS)
1801 replace_way = Signal(self.WAY_BITS)
1802
1803 # Wishbone read/write/cache write formatting signals
1804 bus_sel = Signal(8)
1805
1806 # TLB signals
1807 tlb_way = self.TLBRecord("tlb_way")
1808 tlb_req_index = Signal(self.TLB_SET_BITS)
1809 tlb_hit = self.TLBHit("tlb_hit")
1810 pte = Signal(self.TLB_PTE_BITS)
1811 ra = Signal(self.REAL_ADDR_BITS)
1812 valid_ra = Signal()
1813 perm_attr = PermAttr("dc_perms")
1814 rc_ok = Signal()
1815 perm_ok = Signal()
1816 access_ok = Signal()
1817
1818 tlb_plru_victim = Signal(self.TLB_WAY_BITS)
1819
1820 # we don't yet handle collisions between loadstore1 requests
1821 # and MMU requests
1822 comb += self.m_out.stall.eq(0)
1823
1824 # Hold off the request in r0 when r1 has an uncompleted request
1825 comb += r0_stall.eq(r0_full & (r1.full | d_in.hold))
1826 comb += r0_valid.eq(r0_full & ~r1.full & ~d_in.hold)
1827 comb += self.stall_out.eq(r0_stall)
1828 # debugging: record whether a stall was ever requested (harmless in
1829 # itself), and whether a request arrived while stall was asserted (bad).
1830 with m.If(r0_stall):
1831 sync += self.any_stall_out.eq(1)
1832 with m.If(d_in.valid):
1833 sync += self.dreq_when_stall.eq(1)
1834 with m.If(m_in.valid):
1835 sync += self.mreq_when_stall.eq(1)
1836
1837 # deal with litex not doing wishbone pipeline mode.
1838 # XXX this is done the wrong way: FIFOs are needed in the SRAM test
1839 # so that stb/ack match up.  the same workaround is used in icache.py
1840 if not self.microwatt_compat:
1841 comb += self.bus.stall.eq(self.bus.cyc & ~self.bus.ack)
1842
1843 # Wire up wishbone request latch out of stage 1
1844 comb += self.bus.we.eq(r1.wb.we)
1845 comb += self.bus.adr.eq(r1.wb.adr)
1846 comb += self.bus.sel.eq(r1.wb.sel)
1847 comb += self.bus.stb.eq(r1.wb.stb)
1848 comb += self.bus.dat_w.eq(r1.wb.dat)
1849 comb += self.bus.cyc.eq(r1.wb.cyc)
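# (bus.ack and bus.stall feed straight back into the dcache_slow
# state machine above)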
1850
1851 # create submodule TLBUpdate
1852 m.submodules.dtlb_update = self.dtlb_update = DTLBUpdate(self)
1853
1854 # call sub-functions putting everything together, using shared
1855 # signals established above
1856 self.stage_0(m, r0, r1, r0_full)
1857 self.tlb_read(m, r0_stall, tlb_way)
1858 self.tlb_search(m, tlb_req_index, r0, r0_valid,
1859 tlb_way,
1860 pte, tlb_hit, valid_ra, perm_attr, ra)
1861 self.tlb_update(m, r0_valid, r0, tlb_req_index,
1862 tlb_hit, tlb_plru_victim)
1863 self.maybe_plrus(m, r1, plru_victim)
1864 self.maybe_tlb_plrus(m, r1, tlb_plru_victim, tlb_req_index)
1865 self.cache_tag_read(m, r0_stall, req_index, cache_tag_set)
1866 self.dcache_request(m, r0, ra, req_index, req_row, req_tag,
1867 r0_valid, r1, cache_valids, replace_way,
1868 use_forward1_next, use_forward2_next,
1869 req_hit_way, plru_victim, rc_ok, perm_attr,
1870 valid_ra, perm_ok, access_ok, req_op, req_go,
1871 tlb_hit, tlb_way, cache_tag_set,
1872 cancel_store, req_same_tag, r0_stall, early_req_row)
1873 self.reservation_comb(m, cancel_store, set_rsrv, clear_rsrv,
1874 r0_valid, r0, reservation)
1875 self.reservation_reg(m, r0_valid, access_ok, set_rsrv, clear_rsrv,
1876 reservation, r0)
1877 self.writeback_control(m, r1, cache_out_row)
1878 self.rams(m, r1, early_req_row, cache_out_row, replace_way)
1879 self.dcache_fast_hit(m, req_op, r0_valid, r0, r1,
1880 req_hit_way, req_index, req_tag, access_ok,
1881 tlb_hit, tlb_req_index)
1882 self.dcache_slow(m, r1, use_forward1_next, use_forward2_next,
1883 r0, replace_way,
1884 req_hit_way, req_same_tag,
1885 r0_valid, req_op, cache_valids, req_go, ra)
1886 #self.dcache_log(m, r1, valid_ra, tlb_hit, req_op, self.stall_out)
1887
1888 return m
1889
1890
1891 if __name__ == '__main__':
1892 dut = DCache()
1893 vl = rtlil.convert(dut, ports=[])
1894 with open("test_dcache.il", "w") as f:
1895 f.write(vl)