#!/usr/bin/env python3
#
# Copyright (C) 2020,2021 Luke Kenneth Casson Leighton <lkcl@lkcl.net>
# Copyright (C) 2020 Cole Poirier
# Copyright (C) 2020,2021 Cesar Strauss
# Copyright (C) 2021 Tobias Platen
#
# Original dcache.vhdl Copyright of its authors and licensed
# by IBM under CC-BY 4.0
# https://github.com/antonblanchard/microwatt
#
# Conversion to nmigen funded by NLnet and NGI POINTER under EU Grants
# 871528 and 957073, under the LGPL-v3+ License

"""DCache

based on Anton Blanchard microwatt dcache.vhdl

note that the microwatt dcache wishbone interface expects "stall".
for simplicity at the moment this is hard-coded to cyc & ~ack.
see WB4 spec, p84, section 5.2.1

IMPORTANT: for store, the data is sampled the cycle AFTER the "valid"
is raised.  sigh

Links:

* https://libre-soc.org/3d_gpu/architecture/set_associative_cache.jpg
* https://bugs.libre-soc.org/show_bug.cgi?id=469
* https://libre-soc.org/irclog-microwatt/%23microwatt.2021-12-07.log.html
  (discussion about brams for ECP5)

"""

import sys

from nmutil.gtkw import write_gtkw

sys.setrecursionlimit(1000000)

from enum import Enum, unique

from nmigen import (Module, Signal, Elaboratable, Cat, Repl, Array, Const,
                    Record, Memory)
from nmutil.util import Display
from nmigen.lib.coding import Decoder

from copy import deepcopy
from random import randint, seed

from nmigen_soc.wishbone.bus import Interface

from nmigen.cli import main
from nmutil.iocontrol import RecordObject
from nmigen.utils import log2_int
from soc.experiment.mem_types import (LoadStore1ToDCacheType,
                                      DCacheToLoadStore1Type,
                                      MMUToDCacheType,
                                      DCacheToMMUType)

from soc.experiment.wb_types import (WB_ADDR_BITS, WB_DATA_BITS, WB_SEL_BITS,
                                     WBAddrType, WBDataType, WBSelType,
                                     WBMasterOut, WBSlaveOut,
                                     WBMasterOutVector, WBSlaveOutVector,
                                     WBIOMasterOut, WBIOSlaveOut)

from soc.experiment.cache_ram import CacheRam
from soc.experiment.plru import PLRU, PLRUs
#from nmutil.plru import PLRU, PLRUs

# for test
from soc.bus.sram import SRAM
from nmigen import Memory
from nmigen.cli import rtlil

# NOTE: to use cxxsim, export NMIGEN_SIM_MODE=cxxsim from the shell
# Also, check out the cxxsim nmigen branch, and latest yosys from git
from nmutil.sim_tmp_alternative import Simulator

from nmutil.util import wrap

# TODO: make these parameters of DCache at some point
LINE_SIZE = 64    # Line size in bytes
NUM_LINES = 64    # Number of lines in a set
NUM_WAYS = 2      # Number of ways
TLB_SET_SIZE = 64 # L1 DTLB entries per way (i.e. number of TLB sets)
TLB_NUM_WAYS = 2  # L1 DTLB number of ways
TLB_LG_PGSZ = 12  # L1 DTLB log_2(page_size)
LOG_LENGTH = 0    # Non-zero to enable log data collection

# BRAM organisation: We never access more than WB_DATA_BITS
# at a time so to save resources we make the array only that
# wide, and use consecutive indices to make a cache "line"
#
# ROW_SIZE is the width in bytes of the BRAM (based on WB, so 64-bits)
ROW_SIZE = WB_DATA_BITS // 8

# ROW_PER_LINE is the number of rows (wishbone
# transactions) in a line
ROW_PER_LINE = LINE_SIZE // ROW_SIZE

# BRAM_ROWS is the number of rows in BRAM needed
# to represent the full dcache
BRAM_ROWS = NUM_LINES * ROW_PER_LINE

print ("ROW_SIZE", ROW_SIZE)
print ("ROW_PER_LINE", ROW_PER_LINE)
print ("BRAM_ROWS", BRAM_ROWS)
print ("NUM_WAYS", NUM_WAYS)
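
# Worked example with the defaults above (LINE_SIZE=64, NUM_LINES=64,
# WB_DATA_BITS=64):
#   ROW_SIZE     = 64 // 8 = 8    bytes per BRAM row (one wishbone beat)
#   ROW_PER_LINE = 64 // 8 = 8    wishbone beats per cache line
#   BRAM_ROWS    = 64 * 8  = 512  BRAM rows to hold the full dcache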

# Bit fields counts in the address

# REAL_ADDR_BITS is the number of real address
# bits that we store
REAL_ADDR_BITS = 56

# ROW_BITS is the number of bits to select a row
ROW_BITS = log2_int(BRAM_ROWS)

# ROW_LINE_BITS is the number of bits to select
# a row within a line
ROW_LINE_BITS = log2_int(ROW_PER_LINE)

# LINE_OFF_BITS is the number of bits for
# the offset in a cache line
LINE_OFF_BITS = log2_int(LINE_SIZE)

# ROW_OFF_BITS is the number of bits for
# the offset in a row
ROW_OFF_BITS = log2_int(ROW_SIZE)

# INDEX_BITS is the number of bits to
# select a cache line
INDEX_BITS = log2_int(NUM_LINES)

# SET_SIZE_BITS is the log base 2 of the set size
SET_SIZE_BITS = LINE_OFF_BITS + INDEX_BITS

# TAG_BITS is the number of bits of
# the tag part of the address
TAG_BITS = REAL_ADDR_BITS - SET_SIZE_BITS

# TAG_WIDTH is the width in bits of each way of the tag RAM
# (TAG_BITS rounded up to a whole number of bytes)
TAG_WIDTH = TAG_BITS + 7 - ((TAG_BITS + 7) % 8)

# WAY_BITS is the number of bits to select a way
WAY_BITS = log2_int(NUM_WAYS)
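
# With the defaults that gives: ROW_BITS=9, ROW_LINE_BITS=3,
# LINE_OFF_BITS=6, ROW_OFF_BITS=3, INDEX_BITS=6, SET_SIZE_BITS=12,
# TAG_BITS=44 and TAG_WIDTH=48 (44 rounded up to a byte multiple),
# WAY_BITS=1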

# Example of layout with the geometry constants above:
layout = f"""\
  DCache Layout:
 |.. -----------------------| REAL_ADDR_BITS ({REAL_ADDR_BITS})
  ..         |--------------| SET_SIZE_BITS ({SET_SIZE_BITS})
  ..  tag    |index| line   |
  ..         | row |        |
  ..         |     |---|    | ROW_LINE_BITS ({ROW_LINE_BITS})
  ..         |     |--- - --| LINE_OFF_BITS ({LINE_OFF_BITS})
  ..         |         |- --| ROW_OFF_BITS ({ROW_OFF_BITS})
  ..         |----- ---|    | ROW_BITS ({ROW_BITS})
  ..         |-----|        | INDEX_BITS ({INDEX_BITS})
  ..  --------|             | TAG_BITS ({TAG_BITS})
"""
print (layout)
print ("Dcache TAG %d IDX %d ROW_BITS %d ROFF %d LOFF %d RLB %d" % \
            (TAG_BITS, INDEX_BITS, ROW_BITS,
             ROW_OFF_BITS, LINE_OFF_BITS, ROW_LINE_BITS))
print ("index @: %d-%d" % (LINE_OFF_BITS, SET_SIZE_BITS))
print ("row @: %d-%d" % (ROW_OFF_BITS, SET_SIZE_BITS))
print ("tag @: %d-%d width %d" % (SET_SIZE_BITS, REAL_ADDR_BITS, TAG_WIDTH))

TAG_RAM_WIDTH = TAG_WIDTH * NUM_WAYS

print ("TAG_RAM_WIDTH", TAG_RAM_WIDTH)
print ("    TAG_WIDTH", TAG_WIDTH)
print ("     NUM_WAYS", NUM_WAYS)
print ("    NUM_LINES", NUM_LINES)


def CacheTag(name=None):
    tag_layout = [('valid', NUM_WAYS),
                  ('tag', TAG_RAM_WIDTH),
                 ]
    return Record(tag_layout, name=name)


def CacheTagArray():
    return Array(CacheTag(name="tag%d" % x) for x in range(NUM_LINES))


def RowPerLineValidArray():
    return Array(Signal(name="rows_valid%d" % x) \
                        for x in range(ROW_PER_LINE))


# L1 TLB
TLB_SET_BITS = log2_int(TLB_SET_SIZE)
TLB_WAY_BITS = log2_int(TLB_NUM_WAYS)
TLB_EA_TAG_BITS = 64 - (TLB_LG_PGSZ + TLB_SET_BITS)
TLB_TAG_WAY_BITS = TLB_NUM_WAYS * TLB_EA_TAG_BITS
TLB_PTE_BITS = 64
TLB_PTE_WAY_BITS = TLB_NUM_WAYS * TLB_PTE_BITS
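
# With the defaults: TLB_SET_BITS=6, TLB_WAY_BITS=1 and
# TLB_EA_TAG_BITS = 64 - (12 + 6) = 46, so one TLB tag row packs
# 2 x 46 = 92 bits and one PTE row packs 2 x 64 = 128 bits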

def ispow2(x):
    return (1<<log2_int(x, False)) == x
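# e.g. ispow2(64) -> True, ispow2(48) -> False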

assert (LINE_SIZE % ROW_SIZE) == 0, "LINE_SIZE not multiple of ROW_SIZE"
assert ispow2(LINE_SIZE), "LINE_SIZE not power of 2"
assert ispow2(NUM_LINES), "NUM_LINES not power of 2"
assert ispow2(ROW_PER_LINE), "ROW_PER_LINE not power of 2"
assert ROW_BITS == (INDEX_BITS + ROW_LINE_BITS), "geometry bits don't add up"
assert (LINE_OFF_BITS == ROW_OFF_BITS + ROW_LINE_BITS), \
        "geometry bits don't add up"
assert REAL_ADDR_BITS == (TAG_BITS + INDEX_BITS + LINE_OFF_BITS), \
        "geometry bits don't add up"
assert REAL_ADDR_BITS == (TAG_BITS + ROW_BITS + ROW_OFF_BITS), \
        "geometry bits don't add up"
assert 64 == WB_DATA_BITS, "Can't yet handle wb width that isn't 64-bits"
assert SET_SIZE_BITS <= TLB_LG_PGSZ, "Set indexed by virtual address"


def TLBHit(name):
    return Record([('valid', 1),
                   ('way', TLB_WAY_BITS)], name=name)

def TLBTagEAArray():
    return Array(Signal(TLB_EA_TAG_BITS, name="tlbtagea%d" % x) \
                 for x in range (TLB_NUM_WAYS))

def TLBRecord(name):
    tlb_layout = [('valid', TLB_NUM_WAYS),
                  ('tag', TLB_TAG_WAY_BITS),
                  ('pte', TLB_PTE_WAY_BITS)
                 ]
    return Record(tlb_layout, name=name)

def TLBValidArray():
    return Array(Signal(TLB_NUM_WAYS, name="tlb_valid%d" % x)
                 for x in range(TLB_SET_SIZE))

def HitWaySet():
    return Array(Signal(WAY_BITS, name="hitway_%d" % x) \
                 for x in range(TLB_NUM_WAYS))

# Cache RAM interface
def CacheRamOut():
    return Array(Signal(WB_DATA_BITS, name="cache_out%d" % x) \
                 for x in range(NUM_WAYS))

# PLRU output interface
def PLRUOut():
    return Array(Signal(WAY_BITS, name="plru_out%d" % x) \
                 for x in range(NUM_LINES))

# TLB PLRU output interface
def TLBPLRUOut():
    return Array(Signal(TLB_WAY_BITS, name="tlbplru_out%d" % x) \
                 for x in range(TLB_SET_SIZE))

# Helper functions to decode incoming requests
#
# Return the cache line index (tag index) for an address
def get_index(addr):
    return addr[LINE_OFF_BITS:SET_SIZE_BITS]

# Return the cache row index (data memory) for an address
def get_row(addr):
    return addr[ROW_OFF_BITS:SET_SIZE_BITS]

# Return the index of a row within a line
def get_row_of_line(row):
    return row[:ROW_BITS][:ROW_LINE_BITS]

# Returns whether this is the last row of a line
def is_last_row_addr(addr, last):
    return addr[ROW_OFF_BITS:LINE_OFF_BITS] == last

# Returns whether this is the last row of a line
def is_last_row(row, last):
    return get_row_of_line(row) == last

# Return the next row in the current cache line. We use a
# dedicated function in order to limit the size of the
# generated adder to be only the bits within a cache line
# (3 bits with default settings)
def next_row(row):
    row_v = row[0:ROW_LINE_BITS] + 1
    return Cat(row_v[:ROW_LINE_BITS], row[ROW_LINE_BITS:])
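
# e.g. with ROW_LINE_BITS=3, next_row(0x10f) is 0x108: the bottom three
# bits wrap from 7 back to 0 and the upper (index) bits stay unchanged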

# Get the tag value from the address
def get_tag(addr):
    return addr[SET_SIZE_BITS:REAL_ADDR_BITS]

# Read a tag from a tag memory row
def read_tag(way, tagset):
    return tagset.word_select(way, TAG_WIDTH)[:TAG_BITS]

# Read a TLB tag from a TLB tag memory row
def read_tlb_tag(way, tags):
    return tags.word_select(way, TLB_EA_TAG_BITS)

# Write a TLB tag to a TLB tag memory row
def write_tlb_tag(way, tags, tag):
    return read_tlb_tag(way, tags).eq(tag)

# Read a PTE from a TLB PTE memory row
def read_tlb_pte(way, ptes):
    return ptes.word_select(way, TLB_PTE_BITS)

# Write a PTE to a TLB PTE memory row
def write_tlb_pte(way, ptes, newpte):
    return read_tlb_pte(way, ptes).eq(newpte)
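
# Worked example of the decode helpers with the default geometry,
# for real address 0x12345678:
#   get_index -> addr[6:12]  = 0x19    (cache line 25)
#   get_row   -> addr[3:12]  = 0xcf    (BRAM row within a way)
#   get_tag   -> addr[12:56] = 0x12345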


# Record for storing permission, attribute, etc. bits from a PTE
class PermAttr(RecordObject):
    def __init__(self, name=None):
        super().__init__(name=name)
        self.reference = Signal()
        self.changed = Signal()
        self.nocache = Signal()
        self.priv = Signal()
        self.rd_perm = Signal()
        self.wr_perm = Signal()


def extract_perm_attr(pte):
    # note: currently a stub - the pte argument is not yet decoded here
    pa = PermAttr()
    return pa


# Type of operation on a "valid" input
@unique
class Op(Enum):
    OP_NONE = 0
    OP_BAD = 1           # NC cache hit, TLB miss, prot/RC failure
    OP_STCX_FAIL = 2     # conditional store w/o reservation
    OP_LOAD_HIT = 3      # Cache hit on load
    OP_LOAD_MISS = 4     # Load missing cache
    OP_LOAD_NC = 5       # Non-cachable load
    OP_STORE_HIT = 6     # Store hitting cache
    OP_STORE_MISS = 7    # Store missing cache


# Cache state machine
@unique
class State(Enum):
    IDLE = 0             # Normal load hit processing
    RELOAD_WAIT_ACK = 1  # Cache reload wait ack
    STORE_WAIT_ACK = 2   # Store wait ack
    NC_LOAD_WAIT_ACK = 3 # Non-cachable load wait ack


# Dcache operations:
#
# In order to make timing, we use the BRAMs with
# an output buffer, which means that the BRAM
# output is delayed by an extra cycle.
#
# Thus, the dcache has a 2-stage internal pipeline
# for cache hits with no stalls.
#
# All other operations are handled via stalling
# in the first stage.
#
# The second stage can thus complete a hit at the same
# time as the first stage emits a stall for a complex op.
#
# Stage 0 register, basically contains just the latched request

class RegStage0(RecordObject):
    def __init__(self, name=None):
        super().__init__(name=name)
        self.req = LoadStore1ToDCacheType(name="lsmem")
        self.tlbie = Signal() # indicates a tlbie request (from MMU)
        self.doall = Signal() # with tlbie, indicates flush whole TLB
        self.tlbld = Signal() # indicates a TLB load request (from MMU)
        self.mmu_req = Signal() # indicates source of request
        self.d_valid = Signal() # indicates req.data is valid now


class MemAccessRequest(RecordObject):
    def __init__(self, name=None):
        super().__init__(name=name)
        self.op = Signal(Op)
        self.valid = Signal()
        self.dcbz = Signal()
        self.real_addr = Signal(REAL_ADDR_BITS)
        self.data = Signal(64)
        self.byte_sel = Signal(8)
        self.hit_way = Signal(WAY_BITS)
        self.same_tag = Signal()
        self.mmu_req = Signal()


# First stage register, contains state for stage 1 of load hits
# and for the state machine used by all other operations
class RegStage1(RecordObject):
    def __init__(self, name=None):
        super().__init__(name=name)
        # Info about the request
        self.full = Signal() # have uncompleted request
        self.mmu_req = Signal() # request is from MMU
        self.req = MemAccessRequest(name="reqmem")

        # Cache hit state
        self.hit_way = Signal(WAY_BITS)
        self.hit_load_valid = Signal()
        self.hit_index = Signal(INDEX_BITS)
        self.cache_hit = Signal()

        # TLB hit state
        self.tlb_hit = TLBHit("tlb_hit")
        self.tlb_hit_index = Signal(TLB_SET_BITS)

        # 2-stage data buffer for data forwarded from writes to reads
        self.forward_data1 = Signal(64)
        self.forward_data2 = Signal(64)
        self.forward_sel1 = Signal(8)
        self.forward_valid1 = Signal()
        self.forward_way1 = Signal(WAY_BITS)
        self.forward_row1 = Signal(ROW_BITS)
        self.use_forward1 = Signal()
        self.forward_sel = Signal(8)

        # Cache miss state (reload state machine)
        self.state = Signal(State)
        self.dcbz = Signal()
        self.write_bram = Signal()
        self.write_tag = Signal()
        self.slow_valid = Signal()
        self.wb = WBMasterOut("wb")
        self.reload_tag = Signal(TAG_BITS)
        self.store_way = Signal(WAY_BITS)
        self.store_row = Signal(ROW_BITS)
        self.store_index = Signal(INDEX_BITS)
        self.end_row_ix = Signal(ROW_LINE_BITS)
        self.rows_valid = RowPerLineValidArray()
        self.acks_pending = Signal(3)
        self.inc_acks = Signal()
        self.dec_acks = Signal()

        # Signals to complete (possibly with error)
        self.ls_valid = Signal()
        self.ls_error = Signal()
        self.mmu_done = Signal()
        self.mmu_error = Signal()
        self.cache_paradox = Signal()

        # Signal to complete a failed stcx.
        self.stcx_fail = Signal()


# Reservation information
class Reservation(RecordObject):
    def __init__(self, name=None):
        super().__init__(name=name)
        self.valid = Signal()
        self.addr = Signal(64-LINE_OFF_BITS)
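        # note: the reservation granule is one cache line: only address
        # bits [LINE_OFF_BITS:64] are stored and compared (reservation_comb)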


class DTLBUpdate(Elaboratable):
    def __init__(self):
        self.tlbie = Signal()
        self.tlbwe = Signal()
        self.doall = Signal()
        self.tlb_hit = TLBHit("tlb_hit")
        self.tlb_req_index = Signal(TLB_SET_BITS)

        self.repl_way = Signal(TLB_WAY_BITS)
        self.eatag = Signal(TLB_EA_TAG_BITS)
        self.pte_data = Signal(TLB_PTE_BITS)

        # read from dtlb array
        self.tlb_read = Signal()
        self.tlb_read_index = Signal(TLB_SET_BITS)
        self.tlb_way = TLBRecord("o_tlb_way")

    def elaborate(self, platform):
        m = Module()
        comb = m.d.comb
        sync = m.d.sync

        # there are 3 parts to this:
        # QTY TLB_NUM_WAYs TAGs - of width (say) 46 bits of Effective Address
        # QTY TLB_NUM_WAYs PTEs - of width (say) 64 bits
        # "Valid" bits, one per "way", of QTY TLB_NUM_WAYs.  these cannot
        # be a Memory because they can all be cleared (tlbie, doall), i mean,
        # we _could_, in theory, by overriding the Reset Signal of the Memory,
        # hmmm....
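        # (with the defaults that is 64 sets x 2 ways: 92-bit tag rows,
        #  128-bit PTE rows, plus a 2-bit valid vector per set)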

        dtlb_valid = TLBValidArray()
        tlb_req_index = self.tlb_req_index

        print ("TLB_TAG_WAY_BITS", TLB_TAG_WAY_BITS)
        print (" TLB_EA_TAG_BITS", TLB_EA_TAG_BITS)
        print ("    TLB_NUM_WAYS", TLB_NUM_WAYS)
        print ("TLB_PTE_WAY_BITS", TLB_PTE_WAY_BITS)
        print ("    TLB_PTE_BITS", TLB_PTE_BITS)
        print ("    TLB_NUM_WAYS", TLB_NUM_WAYS)

        # TAG and PTE Memory SRAMs. transparent, write-enables are TLB_NUM_WAYS
        tagway = Memory(depth=TLB_SET_SIZE, width=TLB_TAG_WAY_BITS)
        m.submodules.rd_tagway = rd_tagway = tagway.read_port()
        m.submodules.wr_tagway = wr_tagway = tagway.write_port(
                                    granularity=TLB_EA_TAG_BITS)

        pteway = Memory(depth=TLB_SET_SIZE, width=TLB_PTE_WAY_BITS)
        m.submodules.rd_pteway = rd_pteway = pteway.read_port()
        m.submodules.wr_pteway = wr_pteway = pteway.write_port(
                                    granularity=TLB_PTE_BITS)

        # commented out for now, can be put in if Memory.reset can be
        # used for tlbie&doall to reset the entire Memory to zero in 1 cycle
        #validm = Memory(depth=TLB_SET_SIZE, width=TLB_NUM_WAYS)
        #m.submodules.rd_valid = rd_valid = validm.read_port()
        #m.submodules.wr_valid = wr_valid = validm.write_port(
        #                            granularity=1)

        # connect up read and write addresses to Valid/PTE/TAG SRAMs
        m.d.comb += rd_pteway.addr.eq(self.tlb_read_index)
        m.d.comb += rd_tagway.addr.eq(self.tlb_read_index)
        #m.d.comb += rd_valid.addr.eq(self.tlb_read_index)
        m.d.comb += wr_tagway.addr.eq(tlb_req_index)
        m.d.comb += wr_pteway.addr.eq(tlb_req_index)
        #m.d.comb += wr_valid.addr.eq(tlb_req_index)

        updated = Signal()
        v_updated = Signal()
        tb_out = Signal(TLB_TAG_WAY_BITS) # tlb_way_tags_t
        db_out = Signal(TLB_NUM_WAYS)     # tlb_way_valids_t
        pb_out = Signal(TLB_PTE_WAY_BITS) # tlb_way_ptes_t
        dv = Signal(TLB_NUM_WAYS)         # tlb_way_valids_t

        comb += dv.eq(dtlb_valid[tlb_req_index])
        comb += db_out.eq(dv)

        with m.If(self.tlbie & self.doall):
            # clear all valid bits at once
            # XXX hmmm, validm _could_ use Memory reset here...
            for i in range(TLB_SET_SIZE):
                sync += dtlb_valid[i].eq(0)
        with m.Elif(self.tlbie):
            # invalidate just the hit_way
            with m.If(self.tlb_hit.valid):
                comb += db_out.bit_select(self.tlb_hit.way, 1).eq(0)
                comb += v_updated.eq(1)
        with m.Elif(self.tlbwe):
            # write to the requested tag and PTE
            comb += write_tlb_tag(self.repl_way, tb_out, self.eatag)
            comb += write_tlb_pte(self.repl_way, pb_out, self.pte_data)
            # set valid bit
            comb += db_out.bit_select(self.repl_way, 1).eq(1)

            comb += updated.eq(1)
            comb += v_updated.eq(1)

        # above, sometimes valid is requested to be updated but data not
        # therefore split them out, here.  note the granularity thing matches
        # with the shift-up of the eatag/pte_data into the correct TLB way.
        # thus it is not necessary to write the entire lot, just the portion
        # being altered: hence writing the *old* copy of the row is not needed
        with m.If(updated): # PTE and TAG to be written
            comb += wr_pteway.data.eq(pb_out)
            comb += wr_pteway.en.eq(1<<self.repl_way)
            comb += wr_tagway.data.eq(tb_out)
            comb += wr_tagway.en.eq(1<<self.repl_way)
        with m.If(v_updated): # Valid to be written
            sync += dtlb_valid[tlb_req_index].eq(db_out)
            #comb += wr_valid.data.eq(db_out)
            #comb += wr_valid.en.eq(1<<self.repl_way)

        # select one TLB way, use a register here
        r_delay = Signal()
        sync += r_delay.eq(self.tlb_read)
        # first deal with the valids, which are not in a Memory.
        # tlb way valid is output on a 1 clock delay with sync,
        # but have to explicitly deal with "forwarding" here
        with m.If(self.tlb_read):
            with m.If(v_updated): # write *and* read in same cycle: forward
                sync += self.tlb_way.valid.eq(db_out)
            with m.Else():
                sync += self.tlb_way.valid.eq(dtlb_valid[self.tlb_read_index])
        # now deal with the Memory-read case. the output must remain
        # valid (stable) even when a read-request is not made, but stable
        # on a one-clock delay, hence the register
        r_tlb_way = TLBRecord("r_tlb_way")
        with m.If(r_delay):
            # on one clock delay, capture the contents of the read port(s)
            comb += self.tlb_way.tag.eq(rd_tagway.data)
            comb += self.tlb_way.pte.eq(rd_pteway.data)
            sync += r_tlb_way.tag.eq(rd_tagway.data)
            sync += r_tlb_way.pte.eq(rd_pteway.data)
        with m.Else():
            # ... so that the register can output it when no read is requested
            # it's rather overkill but better to be safe than sorry
            comb += self.tlb_way.tag.eq(r_tlb_way.tag)
            comb += self.tlb_way.pte.eq(r_tlb_way.pte)
            #comb += self.tlb_way.eq(r_tlb_way)

        return m


class DCachePendingHit(Elaboratable):

    def __init__(self, tlb_way,
                 cache_i_validdx, cache_tag_set,
                 req_addr):

        self.go = Signal()
        self.virt_mode = Signal()
        self.is_hit = Signal()
        self.tlb_hit = TLBHit("tlb_hit")
        self.hit_way = Signal(WAY_BITS)
        self.rel_match = Signal()
        self.req_index = Signal(INDEX_BITS)
        self.reload_tag = Signal(TAG_BITS)

        self.tlb_way = tlb_way
        self.cache_i_validdx = cache_i_validdx
        self.cache_tag_set = cache_tag_set
        self.req_addr = req_addr

    def elaborate(self, platform):
        m = Module()
        comb = m.d.comb
        sync = m.d.sync

        go = self.go
        virt_mode = self.virt_mode
        is_hit = self.is_hit
        tlb_way = self.tlb_way
        cache_i_validdx = self.cache_i_validdx
        cache_tag_set = self.cache_tag_set
        req_addr = self.req_addr
        tlb_hit = self.tlb_hit
        hit_way = self.hit_way
        rel_match = self.rel_match
        req_index = self.req_index
        reload_tag = self.reload_tag

        hit_set = Array(Signal(name="hit_set_%d" % i) \
                        for i in range(TLB_NUM_WAYS))
        rel_matches = Array(Signal(name="rel_matches_%d" % i) \
                            for i in range(TLB_NUM_WAYS))
        hit_way_set = HitWaySet()

        # Test if pending request is a hit on any way
        # In order to make timing in virtual mode,
        # when we are using the TLB, we compare each
        # way with each of the real addresses from each way of
        # the TLB, and then decide later which match to use.

        with m.If(virt_mode):
            for j in range(TLB_NUM_WAYS): # tlb_num_way_t
                s_tag = Signal(TAG_BITS, name="s_tag%d" % j)
                s_hit = Signal(name="s_hit%d" % j)
                s_pte = Signal(TLB_PTE_BITS, name="s_pte%d" % j)
                s_ra = Signal(REAL_ADDR_BITS, name="s_ra%d" % j)
                # read the PTE, calc the Real Address, get the tag
                comb += s_pte.eq(read_tlb_pte(j, tlb_way.pte))
                comb += s_ra.eq(Cat(req_addr[0:TLB_LG_PGSZ],
                                    s_pte[TLB_LG_PGSZ:REAL_ADDR_BITS]))
                comb += s_tag.eq(get_tag(s_ra))
                # for each way check the tag against the cache tag set
                for i in range(NUM_WAYS): # way_t
                    is_tag_hit = Signal(name="is_tag_hit_%d_%d" % (j, i))
                    comb += is_tag_hit.eq(go & cache_i_validdx[i] &
                                  (read_tag(i, cache_tag_set) == s_tag)
                                  & (tlb_way.valid[j]))
                    with m.If(is_tag_hit):
                        comb += hit_way_set[j].eq(i)
                        comb += s_hit.eq(1)
                comb += hit_set[j].eq(s_hit)
                comb += rel_matches[j].eq(s_tag == reload_tag)
            with m.If(tlb_hit.valid):
                comb += is_hit.eq(hit_set[tlb_hit.way])
                comb += hit_way.eq(hit_way_set[tlb_hit.way])
                comb += rel_match.eq(rel_matches[tlb_hit.way])
        with m.Else():
            s_tag = Signal(TAG_BITS)
            comb += s_tag.eq(get_tag(req_addr))
            for i in range(NUM_WAYS): # way_t
                is_tag_hit = Signal(name="is_tag_hit_%d" % i)
                comb += is_tag_hit.eq(go & cache_i_validdx[i] &
                          (read_tag(i, cache_tag_set) == s_tag))
                with m.If(is_tag_hit):
                    comb += hit_way.eq(i)
                    comb += is_hit.eq(1)
            with m.If(s_tag == reload_tag):
                comb += rel_match.eq(1)

        return m


class DCache(Elaboratable):
    """Set associative dcache write-through

    TODO (in no specific order):
    * See list in icache.vhdl
    * Complete load misses on the cycle when WB data comes instead of
      at the end of line (this requires dealing with requests coming in
      while not idle...)
    """
    def __init__(self, pspec=None):
        self.d_in = LoadStore1ToDCacheType("d_in")
        self.d_out = DCacheToLoadStore1Type("d_out")

        self.m_in = MMUToDCacheType("m_in")
        self.m_out = DCacheToMMUType("m_out")

        self.stall_out = Signal()
        self.any_stall_out = Signal()
        self.dreq_when_stall = Signal()
        self.mreq_when_stall = Signal()

        # standard naming (wired to non-standard for compatibility)
        self.bus = Interface(addr_width=32,
                             data_width=64,
                             granularity=8,
                             features={'stall'},
                             alignment=0,
                             name="dcache")

        self.log_out = Signal(20)

        # test if microwatt compatibility is to be enabled
        self.microwatt_compat = (hasattr(pspec, "microwatt_compat") and
                                 (pspec.microwatt_compat == True))

    def stage_0(self, m, r0, r1, r0_full):
        """Latch the request in r0.req as long as we're not stalling
        """
        comb = m.d.comb
        sync = m.d.sync
        d_in, d_out, m_in = self.d_in, self.d_out, self.m_in

        r = RegStage0("stage0")

        # TODO, this goes in unit tests and formal proofs
        with m.If(d_in.valid & m_in.valid):
            sync += Display("request collision loadstore vs MMU")

        with m.If(m_in.valid):
            comb += r.req.valid.eq(1)
            comb += r.req.load.eq(~(m_in.tlbie | m_in.tlbld)) # no invalidate
            comb += r.req.dcbz.eq(0)
            comb += r.req.nc.eq(0)
            comb += r.req.reserve.eq(0)
            comb += r.req.virt_mode.eq(0)
            comb += r.req.priv_mode.eq(1)
            comb += r.req.addr.eq(m_in.addr)
            comb += r.req.data.eq(m_in.pte)
            comb += r.req.byte_sel.eq(~0) # Const -1 sets all to 0b111....
            comb += r.tlbie.eq(m_in.tlbie)
            comb += r.doall.eq(m_in.doall)
            comb += r.tlbld.eq(m_in.tlbld)
            comb += r.mmu_req.eq(1)
            comb += r.d_valid.eq(1)
            m.d.sync += Display("    DCACHE req mmu addr %x pte %x ld %d",
                                m_in.addr, m_in.pte, r.req.load)

        with m.Else():
            comb += r.req.eq(d_in)
            comb += r.req.data.eq(0)
            comb += r.tlbie.eq(0)
            comb += r.doall.eq(0)
            comb += r.tlbld.eq(0)
            comb += r.mmu_req.eq(0)
            comb += r.d_valid.eq(0)

        sync += r0_full.eq(0)
        with m.If((~r1.full & ~d_in.hold) | ~r0_full):
            sync += r0.eq(r)
            sync += r0_full.eq(r.req.valid)
        with m.Elif(~r0.d_valid):
            # Sample data the cycle after a request comes in from loadstore1.
            # If another request has come in already then the data will get
            # put directly into req.data below.
            sync += r0.req.data.eq(d_in.data)
            sync += r0.d_valid.eq(1)
        with m.If(d_in.valid):
            m.d.sync += Display("    DCACHE req cache "
                                "virt %d addr %x data %x ld %d",
                                r.req.virt_mode, r.req.addr,
                                r.req.data, r.req.load)
788
789 def tlb_read(self, m, r0_stall, tlb_way):
790 """TLB
791 Operates in the second cycle on the request latched in r0.req.
792 TLB updates write the entry at the end of the second cycle.
793 """
794 comb = m.d.comb
795 sync = m.d.sync
796 m_in, d_in = self.m_in, self.d_in
797
798 addrbits = Signal(TLB_SET_BITS)
799
800 amin = TLB_LG_PGSZ
801 amax = TLB_LG_PGSZ + TLB_SET_BITS
802
803 with m.If(m_in.valid):
804 comb += addrbits.eq(m_in.addr[amin : amax])
805 with m.Else():
806 comb += addrbits.eq(d_in.addr[amin : amax])
807
808 # If we have any op and the previous op isn't finished,
809 # then keep the same output for next cycle.
810 d = self.dtlb_update
811 comb += d.tlb_read_index.eq(addrbits)
812 comb += d.tlb_read.eq(~r0_stall)
813 comb += tlb_way.eq(d.tlb_way)

    def maybe_tlb_plrus(self, m, r1, tlb_plru_victim, tlb_req_index):
        """Generate TLB PLRUs
        """
        comb = m.d.comb
        sync = m.d.sync

        if TLB_NUM_WAYS == 0:
            return

        # suite of PLRUs with a selection and output mechanism
        tlb_plrus = PLRUs(TLB_SET_SIZE, TLB_WAY_BITS)
        m.submodules.tlb_plrus = tlb_plrus
        comb += tlb_plrus.way.eq(r1.tlb_hit.way)
        comb += tlb_plrus.valid.eq(r1.tlb_hit.valid)
        comb += tlb_plrus.index.eq(r1.tlb_hit_index)
        comb += tlb_plrus.isel.eq(tlb_req_index) # select victim
        comb += tlb_plru_victim.eq(tlb_plrus.o_index) # selected victim

    def tlb_search(self, m, tlb_req_index, r0, r0_valid,
                   tlb_way,
                   pte, tlb_hit, valid_ra, perm_attr, ra):

        comb = m.d.comb

        hitway = Signal(TLB_WAY_BITS)
        hit = Signal()
        eatag = Signal(TLB_EA_TAG_BITS)

        TLB_LG_END = TLB_LG_PGSZ + TLB_SET_BITS
        comb += tlb_req_index.eq(r0.req.addr[TLB_LG_PGSZ : TLB_LG_END])
        comb += eatag.eq(r0.req.addr[TLB_LG_END : 64 ])

        for i in range(TLB_NUM_WAYS):
            is_tag_hit = Signal(name="is_tag_hit%d" % i)
            tlb_tag = Signal(TLB_EA_TAG_BITS, name="tlb_tag%d" % i)
            comb += tlb_tag.eq(read_tlb_tag(i, tlb_way.tag))
            comb += is_tag_hit.eq((tlb_way.valid[i]) & (tlb_tag == eatag))
            with m.If(is_tag_hit):
                comb += hitway.eq(i)
                comb += hit.eq(1)

        comb += tlb_hit.valid.eq(hit & r0_valid)
        comb += tlb_hit.way.eq(hitway)

        with m.If(tlb_hit.valid):
            comb += pte.eq(read_tlb_pte(hitway, tlb_way.pte))
        comb += valid_ra.eq(tlb_hit.valid | ~r0.req.virt_mode)

        with m.If(r0.req.virt_mode):
            comb += ra.eq(Cat(Const(0, ROW_OFF_BITS),
                              r0.req.addr[ROW_OFF_BITS:TLB_LG_PGSZ],
                              pte[TLB_LG_PGSZ:REAL_ADDR_BITS]))
            comb += perm_attr.reference.eq(pte[8])
            comb += perm_attr.changed.eq(pte[7])
            comb += perm_attr.nocache.eq(pte[5])
            comb += perm_attr.priv.eq(pte[3])
            comb += perm_attr.rd_perm.eq(pte[2])
            comb += perm_attr.wr_perm.eq(pte[1])
        with m.Else():
            comb += ra.eq(Cat(Const(0, ROW_OFF_BITS),
                              r0.req.addr[ROW_OFF_BITS:REAL_ADDR_BITS]))
            comb += perm_attr.reference.eq(1)
            comb += perm_attr.changed.eq(1)
            comb += perm_attr.nocache.eq(0)
            comb += perm_attr.priv.eq(1)
            comb += perm_attr.rd_perm.eq(1)
            comb += perm_attr.wr_perm.eq(1)

        with m.If(valid_ra):
            m.d.sync += Display("DCACHE virt mode %d hit %d ra %x pte %x",
                                r0.req.virt_mode, tlb_hit.valid, ra, pte)
            m.d.sync += Display("       perm ref=%d", perm_attr.reference)
            m.d.sync += Display("       perm chg=%d", perm_attr.changed)
            m.d.sync += Display("       perm noc=%d", perm_attr.nocache)
            m.d.sync += Display("       perm prv=%d", perm_attr.priv)
            m.d.sync += Display("       perm rdp=%d", perm_attr.rd_perm)
            m.d.sync += Display("       perm wrp=%d", perm_attr.wr_perm)

    def tlb_update(self, m, r0_valid, r0, tlb_req_index,
                   tlb_hit, tlb_plru_victim):

        comb = m.d.comb
        sync = m.d.sync

        tlbie = Signal()
        tlbwe = Signal()

        comb += tlbie.eq(r0_valid & r0.tlbie)
        comb += tlbwe.eq(r0_valid & r0.tlbld)

        d = self.dtlb_update

        comb += d.tlbie.eq(tlbie)
        comb += d.tlbwe.eq(tlbwe)
        comb += d.doall.eq(r0.doall)
        comb += d.tlb_hit.eq(tlb_hit)
        comb += d.tlb_req_index.eq(tlb_req_index)

        with m.If(tlb_hit.valid):
            comb += d.repl_way.eq(tlb_hit.way)
        with m.Else():
            comb += d.repl_way.eq(tlb_plru_victim)
        comb += d.eatag.eq(r0.req.addr[TLB_LG_PGSZ + TLB_SET_BITS:64])
        comb += d.pte_data.eq(r0.req.data)

    def maybe_plrus(self, m, r1, plru_victim):
        """Generate PLRUs
        """
        comb = m.d.comb
        sync = m.d.sync

        if NUM_WAYS == 0: # cache ways, not TLB ways, are relevant here
            return

        # suite of PLRUs with a selection and output mechanism
        m.submodules.plrus = plrus = PLRUs(NUM_LINES, WAY_BITS)
        comb += plrus.way.eq(r1.hit_way)
        comb += plrus.valid.eq(r1.cache_hit)
        comb += plrus.index.eq(r1.hit_index)
        comb += plrus.isel.eq(r1.store_index) # select victim
        comb += plru_victim.eq(plrus.o_index) # selected victim

    def cache_tag_read(self, m, r0_stall, req_index, cache_tag_set,
                       cache_tags):
        """Cache tag RAM read port
        """
        comb = m.d.comb
        sync = m.d.sync
        m_in, d_in = self.m_in, self.d_in

        index = Signal(INDEX_BITS)

        with m.If(r0_stall):
            comb += index.eq(req_index)
        with m.Elif(m_in.valid):
            comb += index.eq(get_index(m_in.addr))
        with m.Else():
            comb += index.eq(get_index(d_in.addr))
        sync += cache_tag_set.eq(cache_tags[index].tag)

    def dcache_request(self, m, r0, ra, req_index, req_row, req_tag,
                       r0_valid, r1, cache_tags, replace_way,
                       use_forward1_next, use_forward2_next,
                       req_hit_way, plru_victim, rc_ok, perm_attr,
                       valid_ra, perm_ok, access_ok, req_op, req_go,
                       tlb_hit, tlb_way, cache_tag_set,
                       cancel_store, req_same_tag, r0_stall, early_req_row):
        """Cache request parsing and hit detection
        """

        comb = m.d.comb
        m_in, d_in = self.m_in, self.d_in

        is_hit = Signal()
        hit_way = Signal(WAY_BITS)
        op = Signal(Op)
        opsel = Signal(3)
        go = Signal()
        nc = Signal()
        cache_i_validdx = Signal(NUM_WAYS)

        # Extract line, row and tag from request
        comb += req_index.eq(get_index(r0.req.addr))
        comb += req_row.eq(get_row(r0.req.addr))
        comb += req_tag.eq(get_tag(ra))

        if False: # display on comb is a bit... busy.
            comb += Display("dcache_req addr:%x ra: %x idx: %x tag: %x row: %x",
                            r0.req.addr, ra, req_index, req_tag, req_row)

        comb += go.eq(r0_valid & ~(r0.tlbie | r0.tlbld) & ~r1.ls_error)
        comb += cache_i_validdx.eq(cache_tags[req_index].valid)

        m.submodules.dcache_pend = dc = DCachePendingHit(tlb_way,
                                            cache_i_validdx, cache_tag_set,
                                            r0.req.addr)
        comb += dc.tlb_hit.eq(tlb_hit)
        comb += dc.reload_tag.eq(r1.reload_tag)
        comb += dc.virt_mode.eq(r0.req.virt_mode)
        comb += dc.go.eq(go)
        comb += dc.req_index.eq(req_index)

        comb += is_hit.eq(dc.is_hit)
        comb += hit_way.eq(dc.hit_way)
        comb += req_same_tag.eq(dc.rel_match)

        # See if the request matches the line currently being reloaded
        with m.If((r1.state == State.RELOAD_WAIT_ACK) &
                  (req_index == r1.store_index) & req_same_tag):
            # For a store, consider this a hit even if the row isn't
            # valid since it will be by the time we perform the store.
            # For a load, check the appropriate row valid bit.
            rrow = Signal(ROW_LINE_BITS)
            comb += rrow.eq(req_row)
            valid = r1.rows_valid[rrow]
            comb += is_hit.eq((~r0.req.load) | valid)
            comb += hit_way.eq(replace_way)

        # Whether to use forwarded data for a load or not
        with m.If((get_row(r1.req.real_addr) == req_row) &
                  (r1.req.hit_way == hit_way)):
            # Only need to consider r1.write_bram here, since if we
            # are writing refill data here, then we don't have a
            # cache hit this cycle on the line being refilled.
            # (There is the possibility that the load following the
            # load miss that started the refill could be to the old
            # contents of the victim line, since it is a couple of
            # cycles after the refill starts before we see the updated
            # cache tag. In that case we don't use the bypass.)
            comb += use_forward1_next.eq(r1.write_bram)
        with m.If((r1.forward_row1 == req_row) & (r1.forward_way1 == hit_way)):
            comb += use_forward2_next.eq(r1.forward_valid1)

        # The way that matched on a hit
        comb += req_hit_way.eq(hit_way)

        # The way to replace on a miss
        with m.If(r1.write_tag):
            comb += replace_way.eq(plru_victim)
        with m.Else():
            comb += replace_way.eq(r1.store_way)

        # work out whether we have permission for this access
        # NB we don't yet implement AMR, thus no KUAP
        comb += rc_ok.eq(perm_attr.reference
                         & (r0.req.load | perm_attr.changed))
        comb += perm_ok.eq((r0.req.priv_mode | (~perm_attr.priv)) &
                           (perm_attr.wr_perm |
                            (r0.req.load & perm_attr.rd_perm)))
        comb += access_ok.eq(valid_ra & perm_ok & rc_ok)

        # Combine the request and cache hit status to decide what
        # operation needs to be done
        comb += nc.eq(r0.req.nc | perm_attr.nocache)
        comb += op.eq(Op.OP_NONE)
        with m.If(go):
            with m.If(~access_ok):
                m.d.sync += Display("DCACHE access fail valid_ra=%d p=%d rc=%d",
                                    valid_ra, perm_ok, rc_ok)
                comb += op.eq(Op.OP_BAD)
            with m.Elif(cancel_store):
                m.d.sync += Display("DCACHE cancel store")
                comb += op.eq(Op.OP_STCX_FAIL)
            with m.Else():
                m.d.sync += Display("DCACHE valid_ra=%d nc=%d ld=%d",
                                    valid_ra, nc, r0.req.load)
                comb += opsel.eq(Cat(is_hit, nc, r0.req.load))
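                # opsel packs (LSB first) is_hit, nc and load, so the
                # Case values below read as 0b<load><nc><is_hit>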
                with m.Switch(opsel):
                    with m.Case(0b101): comb += op.eq(Op.OP_LOAD_HIT)
                    with m.Case(0b100): comb += op.eq(Op.OP_LOAD_MISS)
                    with m.Case(0b110): comb += op.eq(Op.OP_LOAD_NC)
                    with m.Case(0b001): comb += op.eq(Op.OP_STORE_HIT)
                    with m.Case(0b000): comb += op.eq(Op.OP_STORE_MISS)
                    with m.Case(0b010): comb += op.eq(Op.OP_STORE_MISS)
                    with m.Case(0b011): comb += op.eq(Op.OP_BAD)
                    with m.Case(0b111): comb += op.eq(Op.OP_BAD)
        comb += req_op.eq(op)
        comb += req_go.eq(go)

        # Version of the row number that is valid one cycle earlier
        # in the cases where we need to read the cache data BRAM.
        # If we're stalling then we need to keep reading the last
        # row requested.
        with m.If(~r0_stall):
            with m.If(m_in.valid):
                comb += early_req_row.eq(get_row(m_in.addr))
            with m.Else():
                comb += early_req_row.eq(get_row(d_in.addr))
        with m.Else():
            comb += early_req_row.eq(req_row)

    def reservation_comb(self, m, cancel_store, set_rsrv, clear_rsrv,
                         r0_valid, r0, reservation):
        """Handle load-with-reservation and store-conditional instructions
        """
        comb = m.d.comb

        with m.If(r0_valid & r0.req.reserve):
            # XXX generate alignment interrupt if address
            # is not aligned XXX or if r0.req.nc = '1'
            with m.If(r0.req.load):
                comb += set_rsrv.eq(r0.req.atomic_last) # load with reservation
            with m.Else():
                comb += clear_rsrv.eq(r0.req.atomic_last) # store conditional
                with m.If((~reservation.valid) |
                          (r0.req.addr[LINE_OFF_BITS:64] != reservation.addr)):
                    comb += cancel_store.eq(1)

    def reservation_reg(self, m, r0_valid, access_ok, set_rsrv, clear_rsrv,
                        reservation, r0):
        comb = m.d.comb
        sync = m.d.sync

        with m.If(r0_valid & access_ok):
            with m.If(clear_rsrv):
                sync += reservation.valid.eq(0)
            with m.Elif(set_rsrv):
                sync += reservation.valid.eq(1)
                sync += reservation.addr.eq(r0.req.addr[LINE_OFF_BITS:64])

    def writeback_control(self, m, r1, cache_out_row):
        """Return data for loads & completion control logic
        """
        comb = m.d.comb
        sync = m.d.sync
        d_out, m_out = self.d_out, self.m_out

        data_out = Signal(64)
        data_fwd = Signal(64)

        # Use the bypass if we are reading the row that was
        # written 1 or 2 cycles ago, including for the
        # slow_valid = 1 case (i.e. completing a load
        # miss or a non-cacheable load).
        with m.If(r1.use_forward1):
            comb += data_fwd.eq(r1.forward_data1)
        with m.Else():
            comb += data_fwd.eq(r1.forward_data2)

        comb += data_out.eq(cache_out_row)

        for i in range(8):
            with m.If(r1.forward_sel[i]):
                dsel = data_fwd.word_select(i, 8)
                comb += data_out.word_select(i, 8).eq(dsel)

        # DCache output to LoadStore
        comb += d_out.valid.eq(r1.ls_valid)
        comb += d_out.data.eq(data_out)
        comb += d_out.store_done.eq(~r1.stcx_fail)
        comb += d_out.error.eq(r1.ls_error)
        comb += d_out.cache_paradox.eq(r1.cache_paradox)

        # Outputs to MMU
        comb += m_out.done.eq(r1.mmu_done)
        comb += m_out.err.eq(r1.mmu_error)
        comb += m_out.data.eq(data_out)

        # We have a valid load or store hit or we just completed
        # a slow op such as a load miss, a NC load or a store
        #
        # Note: the load hit is delayed by one cycle. However it
        # can still not collide with r.slow_valid (well unless I
        # miscalculated) because slow_valid can only be set on a
        # subsequent request and not on its first cycle (the state
        # machine must have advanced), which makes slow_valid
        # at least 2 cycles from the previous hit_load_valid.

        # Sanity: Only one of these must be set in any given cycle

        if False: # TODO: need Display to get this to work
            assert (r1.slow_valid & r1.stcx_fail) != 1, \
                "unexpected slow_valid collision with stcx_fail"

            assert ((r1.slow_valid | r1.stcx_fail) | r1.hit_load_valid) != 1, \
                "unexpected hit_load_delayed collision with slow_valid"

        with m.If(~r1.mmu_req):
            # Request came from loadstore1...
            # Load hit case is the standard path
            with m.If(r1.hit_load_valid):
                sync += Display("completing load hit data=%x", data_out)

            # error cases complete without stalling
            with m.If(r1.ls_error):
                with m.If(r1.dcbz):
                    sync += Display("completing dcbz with error")
                with m.Else():
                    sync += Display("completing ld/st with error")

            # Slow ops (load miss, NC, stores)
            with m.If(r1.slow_valid):
                sync += Display("completing store or load miss adr=%x data=%x",
                                r1.req.real_addr, data_out)

        with m.Else():
            # Request came from MMU
            with m.If(r1.hit_load_valid):
                sync += Display("completing load hit to MMU, data=%x",
                                m_out.data)
            # error cases complete without stalling
            with m.If(r1.mmu_error):
                sync += Display("completing MMU ld with error")

            # Slow ops (i.e. load miss)
            with m.If(r1.slow_valid):
                sync += Display("completing MMU load miss, adr=%x data=%x",
                                r1.req.real_addr, m_out.data)

    def rams(self, m, r1, early_req_row, cache_out_row, replace_way):
        """rams
        Generate a cache RAM for each way. This handles the normal
        reads, writes from reloads and the special store-hit update
        path as well.

        Note: the BRAMs have an extra read buffer, meaning the output
        is pipelined an extra cycle. This differs from the
        icache. The writeback logic needs to take that into
        account by using 1-cycle delayed signals for load hits.
        """
        comb = m.d.comb
        bus = self.bus

        # Binary-to-Unary one-hot decoders here.  the replace-way one-hot
        # is gated (enabled) by bus.ack, not-write-bram, and state
        # RELOAD_WAIT_ACK
        m.submodules.rams_replace_way_e = rwe = Decoder(NUM_WAYS)
        comb += rwe.n.eq(~((r1.state == State.RELOAD_WAIT_ACK) & bus.ack &
                           ~r1.write_bram))
        comb += rwe.i.eq(replace_way)

        m.submodules.rams_hit_way_e = hwe = Decoder(NUM_WAYS)
        comb += hwe.i.eq(r1.hit_way)

        # this one is gated with write_bram, and replace_way_e can never be
        # set at the same time.  that means that do_write can OR the outputs
        m.submodules.rams_hit_req_way_e = hre = Decoder(NUM_WAYS)
        comb += hre.n.eq(~r1.write_bram) # Decoder.n is inverted
        comb += hre.i.eq(r1.req.hit_way)

        # common Signals
        do_read = Signal()
        wr_addr = Signal(ROW_BITS)
        wr_data = Signal(WB_DATA_BITS)
        wr_sel = Signal(ROW_SIZE)
        rd_addr = Signal(ROW_BITS)

        comb += do_read.eq(1) # always enable
        comb += rd_addr.eq(early_req_row)

        # Write mux:
        #
        # Defaults to wishbone read responses (cache refill)
        #
        # For timing, the mux on wr_data/sel/addr is not
        # dependent on anything other than the current state.

        with m.If(r1.write_bram):
            # Write store data to BRAM.  This happens one
            # cycle after the store is in r0.
            comb += wr_data.eq(r1.req.data)
            comb += wr_sel.eq(r1.req.byte_sel)
            comb += wr_addr.eq(get_row(r1.req.real_addr))

        with m.Else():
            # Otherwise, we might be doing a reload or a DCBZ
            with m.If(r1.dcbz):
                comb += wr_data.eq(0)
            with m.Else():
                comb += wr_data.eq(bus.dat_r)
            comb += wr_addr.eq(r1.store_row)
            comb += wr_sel.eq(~0) # all 1s

        # set up Cache Rams
        for i in range(NUM_WAYS):
            do_write = Signal(name="do_wr%d" % i)
            wr_sel_m = Signal(ROW_SIZE, name="wr_sel_m_%d" % i)
            d_out = Signal(WB_DATA_BITS, name="dout_%d" % i) # cache_row_t

            way = CacheRam(ROW_BITS, WB_DATA_BITS, ADD_BUF=True, ram_num=i)
            m.submodules["cacheram_%d" % i] = way

            comb += way.rd_en.eq(do_read)
            comb += way.rd_addr.eq(rd_addr)
            comb += d_out.eq(way.rd_data_o)
            comb += way.wr_sel.eq(wr_sel_m)
            comb += way.wr_addr.eq(wr_addr)
            comb += way.wr_data.eq(wr_data)

            # Cache hit reads
            with m.If(hwe.o[i]):
                comb += cache_out_row.eq(d_out)

            # these are mutually-exclusive via their Decoder-enablers
            # (note: Decoder-enable is inverted)
            comb += do_write.eq(hre.o[i] | rwe.o[i])

            # Mask write selects with do_write since BRAM
            # doesn't have a global write-enable
            with m.If(do_write):
                comb += wr_sel_m.eq(wr_sel)

    # Cache hit synchronous machine for the easy case.
    # This handles load hits.
    # It also handles error cases (TLB miss, cache paradox)
    def dcache_fast_hit(self, m, req_op, r0_valid, r0, r1,
                        req_hit_way, req_index, req_tag, access_ok,
                        tlb_hit, tlb_req_index):
        comb = m.d.comb
        sync = m.d.sync

        with m.If(req_op != Op.OP_NONE):
            sync += Display("op:%d addr:%x nc: %d idx: %x tag: %x way: %x",
                            req_op, r0.req.addr, r0.req.nc,
                            req_index, req_tag, req_hit_way)

        with m.If(r0_valid):
            sync += r1.mmu_req.eq(r0.mmu_req)

        # Fast path for load/store hits.
        # Set signals for the writeback controls.
        sync += r1.hit_way.eq(req_hit_way)
        sync += r1.hit_index.eq(req_index)

        sync += r1.hit_load_valid.eq(req_op == Op.OP_LOAD_HIT)
        sync += r1.cache_hit.eq((req_op == Op.OP_LOAD_HIT) |
                                (req_op == Op.OP_STORE_HIT))

        with m.If(req_op == Op.OP_BAD):
            sync += Display("Signalling ld/st error "
                            "ls_error=%i mmu_error=%i cache_paradox=%i",
                            ~r0.mmu_req, r0.mmu_req, access_ok)
            sync += r1.ls_error.eq(~r0.mmu_req)
            sync += r1.mmu_error.eq(r0.mmu_req)
            sync += r1.cache_paradox.eq(access_ok)
        with m.Else():
            sync += r1.ls_error.eq(0)
            sync += r1.mmu_error.eq(0)
            sync += r1.cache_paradox.eq(0)

        sync += r1.stcx_fail.eq(req_op == Op.OP_STCX_FAIL)

        # Record TLB hit information for updating TLB PLRU
        sync += r1.tlb_hit.eq(tlb_hit)
        sync += r1.tlb_hit_index.eq(tlb_req_index)

    # Memory accesses are handled by this state machine:
    #
    #   * Cache load miss/reload (in conjunction with "rams")
    #   * Load hits for non-cachable forms
    #   * Stores (the collision case is handled in "rams")
    #
    # All wishbone requests generation is done here.
    # This machine operates at stage 1.
    def dcache_slow(self, m, r1, use_forward1_next, use_forward2_next,
                    r0, replace_way,
                    req_hit_way, req_same_tag,
                    r0_valid, req_op, cache_tags, req_go, ra):

        comb = m.d.comb
        sync = m.d.sync
        bus = self.bus
        d_in = self.d_in

        req = MemAccessRequest("mreq_ds")

        r1_next_cycle = Signal()
        req_row = Signal(ROW_BITS)
        req_idx = Signal(INDEX_BITS)
        req_tag = Signal(TAG_BITS)
        comb += req_idx.eq(get_index(req.real_addr))
        comb += req_row.eq(get_row(req.real_addr))
        comb += req_tag.eq(get_tag(req.real_addr))

        sync += r1.use_forward1.eq(use_forward1_next)
        sync += r1.forward_sel.eq(0)

        with m.If(use_forward1_next):
            sync += r1.forward_sel.eq(r1.req.byte_sel)
        with m.Elif(use_forward2_next):
            sync += r1.forward_sel.eq(r1.forward_sel1)

        sync += r1.forward_data2.eq(r1.forward_data1)
        with m.If(r1.write_bram):
            sync += r1.forward_data1.eq(r1.req.data)
            sync += r1.forward_sel1.eq(r1.req.byte_sel)
            sync += r1.forward_way1.eq(r1.req.hit_way)
            sync += r1.forward_row1.eq(get_row(r1.req.real_addr))
            sync += r1.forward_valid1.eq(1)
        with m.Else():
            with m.If(r1.dcbz):
                sync += r1.forward_data1.eq(0)
            with m.Else():
                sync += r1.forward_data1.eq(bus.dat_r)
            sync += r1.forward_sel1.eq(~0) # all 1s
            sync += r1.forward_way1.eq(replace_way)
            sync += r1.forward_row1.eq(r1.store_row)
            sync += r1.forward_valid1.eq(0)

        # One cycle pulses reset
        sync += r1.slow_valid.eq(0)
        sync += r1.write_bram.eq(0)
        sync += r1.inc_acks.eq(0)
        sync += r1.dec_acks.eq(0)

        sync += r1.ls_valid.eq(0)
        # complete tlbies and TLB loads in the third cycle
        sync += r1.mmu_done.eq(r0_valid & (r0.tlbie | r0.tlbld))

        with m.If((req_op == Op.OP_LOAD_HIT) | (req_op == Op.OP_STCX_FAIL)):
            with m.If(r0.mmu_req):
                sync += r1.mmu_done.eq(1)
            with m.Else():
                sync += r1.ls_valid.eq(1)

        with m.If(r1.write_tag):
            # Store new tag in selected way
            replace_way_onehot = Signal(NUM_WAYS)
            comb += replace_way_onehot.eq(1<<replace_way)
            for i in range(NUM_WAYS):
                with m.If(replace_way_onehot[i]):
                    ct = Signal(TAG_RAM_WIDTH)
                    comb += ct.eq(cache_tags[r1.store_index].tag)
                    comb += ct.word_select(i, TAG_WIDTH).eq(r1.reload_tag)
                    sync += cache_tags[r1.store_index].tag.eq(ct)
            sync += r1.store_way.eq(replace_way)
            sync += r1.write_tag.eq(0)

        # Take request from r1.req if there is one there,
        # else from req_op, ra, etc.
        with m.If(r1.full):
            comb += req.eq(r1.req)
        with m.Else():
            comb += req.op.eq(req_op)
            comb += req.valid.eq(req_go)
            comb += req.mmu_req.eq(r0.mmu_req)
            comb += req.dcbz.eq(r0.req.dcbz)
            comb += req.real_addr.eq(ra)

            with m.If(r0.req.dcbz):
                # force data to 0 for dcbz
                comb += req.data.eq(0)
            with m.Elif(r0.d_valid):
                comb += req.data.eq(r0.req.data)
            with m.Else():
                comb += req.data.eq(d_in.data)

            # Select all bytes for dcbz
            # and for cacheable loads
            with m.If(r0.req.dcbz | (r0.req.load & ~r0.req.nc)):
                comb += req.byte_sel.eq(~0) # all 1s
            with m.Else():
                comb += req.byte_sel.eq(r0.req.byte_sel)
            comb += req.hit_way.eq(req_hit_way)
            comb += req.same_tag.eq(req_same_tag)

        # Store the incoming request from r0,
        # if it is a slow request
        # Note that r1.full = 1 implies req_op = OP_NONE
        with m.If((req_op == Op.OP_LOAD_MISS)
                  | (req_op == Op.OP_LOAD_NC)
                  | (req_op == Op.OP_STORE_MISS)
                  | (req_op == Op.OP_STORE_HIT)):
            sync += r1.req.eq(req)
            sync += r1.full.eq(1)
            # do not let r1.state RELOAD_WAIT_ACK or STORE_WAIT_ACK
            # destroy r1.req by overwriting r1.full back to zero
            comb += r1_next_cycle.eq(1)

        # Main state machine
        with m.Switch(r1.state):

            with m.Case(State.IDLE):
                sync += r1.wb.adr.eq(req.real_addr[ROW_LINE_BITS:])
                sync += r1.wb.sel.eq(req.byte_sel)
                sync += r1.wb.dat.eq(req.data)
                sync += r1.dcbz.eq(req.dcbz)

                # Keep track of our index and way
                # for subsequent stores.
                sync += r1.store_index.eq(req_idx)
                sync += r1.store_row.eq(req_row)
                sync += r1.end_row_ix.eq(get_row_of_line(req_row)-1)
                sync += r1.reload_tag.eq(req_tag)
                sync += r1.req.same_tag.eq(1)

                with m.If(req.op == Op.OP_STORE_HIT):
                    sync += r1.store_way.eq(req.hit_way)

                #with m.If(r1.dec_acks):
                #    sync += r1.acks_pending.eq(r1.acks_pending - 1)

                # Reset per-row valid bits,
                # ready for handling OP_LOAD_MISS
                for i in range(ROW_PER_LINE):
                    sync += r1.rows_valid[i].eq(0)

                with m.If(req_op != Op.OP_NONE):
                    sync += Display("cache op %d", req.op)

                with m.Switch(req.op):
                    with m.Case(Op.OP_LOAD_HIT):
                        # stay in IDLE state
                        pass

                    with m.Case(Op.OP_LOAD_MISS):
                        sync += Display("cache miss real addr: %x " \
                                        "idx: %x tag: %x",
                                        req.real_addr, req_row, req_tag)

                        # Start the wishbone cycle
                        sync += r1.wb.we.eq(0)
                        sync += r1.wb.cyc.eq(1)
                        sync += r1.wb.stb.eq(1)

                        # Track that we had one request sent
                        sync += r1.state.eq(State.RELOAD_WAIT_ACK)
                        sync += r1.write_tag.eq(1)

                    with m.Case(Op.OP_LOAD_NC):
                        sync += r1.wb.cyc.eq(1)
                        sync += r1.wb.stb.eq(1)
                        sync += r1.wb.we.eq(0)
                        sync += r1.state.eq(State.NC_LOAD_WAIT_ACK)

                    with m.Case(Op.OP_STORE_HIT, Op.OP_STORE_MISS):
                        with m.If(~req.dcbz):
                            sync += r1.state.eq(State.STORE_WAIT_ACK)
                            sync += r1.acks_pending.eq(1)
                            sync += r1.full.eq(0)
                            comb += r1_next_cycle.eq(0)
                            sync += r1.slow_valid.eq(1)

                            with m.If(req.mmu_req):
                                sync += r1.mmu_done.eq(1)
                            with m.Else():
                                sync += r1.ls_valid.eq(1)

                            with m.If(req.op == Op.OP_STORE_HIT):
                                sync += r1.write_bram.eq(1)
                        with m.Else():
                            # dcbz is handled much like a load miss except
                            # that we are writing to memory instead of reading
                            sync += r1.state.eq(State.RELOAD_WAIT_ACK)

                            with m.If(req.op == Op.OP_STORE_MISS):
                                sync += r1.write_tag.eq(1)

                        sync += r1.wb.we.eq(1)
                        sync += r1.wb.cyc.eq(1)
                        sync += r1.wb.stb.eq(1)

                    # OP_NONE and OP_BAD do nothing
                    # OP_BAD & OP_STCX_FAIL were
                    # handled above already
                    with m.Case(Op.OP_NONE):
                        pass
                    with m.Case(Op.OP_BAD):
                        pass
                    with m.Case(Op.OP_STCX_FAIL):
                        pass

            with m.Case(State.RELOAD_WAIT_ACK):
                ld_stbs_done = Signal()
                # Requests are all sent if stb is 0
                comb += ld_stbs_done.eq(~r1.wb.stb)

                # If we are still sending requests, was one accepted?
                with m.If((~bus.stall) & r1.wb.stb):
                    # That was the last word?  We are done sending.
                    # Clear stb and set ld_stbs_done so we can handle an
                    # eventual last ack on the same cycle.
                    # sigh - reconstruct wb adr with 3 extra 0s at front
                    wb_adr = Cat(Const(0, ROW_OFF_BITS), r1.wb.adr)
                    with m.If(is_last_row_addr(wb_adr, r1.end_row_ix)):
                        sync += r1.wb.stb.eq(0)
                        comb += ld_stbs_done.eq(1)

                    # Calculate the next row address in the current cache line
                    row = Signal(LINE_OFF_BITS-ROW_OFF_BITS)
                    comb += row.eq(r1.wb.adr)
                    sync += r1.wb.adr[:LINE_OFF_BITS-ROW_OFF_BITS].eq(row+1)

                # Incoming acks processing
                sync += r1.forward_valid1.eq(bus.ack)
                with m.If(bus.ack):
                    srow = Signal(ROW_LINE_BITS)
                    comb += srow.eq(r1.store_row)
                    sync += r1.rows_valid[srow].eq(1)

                    # If this is the data we were looking for,
                    # we can complete the request next cycle.
                    # Compare the whole address in case the
                    # request in r1.req is not the one that
                    # started this refill.
                    with m.If(r1.full & r1.req.same_tag &
                              ((r1.dcbz & req.dcbz) |
                               (r1.req.op == Op.OP_LOAD_MISS)) &
                              (r1.store_row == get_row(r1.req.real_addr))):
                        sync += r1.full.eq(r1_next_cycle)
                        sync += r1.slow_valid.eq(1)
                        with m.If(r1.mmu_req):
                            sync += r1.mmu_done.eq(1)
                        with m.Else():
                            sync += r1.ls_valid.eq(1)
                        sync += r1.forward_sel.eq(~0) # all 1s
                        sync += r1.use_forward1.eq(1)

                    # Check for completion
                    with m.If(ld_stbs_done & is_last_row(r1.store_row,
                                                         r1.end_row_ix)):
                        # Complete wishbone cycle
                        sync += r1.wb.cyc.eq(0)

                        # Cache line is now valid
                        cv = Signal(INDEX_BITS)
                        comb += cv.eq(cache_tags[r1.store_index].valid)
                        comb += cv.bit_select(r1.store_way, 1).eq(1)
                        sync += cache_tags[r1.store_index].valid.eq(cv)

                        sync += r1.state.eq(State.IDLE)
                        sync += Display("cache valid set %x "
                                        "idx %d way %d",
                                        cv, r1.store_index, r1.store_way)

                    # Increment store row counter
                    sync += r1.store_row.eq(next_row(r1.store_row))

            with m.Case(State.STORE_WAIT_ACK):
                st_stbs_done = Signal()
                adjust_acks = Signal(3)

                comb += st_stbs_done.eq(~r1.wb.stb)

                with m.If(r1.inc_acks != r1.dec_acks):
                    with m.If(r1.inc_acks):
                        comb += adjust_acks.eq(r1.acks_pending + 1)
                    with m.Else():
                        comb += adjust_acks.eq(r1.acks_pending - 1)
                with m.Else():
                    comb += adjust_acks.eq(r1.acks_pending)

                sync += r1.acks_pending.eq(adjust_acks)

                # Clear stb when slave accepted request
                with m.If(~bus.stall):
                    # See if there is another store waiting
                    # to be done which is in the same real page.
                    with m.If(req.valid):
                        _ra = req.real_addr[ROW_LINE_BITS:SET_SIZE_BITS]
                        sync += r1.wb.adr[0:SET_SIZE_BITS].eq(_ra)
                        sync += r1.wb.dat.eq(req.data)
                        sync += r1.wb.sel.eq(req.byte_sel)

                    with m.If((adjust_acks < 7) & req.same_tag &
                              ((req.op == Op.OP_STORE_MISS) |
                               (req.op == Op.OP_STORE_HIT))):
                        sync += r1.wb.stb.eq(1)
                        comb += st_stbs_done.eq(0)
                        sync += r1.store_way.eq(req.hit_way)
                        sync += r1.store_row.eq(get_row(req.real_addr))

                        with m.If(req.op == Op.OP_STORE_HIT):
                            sync += r1.write_bram.eq(1)
                        sync += r1.full.eq(r1_next_cycle)
                        sync += r1.slow_valid.eq(1)

                        # Store requests never come from the MMU
                        sync += r1.ls_valid.eq(1)
                        comb += st_stbs_done.eq(0)
                        sync += r1.inc_acks.eq(1)
                    with m.Else():
                        sync += r1.wb.stb.eq(0)
                        comb += st_stbs_done.eq(1)

                # Got ack?  See if complete.
                sync += Display("got ack %d %d stbs %d adjust_acks %d",
                                bus.ack, bus.ack, st_stbs_done, adjust_acks)
                with m.If(bus.ack):
                    with m.If(st_stbs_done & (adjust_acks == 1)):
                        sync += r1.state.eq(State.IDLE)
                        sync += r1.wb.cyc.eq(0)
                        sync += r1.wb.stb.eq(0)
                    sync += r1.dec_acks.eq(1)

            with m.Case(State.NC_LOAD_WAIT_ACK):
                # Clear stb when slave accepted request
                with m.If(~bus.stall):
                    sync += r1.wb.stb.eq(0)

                # Got ack?  complete.
                with m.If(bus.ack):
                    sync += r1.state.eq(State.IDLE)
                    sync += r1.full.eq(r1_next_cycle)
                    sync += r1.slow_valid.eq(1)

                    with m.If(r1.mmu_req):
                        sync += r1.mmu_done.eq(1)
                    with m.Else():
                        sync += r1.ls_valid.eq(1)

                    sync += r1.forward_sel.eq(~0) # all 1s
                    sync += r1.use_forward1.eq(1)
                    sync += r1.wb.cyc.eq(0)
                    sync += r1.wb.stb.eq(0)

    def dcache_log(self, m, r1, valid_ra, tlb_hit, stall_out):

        sync = m.d.sync
        d_out, bus, log_out = self.d_out, self.bus, self.log_out

        sync += log_out.eq(Cat(r1.state[:3], valid_ra, tlb_hit.way[:3],
                               stall_out, req_op[:3], d_out.valid, d_out.error,
                               r1.wb.cyc, r1.wb.stb, bus.ack, bus.stall,
                               r1.real_adr[3:6]))
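        # note: currently unused (the call in elaborate is commented out);
        # req_op and r1.real_adr would need passing in before re-enabling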

    def elaborate(self, platform):

        m = Module()
        comb, sync = m.d.comb, m.d.sync
        m_in, d_in = self.m_in, self.d_in

        # Storage. Hopefully "cache_rows" is a BRAM, the rest is LUTs
        cache_tags = CacheTagArray()
        cache_tag_set = Signal(TAG_RAM_WIDTH)

        # TODO attribute ram_style : string;
        # TODO attribute ram_style of cache_tags : signal is "distributed";

        """note: these are passed to nmigen.hdl.Memory as "attributes".
           don't know how, just that they are.
        """
        # TODO attribute ram_style of
        #  dtlb_tags : signal is "distributed";
        # TODO attribute ram_style of
        #  dtlb_ptes : signal is "distributed";

        r0 = RegStage0("r0")
        r0_full = Signal()

        r1 = RegStage1("r1")

        reservation = Reservation("rsrv")

        # Async signals on incoming request
        req_index = Signal(INDEX_BITS)
        req_row = Signal(ROW_BITS)
        req_hit_way = Signal(WAY_BITS)
        req_tag = Signal(TAG_BITS)
        req_op = Signal(Op)
        req_data = Signal(64)
        req_same_tag = Signal()
        req_go = Signal()

        early_req_row = Signal(ROW_BITS)

        cancel_store = Signal()
        set_rsrv = Signal()
        clear_rsrv = Signal()

        r0_valid = Signal()
        r0_stall = Signal()

        use_forward1_next = Signal()
        use_forward2_next = Signal()

        cache_out_row = Signal(WB_DATA_BITS)

        plru_victim = Signal(WAY_BITS)
        replace_way = Signal(WAY_BITS)

        # Wishbone read/write/cache write formatting signals
        bus_sel = Signal(8)

        # TLB signals
        tlb_way = TLBRecord("tlb_way")
        tlb_req_index = Signal(TLB_SET_BITS)
        tlb_hit = TLBHit("tlb_hit")
        pte = Signal(TLB_PTE_BITS)
        ra = Signal(REAL_ADDR_BITS)
        valid_ra = Signal()
        perm_attr = PermAttr("dc_perms")
        rc_ok = Signal()
        perm_ok = Signal()
        access_ok = Signal()

        tlb_plru_victim = Signal(TLB_WAY_BITS)

        # we don't yet handle collisions between loadstore1 requests
        # and MMU requests
        comb += self.m_out.stall.eq(0)

        # Hold off the request in r0 when r1 has an uncompleted request
        comb += r0_stall.eq(r0_full & (r1.full | d_in.hold))
        comb += r0_valid.eq(r0_full & ~r1.full & ~d_in.hold)
        comb += self.stall_out.eq(r0_stall)
        # debugging: detect if any stall ever requested, which is fine,
        # but if a request comes in when stall requested, that's bad.
        with m.If(r0_stall):
            sync += self.any_stall_out.eq(1)
            with m.If(d_in.valid):
                sync += self.dreq_when_stall.eq(1)
            with m.If(m_in.valid):
                sync += self.mreq_when_stall.eq(1)

        # deal with litex not doing wishbone pipeline mode
        # XXX in wrong way.  FIFOs are needed in the SRAM test
        # so that stb/ack match up. same thing done in icache.py
        if not self.microwatt_compat:
            comb += self.bus.stall.eq(self.bus.cyc & ~self.bus.ack)

        # Wire up wishbone request latch out of stage 1
        comb += self.bus.we.eq(r1.wb.we)
        comb += self.bus.adr.eq(r1.wb.adr)
        comb += self.bus.sel.eq(r1.wb.sel)
        comb += self.bus.stb.eq(r1.wb.stb)
        comb += self.bus.dat_w.eq(r1.wb.dat)
        comb += self.bus.cyc.eq(r1.wb.cyc)

        # create submodule TLBUpdate
        m.submodules.dtlb_update = self.dtlb_update = DTLBUpdate()

        # call sub-functions putting everything together,
        # using shared signals established above
        self.stage_0(m, r0, r1, r0_full)
        self.tlb_read(m, r0_stall, tlb_way)
        self.tlb_search(m, tlb_req_index, r0, r0_valid,
                        tlb_way,
                        pte, tlb_hit, valid_ra, perm_attr, ra)
        self.tlb_update(m, r0_valid, r0, tlb_req_index,
                        tlb_hit, tlb_plru_victim)
        self.maybe_plrus(m, r1, plru_victim)
        self.maybe_tlb_plrus(m, r1, tlb_plru_victim, tlb_req_index)
        self.cache_tag_read(m, r0_stall, req_index, cache_tag_set, cache_tags)
        self.dcache_request(m, r0, ra, req_index, req_row, req_tag,
                            r0_valid, r1, cache_tags, replace_way,
                            use_forward1_next, use_forward2_next,
                            req_hit_way, plru_victim, rc_ok, perm_attr,
                            valid_ra, perm_ok, access_ok, req_op, req_go,
                            tlb_hit, tlb_way, cache_tag_set,
                            cancel_store, req_same_tag, r0_stall,
                            early_req_row)
        self.reservation_comb(m, cancel_store, set_rsrv, clear_rsrv,
                              r0_valid, r0, reservation)
        self.reservation_reg(m, r0_valid, access_ok, set_rsrv, clear_rsrv,
                             reservation, r0)
        self.writeback_control(m, r1, cache_out_row)
        self.rams(m, r1, early_req_row, cache_out_row, replace_way)
        self.dcache_fast_hit(m, req_op, r0_valid, r0, r1,
                             req_hit_way, req_index, req_tag, access_ok,
                             tlb_hit, tlb_req_index)
        self.dcache_slow(m, r1, use_forward1_next, use_forward2_next,
                         r0, replace_way,
                         req_hit_way, req_same_tag,
                         r0_valid, req_op, cache_tags, req_go, ra)
        #self.dcache_log(m, r1, valid_ra, tlb_hit, stall_out)

        return m


if __name__ == '__main__':
    dut = DCache()
    vl = rtlil.convert(dut, ports=[])
    with open("test_dcache.il", "w") as f:
        f.write(vl)