56c28c81713f79ef2e0ac8f1df1d6043f09f7655
[soc.git] / src / soc / fu / ldst / loadstore.py
1 """LoadStore1 FSM.
2
3 based on microwatt loadstore1.vhdl, but conforming to PortInterface.
4 unlike loadstore1.vhdl this does *not* deal with actual Load/Store
5 ops: that job is handled by LDSTCompUnit, which talks to LoadStore1
6 by way of PortInterface. PortInterface is where things need extending,
7 such as adding dcbz support, etc.
8
9 this module basically handles "pure" load / store operations, and
10 its first job is to ask the D-Cache for the data. if that fails,
11 the second task (if virtual memory is enabled) is to ask the MMU
12 to perform a TLB lookup, then to go *back* to the cache and ask again.
13
14 Links:
15
16 * https://bugs.libre-soc.org/show_bug.cgi?id=465
17
18 """
19
20 from nmigen import (Elaboratable, Module, Signal, Shape, unsigned, Cat, Mux,
21 Record, Memory,
22 Const, C)
23 from nmutil.iocontrol import RecordObject
24 from nmutil.util import rising_edge, Display
25 from enum import Enum, unique
26
27 from soc.experiment.dcache import DCache
28 from soc.experiment.icache import ICache
29 from soc.experiment.pimem import PortInterfaceBase
30 from soc.experiment.mem_types import LoadStore1ToMMUType
31 from soc.experiment.mem_types import MMUToLoadStore1Type
32
33 from soc.minerva.wishbone import make_wb_layout
34 from soc.bus.sram import SRAM
35 from nmutil.util import Display
36
37
@unique
class State(Enum):
    """Main LoadStore1 FSM state: idle, awaiting dcache, or awaiting MMU."""
    IDLE = 0           # ready for instruction
    ACK_WAIT = 1       # waiting for ack from dcache
    MMU_LOOKUP = 2     # waiting for MMU to look up translation
    #SECOND_REQ = 3    # second request for unaligned transfer
44
@unique
class Misalign(Enum):
    """Progress of a (possibly dword-crossing) misaligned transfer."""
    ONEWORD = 0        # only one word needed, all good
    NEED2WORDS = 1     # need to send/receive two words
    WAITFIRST = 2      # waiting for the first word
    WAITSECOND = 3     # waiting for the second word
51
52
# captures the LDSTRequest from the PortInterface, which "blips" most
# of this at us (pipeline-style).
class LDSTRequest(RecordObject):
    """Latchable record of a single LD/ST request as presented by
    PortInterface.  Field order defines the Record layout - do not reorder.
    """
    def __init__(self, name=None):
        RecordObject.__init__(self, name=name)

        self.load = Signal()           # 1 = load, 0 = store
        self.dcbz = Signal()           # data-cache-block-zero operation
        self.raddr = Signal(64)        # request address
        # self.store_data = Signal(64) # this is already sync (on a delay)
        self.byte_sel = Signal(16)     # 16-bit to cover a misaligned pair
        self.nc = Signal()             # non-cacheable access
        self.virt_mode = Signal()      # MSR.DR: virtual-mode translation
        self.priv_mode = Signal()      # ~MSR.PR: privileged access
        self.mode_32bit = Signal()     # XXX UNUSED AT PRESENT
        self.alignstate = Signal(Misalign)  # progress of alignment request
        self.align_intr = Signal()     # alignment interrupt must be raised
        # atomic (LR/SC reservation)
        self.reserve = Signal()
        self.atomic = Signal()
        self.atomic_last = Signal()
75
# glue logic for microwatt mmu and dcache
class LoadStore1(PortInterfaceBase):
    """Connects PortInterface (from LDSTCompUnit) to DCache, ICache and MMU.

    Latches each LD/ST request, then runs a small FSM (see State):
    ask the DCache for the data; on dcache error fall back to an MMU
    TLB lookup (MMU_LOOKUP) and retry; handle dword-crossing transfers
    by issuing two back-to-back dcache requests (see Misalign).
    Also maintains the DSISR and DAR SPRs on exceptions.
    """
    def __init__(self, pspec):
        self.pspec = pspec
        # cache may be disabled entirely via pspec (used in testing)
        self.disable_cache = (hasattr(pspec, "disable_cache") and
                              pspec.disable_cache == True)
        regwid = pspec.reg_wid
        addrwid = pspec.addr_wid

        super().__init__(regwid, addrwid)
        self.dcache = DCache(pspec)
        self.icache = ICache(pspec)
        # these names are from the perspective of here (LoadStore1)
        self.d_out = self.dcache.d_in    # in to dcache is out for LoadStore
        self.d_in = self.dcache.d_out    # out from dcache is in for LoadStore
        self.i_out = self.icache.i_in    # in to icache is out for LoadStore
        self.i_in = self.icache.i_out    # out from icache is in for LoadStore
        self.m_out = LoadStore1ToMMUType("m_out")  # out *to* MMU
        self.m_in = MMUToLoadStore1Type("m_in")    # in *from* MMU
        self.req = LDSTRequest(name="ldst_req")    # current (comb) request

        # TODO, convert dcache wb_in/wb_out to "standard" nmigen Wishbone bus
        self.dbus = Record(make_wb_layout(pspec))
        self.ibus = Record(make_wb_layout(pspec))

        # for creating a single clock blip to DCache
        self.d_valid = Signal()
        self.d_w_valid = Signal()
        self.d_validblip = Signal()

        # state info for LD/ST
        self.done = Signal()
        self.done_delay = Signal()
        # latch most of the input request
        self.load = Signal()
        self.tlbie = Signal()
        self.dcbz = Signal()
        self.raddr = Signal(64)
        self.maddr = Signal(64)
        self.store_data = Signal(64)        # first half (aligned)
        self.store_data2 = Signal(64)       # second half (misaligned)
        self.load_data = Signal(128)        # 128 to cope with misalignment
        self.load_data_delay = Signal(128)  # perform 2 LD/STs
        self.byte_sel = Signal(16)          # also for misaligned, 16-bit
        self.alignstate = Signal(Misalign)  # progress of alignment request
        self.next_addr = Signal(64)         # 2nd (aligned) read/write addr
        #self.xerc : xer_common_t;
        #self.rc = Signal()
        self.nc = Signal()            # non-cacheable access
        self.mode_32bit = Signal()    # XXX UNUSED AT PRESENT
        self.state = Signal(State)    # main FSM state (see State enum)
        self.instr_fault = Signal()   # indicator to request i-cache MMU lookup
        self.r_instr_fault = Signal() # accessed in external_busy
        self.priv_mode = Signal()     # only for instruction fetch (not LDST)
        self.align_intr = Signal()
        self.busy = Signal()
        self.wait_dcache = Signal()
        self.wait_mmu = Signal()
        self.lrsc_misalign = Signal() # LR/SC reservation on unaligned address
        #self.intr_vec : integer range 0 to 16#fff#;
        #self.nia = Signal(64)
        #self.srr1 = Signal(16)
        # use these to set the dsisr or dar respectively
        self.mmu_set_spr = Signal()
        self.mmu_set_dsisr = Signal()
        self.mmu_set_dar = Signal()
        self.sprval_in = Signal(64)

        # ONLY access these read-only, do NOT attempt to change
        self.dsisr = Signal(32)
        self.dar = Signal(64)

    # when external_busy set, do not allow PortInterface to proceed
    def external_busy(self, m):
        return self.instr_fault | self.r_instr_fault

    def set_wr_addr(self, m, addr, mask, misalign, msr, is_dcbz):
        """Latch a *store* request into self.req (comb), called by
        PortInterfaceBase.  misalign indicates the transfer crosses a
        dword boundary and therefore needs two dcache operations.
        """
        m.d.comb += self.req.load.eq(0)  # store operation
        m.d.comb += self.req.byte_sel.eq(mask)
        m.d.comb += self.req.raddr.eq(addr)
        m.d.comb += self.req.priv_mode.eq(~msr.pr)  # not-problem ==> priv
        m.d.comb += self.req.virt_mode.eq(msr.dr)   # DR ==> virt
        m.d.comb += self.req.mode_32bit.eq(~msr.sf) # not-sixty-four ==> 32bit
        m.d.comb += self.req.dcbz.eq(is_dcbz)
        with m.If(misalign):
            # need two writes: prepare the 2nd (dword-aligned) addr up-front
            m.d.comb += self.req.alignstate.eq(Misalign.NEED2WORDS)
            m.d.sync += self.next_addr.eq(Cat(C(0, 3), addr[3:]+1))

        # m.d.comb += Display("set_wr_addr %i dcbz %i",addr,is_dcbz)

        # option to disable the cache entirely for write
        if self.disable_cache:
            m.d.comb += self.req.nc.eq(1)

        # dcbz cannot do no-cache
        with m.If(is_dcbz & self.req.nc):
            m.d.comb += self.req.align_intr.eq(1)

        # hmm, rather than add yet another argument to set_wr_addr
        # read direct from PortInterface
        m.d.comb += self.req.reserve.eq(self.pi.reserve)  # atomic request
        m.d.comb += self.req.atomic.eq(~self.lrsc_misalign)
        m.d.comb += self.req.atomic_last.eq(~self.lrsc_misalign)

        return None

    def set_rd_addr(self, m, addr, mask, misalign, msr):
        """Latch a *load* request into self.req (comb) and raise d_valid,
        called by PortInterfaceBase.
        """
        m.d.comb += self.d_valid.eq(1)
        m.d.comb += self.req.load.eq(1)  # load operation
        m.d.comb += self.req.byte_sel.eq(mask)
        m.d.comb += self.req.raddr.eq(addr)
        m.d.comb += self.req.priv_mode.eq(~msr.pr)  # not-problem ==> priv
        m.d.comb += self.req.virt_mode.eq(msr.dr)   # DR ==> virt
        m.d.comb += self.req.mode_32bit.eq(~msr.sf) # not-sixty-four ==> 32bit
        # BAD HACK! disable cacheing on LD when address is 0xCxxx_xxxx
        # this is for peripherals. same thing done in Microwatt loadstore1.vhdl
        with m.If(addr[28:] == Const(0xc, 4)):
            m.d.comb += self.req.nc.eq(1)
        # option to disable the cache entirely for read
        if self.disable_cache:
            m.d.comb += self.req.nc.eq(1)
        with m.If(misalign):
            # need two reads: prepare next address in advance
            m.d.comb += self.req.alignstate.eq(Misalign.NEED2WORDS)
            m.d.sync += self.next_addr.eq(Cat(C(0, 3), addr[3:]+1))

        # hmm, rather than add yet another argument to set_rd_addr
        # read direct from PortInterface
        m.d.comb += self.req.reserve.eq(self.pi.reserve)  # atomic request
        m.d.comb += self.req.atomic.eq(~self.lrsc_misalign)
        m.d.comb += self.req.atomic_last.eq(~self.lrsc_misalign)

        return None #FIXME return value

    def set_wr_data(self, m, data, wen):
        """Present store data to the dcache; return "store done" signal."""
        # do the "blip" on write data
        m.d.comb += self.d_valid.eq(1)
        # put data into comb which is picked up in main elaborate()
        m.d.comb += self.d_w_valid.eq(1)
        m.d.comb += self.store_data.eq(data)
        # upper half latched sync for a possible 2nd (misaligned) write
        m.d.sync += self.store_data2.eq(data[64:128])
        st_ok = self.done  # TODO indicates write data is valid
        m.d.comb += self.pi.store_done.data.eq(self.d_in.store_done)
        m.d.comb += self.pi.store_done.ok.eq(1)
        return st_ok

    def get_rd_data(self, m):
        """Return (data, valid) for a completed load."""
        ld_ok = self.done_delay      # indicates read data is valid
        data = self.load_data_delay  # actual read data
        return data, ld_ok

    def elaborate(self, platform):
        m = super().elaborate(platform)
        comb, sync = m.d.comb, m.d.sync

        # microwatt takes one more cycle before next operation can be issued
        sync += self.done_delay.eq(self.done)
        #sync += self.load_data_delay[0:64].eq(self.load_data[0:64])

        # create dcache and icache module
        m.submodules.dcache = dcache = self.dcache
        m.submodules.icache = icache = self.icache

        # temp vars
        d_out, d_in, dbus = self.d_out, self.d_in, self.dbus
        i_out, i_in, ibus = self.i_out, self.i_in, self.ibus
        m_out, m_in = self.m_out, self.m_in
        exc = self.pi.exc_o
        exception = exc.happened
        mmureq = Signal()

        # copy of address, but gets over-ridden for instr_fault
        maddr = Signal(64)
        m.d.comb += maddr.eq(self.raddr)

        # check for LR/SC misalignment, used in set_rd/wr_addr above
        comb += self.lrsc_misalign.eq(((self.pi.data_len[0:3]-1) &
                                       self.req.raddr[0:3]).bool())
        with m.If(self.lrsc_misalign & self.req.reserve):
            m.d.comb += self.req.align_intr.eq(1)

        # create a blip (single pulse) on valid read/write request
        # this can be over-ridden in the FSM to get dcache to re-run
        # a request when MMU_LOOKUP completes.
        m.d.comb += self.d_validblip.eq(rising_edge(m, self.d_valid))
        ldst_r = LDSTRequest("ldst_r")  # latched copy of the request
        sync += Display("MMUTEST: LoadStore1 d_in.error=%i",d_in.error)

        # fsm skeleton
        with m.Switch(self.state):
            with m.Case(State.IDLE):
                sync += self.load_data_delay.eq(0)  # clear out
                with m.If((self.d_validblip | self.instr_fault) &
                          ~exc.happened):
                    comb += self.busy.eq(1)
                    sync += self.state.eq(State.ACK_WAIT)
                    sync += ldst_r.eq(self.req) # copy of LDSTRequest on "blip"
                    # sync += Display("validblip self.req.virt_mode=%i",
                    # self.req.virt_mode)
                    with m.If(self.instr_fault):
                        # instruction-side fault: hand over to the MMU
                        comb += mmureq.eq(1)
                        sync += self.r_instr_fault.eq(1)
                        comb += maddr.eq(self.maddr)
                        sync += self.state.eq(State.MMU_LOOKUP)
                    with m.Else():
                        sync += self.r_instr_fault.eq(0)
                        # if the LD/ST requires two dwords, move to waiting
                        # for first word
                        with m.If(self.req.alignstate == Misalign.NEED2WORDS):
                            sync += ldst_r.alignstate.eq(Misalign.WAITFIRST)
                with m.Else():
                    sync += ldst_r.eq(0)

            # waiting for completion
            with m.Case(State.ACK_WAIT):
                sync += Display("MMUTEST: ACK_WAIT")
                comb += self.busy.eq(~exc.happened)

                with m.If(d_in.error):
                    # cache error is not necessarily "final", it could
                    # be that it was just a TLB miss
                    with m.If(d_in.cache_paradox):
                        comb += exception.eq(1)
                        sync += self.state.eq(State.IDLE)
                        sync += ldst_r.eq(0)
                        sync += Display("cache error -> update dsisr")
                        sync += self.dsisr[63 - 38].eq(~ldst_r.load)
                        # XXX there is no architected bit for this
                        # (probably should be a machine check in fact)
                        sync += self.dsisr[63 - 35].eq(d_in.cache_paradox)
                        sync += self.r_instr_fault.eq(0)

                    with m.Else():
                        # Look up the translation for TLB miss
                        # and also for permission error and RC error
                        # in case the PTE has been updated.
                        comb += mmureq.eq(1)
                        sync += self.state.eq(State.MMU_LOOKUP)
                with m.If(d_in.valid):
                    with m.If(self.done):
                        sync += Display("ACK_WAIT, done %x", self.raddr)
                    with m.If(ldst_r.alignstate == Misalign.ONEWORD):
                        # done if there is only one dcache operation
                        sync += self.state.eq(State.IDLE)
                        sync += ldst_r.eq(0)
                        with m.If(ldst_r.load):
                            m.d.comb += self.load_data.eq(d_in.data)
                            sync += self.load_data_delay[0:64].eq(d_in.data)
                        m.d.comb += self.done.eq(~mmureq) # done if not MMU
                    with m.Elif(ldst_r.alignstate == Misalign.WAITFIRST):
                        # first LD done: load data, initiate 2nd request.
                        # leave in ACK_WAIT state
                        with m.If(ldst_r.load):
                            # NOTE(review): [0:63] is only 63 bits wide,
                            # unlike [0:64] on load_data_delay below -
                            # looks like an off-by-one; harmless only if
                            # the load_data comb path is unused - confirm
                            m.d.comb += self.load_data[0:63].eq(d_in.data)
                            sync += self.load_data_delay[0:64].eq(d_in.data)
                        with m.Else():
                            m.d.sync += d_out.data.eq(self.store_data2)
                        # mmm kinda cheating, make a 2nd blip.
                        # use an aligned version of the address
                        m.d.comb += self.d_validblip.eq(1)
                        comb += self.req.eq(ldst_r) # from copy of request
                        comb += self.req.raddr.eq(self.next_addr)
                        comb += self.req.byte_sel.eq(ldst_r.byte_sel[8:])
                        comb += self.req.alignstate.eq(Misalign.WAITSECOND)
                        sync += ldst_r.raddr.eq(self.next_addr)
                        sync += ldst_r.byte_sel.eq(ldst_r.byte_sel[8:])
                        sync += ldst_r.alignstate.eq(Misalign.WAITSECOND)
                        sync += Display(" second req %x", self.req.raddr)
                    with m.Elif(ldst_r.alignstate == Misalign.WAITSECOND):
                        sync += Display(" done second %x", d_in.data)
                        # done second load
                        sync += self.state.eq(State.IDLE)
                        sync += ldst_r.eq(0)
                        with m.If(ldst_r.load):
                            m.d.comb += self.load_data[64:128].eq(d_in.data)
                            sync += self.load_data_delay[64:128].eq(d_in.data)
                        m.d.comb += self.done.eq(~mmureq) # done if not MMU

            # waiting here for the MMU TLB lookup to complete.
            # either re-try the dcache lookup or throw MMU exception
            with m.Case(State.MMU_LOOKUP):
                comb += self.busy.eq(~exception)
                with m.If(m_in.done):
                    with m.If(~self.r_instr_fault):
                        sync += Display("MMU_LOOKUP, done %x -> %x",
                                        self.raddr, d_out.addr)
                        # retry the request now that the MMU has
                        # installed a TLB entry, if not exception raised
                        m.d.comb += self.d_out.valid.eq(~exception)
                        sync += self.state.eq(State.ACK_WAIT)
                    with m.Else():
                        sync += self.state.eq(State.IDLE)
                        sync += self.r_instr_fault.eq(0)
                        comb += self.done.eq(1)

                with m.If(m_in.err):
                    # MMU RADIX exception thrown. XXX
                    # TODO: critical that the write here has to
                    # notify the MMU FSM of the change to dsisr
                    comb += exception.eq(1)
                    comb += self.done.eq(1)
                    sync += Display("MMU RADIX exception thrown")
                    sync += self.dsisr[63 - 33].eq(m_in.invalid)
                    sync += self.dsisr[63 - 36].eq(m_in.perm_error) # noexec
                    sync += self.dsisr[63 - 38].eq(~ldst_r.load)
                    sync += self.dsisr[63 - 44].eq(m_in.badtree)
                    sync += self.dsisr[63 - 45].eq(m_in.rc_error)
                    sync += self.state.eq(State.IDLE)
                    # exception thrown, clear out instruction fault state
                    sync += self.r_instr_fault.eq(0)

        # MMU FSM communicating a request to update DSISR or DAR (OP_MTSPR)
        with m.If(self.mmu_set_spr):
            with m.If(self.mmu_set_dsisr):
                sync += self.dsisr.eq(self.sprval_in)
            with m.If(self.mmu_set_dar):
                sync += self.dar.eq(self.sprval_in)

        # hmmm, alignment occurs in set_rd_addr/set_wr_addr, note exception
        with m.If(self.align_intr):
            comb += exc.happened.eq(1)
        # check for updating DAR
        with m.If(exception):
            sync += Display("exception %x", self.raddr)
            # alignment error: store address in DAR
            with m.If(self.align_intr):
                sync += Display("alignment error: addr in DAR %x", self.raddr)
                sync += self.dar.eq(self.raddr)
            with m.Elif(~self.r_instr_fault):
                sync += Display("not instr fault, addr in DAR %x", self.raddr)
                sync += self.dar.eq(self.raddr)

        # when done or exception, return to idle state
        with m.If(self.done | exception):
            sync += self.state.eq(State.IDLE)
            comb += self.busy.eq(0)

        # happened, alignment, instr_fault, invalid.
        # note that all of these flow through - eventually to the TRAP
        # pipeline, via PowerDecoder2.
        comb += self.align_intr.eq(self.req.align_intr)
        comb += exc.invalid.eq(m_in.invalid)
        comb += exc.alignment.eq(self.align_intr)
        comb += exc.instr_fault.eq(self.r_instr_fault)
        # badtree, perm_error, rc_error, segment_fault
        comb += exc.badtree.eq(m_in.badtree)
        comb += exc.perm_error.eq(m_in.perm_error)
        comb += exc.rc_error.eq(m_in.rc_error)
        comb += exc.segment_fault.eq(m_in.segerr)
        # conditions for 0x400 trap need these in SRR1
        with m.If(exception & ~exc.alignment & exc.instr_fault):
            comb += exc.srr1[14].eq(exc.invalid)    # 47-33
            comb += exc.srr1[12].eq(exc.perm_error) # 47-35
            comb += exc.srr1[3].eq(exc.badtree)     # 47-44
            comb += exc.srr1[2].eq(exc.rc_error)    # 47-45

        # TODO, connect dcache wb_in/wb_out to "standard" nmigen Wishbone bus
        comb += dbus.adr.eq(dcache.bus.adr)
        comb += dbus.dat_w.eq(dcache.bus.dat_w)
        comb += dbus.sel.eq(dcache.bus.sel)
        comb += dbus.cyc.eq(dcache.bus.cyc)
        comb += dbus.stb.eq(dcache.bus.stb)
        comb += dbus.we.eq(dcache.bus.we)

        comb += dcache.bus.dat_r.eq(dbus.dat_r)
        comb += dcache.bus.ack.eq(dbus.ack)
        if hasattr(dbus, "stall"):
            comb += dcache.bus.stall.eq(dbus.stall)

        # update out d data when flag set, for first half (second done in FSM)
        with m.If(self.d_w_valid):
            m.d.sync += d_out.data.eq(self.store_data)
        #with m.Else():
        #    m.d.sync += d_out.data.eq(0)
        # unit test passes with that change

        # this must move into the FSM, conditionally noticing that
        # the "blip" comes from self.d_validblip.
        # task 1: look up in dcache
        # task 2: if dcache fails, look up in MMU.
        # do **NOT** confuse the two.
        with m.If(self.d_validblip):
            # blip cycle: drive dcache directly from the *incoming* request
            m.d.comb += self.d_out.valid.eq(~exc.happened)
            m.d.comb += d_out.load.eq(self.req.load)
            m.d.comb += d_out.byte_sel.eq(self.req.byte_sel)
            m.d.comb += self.raddr.eq(self.req.raddr)
            m.d.comb += d_out.nc.eq(self.req.nc)
            m.d.comb += d_out.priv_mode.eq(self.req.priv_mode)
            m.d.comb += d_out.virt_mode.eq(self.req.virt_mode)
            m.d.comb += d_out.reserve.eq(self.req.reserve)
            m.d.comb += d_out.atomic.eq(self.req.atomic)
            m.d.comb += d_out.atomic_last.eq(self.req.atomic_last)
            #m.d.comb += Display("validblip dcbz=%i addr=%x",
            #self.req.dcbz,self.req.addr)
            m.d.comb += d_out.dcbz.eq(self.req.dcbz)
        with m.Else():
            # otherwise drive dcache from the *latched* request copy
            m.d.comb += d_out.load.eq(ldst_r.load)
            m.d.comb += d_out.byte_sel.eq(ldst_r.byte_sel)
            m.d.comb += self.raddr.eq(ldst_r.raddr)
            m.d.comb += d_out.nc.eq(ldst_r.nc)
            m.d.comb += d_out.priv_mode.eq(ldst_r.priv_mode)
            m.d.comb += d_out.virt_mode.eq(ldst_r.virt_mode)
            m.d.comb += d_out.reserve.eq(ldst_r.reserve)
            m.d.comb += d_out.atomic.eq(ldst_r.atomic)
            m.d.comb += d_out.atomic_last.eq(ldst_r.atomic_last)
            #m.d.comb += Display("no_validblip dcbz=%i addr=%x",
            #ldst_r.dcbz,ldst_r.addr)
            m.d.comb += d_out.dcbz.eq(ldst_r.dcbz)
        m.d.comb += d_out.addr.eq(self.raddr)

        # Update outputs to MMU
        m.d.comb += m_out.valid.eq(mmureq)
        m.d.comb += m_out.iside.eq(self.instr_fault)
        m.d.comb += m_out.load.eq(ldst_r.load)
        with m.If(self.instr_fault):
            m.d.comb += m_out.priv.eq(self.priv_mode)
        with m.Else():
            m.d.comb += m_out.priv.eq(ldst_r.priv_mode)
        m.d.comb += m_out.tlbie.eq(self.tlbie)
        # m_out.mtspr <= mmu_mtspr; # TODO
        # m_out.sprn <= sprn; # TODO
        m.d.comb += m_out.addr.eq(maddr)
        # m_out.slbia <= l_in.insn(7); # TODO: no idea what this is
        # m_out.rs <= l_in.data; # nope, probably not needed, TODO investigate

        return m

    def ports(self):
        yield from super().ports()
        # TODO: memory ports
506
507
class TestSRAMLoadStore1(LoadStore1):
    """LoadStore1 variant whose wishbone data bus is wired to a small
    internal SRAM, for unit testing without an external memory model.
    """
    def __init__(self, pspec):
        super().__init__(pspec)
        pspec = self.pspec
        # small Memory for test purposes: 32 entries by default, or
        # overridden via pspec.dmem_test_depth
        if (hasattr(pspec, "dmem_test_depth") and
                isinstance(pspec.dmem_test_depth, int)):
            depth = pspec.dmem_test_depth
        else:
            depth = 32
        print("TestSRAMBareLoadStoreUnit depth", depth)

        self.mem = Memory(width=pspec.reg_wid, depth=depth)

    def elaborate(self, platform):
        m = super().elaborate(platform)
        comb = m.d.comb
        m.submodules.sram = sram = SRAM(memory=self.mem, granularity=8,
                                        features={'cti', 'bte', 'err'})
        dbus = self.dbus

        # directly connect the wishbone bus of LoadStoreUnitInterface to SRAM
        # note: SRAM is a target (slave), dbus is initiator (master)
        fanouts = ['dat_w', 'sel', 'cyc', 'stb', 'we', 'cti', 'bte']
        fanins = ['dat_r', 'ack', 'err']
        for fanout in fanouts:
            print("fanout", fanout, getattr(sram.bus, fanout).shape(),
                  getattr(dbus, fanout).shape())
            # bug-fix: this connection was previously (redundantly) made twice
            comb += getattr(sram.bus, fanout).eq(getattr(dbus, fanout))
        for fanin in fanins:
            comb += getattr(dbus, fanin).eq(getattr(sram.bus, fanin))
        # connect address
        comb += sram.bus.adr.eq(dbus.adr)

        return m
544