500024aa5494249919cdd265d91c251966a983b0
[soc.git] / src / soc / fu / ldst / loadstore.py
1 """LoadStore1 FSM.
2
3 based on microwatt loadstore1.vhdl, but conforming to PortInterface.
4 unlike loadstore1.vhdl this does *not* deal with actual Load/Store
5 ops: that job is handled by LDSTCompUnit, which talks to LoadStore1
6 by way of PortInterface. PortInterface is where things need extending,
7 such as adding dcbz support, etc.
8
9 this module basically handles "pure" load / store operations, and
10 its first job is to ask the D-Cache for the data. if that fails,
11 the second task (if virtual memory is enabled) is to ask the MMU
12 to perform a TLB look-up, then to go *back* to the cache and ask again.
13
14 Links:
15
16 * https://bugs.libre-soc.org/show_bug.cgi?id=465
17
18 """
19
20 from nmigen import (Elaboratable, Module, Signal, Shape, unsigned, Cat, Mux,
21 Record, Memory,
22 Const)
23 from nmutil.iocontrol import RecordObject
24 from nmutil.util import rising_edge, Display
25 from enum import Enum, unique
26
27 from soc.experiment.dcache import DCache
28 from soc.experiment.icache import ICache
29 from soc.experiment.pimem import PortInterfaceBase
30 from soc.experiment.mem_types import LoadStore1ToMMUType
31 from soc.experiment.mem_types import MMUToLoadStore1Type
32
33 from soc.minerva.wishbone import make_wb_layout
34 from soc.bus.sram import SRAM
35 from nmutil.util import Display
36
37
@unique
class State(Enum):
    """Top-level LoadStore1 FSM state (encoded into a Signal)."""
    IDLE = 0        # ready to accept a new instruction
    ACK_WAIT = 1    # request issued: waiting for ack from dcache
    MMU_LOOKUP = 2  # waiting for the MMU to look up a translation
    #SECOND_REQ = 3 # second request for unaligned transfer
44
@unique
class Misalign(Enum):
    """Progress of a (possibly dword-straddling) transfer."""
    ONEWORD = 0     # a single dword suffices: the aligned (easy) case
    NEED2WORDS = 1  # straddles a boundary: two transfers must be sent
    WAITFIRST = 2   # first of the two dwords is outstanding
    WAITSECOND = 3  # second of the two dwords is outstanding
51
52
# captures the LDSTRequest from the PortInterface, which "blips" most
# of this at us (pipeline-style).
class LDSTRequest(RecordObject):
    """Latched copy of a single LD/ST request from the PortInterface.

    NOTE(review): field declaration order defines the RecordObject
    layout — do not reorder these Signals.
    """
    def __init__(self, name=None):
        RecordObject.__init__(self, name=name)

        self.load = Signal()               # 1 = load, 0 = store
        self.dcbz = Signal()               # dcbz (zero cache block) op
        self.raddr = Signal(64)            # request (effective) address
        # self.store_data = Signal(64) # this is already sync (on a delay)
        self.byte_sel = Signal(16)         # 16 bits: covers misaligned xfer
        self.nc = Signal() # non-cacheable access
        self.virt_mode = Signal()          # from MSR.DR (see set_rd/wr_addr)
        self.priv_mode = Signal()          # from ~MSR.PR
        self.mode_32bit = Signal() # XXX UNUSED AT PRESENT
        self.alignstate = Signal(Misalign) # progress of alignment request
        self.align_intr = Signal()         # raise alignment interrupt
        # atomic (LR/SC reservation)
        self.reserve = Signal()
        self.atomic = Signal()
        self.atomic_last = Signal()
74
75
# glue logic for microwatt mmu and dcache
class LoadStore1(PortInterfaceBase):
    """Glue logic between PortInterface, D-Cache/I-Cache and the MMU.

    FSM (see State): issue the request to the dcache; on dcache error
    ask the MMU for a translation, then retry the dcache request.
    """
    def __init__(self, pspec):
        """Set up cache submodules, MMU record interfaces and all
        latched LD/ST state.

        pspec supplies reg_wid / addr_wid and (optionally) disable_cache.
        """
        self.pspec = pspec
        self.disable_cache = (hasattr(pspec, "disable_cache") and
                              pspec.disable_cache == True)
        regwid = pspec.reg_wid
        addrwid = pspec.addr_wid

        super().__init__(regwid, addrwid)
        self.dcache = DCache(pspec)
        self.icache = ICache(pspec)
        # these names are from the perspective of here (LoadStore1)
        self.d_out = self.dcache.d_in # in to dcache is out for LoadStore
        self.d_in = self.dcache.d_out # out from dcache is in for LoadStore
        self.i_out = self.icache.i_in # in to icache is out for LoadStore
        self.i_in = self.icache.i_out # out from icache is in for LoadStore
        self.m_out = LoadStore1ToMMUType("m_out") # out *to* MMU
        self.m_in = MMUToLoadStore1Type("m_in") # in *from* MMU
        self.req = LDSTRequest(name="ldst_req")

        # TODO, convert dcache wb_in/wb_out to "standard" nmigen Wishbone bus
        self.dbus = Record(make_wb_layout(pspec))
        self.ibus = Record(make_wb_layout(pspec))

        # for creating a single clock blip to DCache
        self.d_valid = Signal()
        self.d_w_valid = Signal()
        self.d_validblip = Signal()

        # state info for LD/ST
        self.done = Signal()
        self.done_delay = Signal()   # done, one cycle later (see elaborate)
        # latch most of the input request
        self.load = Signal()
        self.tlbie = Signal()
        self.dcbz = Signal()
        self.raddr = Signal(64)
        self.maddr = Signal(64)
        self.store_data = Signal(128) # 128-bit to cope with
        self.load_data = Signal(128) # misalignment
        self.load_data_delay = Signal(128) # perform 2 LD/STs
        self.byte_sel = Signal(16) # also for misaligned, 16-bit
        self.alignstate = Signal(Misalign) # progress of alignment request
        #self.xerc : xer_common_t;
        #self.rc = Signal()
        self.nc = Signal() # non-cacheable access
        self.mode_32bit = Signal() # XXX UNUSED AT PRESENT
        self.state = Signal(State)   # main FSM state
        self.instr_fault = Signal() # indicator to request i-cache MMU lookup
        self.r_instr_fault = Signal() # accessed in external_busy
        self.priv_mode = Signal() # only for instruction fetch (not LDST)
        self.align_intr = Signal()
        self.busy = Signal()
        self.wait_dcache = Signal()
        self.wait_mmu = Signal()
        self.lrsc_misalign = Signal()  # LR/SC address misaligned for its size
        #self.intr_vec : integer range 0 to 16#fff#;
        #self.nia = Signal(64)
        #self.srr1 = Signal(16)
        # use these to set the dsisr or dar respectively
        self.mmu_set_spr = Signal()
        self.mmu_set_dsisr = Signal()
        self.mmu_set_dar = Signal()
        self.sprval_in = Signal(64)

        # ONLY access these read-only, do NOT attempt to change
        self.dsisr = Signal(32)
        self.dar = Signal(64)
145
146 # when external_busy set, do not allow PortInterface to proceed
147 def external_busy(self, m):
148 return self.instr_fault | self.r_instr_fault
149
150 def set_wr_addr(self, m, addr, mask, misalign, msr, is_dcbz):
151 m.d.comb += self.req.load.eq(0) # store operation
152 m.d.comb += self.req.byte_sel.eq(mask)
153 m.d.comb += self.req.raddr.eq(addr)
154 m.d.comb += self.req.priv_mode.eq(~msr.pr) # not-problem ==> priv
155 m.d.comb += self.req.virt_mode.eq(msr.dr) # DR ==> virt
156 m.d.comb += self.req.mode_32bit.eq(~msr.sf) # not-sixty-four ==> 32bit
157 m.d.comb += self.req.dcbz.eq(is_dcbz)
158 with m.If(misalign):
159 m.d.comb += self.req.alignstate.eq(Misalign.NEED2WORDS)
160
161 # m.d.comb += Display("set_wr_addr %i dcbz %i",addr,is_dcbz)
162
163 # option to disable the cache entirely for write
164 if self.disable_cache:
165 m.d.comb += self.req.nc.eq(1)
166
167 # dcbz cannot do no-cache
168 with m.If(is_dcbz & self.req.nc):
169 m.d.comb += self.req.align_intr.eq(1)
170
171 # hmm, rather than add yet another argument to set_wr_addr
172 # read direct from PortInterface
173 m.d.comb += self.req.reserve.eq(self.pi.reserve) # atomic request
174 m.d.comb += self.req.atomic.eq(~self.lrsc_misalign)
175 m.d.comb += self.req.atomic_last.eq(~self.lrsc_misalign)
176
177 return None
178
179 def set_rd_addr(self, m, addr, mask, misalign, msr):
180 m.d.comb += self.d_valid.eq(1)
181 m.d.comb += self.req.load.eq(1) # load operation
182 m.d.comb += self.req.byte_sel.eq(mask)
183 m.d.comb += self.req.raddr.eq(addr)
184 m.d.comb += self.req.priv_mode.eq(~msr.pr) # not-problem ==> priv
185 m.d.comb += self.req.virt_mode.eq(msr.dr) # DR ==> virt
186 m.d.comb += self.req.mode_32bit.eq(~msr.sf) # not-sixty-four ==> 32bit
187 # BAD HACK! disable cacheing on LD when address is 0xCxxx_xxxx
188 # this is for peripherals. same thing done in Microwatt loadstore1.vhdl
189 with m.If(addr[28:] == Const(0xc, 4)):
190 m.d.comb += self.req.nc.eq(1)
191 # option to disable the cache entirely for read
192 if self.disable_cache:
193 m.d.comb += self.req.nc.eq(1)
194 with m.If(misalign):
195 m.d.comb += self.req.alignstate.eq(Misalign.NEED2WORDS)
196
197 # hmm, rather than add yet another argument to set_rd_addr
198 # read direct from PortInterface
199 m.d.comb += self.req.reserve.eq(self.pi.reserve) # atomic request
200 m.d.comb += self.req.atomic.eq(~self.lrsc_misalign)
201 m.d.comb += self.req.atomic_last.eq(~self.lrsc_misalign)
202
203 return None #FIXME return value
204
205 def set_wr_data(self, m, data, wen):
206 # do the "blip" on write data
207 m.d.comb += self.d_valid.eq(1)
208 # put data into comb which is picked up in main elaborate()
209 m.d.comb += self.d_w_valid.eq(1)
210 m.d.comb += self.store_data.eq(data)
211 st_ok = self.done # TODO indicates write data is valid
212 m.d.comb += self.pi.store_done.data.eq(self.d_in.store_done)
213 m.d.comb += self.pi.store_done.ok.eq(1)
214 return st_ok
215
216 def get_rd_data(self, m):
217 ld_ok = self.done_delay # indicates read data is valid
218 data = self.load_data_delay # actual read data
219 return data, ld_ok
220
221 def elaborate(self, platform):
222 m = super().elaborate(platform)
223 comb, sync = m.d.comb, m.d.sync
224
225 # microwatt takes one more cycle before next operation can be issued
226 sync += self.done_delay.eq(self.done)
227 #sync += self.load_data_delay[0:64].eq(self.load_data[0:64])
228
229 # create dcache and icache module
230 m.submodules.dcache = dcache = self.dcache
231 m.submodules.icache = icache = self.icache
232
233 # temp vars
234 d_out, d_in, dbus = self.d_out, self.d_in, self.dbus
235 i_out, i_in, ibus = self.i_out, self.i_in, self.ibus
236 m_out, m_in = self.m_out, self.m_in
237 exc = self.pi.exc_o
238 exception = exc.happened
239 mmureq = Signal()
240
241 # copy of address, but gets over-ridden for instr_fault
242 maddr = Signal(64)
243 m.d.comb += maddr.eq(self.raddr)
244
245 # check for LR/SC misalignment, used in set_rd/wr_addr above
246 comb += self.lrsc_misalign.eq(((self.pi.data_len[0:3]-1) &
247 self.req.raddr[0:3]).bool())
248 with m.If(self.lrsc_misalign & self.req.reserve):
249 m.d.comb += self.req.align_intr.eq(1)
250
251 # create a blip (single pulse) on valid read/write request
252 # this can be over-ridden in the FSM to get dcache to re-run
253 # a request when MMU_LOOKUP completes.
254 m.d.comb += self.d_validblip.eq(rising_edge(m, self.d_valid))
255 ldst_r = LDSTRequest("ldst_r")
256 sync += Display("MMUTEST: LoadStore1 d_in.error=%i",d_in.error)
257
258 # fsm skeleton
259 with m.Switch(self.state):
260 with m.Case(State.IDLE):
261 with m.If((self.d_validblip | self.instr_fault) &
262 ~exc.happened):
263 comb += self.busy.eq(1)
264 sync += self.state.eq(State.ACK_WAIT)
265 sync += ldst_r.eq(self.req) # copy of LDSTRequest on "blip"
266 # sync += Display("validblip self.req.virt_mode=%i",
267 # self.req.virt_mode)
268 with m.If(self.instr_fault):
269 comb += mmureq.eq(1)
270 sync += self.r_instr_fault.eq(1)
271 comb += maddr.eq(self.maddr)
272 sync += self.state.eq(State.MMU_LOOKUP)
273 with m.Else():
274 sync += self.r_instr_fault.eq(0)
275 # if the LD/ST requires two dwords, move to waiting
276 # for first word
277 with m.If(self.req.alignstate == Misalign.NEED2WORDS):
278 sync += ldst_r.alignstate.eq(Misalign.WAITFIRST)
279 with m.Else():
280 sync += ldst_r.eq(0)
281
282 # waiting for completion
283 with m.Case(State.ACK_WAIT):
284 sync += Display("MMUTEST: ACK_WAIT")
285 comb += self.busy.eq(~exc.happened)
286
287 with m.If(d_in.error):
288 # cache error is not necessarily "final", it could
289 # be that it was just a TLB miss
290 with m.If(d_in.cache_paradox):
291 comb += exception.eq(1)
292 sync += self.state.eq(State.IDLE)
293 sync += ldst_r.eq(0)
294 sync += Display("cache error -> update dsisr")
295 sync += self.dsisr[63 - 38].eq(~ldst_r.load)
296 # XXX there is no architected bit for this
297 # (probably should be a machine check in fact)
298 sync += self.dsisr[63 - 35].eq(d_in.cache_paradox)
299 sync += self.r_instr_fault.eq(0)
300
301 with m.Else():
302 # Look up the translation for TLB miss
303 # and also for permission error and RC error
304 # in case the PTE has been updated.
305 comb += mmureq.eq(1)
306 sync += self.state.eq(State.MMU_LOOKUP)
307 with m.If(d_in.valid):
308 with m.If(self.done):
309 sync += Display("ACK_WAIT, done %x", self.raddr)
310 with m.If(ldst_r.alignstate == Misalign.ONEWORD):
311 # done if there is only one dcache operation
312 sync += self.state.eq(State.IDLE)
313 sync += ldst_r.eq(0)
314 with m.If(ldst_r.load):
315 m.d.comb += self.load_data.eq(d_in.data)
316 sync += self.load_data_delay[0:64].eq(d_in.data)
317 m.d.comb += self.done.eq(~mmureq) # done if not MMU
318 with m.Elif(ldst_r.alignstate == Misalign.WAITFIRST):
319 # first LD done: load data, initiate 2nd request.
320 # leave in ACK_WAIT state
321 with m.If(ldst_r.load):
322 m.d.comb += self.load_data[0:63].eq(d_in.data)
323 sync += self.load_data_delay[0:64].eq(d_in.data)
324 # mmm kinda cheating, make a 2nd blip.
325 # use an aligned version of the address
326 addr_aligned, z3 = Signal(64), Const(0, 3)
327 comb += addr_aligned.eq(Cat(z3, ldst_r.raddr[3:]+1))
328 m.d.comb += self.d_validblip.eq(1)
329 comb += self.req.eq(ldst_r) # from copy of request
330 comb += self.req.raddr.eq(addr_aligned)
331 comb += self.req.byte_sel.eq(ldst_r.byte_sel[8:])
332 comb += self.req.alignstate.eq(Misalign.WAITSECOND)
333 sync += ldst_r.raddr.eq(addr_aligned)
334 sync += ldst_r.byte_sel.eq(ldst_r.byte_sel[8:])
335 sync += ldst_r.alignstate.eq(Misalign.WAITSECOND)
336 sync += Display(" second req %x", self.req.raddr)
337 with m.Elif(ldst_r.alignstate == Misalign.WAITSECOND):
338 sync += Display(" done second %x", d_in.data)
339 # done second load
340 sync += self.state.eq(State.IDLE)
341 sync += ldst_r.eq(0)
342 with m.If(ldst_r.load):
343 m.d.comb += self.load_data[64:128].eq(d_in.data)
344 sync += self.load_data_delay[64:128].eq(d_in.data)
345 m.d.comb += self.done.eq(~mmureq) # done if not MMU
346
347 # waiting here for the MMU TLB lookup to complete.
348 # either re-try the dcache lookup or throw MMU exception
349 with m.Case(State.MMU_LOOKUP):
350 comb += self.busy.eq(~exception)
351 with m.If(m_in.done):
352 with m.If(~self.r_instr_fault):
353 sync += Display("MMU_LOOKUP, done %x -> %x",
354 self.raddr, d_out.addr)
355 # retry the request now that the MMU has
356 # installed a TLB entry, if not exception raised
357 m.d.comb += self.d_out.valid.eq(~exception)
358 sync += self.state.eq(State.ACK_WAIT)
359 with m.Else():
360 sync += self.state.eq(State.IDLE)
361 sync += self.r_instr_fault.eq(0)
362 comb += self.done.eq(1)
363
364 with m.If(m_in.err):
365 # MMU RADIX exception thrown. XXX
366 # TODO: critical that the write here has to
367 # notify the MMU FSM of the change to dsisr
368 comb += exception.eq(1)
369 comb += self.done.eq(1)
370 sync += Display("MMU RADIX exception thrown")
371 sync += self.dsisr[63 - 33].eq(m_in.invalid)
372 sync += self.dsisr[63 - 36].eq(m_in.perm_error) # noexec
373 sync += self.dsisr[63 - 38].eq(~ldst_r.load)
374 sync += self.dsisr[63 - 44].eq(m_in.badtree)
375 sync += self.dsisr[63 - 45].eq(m_in.rc_error)
376 sync += self.state.eq(State.IDLE)
377 # exception thrown, clear out instruction fault state
378 sync += self.r_instr_fault.eq(0)
379
380 # MMU FSM communicating a request to update DSISR or DAR (OP_MTSPR)
381 with m.If(self.mmu_set_spr):
382 with m.If(self.mmu_set_dsisr):
383 sync += self.dsisr.eq(self.sprval_in)
384 with m.If(self.mmu_set_dar):
385 sync += self.dar.eq(self.sprval_in)
386
387 # hmmm, alignment occurs in set_rd_addr/set_wr_addr, note exception
388 with m.If(self.align_intr):
389 comb += exc.happened.eq(1)
390 # check for updating DAR
391 with m.If(exception):
392 sync += Display("exception %x", self.raddr)
393 # alignment error: store address in DAR
394 with m.If(self.align_intr):
395 sync += Display("alignment error: addr in DAR %x", self.raddr)
396 sync += self.dar.eq(self.raddr)
397 with m.Elif(~self.r_instr_fault):
398 sync += Display("not instr fault, addr in DAR %x", self.raddr)
399 sync += self.dar.eq(self.raddr)
400
401 # when done or exception, return to idle state
402 with m.If(self.done | exception):
403 sync += self.state.eq(State.IDLE)
404 comb += self.busy.eq(0)
405
406 # happened, alignment, instr_fault, invalid.
407 # note that all of these flow through - eventually to the TRAP
408 # pipeline, via PowerDecoder2.
409 comb += self.align_intr.eq(self.req.align_intr)
410 comb += exc.invalid.eq(m_in.invalid)
411 comb += exc.alignment.eq(self.align_intr)
412 comb += exc.instr_fault.eq(self.r_instr_fault)
413 # badtree, perm_error, rc_error, segment_fault
414 comb += exc.badtree.eq(m_in.badtree)
415 comb += exc.perm_error.eq(m_in.perm_error)
416 comb += exc.rc_error.eq(m_in.rc_error)
417 comb += exc.segment_fault.eq(m_in.segerr)
418 # conditions for 0x400 trap need these in SRR1
419 with m.If(exception & ~exc.alignment & exc.instr_fault):
420 comb += exc.srr1[14].eq(exc.invalid) # 47-33
421 comb += exc.srr1[12].eq(exc.perm_error) # 47-35
422 comb += exc.srr1[3].eq(exc.badtree) # 47-44
423 comb += exc.srr1[2].eq(exc.rc_error) # 47-45
424
425 # TODO, connect dcache wb_in/wb_out to "standard" nmigen Wishbone bus
426 comb += dbus.adr.eq(dcache.bus.adr)
427 comb += dbus.dat_w.eq(dcache.bus.dat_w)
428 comb += dbus.sel.eq(dcache.bus.sel)
429 comb += dbus.cyc.eq(dcache.bus.cyc)
430 comb += dbus.stb.eq(dcache.bus.stb)
431 comb += dbus.we.eq(dcache.bus.we)
432
433 comb += dcache.bus.dat_r.eq(dbus.dat_r)
434 comb += dcache.bus.ack.eq(dbus.ack)
435 if hasattr(dbus, "stall"):
436 comb += dcache.bus.stall.eq(dbus.stall)
437
438 # update out d data when flag set
439 with m.If(self.d_w_valid):
440 with m.If(ldst_r.alignstate == Misalign.WAITSECOND):
441 m.d.sync += d_out.data.eq(self.store_data[64:128])
442 with m.Else():
443 m.d.sync += d_out.data.eq(self.store_data[0:64])
444 #with m.Else():
445 # m.d.sync += d_out.data.eq(0)
446 # unit test passes with that change
447
448 # this must move into the FSM, conditionally noticing that
449 # the "blip" comes from self.d_validblip.
450 # task 1: look up in dcache
451 # task 2: if dcache fails, look up in MMU.
452 # do **NOT** confuse the two.
453 with m.If(self.d_validblip):
454 m.d.comb += self.d_out.valid.eq(~exc.happened)
455 m.d.comb += d_out.load.eq(self.req.load)
456 m.d.comb += d_out.byte_sel.eq(self.req.byte_sel)
457 m.d.comb += self.raddr.eq(self.req.raddr)
458 m.d.comb += d_out.nc.eq(self.req.nc)
459 m.d.comb += d_out.priv_mode.eq(self.req.priv_mode)
460 m.d.comb += d_out.virt_mode.eq(self.req.virt_mode)
461 m.d.comb += d_out.reserve.eq(self.req.reserve)
462 m.d.comb += d_out.atomic.eq(self.req.atomic)
463 m.d.comb += d_out.atomic_last.eq(self.req.atomic_last)
464 #m.d.comb += Display("validblip dcbz=%i addr=%x",
465 #self.req.dcbz,self.req.addr)
466 m.d.comb += d_out.dcbz.eq(self.req.dcbz)
467 with m.Else():
468 m.d.comb += d_out.load.eq(ldst_r.load)
469 m.d.comb += d_out.byte_sel.eq(ldst_r.byte_sel)
470 m.d.comb += self.raddr.eq(ldst_r.raddr)
471 m.d.comb += d_out.nc.eq(ldst_r.nc)
472 m.d.comb += d_out.priv_mode.eq(ldst_r.priv_mode)
473 m.d.comb += d_out.virt_mode.eq(ldst_r.virt_mode)
474 m.d.comb += d_out.reserve.eq(ldst_r.reserve)
475 m.d.comb += d_out.atomic.eq(ldst_r.atomic)
476 m.d.comb += d_out.atomic_last.eq(ldst_r.atomic_last)
477 #m.d.comb += Display("no_validblip dcbz=%i addr=%x",
478 #ldst_r.dcbz,ldst_r.addr)
479 m.d.comb += d_out.dcbz.eq(ldst_r.dcbz)
480 m.d.comb += d_out.addr.eq(self.raddr)
481
482 # Update outputs to MMU
483 m.d.comb += m_out.valid.eq(mmureq)
484 m.d.comb += m_out.iside.eq(self.instr_fault)
485 m.d.comb += m_out.load.eq(ldst_r.load)
486 with m.If(self.instr_fault):
487 m.d.comb += m_out.priv.eq(self.priv_mode)
488 with m.Else():
489 m.d.comb += m_out.priv.eq(ldst_r.priv_mode)
490 m.d.comb += m_out.tlbie.eq(self.tlbie)
491 # m_out.mtspr <= mmu_mtspr; # TODO
492 # m_out.sprn <= sprn; # TODO
493 m.d.comb += m_out.addr.eq(maddr)
494 # m_out.slbia <= l_in.insn(7); # TODO: no idea what this is
495 # m_out.rs <= l_in.data; # nope, probably not needed, TODO investigate
496
497 return m
498
    def ports(self):
        """Yield simulation/synthesis ports from the PortInterface base."""
        yield from super().ports()
        # TODO: memory ports
502
503
class TestSRAMLoadStore1(LoadStore1):
    """LoadStore1 variant backed by a small internal SRAM, for unit
    tests that need a data memory without an external bus model."""
    def __init__(self, pspec):
        super().__init__(pspec)
        pspec = self.pspec
        # small 32-entry Memory, unless pspec.dmem_test_depth overrides
        if (hasattr(pspec, "dmem_test_depth") and
                isinstance(pspec.dmem_test_depth, int)):
            depth = pspec.dmem_test_depth
        else:
            depth = 32
        # BUGFIX: message previously named "TestSRAMBareLoadStoreUnit"
        # (copy-paste from a different class) — report the actual class
        print(type(self).__name__, "depth", depth)

        self.mem = Memory(width=pspec.reg_wid, depth=depth)
517
518 def elaborate(self, platform):
519 m = super().elaborate(platform)
520 comb = m.d.comb
521 m.submodules.sram = sram = SRAM(memory=self.mem, granularity=8,
522 features={'cti', 'bte', 'err'})
523 dbus = self.dbus
524
525 # directly connect the wishbone bus of LoadStoreUnitInterface to SRAM
526 # note: SRAM is a target (slave), dbus is initiator (master)
527 fanouts = ['dat_w', 'sel', 'cyc', 'stb', 'we', 'cti', 'bte']
528 fanins = ['dat_r', 'ack', 'err']
529 for fanout in fanouts:
530 print("fanout", fanout, getattr(sram.bus, fanout).shape(),
531 getattr(dbus, fanout).shape())
532 comb += getattr(sram.bus, fanout).eq(getattr(dbus, fanout))
533 comb += getattr(sram.bus, fanout).eq(getattr(dbus, fanout))
534 for fanin in fanins:
535 comb += getattr(dbus, fanin).eq(getattr(sram.bus, fanin))
536 # connect address
537 comb += sram.bus.adr.eq(dbus.adr)
538
539 return m
540