1 """LoadStore1 FSM.
2
3 based on microwatt loadstore1.vhdl, but conforming to PortInterface.
4 unlike loadstore1.vhdl this does *not* deal with actual Load/Store
5 ops: that job is handled by LDSTCompUnit, which talks to LoadStore1
6 by way of PortInterface. PortInterface is where things need extending,
7 such as adding dcbz support, etc.
8
9 this module basically handles "pure" load / store operations, and
10 its first job is to ask the D-Cache for the data. if that fails,
11 the second task (if virtual memory is enabled) is to ask the MMU
12 to perform a TLB, then to go *back* to the cache and ask again.
13
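minimal instantiation sketch (the pspec attribute names are the ones read
by this class's constructor; the _PSpec class itself is just an illustrative
stand-in for the real pspec object):

    class _PSpec:
        reg_wid = 64            # register / data width
        addr_wid = 64           # address width
        # disable_cache = True  # optional: force accesses non-cacheable

    ldst = LoadStore1(_PSpec())
    # ldst.pi is the PortInterface which LDSTCompUnit talks to
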
Links:

* https://bugs.libre-soc.org/show_bug.cgi?id=465

"""

from nmigen import (Elaboratable, Module, Signal, Shape, unsigned, Cat, Mux,
                    Record, Memory,
                    Const)
from nmutil.util import rising_edge
from enum import Enum, unique

from soc.experiment.dcache import DCache
from soc.experiment.pimem import PortInterfaceBase
from soc.experiment.mem_types import LoadStore1ToMMUType
from soc.experiment.mem_types import MMUToLoadStore1Type

from soc.minerva.wishbone import make_wb_layout
from soc.bus.sram import SRAM


@unique
class State(Enum):
    IDLE = 0            # ready for instruction
    ACK_WAIT = 1        # waiting for ack from dcache
    MMU_LOOKUP = 2      # waiting for MMU to look up translation
    TLBIE_WAIT = 3      # waiting for MMU to finish doing a tlbie


# glue logic for microwatt mmu and dcache
class LoadStore1(PortInterfaceBase):
    def __init__(self, pspec):
        self.pspec = pspec
        self.disable_cache = (hasattr(pspec, "disable_cache") and
                              pspec.disable_cache == True)
        regwid = pspec.reg_wid
        addrwid = pspec.addr_wid

        super().__init__(regwid, addrwid)
        self.dcache = DCache()
        # these names are from the perspective of here (LoadStore1)
        self.d_out = self.dcache.d_in    # in to dcache is out for LoadStore
        self.d_in = self.dcache.d_out    # out from dcache is in for LoadStore
        self.m_out = LoadStore1ToMMUType()  # out *to* MMU
        self.m_in = MMUToLoadStore1Type()   # in *from* MMU

        # TODO, convert dcache wb_in/wb_out to "standard" nmigen Wishbone bus
        self.dbus = Record(make_wb_layout(pspec))

        # for creating a single clock blip to DCache
        self.d_valid = Signal()
        self.d_w_valid = Signal()
        self.d_validblip = Signal()

        # DSISR and DAR cached values. note that the MMU FSM is where
        # these are accessed by OP_MTSPR/OP_MFSPR, on behalf of LoadStore1.
        # by contrast microwatt has the spr set/get done *in* loadstore1.vhdl
        self.dsisr = Signal(64)
        self.dar = Signal(64)

        # state info for LD/ST
        self.done = Signal()
        # latch most of the input request
        self.load = Signal()
        self.tlbie = Signal()
        self.dcbz = Signal()
        self.addr = Signal(64)
        self.store_data = Signal(64)
        self.load_data = Signal(64)
        self.byte_sel = Signal(8)
        self.update = Signal()
        #self.xerc : xer_common_t;
        #self.reserve = Signal()
        #self.atomic = Signal()
        #self.atomic_last = Signal()
        #self.rc = Signal()
        self.nc = Signal()              # non-cacheable access
        self.virt_mode = Signal()
        self.priv_mode = Signal()
        self.state = Signal(State)
        self.instr_fault = Signal()
        self.align_intr = Signal()
        self.busy = Signal()
        self.wait_dcache = Signal()
        self.wait_mmu = Signal()
        #self.mode_32bit = Signal()
        self.wr_sel = Signal(2)
        self.interrupt = Signal()
        #self.intr_vec : integer range 0 to 16#fff#;
        #self.nia = Signal(64)
        #self.srr1 = Signal(16)

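    # the set_*/get_* methods below are overrides of PortInterfaceBase hooks
    # (see soc.experiment.pimem): they translate PortInterface requests into
    # the latched request signals and dcache/MMU FSM handled by this class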
    def set_wr_addr(self, m, addr, mask, misalign, msr_pr):
        m.d.comb += self.load.eq(0)  # store operation

        m.d.comb += self.d_out.load.eq(0)
        m.d.comb += self.byte_sel.eq(mask)
        m.d.comb += self.addr.eq(addr)
        m.d.comb += self.priv_mode.eq(~msr_pr)  # not-problem ==> priv
        m.d.comb += self.virt_mode.eq(msr_pr)   # problem-state ==> virt
        m.d.comb += self.align_intr.eq(misalign)
        # option to disable the cache entirely for write
        if self.disable_cache:
            m.d.comb += self.nc.eq(1)
        return None

    def set_rd_addr(self, m, addr, mask, misalign, msr_pr):
        m.d.comb += self.d_valid.eq(1)
        m.d.comb += self.d_out.valid.eq(self.d_validblip)
        m.d.comb += self.load.eq(1)  # load operation
        m.d.comb += self.d_out.load.eq(1)
        m.d.comb += self.byte_sel.eq(mask)
        m.d.comb += self.align_intr.eq(misalign)
        m.d.comb += self.addr.eq(addr)
        m.d.comb += self.priv_mode.eq(~msr_pr)  # not-problem ==> priv
        m.d.comb += self.virt_mode.eq(msr_pr)   # problem-state ==> virt
        # BAD HACK! disable caching on LD when address is 0xCxxx_xxxx
        # this is for peripherals. same thing done in Microwatt loadstore1.vhdl
        with m.If(addr[28:] == Const(0xc, 4)):
            m.d.comb += self.nc.eq(1)
        # option to disable the cache entirely for read
        if self.disable_cache:
            m.d.comb += self.nc.eq(1)
        return None  # FIXME return value

    def set_wr_data(self, m, data, wen):
        # do the "blip" on write data
        m.d.comb += self.d_valid.eq(1)
        m.d.comb += self.d_out.valid.eq(self.d_validblip)
        # put data into comb which is picked up in main elaborate()
        m.d.comb += self.d_w_valid.eq(1)
        m.d.comb += self.store_data.eq(data)
        #m.d.sync += self.d_out.byte_sel.eq(wen)  # this might not be needed
        st_ok = self.done  # TODO indicates write data is valid
        return st_ok

    def get_rd_data(self, m):
        ld_ok = self.done       # indicates read data is valid
        data = self.load_data   # actual read data
        return data, ld_ok

    def elaborate(self, platform):
        m = super().elaborate(platform)
        comb, sync = m.d.comb, m.d.sync

        # create dcache module
        m.submodules.dcache = dcache = self.dcache

        # temp vars
        d_out, d_in, dbus = self.d_out, self.d_in, self.dbus
        m_out, m_in = self.m_out, self.m_in
        exc = self.pi.exc_o
        exception = exc.happened
        mmureq = Signal()

        # copy of address, but gets over-ridden for OP_FETCH_FAILED
        maddr = Signal(64)
        m.d.comb += maddr.eq(self.addr)

        # create a blip (single pulse) on valid read/write request
        # this can be over-ridden in the FSM to get dcache to re-run
        # a request when MMU_LOOKUP completes
        m.d.comb += self.d_validblip.eq(rising_edge(m, self.d_valid))

        # fsm skeleton
        with m.Switch(self.state):
            with m.Case(State.IDLE):
                with m.If(self.d_validblip):
                    sync += self.state.eq(State.ACK_WAIT)

            # waiting for completion
            with m.Case(State.ACK_WAIT):

                with m.If(d_in.error):
                    # cache error is not necessarily "final", it could
                    # be that it was just a TLB miss
                    with m.If(d_in.cache_paradox):
                        comb += exception.eq(1)
                        sync += self.state.eq(State.IDLE)
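                        # DSISR bits below use the Power ISA MSB0 (big-endian)
                        # bit numbering, hence the "63 - n" index conversion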
                        sync += self.dsisr[63 - 38].eq(~self.load)
                        # XXX there is no architected bit for this
                        # (probably should be a machine check in fact)
                        sync += self.dsisr[63 - 35].eq(d_in.cache_paradox)

                    with m.Else():
                        # Look up the translation for TLB miss
                        # and also for permission error and RC error
                        # in case the PTE has been updated.
                        comb += mmureq.eq(1)
                        sync += self.state.eq(State.MMU_LOOKUP)
                with m.If(d_in.valid):
                    m.d.comb += self.done.eq(1)
                    sync += self.state.eq(State.IDLE)
                    with m.If(self.load):
                        m.d.comb += self.load_data.eq(d_in.data)

            # waiting here for the MMU TLB lookup to complete.
            # either re-try the dcache lookup or throw MMU exception
            with m.Case(State.MMU_LOOKUP):
                with m.If(m_in.done):
                    with m.If(~self.instr_fault):
                        # retry the request now that the MMU has
                        # installed a TLB entry
                        m.d.comb += self.d_validblip.eq(1)  # re-run dcache req
                        sync += self.state.eq(State.ACK_WAIT)
                    with m.Else():
                        # instruction lookup fault: store address in DAR
                        comb += exc.happened.eq(1)
                        sync += self.dar.eq(self.addr)

                with m.If(m_in.err):
                    # MMU RADIX exception thrown
                    comb += exception.eq(1)
                    sync += self.dsisr[63 - 33].eq(m_in.invalid)
                    sync += self.dsisr[63 - 36].eq(m_in.perm_error)
                    sync += self.dsisr[63 - 38].eq(~self.load)  # store ==> 1
                    sync += self.dsisr[63 - 44].eq(m_in.badtree)
                    sync += self.dsisr[63 - 45].eq(m_in.rc_error)

            with m.Case(State.TLBIE_WAIT):
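                # TODO: tlbie completion (m_in.done) is not yet handled here;
                # nothing currently transitions into or out of this state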
                pass

        # alignment error: store address in DAR
        with m.If(self.align_intr):
            comb += exc.happened.eq(1)
            sync += self.dar.eq(self.addr)

        # happened, alignment, instr_fault, invalid.
        # note that all of these flow through - eventually to the TRAP
        # pipeline, via PowerDecoder2.
        comb += exc.invalid.eq(m_in.invalid)
        comb += exc.alignment.eq(self.align_intr)
        comb += exc.instr_fault.eq(self.instr_fault)
        # badtree, perm_error, rc_error, segment_fault
        comb += exc.badtree.eq(m_in.badtree)
        comb += exc.perm_error.eq(m_in.perm_error)
        comb += exc.rc_error.eq(m_in.rc_error)
        comb += exc.segment_fault.eq(m_in.segerr)

        # TODO, connect dcache wb_in/wb_out to "standard" nmigen Wishbone bus
        comb += dbus.adr.eq(dcache.wb_out.adr)
        comb += dbus.dat_w.eq(dcache.wb_out.dat)
        comb += dbus.sel.eq(dcache.wb_out.sel)
        comb += dbus.cyc.eq(dcache.wb_out.cyc)
        comb += dbus.stb.eq(dcache.wb_out.stb)
        comb += dbus.we.eq(dcache.wb_out.we)

        comb += dcache.wb_in.dat.eq(dbus.dat_r)
        comb += dcache.wb_in.ack.eq(dbus.ack)
        if hasattr(dbus, "stall"):
            comb += dcache.wb_in.stall.eq(dbus.stall)

        # write out d data only when flag set
        with m.If(self.d_w_valid):
            m.d.sync += d_out.data.eq(self.store_data)
        with m.Else():
            m.d.sync += d_out.data.eq(0)

        # this must move into the FSM, conditionally noticing that
        # the "blip" comes from self.d_validblip.
        # task 1: look up in dcache
        # task 2: if dcache fails, look up in MMU.
        # do **NOT** confuse the two.
        m.d.comb += d_out.load.eq(self.load)
        m.d.comb += d_out.byte_sel.eq(self.byte_sel)
        m.d.comb += d_out.addr.eq(self.addr)
        m.d.comb += d_out.nc.eq(self.nc)
        m.d.comb += d_out.priv_mode.eq(self.priv_mode)
        m.d.comb += d_out.virt_mode.eq(self.virt_mode)

        # XXX these should be possible to remove but for some reason
        # cannot be... yet. TODO, investigate
        m.d.comb += self.done.eq(d_in.valid)
        m.d.comb += self.load_data.eq(d_in.data)

        # Update outputs to MMU
        m.d.comb += m_out.valid.eq(mmureq)
        m.d.comb += m_out.iside.eq(self.instr_fault)
        m.d.comb += m_out.load.eq(self.load)
        # m_out.priv <= r.priv_mode; TODO
        m.d.comb += m_out.tlbie.eq(self.tlbie)
        # m_out.mtspr <= mmu_mtspr; # TODO
        # m_out.sprn <= sprn; # TODO
        m.d.comb += m_out.addr.eq(maddr)
        # m_out.slbia <= l_in.insn(7); # TODO: no idea what this is
        # m_out.rs <= l_in.data; # nope, probably not needed, TODO investigate

        return m

    def ports(self):
        yield from super().ports()
        # TODO: memory ports


class TestSRAMLoadStore1(LoadStore1):
    def __init__(self, pspec):
        super().__init__(pspec)
        pspec = self.pspec
        # small 32-entry Memory
        if (hasattr(pspec, "dmem_test_depth") and
                isinstance(pspec.dmem_test_depth, int)):
            depth = pspec.dmem_test_depth
        else:
            depth = 32
        print("TestSRAMLoadStore1 depth", depth)

        self.mem = Memory(width=pspec.reg_wid, depth=depth)

    def elaborate(self, platform):
        m = super().elaborate(platform)
        comb = m.d.comb
        m.submodules.sram = sram = SRAM(memory=self.mem, granularity=8,
                                        features={'cti', 'bte', 'err'})
        dbus = self.dbus

        # directly connect the wishbone bus of LoadStore1 to the test SRAM
        # note: SRAM is a target (slave), dbus is initiator (master)
        fanouts = ['dat_w', 'sel', 'cyc', 'stb', 'we', 'cti', 'bte']
        fanins = ['dat_r', 'ack', 'err']
        for fanout in fanouts:
            print("fanout", fanout, getattr(sram.bus, fanout).shape(),
                  getattr(dbus, fanout).shape())
            comb += getattr(sram.bus, fanout).eq(getattr(dbus, fanout))
        for fanin in fanins:
            comb += getattr(dbus, fanin).eq(getattr(sram.bus, fanin))
        # connect address
        comb += sram.bus.adr.eq(dbus.adr)

        return m