create PGTBL Record and use it in MMU page_table_idle
[soc.git] / src / soc / experiment / mmu.py
1 # MMU
2 #
3 # License for original copyright mmu.vhdl by microwatt authors: CC4
4 # License for copyrighted modifications made in mmu.py: LGPLv3+
5 #
6 # This derivative work although includes CC4 licensed material is
7 # covered by the LGPLv3+
8
9 """MMU
10
11 based on Anton Blanchard microwatt mmu.vhdl
12
13 """
14 from enum import Enum, unique
15 from nmigen import (C, Module, Signal, Elaboratable, Mux, Cat, Repl, Signal)
16 from nmigen.cli import main
17 from nmigen.cli import rtlil
18 from nmutil.iocontrol import RecordObject
19 from nmutil.byterev import byte_reverse
20 from nmutil.mask import Mask, masked
21 from nmutil.util import Display
22
23 # NOTE: to use cxxsim, export NMIGEN_SIM_MODE=cxxsim from the shell
24 # Also, check out the cxxsim nmigen branch, and latest yosys from git
25 from nmutil.sim_tmp_alternative import Simulator, Settle
26
27 from nmutil.util import wrap
28
29 from soc.experiment.mem_types import (LoadStore1ToMMUType,
30 MMUToLoadStore1Type,
31 MMUToDCacheType,
32 DCacheToMMUType,
33 MMUToICacheType)
34
# Radix Tree Page Table Entry Record, TODO put this somewhere sensible
# v3.0C Book III p1016 section 7.7.10.2
class RTPTE(RecordObject):
    """Radix Tree Page Table Entry (leaf-PTE view of a 64-bit doubleword).

    Fields are declared LSB-first so that ``rtpte.eq(data)`` maps the
    byte-reversed doubleword read from memory straight onto the named
    fields.  Each comment gives the MSB0 bit numbers from the spec
    followed by the LSB0 positions.  NOTE: declaration order defines the
    Record layout -- do not reorder fields.
    """
    def __init__(self, name=None):
        super().__init__(name=name)
        self.eaa   = Signal(4)  # Encoded Access Auth bits 60:63 LSB0 0:3
        self.att   = Signal(2)  # Attributes bits 58:59 LSB0 4:5
        self.rs1   = Signal(1)  # Reserved bit 57 LSB0 6
        self.c     = Signal(1)  # Change bit 56 LSB0 7
        self.r     = Signal(1)  # Reference bit 55 LSB0 8
        self.sw    = Signal(3)  # SW bits 1:3 bits 52:54 LSB0 9:11
        self.rpn   = Signal(45) # Real Page Number bits 7:51 LSB0 12:56
        self.rs2   = Signal(4)  # Reserved bit 3:6 LSB0 57-60
        self.sw0   = Signal(1)  # SW bit 0 bit 2 LSB0 61
        self.leaf  = Signal(1)  # leaf bit 1 LSB0 62
        self.valid = Signal(1)  # valid bit 0 LSB0 63
51
# Encoded Access Authority (EAA) bit indices within RTPTE.eaa.  The spec
# numbers these MSB0; the values below are the corresponding LSB0 indices,
# i.e. which of course are turned round to LSB0 order.
# TODO: sigh. use botchify and put them in openpower.consts
EAA_PRIV = 3 # bit 0 (in MSB0) set ==> problem-state banned (priv=1 only)
EAA_RD = 2   # bit 1 (in MSB0) set ==> loads are permitted
EAA_WR = 1   # bit 2 (in MSB0) set ==> load and stores permitted
EAA_EXE = 0  # bit 3 (in MSB0) set ==> execute permitted

# for debugging: when True, extra Display statements report the reason a
# translation was marked invalid (see radix_tree_idle / proc_tbl_wait /
# radix_read_wait)
display_invalid = True
61
@unique
class State(Enum):
    """States of the main radix-walk FSM (see MMU.elaborate)."""
    IDLE = 0             # zero is default on reset for r.state
    DO_TLBIE = 1         # issue TLB-invalidate request to the D-cache
    TLB_WAIT = 2         # wait for D-cache to acknowledge (d_in.done)
    PROC_TBL_READ = 3    # issue read of the process-table entry
    PROC_TBL_WAIT = 4    # wait for process-table entry data
    SEGMENT_CHECK = 5    # validate address against radix tree size
    RADIX_LOOKUP = 6     # issue read of the next tree level
    RADIX_READ_WAIT = 7  # wait for tree-level read data
    RADIX_LOAD_TLB = 8   # load the translated PTE into D-TLB/I-TLB
    RADIX_FINISH = 9     # report done/error back to loadstore1
74
75
class RegStage(RecordObject):
    """MMU register stage: all state latched from one cycle to the next.

    Two instances exist in MMU.elaborate: ``r`` (current registered
    state) and ``rin`` (combinatorial next state); mmu_0 performs the
    single synchronous ``r <= rin`` update.
    """
    def __init__(self, name=None):
        super().__init__(name=name)
        # latched request from loadstore1
        self.valid = Signal()       # a request is in flight
        self.iside = Signal()       # instruction-side (I-cache) request
        self.store = Signal()       # request is a store (not load/ifetch)
        self.priv = Signal()        # privileged (problem-state=0) access
        self.addr = Signal(64)      # effective address being translated
        self.inval_all = Signal()   # invalidate all TLB entries
        # config SPRs
        self.prtbl = Signal(64)     # process table base SPR
        self.pid = Signal(32)       # current PID SPR
        # internal state
        self.state = Signal(State) # resets to IDLE
        self.done = Signal()        # translation complete (success)
        self.err = Signal()         # translation complete (error)
        self.pgtbl0 = Signal(64)    # cached proc-tbl entry, user quadrant
        self.pt0_valid = Signal()   # pgtbl0 is valid
        self.pgtbl3 = Signal(64)    # cached proc-tbl entry, kernel quadrant
        self.pt3_valid = Signal()   # pgtbl3 is valid
        self.shift = Signal(6)      # remaining address bits to translate
        self.mask_size = Signal(5)  # index width of current tree level
        self.pgbase = Signal(56)    # base address of current tree level
        self.pde = Signal(64)       # last page directory/table entry read
        # error flags reported via l_out
        self.invalid = Signal()
        self.badtree = Signal()
        self.segerror = Signal()
        self.perm_err = Signal()
        self.rc_error = Signal()
106
107
# Page Table Record - note that HR bit is treated as part of rts below
# v3.0C Book III Section 6.7.6.1 p1003
class PGTBL(RecordObject):
    """Process/Page Table entry (prtbl) fields, LSB-first layout.

    ``pgtbl.eq(data)`` maps a 64-bit table entry onto the named fields.
    radix_tree_idle concatenates rts2|rts1|hr to form the 6-bit radix
    tree size.  NOTE: declaration order defines the layout -- keep it.
    """
    def __init__(self, name=None):
        super().__init__(name=name)
        self.rpds = Signal(5)  # Root Page Directory Size  59:63 LSB0 0:4
        self.rts2 = Signal(3)  # Radix Tree Size part 2    56:58 LSB0 5:7
        self.rpdb = Signal(52) # Root Page Directory Base  4:55  LSB0 8:59
        self.s    = Signal(1)  # Host Secure               3     LSB0 60
        self.rts1 = Signal(2)  # Radix Tree Size part 1    1:2   LSB0 61:62
        self.hr   = Signal(1)  # Host Radix                0     LSB0 63
119
120
class MMU(Elaboratable):
    """Radix MMU

    Supports 4-level trees as in arch 3.0B, but not the
    two-step translation for guests under a hypervisor
    (i.e. there is no gRA -> hRA translation).
    """
    def __init__(self):
        # interfaces: requests in from loadstore1, results back out,
        # plus request/reply ports to the D-cache and a load-only port
        # to the I-cache TLB
        self.l_in = LoadStore1ToMMUType("l_in")    # requests from loadstore1
        self.l_out = MMUToLoadStore1Type("l_out")  # results to loadstore1
        self.d_out = MMUToDCacheType("d_out")      # requests to D-cache
        self.d_in = DCacheToMMUType("d_in")        # replies from D-cache
        self.i_out = MMUToICacheType("i_out")      # TLB loads to I-cache
134
    def radix_tree_idle(self, m, l_in, r, v):
        """IDLE state: wait for and decode a new loadstore1 request.

        Three request kinds are handled:
        * tlbie  -- start a TLB invalidation (DO_TLBIE);
        * mtspr  -- update PID or PRTBL and invalidate cached state;
        * normal -- start a translation: go to SEGMENT_CHECK if the
          process-table entry for the address quadrant is cached,
          otherwise fetch it first (PROC_TBL_READ).

        ``r`` is the current registered state, ``v`` the next state
        being built combinatorially.
        """
        comb = m.d.comb
        sync = m.d.sync

        pt_valid = Signal()
        pgtbl = PGTBL("pgtbl")
        rts = Signal(6)
        mbits = Signal(6)

        # select the cached process-table entry by address quadrant:
        # addr[63]=0 (user) uses pgtbl0, addr[63]=1 (kernel) uses pgtbl3
        with m.If(~l_in.addr[63]):
            comb += pgtbl.eq(r.pgtbl0)
            comb += pt_valid.eq(r.pt0_valid)
        with m.Else():
            comb += pgtbl.eq(r.pgtbl3)
            comb += pt_valid.eq(r.pt3_valid)

        # rts == radix tree size, number of address bits
        # being translated. takes bits 5:7 and 61:63
        comb += rts.eq(Cat(pgtbl.rts2, pgtbl.rts1, pgtbl.hr))

        # mbits == number of address bits to index top
        # level of tree. takes bits 0:4
        comb += mbits.eq(pgtbl.rpds)

        # set v.shift to rts so that we can use finalmask
        # for the segment check.
        # note: rpdb (52 bits long) is truncated to 48 bits
        comb += v.shift.eq(rts)
        comb += v.mask_size.eq(mbits[0:5])
        comb += v.pgbase.eq(Cat(C(0, 8), pgtbl.rpdb[:48])) # bits 8:55

        with m.If(l_in.valid):
            # latch the request; a "store" is anything that is neither
            # a load nor an instruction-side fetch
            comb += v.addr.eq(l_in.addr)
            comb += v.iside.eq(l_in.iside)
            comb += v.store.eq(~(l_in.load | l_in.iside))
            comb += v.priv.eq(l_in.priv)

            comb += Display("state %d l_in.valid addr %x iside %d store %d "
                            "rts %x mbits %x pt_valid %d",
                            v.state, v.addr, v.iside, v.store,
                            rts, mbits, pt_valid)

            with m.If(l_in.tlbie):
                # Invalidate all iTLB/dTLB entries for
                # tlbie with RB[IS] != 0 or RB[AP] != 0,
                # or for slbia
                comb += v.inval_all.eq(l_in.slbia
                                       | l_in.addr[11]
                                       | l_in.addr[10]
                                       | l_in.addr[7]
                                       | l_in.addr[6]
                                       | l_in.addr[5]
                                       )
                # The RIC field of the tlbie instruction
                # comes across on the sprn bus as bits 2--3.
                # RIC=2 flushes process table caches.
                with m.If(l_in.sprn[3]):
                    comb += v.pt0_valid.eq(0)
                    comb += v.pt3_valid.eq(0)
                comb += v.state.eq(State.DO_TLBIE)
            with m.Else():
                comb += v.valid.eq(1)
                with m.If(~pt_valid):
                    # need to fetch process table entry
                    # set v.shift so we can use finalmask
                    # for generating the process table
                    # entry address
                    comb += v.shift.eq(r.prtbl[0:5])
                    comb += v.state.eq(State.PROC_TBL_READ)

                with m.Elif(mbits == 0):
                    # Use RPDS = 0 to disable radix tree walks
                    comb += v.state.eq(State.RADIX_FINISH)
                    comb += v.invalid.eq(1)
                    if(display_invalid):
                        sync += Display("MMUBUG: Use RPDS = 0 to disable"
                                        " radix tree walks")
                with m.Else():
                    comb += v.state.eq(State.SEGMENT_CHECK)

        with m.If(l_in.mtspr):
            # Move to PID needs to invalidate L1 TLBs
            # and cached pgtbl0 value. Move to PRTBL
            # does that plus invalidating the cached
            # pgtbl3 value as well.
            with m.If(~l_in.sprn[9]):
                comb += v.pid.eq(l_in.rs[0:32])
            with m.Else():
                comb += v.prtbl.eq(l_in.rs)
                comb += v.pt3_valid.eq(0)

            comb += v.pt0_valid.eq(0)
            comb += v.inval_all.eq(1)
            comb += v.state.eq(State.DO_TLBIE)
229
230 def proc_tbl_wait(self, m, v, r, data):
231 comb = m.d.comb
232 with m.If(r.addr[63]):
233 comb += v.pgtbl3.eq(data)
234 comb += v.pt3_valid.eq(1)
235 with m.Else():
236 comb += v.pgtbl0.eq(data)
237 comb += v.pt0_valid.eq(1)
238
239 rts = Signal(6)
240 mbits = Signal(6)
241
242 # rts == radix tree size, # address bits being translated
243 comb += rts.eq(Cat(data[5:8], data[61:63]))
244
245 # mbits == # address bits to index top level of tree
246 comb += mbits.eq(data[0:5])
247
248 # set v.shift to rts so that we can use finalmask for the segment check
249 comb += v.shift.eq(rts)
250 comb += v.mask_size.eq(mbits[0:5])
251 comb += v.pgbase.eq(Cat(C(0, 8), data[8:56]))
252
253 with m.If(mbits):
254 comb += v.state.eq(State.SEGMENT_CHECK)
255 with m.Else():
256 comb += v.state.eq(State.RADIX_FINISH)
257 comb += v.invalid.eq(1)
258 if(display_invalid): m.d.sync += Display("MMUBUG: mbits is invalid")
259
    def radix_read_wait(self, m, v, r, d_in, data):
        """RADIX_READ_WAIT state: process one level of the radix walk.

        ``data`` (byte-reversed doubleword from the D-cache) is
        interpreted as an RTPTE.  Three outcomes:
        * valid leaf  -- check permissions (EAA) and R/C bits, then
          either load the TLB or finish with perm/RC error;
        * valid non-leaf -- descend one level (RADIX_LOOKUP), or fail
          with ``badtree`` if the next-level size is out of range;
        * invalid     -- finish with ``invalid`` set (generates a DSI).
        """
        comb = m.d.comb
        sync = m.d.sync

        rpte = RTPTE(name="radix_rpte")

        perm_ok = Signal()
        rc_ok = Signal()
        mbits = Signal(6)
        # aliases into the decoded PTE fields
        valid = rpte.valid
        eaa = rpte.eaa
        leaf = rpte.leaf
        badtree = Signal()

        comb += Display("RDW %016x done %d "
                        "perm %d rc %d mbits %d shf %d "
                        "valid %d leaf %d bad %d",
                        data, d_in.done, perm_ok, rc_ok,
                        mbits, r.shift, valid, leaf, badtree)

        # set pde and interpret as Radix Tree Page Table Entry (leaf=1 case)
        comb += v.pde.eq(data)
        comb += rpte.eq(data)

        # valid & leaf
        with m.If(valid):
            with m.If(leaf):
                # check permissions and RC bits
                with m.If(r.priv | ~eaa[EAA_PRIV]):
                    with m.If(~r.iside):
                        # data access: writable, or readable non-store
                        comb += perm_ok.eq(eaa[EAA_WR] |
                                           (eaa[EAA_RD] & ~r.store))
                    with m.Else():
                        # no IAMR, so no KUEP support for now
                        # deny execute permission if cache inhibited
                        comb += perm_ok.eq(eaa[EAA_EXE] & ~rpte.att[1])

                # Reference must be set; Change must be set for stores
                comb += rc_ok.eq(rpte.r & (rpte.c | ~r.store))
                with m.If(perm_ok & rc_ok):
                    comb += v.state.eq(State.RADIX_LOAD_TLB)
                with m.Else():
                    comb += v.state.eq(State.RADIX_FINISH)
                    comb += v.perm_err.eq(~perm_ok)
                    # permission error takes precedence over RC error
                    comb += v.rc_error.eq(perm_ok)

            # valid & !leaf
            with m.Else():
                # next-level index width, bounds-checked against 5..16
                # and the remaining untranslated bits (r.shift)
                comb += mbits.eq(data[0:5])
                comb += badtree.eq((mbits < 5) |
                                   (mbits > 16) |
                                   (mbits > r.shift))
                with m.If(badtree):
                    comb += v.state.eq(State.RADIX_FINISH)
                    comb += v.badtree.eq(1)
                with m.Else():
                    comb += v.shift.eq(r.shift - mbits)
                    comb += v.mask_size.eq(mbits[0:5])
                    comb += v.pgbase.eq(Cat(C(0, 8), data[8:56]))
                    comb += v.state.eq(State.RADIX_LOOKUP)

        with m.Else():
            # non-present PTE, generate a DSI
            comb += v.state.eq(State.RADIX_FINISH)
            comb += v.invalid.eq(1)
            if(display_invalid):
                sync += Display("MMUBUG: non-present PTE, generate a DSI")
327
    def segment_check(self, m, v, r, data, finalmask):
        """SEGMENT_CHECK state: validate the address against tree size.

        Fails with ``segerror`` if the address is non-canonical (bits 62
        and 63 disagree) or has bits set above the range covered by the
        radix tree (finalmask); fails with ``badtree`` if the top-level
        index width is out of range.  Otherwise proceed to RADIX_LOOKUP.
        """
        comb = m.d.comb

        mbits = Signal(6)
        nonzero = Signal()
        comb += mbits.eq(r.mask_size)
        # remaining address bits after the top level is consumed
        comb += v.shift.eq(r.shift + (31 - 12) - mbits)
        # any address bits set beyond the translated range?
        comb += nonzero.eq((r.addr[31:62] & ~finalmask[0:31]).bool())
        with m.If((r.addr[63] ^ r.addr[62]) | nonzero):
            comb += v.state.eq(State.RADIX_FINISH)
            comb += v.segerror.eq(1)
        with m.Elif((mbits < 5) | (mbits > 16) |
                    (mbits > (r.shift + (31-12)))):
            # top-level index width outside the valid 5..16 range, or
            # larger than the bits actually available to translate
            comb += v.state.eq(State.RADIX_FINISH)
            comb += v.badtree.eq(1)
        with m.Else():
            comb += v.state.eq(State.RADIX_LOOKUP)
345
346 def mmu_0(self, m, r, rin, l_in, l_out, d_out, addrsh, mask):
347 comb = m.d.comb
348 sync = m.d.sync
349
350 # Multiplex internal SPR values back to loadstore1,
351 # selected by l_in.sprn.
352 with m.If(l_in.sprn[9]):
353 comb += l_out.sprval.eq(r.prtbl)
354 with m.Else():
355 comb += l_out.sprval.eq(r.pid)
356
357 with m.If(rin.valid):
358 sync += Display("MMU got tlb miss for %x", rin.addr)
359
360 with m.If(l_out.done):
361 sync += Display("MMU completing op without error")
362
363 with m.If(l_out.err):
364 sync += Display("MMU completing op with err invalid="
365 "%d badtree=%d", l_out.invalid, l_out.badtree)
366
367 with m.If(rin.state == State.RADIX_LOOKUP):
368 sync += Display ("radix lookup shift=%d msize=%d",
369 rin.shift, rin.mask_size)
370
371 with m.If(r.state == State.RADIX_LOOKUP):
372 sync += Display(f"send load addr=%x addrsh=%d mask=%x",
373 d_out.addr, addrsh, mask)
374 sync += r.eq(rin)
375
    def elaborate(self, platform):
        """Build the MMU: register stage, address-generation datapath,
        the main radix-walk FSM, and the connections to loadstore1,
        D-cache and I-cache.
        """
        m = Module()

        comb = m.d.comb
        sync = m.d.sync

        # NOTE(review): addrsh is declared here and consumed below (pg16)
        # and in mmu_0's debug trace, but is never assigned, so it is
        # always 0.  microwatt's mmu.vhdl derives addrsh from the request
        # address shifted right by r.shift -- confirm whether that logic
        # is missing here (the self-test below happens to use index 0).
        addrsh = Signal(16)
        mask = Signal(16)       # page-table index extraction mask
        finalmask = Signal(44)  # bits translated by the tree (for TLB/segcheck)

        self.rin = rin = RegStage("r_in")  # combinatorial next state
        r = RegStage("r")                  # registered state (updated in mmu_0)

        # get access to prtbl and pid for debug / testing purposes ONLY
        # (actually, not needed, because setup_regs() triggers mmu direct)
        # self._prtbl = r.prtbl
        # self._pid = r.pid

        l_in = self.l_in
        l_out = self.l_out
        d_out = self.d_out
        d_in = self.d_in
        i_out = self.i_out

        self.mmu_0(m, r, rin, l_in, l_out, d_out, addrsh, mask)

        v = RegStage()          # working next-state, defaulted from r below
        dcreq = Signal()        # issue a D-cache request this cycle
        tlb_load = Signal()     # load PTE into the D-TLB
        itlb_load = Signal()    # load PTE into the I-TLB
        tlbie_req = Signal()    # TLB-invalidate request
        prtbl_rd = Signal()     # process-table read request
        effpid = Signal(32)     # effective PID (0 for kernel quadrant)
        prtb_adr = Signal(64)   # process-table entry address
        pgtb_adr = Signal(64)   # page-table entry address
        pte = Signal(64)        # final PTE value loaded into the TLB
        tlb_data = Signal(64)
        addr = Signal(64)

        # defaults: carry registered state forward, clear one-shot flags
        comb += v.eq(r)
        comb += v.valid.eq(0)
        comb += dcreq.eq(0)
        comb += v.done.eq(0)
        comb += v.err.eq(0)
        comb += v.invalid.eq(0)
        comb += v.badtree.eq(0)
        comb += v.segerror.eq(0)
        comb += v.perm_err.eq(0)
        comb += v.rc_error.eq(0)
        comb += tlb_load.eq(0)
        comb += itlb_load.eq(0)
        comb += tlbie_req.eq(0)
        comb += v.inval_all.eq(0)
        comb += prtbl_rd.eq(0)

        # Radix tree data structures in memory are
        # big-endian, so we need to byte-swap them
        data = byte_reverse(m, "data", d_in.data, 8)

        # generate mask for extracting address fields for PTE addr generation
        m.submodules.pte_mask = pte_mask = Mask(16-5)
        comb += pte_mask.shift.eq(r.mask_size - 5)
        comb += mask.eq(Cat(C(0x1f, 5), pte_mask.mask))

        # generate mask for extracting address bits to go in
        # TLB entry in order to support pages > 4kB
        m.submodules.tlb_mask = tlb_mask = Mask(44)
        comb += tlb_mask.shift.eq(r.shift)
        comb += finalmask.eq(tlb_mask.mask)

        with m.If(r.state != State.IDLE):
            sync += Display("MMU state %d %016x", r.state, data)

        ##########
        # Main FSM
        ##########

        with m.Switch(r.state):
            with m.Case(State.IDLE):
                self.radix_tree_idle(m, l_in, r, v)

            with m.Case(State.DO_TLBIE):
                comb += dcreq.eq(1)
                comb += tlbie_req.eq(1)
                comb += v.state.eq(State.TLB_WAIT)

            with m.Case(State.TLB_WAIT):
                with m.If(d_in.done):
                    comb += v.state.eq(State.RADIX_FINISH)

            with m.Case(State.PROC_TBL_READ):
                sync += Display("       TBL_READ %016x", prtb_adr)
                comb += dcreq.eq(1)
                comb += prtbl_rd.eq(1)
                comb += v.state.eq(State.PROC_TBL_WAIT)

            with m.Case(State.PROC_TBL_WAIT):
                with m.If(d_in.done):
                    self.proc_tbl_wait(m, v, r, data)

                with m.If(d_in.err):
                    comb += v.state.eq(State.RADIX_FINISH)
                    comb += v.badtree.eq(1)

            with m.Case(State.SEGMENT_CHECK):
                self.segment_check(m, v, r, data, finalmask)

            with m.Case(State.RADIX_LOOKUP):
                sync += Display("       RADIX_LOOKUP")
                comb += dcreq.eq(1)
                comb += v.state.eq(State.RADIX_READ_WAIT)

            with m.Case(State.RADIX_READ_WAIT):
                sync += Display("       READ_WAIT")
                with m.If(d_in.done):
                    self.radix_read_wait(m, v, r, d_in, data)
                with m.If(d_in.err):
                    comb += v.state.eq(State.RADIX_FINISH)
                    comb += v.badtree.eq(1)

            with m.Case(State.RADIX_LOAD_TLB):
                comb += tlb_load.eq(1)
                with m.If(~r.iside):
                    # data-side: hand the PTE to the D-cache TLB
                    comb += dcreq.eq(1)
                    comb += v.state.eq(State.TLB_WAIT)
                with m.Else():
                    # instruction-side: I-TLB load needs no wait
                    comb += itlb_load.eq(1)
                    comb += v.state.eq(State.IDLE)

            with m.Case(State.RADIX_FINISH):
                sync += Display("       RADIX_FINISH")
                comb += v.state.eq(State.IDLE)

        # report done/err when finishing (or on i-side TLB load, which
        # skips RADIX_FINISH)
        with m.If((v.state == State.RADIX_FINISH) |
                  ((v.state == State.RADIX_LOAD_TLB) & r.iside)):
            comb += v.err.eq(v.invalid | v.badtree | v.segerror
                             | v.perm_err | v.rc_error)
            comb += v.done.eq(~v.err)

        # kernel-quadrant addresses use PID 0
        with m.If(~r.addr[63]):
            comb += effpid.eq(r.pid)

        # calculate Process Table Address
        pr24 = Signal(24, reset_less=True)
        comb += pr24.eq(masked(r.prtbl[12:36], effpid[8:32], finalmask))
        comb += prtb_adr.eq(Cat(C(0, 4), effpid[0:8], pr24, r.prtbl[36:56]))

        # calculate Page Table Address
        pg16 = Signal(16, reset_less=True)
        comb += pg16.eq(masked(r.pgbase[3:19], addrsh, mask))
        comb += pgtb_adr.eq(Cat(C(0, 3), pg16, r.pgbase[19:56]))

        # calculate Page Table Entry from Real Page Number (leaf=1, RTPTE)
        rpte = RTPTE(name="rpte")
        comb += rpte.eq(r.pde)
        pd44 = Signal(44, reset_less=True)
        comb += pd44.eq(masked(rpte.rpn, r.addr[12:56], finalmask))
        comb += pte.eq(Cat(r.pde[0:12], pd44))

        # update registers
        comb += rin.eq(v)

        # drive outputs: select which address goes out on d_out/i_out
        with m.If(tlbie_req):
            comb += addr.eq(r.addr)
        with m.Elif(tlb_load):
            comb += addr.eq(Cat(C(0, 12), r.addr[12:64]))
            comb += tlb_data.eq(pte)
        with m.Elif(prtbl_rd):
            comb += addr.eq(prtb_adr)
        with m.Else():
            comb += addr.eq(pgtb_adr)

        # connect to other interfaces: LDST, D-Cache, I-Cache
        comb += l_out.done.eq(r.done)
        comb += l_out.err.eq(r.err)
        comb += l_out.invalid.eq(r.invalid)
        comb += l_out.badtree.eq(r.badtree)
        comb += l_out.segerr.eq(r.segerror)
        comb += l_out.perm_error.eq(r.perm_err)
        comb += l_out.rc_error.eq(r.rc_error)

        comb += d_out.valid.eq(dcreq)
        comb += d_out.tlbie.eq(tlbie_req)
        comb += d_out.doall.eq(r.inval_all)
        comb += d_out.tlbld.eq(tlb_load)
        comb += d_out.addr.eq(addr)
        comb += d_out.pte.eq(tlb_data)

        comb += i_out.tlbld.eq(itlb_load)
        comb += i_out.tlbie.eq(tlbie_req)
        comb += i_out.doall.eq(r.inval_all)
        comb += i_out.addr.eq(addr)
        comb += i_out.pte.eq(tlb_data)

        return m
572
# global flag shared by the simulator processes below: set by either
# process to shut the other one down
stop = False
574
def dcache_get(dut):
    """simulator process for getting memory load requests

    Acts as a stand-in D-cache: waits for d_out.valid, looks the address
    up in a small in-memory radix-tree image, drives d_in.data and pulses
    d_in.done for one cycle.  An address missing from ``mem`` stops the
    whole simulation via the global ``stop`` flag.
    """

    global stop

    def b(x):
        # radix-tree structures in memory are big-endian; byte-swap the
        # literal so the MMU's byte_reverse() recovers the original value
        return int.from_bytes(x.to_bytes(8, byteorder='little'),
                              byteorder='big', signed=False)

    mem = {0x0: 0x000000, # to get mtspr prtbl working

           0x10000: # PARTITION_TABLE_2
                    # PATB_GR=1 PRTB=0x1000 PRTS=0xb
           b(0x800000000100000b),

           0x30000: # RADIX_ROOT_PTE
                    # V = 1 L = 0 NLB = 0x400 NLS = 9
           b(0x8000000000040009),

           0x40000: # RADIX_SECOND_LEVEL
                    # V = 1 L = 1 SW = 0 RPN = 0
                    # R = 1 C = 1 ATT = 0 EAA 0x7
           b(0xc000000000000187),

           0x1000000: # PROCESS_TABLE_3
                      # RTS1 = 0x2 RPDB = 0x300 RTS2 = 0x5 RPDS = 13
           b(0x40000000000300ad),
          }

    while not stop:
        while True: # wait for dc_valid
            if stop:
                return
            dc_valid = yield (dut.d_out.valid)
            if dc_valid:
                break
            yield
        addr = yield dut.d_out.addr
        if addr not in mem:
            print ("    DCACHE LOOKUP FAIL %x" % (addr))
            stop = True
            return

        yield
        data = mem[addr]
        yield dut.d_in.data.eq(data)
        print ("    DCACHE GET %x data %x" % (addr, data))
        yield dut.d_in.done.eq(1)
        yield
        yield dut.d_in.done.eq(0)
626
def mmu_wait(dut):
    """Poll the MMU until it reports completion or any error, then drop
    the request lines (valid/mtspr/load) so the FSM can return to idle.
    """
    global stop
    completion_sigs = (dut.l_out.done, dut.l_out.err, dut.l_out.badtree,
                       dut.l_out.perm_error, dut.l_out.rc_error,
                       dut.l_out.segerr, dut.l_out.invalid)
    while not stop:  # spin until done or any error flag goes high
        flags = []
        for sig in completion_sigs:
            flags.append((yield sig))
        if any(flags):
            break
        yield
    # request has been captured by the MMU's register stage by now,
    # so it is safe to deassert everything
    yield dut.l_in.valid.eq(0)
    yield dut.l_in.mtspr.eq(0)
    yield dut.l_in.load.eq(0)
644
def mmu_sim(dut):
    """Main simulator process: set PRTBL via a fake mtspr, then request
    a privileged load translation of address 0x10000 and print the
    result.  Sets the global ``stop`` flag when finished.
    """
    global stop

    # MMU MTSPR set prtbl
    yield dut.l_in.mtspr.eq(1)
    yield dut.l_in.sprn[9].eq(1) # totally fake way to set SPR=prtbl
    yield dut.l_in.rs.eq(0x1000000) # set process table
    yield dut.l_in.valid.eq(1)
    yield from mmu_wait(dut)
    yield
    yield dut.l_in.sprn.eq(0)
    yield dut.l_in.rs.eq(0)
    yield

    # confirm the SPR write reached the register stage
    prtbl = yield (dut.rin.prtbl)
    print ("prtbl after MTSPR %x" % prtbl)
    assert prtbl == 0x1000000

    #yield dut.rin.prtbl.eq(0x1000000) # manually set process table
    #yield


    # MMU PTE request
    yield dut.l_in.load.eq(1)
    yield dut.l_in.priv.eq(1)
    yield dut.l_in.addr.eq(0x10000)
    yield dut.l_in.valid.eq(1)
    yield from mmu_wait(dut)

    # read back the translation outcome for display
    addr = yield dut.d_out.addr
    pte = yield dut.d_out.pte
    l_done = yield (dut.l_out.done)
    l_err = yield (dut.l_out.err)
    l_badtree = yield (dut.l_out.badtree)
    print ("translated done %d err %d badtree %d addr %x pte %x" % \
           (l_done, l_err, l_badtree, addr, pte))
    yield
    yield dut.l_in.priv.eq(0)
    yield dut.l_in.addr.eq(0)


    stop = True
687
688
def test_mmu():
    """Build the MMU, dump its RTLIL netlist, then run the PTE-lookup
    simulation with mmu_sim driving requests and dcache_get serving
    memory, recording a VCD trace.
    """
    mmu = MMU()
    il_text = rtlil.convert(mmu, ports=[])
    with open("test_mmu.il", "w") as il_file:
        il_file.write(il_text)

    top = Module()
    top.submodules.mmu = mmu

    # nmigen Simulation
    sim = Simulator(top)
    sim.add_clock(1e-6)

    sim.add_sync_process(wrap(mmu_sim(mmu)))
    sim.add_sync_process(wrap(dcache_get(mmu)))
    with sim.write_vcd('test_mmu.vcd'):
        sim.run()
706
# run standalone: python mmu.py
if __name__ == '__main__':
    test_mmu()