1f69ec1f46f3db29857e84316dcfeb898306b30e
[soc.git] / src / soc / experiment / mmu.py
1 # MMU
2 #
3 # License for original copyright mmu.vhdl by microwatt authors: CC4
4 # License for copyrighted modifications made in mmu.py: LGPLv3+
5 #
6 # This derivative work although includes CC4 licensed material is
7 # covered by the LGPLv3+
8
9 """MMU
10
11 based on Anton Blanchard microwatt mmu.vhdl
12
13 """
14 from enum import Enum, unique
15 from nmigen import (C, Module, Signal, Elaboratable, Mux, Cat, Repl, Signal)
16 from nmigen.cli import main
17 from nmigen.cli import rtlil
18 from nmutil.iocontrol import RecordObject
19 from nmutil.byterev import byte_reverse
20 from nmutil.mask import Mask, masked
21 from nmutil.util import Display
22
23 # NOTE: to use cxxsim, export NMIGEN_SIM_MODE=cxxsim from the shell
24 # Also, check out the cxxsim nmigen branch, and latest yosys from git
25 from nmutil.sim_tmp_alternative import Simulator, Settle
26
27 from nmutil.util import wrap
28
29 from soc.experiment.mem_types import (LoadStore1ToMMUType,
30 MMUToLoadStore1Type,
31 MMUToDCacheType,
32 DCacheToMMUType,
33 MMUToICacheType)
34
35 # Radix Tree Page Table Entry Record, TODO put this somewhere sensible
36 # v3.0C Book III p1016 section 7.7.10.2
class RTPTE(RecordObject):
    """Radix Tree Page Table Entry (Power ISA v3.0C Book III p1016
    section 7.7.10.2).

    Fields are declared low-bit-first (LSB0), so the MSB0 bit numbers
    from the spec appear reversed in the LSB0 column of each comment.
    Field order is the wire layout: this record is assigned directly
    from a 64-bit data word with .eq(), so do not reorder fields.
    """
    def __init__(self, name=None):
        super().__init__(name=name)
        self.eaa = Signal(4)   # Encoded Access Auth bits 60:63 LSB0 0:3
        self.att = Signal(2)   # Attributes bits 58:59 LSB0 4:5
        self.rs1 = Signal(1)   # Reserved bit 57 LSB0 6
        self.c = Signal(1)     # Change bit 56 LSB0 7
        self.r = Signal(1)     # Reference bit 55 LSB0 8
        self.sw = Signal(3)    # SW bits 1:3 bits 52:54 LSB0 9:11
        self.rpn = Signal(45)  # Real Page Number bits 7:51 LSB0 12:56
        self.rs2 = Signal(4)   # Reserved bit 3:6 LSB0 57-60
        self.sw0 = Signal(1)   # SW bit 0 bit 2 LSB0 61
        self.leaf = Signal(1)  # leaf bit 1 LSB0 62
        self.valid = Signal(1) # valid bit 0 LSB0 63
51
# and these... which of course are turned round to LSB0 order.
# TODO: sigh. use botchify and put them in openpower.consts
# Bit indices into RTPTE.eaa (LSB0 numbering) used by the permission
# checks in radix_read_wait().
EAA_PRIV = 3 # bit 0 (in MSB0) set ==> problem-state banned (priv=1 only)
EAA_RD = 2   # bit 1 (in MSB0) set ==> loads are permitted
EAA_WR = 1   # bit 2 (in MSB0) set ==> load and stores permitted
EAA_EXE = 0  # bit 3 (in MSB0) set ==> execute permitted

# for debugging: when True, extra "MMUBUG" Display messages are emitted
# whenever a translation is reported invalid
display_invalid = True
61
@unique
class State(Enum):
    """Main FSM states for the radix-tree walk (see MMU.elaborate)."""
    IDLE = 0            # zero is default on reset for r.state
    DO_TLBIE = 1        # send TLB-invalidate request to D-cache
    TLB_WAIT = 2        # wait for D-cache done acknowledgement
    PROC_TBL_READ = 3   # issue read of process table entry
    PROC_TBL_WAIT = 4   # wait for process table entry data
    SEGMENT_CHECK = 5   # validate address against radix tree size
    RADIX_LOOKUP = 6    # issue read of next radix tree level
    RADIX_READ_WAIT = 7 # wait for PTE / directory entry data
    RADIX_LOAD_TLB = 8  # load translated PTE into dTLB or iTLB
    RADIX_FINISH = 9    # report done / error back to loadstore1
74
75
class RegStage(RecordObject):
    """All internal MMU state, registered once per clock (rin -> r)."""
    def __init__(self, name=None):
        super().__init__(name=name)
        # latched request from loadstore1
        self.valid = Signal()
        self.iside = Signal()     # instruction-side (I-cache) access
        self.store = Signal()     # request is neither a load nor iside
        self.priv = Signal()      # privileged access
        self.addr = Signal(64)
        self.inval_all = Signal() # invalidate-all flag for tlbie/slbia
        # config SPRs
        self.prtbl = Signal(64)   # process table base SPR
        self.pid = Signal(32)     # current PID SPR
        # internal state
        self.state = Signal(State) # resets to IDLE
        self.done = Signal()
        self.err = Signal()
        self.pgtbl0 = Signal(64)  # cached quadrant-0 process tbl entry
        self.pt0_valid = Signal()
        self.pgtbl3 = Signal(64)  # cached quadrant-3 process tbl entry
        self.pt3_valid = Signal()
        self.shift = Signal(6)    # address bits left to translate
        self.mask_size = Signal(5) # index width at current tree level
        self.pgbase = Signal(56)  # current page table base address
        self.pde = Signal(64)     # last page directory/table entry read
        # error flags, reported via l_out when reaching RADIX_FINISH
        self.invalid = Signal()
        self.badtree = Signal()
        self.segerror = Signal()
        self.perm_err = Signal()
        self.rc_error = Signal()
106
107
108 class MMU(Elaboratable):
109 """Radix MMU
110
111 Supports 4-level trees as in arch 3.0B, but not the
112 two-step translation for guests under a hypervisor
113 (i.e. there is no gRA -> hRA translation).
114 """
    def __init__(self):
        # request/response interface to/from loadstore1
        self.l_in = LoadStore1ToMMUType("l_in")
        self.l_out = MMUToLoadStore1Type("l_out")
        # D-cache interface: table reads, dTLB loads, tlbie
        self.d_out = MMUToDCacheType("d_out")
        self.d_in = DCacheToMMUType("d_in")
        # I-cache interface: iTLB load / invalidate only
        self.i_out = MMUToICacheType("i_out")
121
    def radix_tree_idle(self, m, l_in, r, v):
        """IDLE state: decode the incoming loadstore1 request.

        Handles three request types:
        * l_in.valid -- translation request: next state is PROC_TBL_READ,
          SEGMENT_CHECK, or RADIX_FINISH (invalid) -- or DO_TLBIE when
          l_in.tlbie is also set
        * l_in.mtspr -- write the PID or PRTBL SPR, then DO_TLBIE
        """
        comb = m.d.comb
        sync = m.d.sync

        pt_valid = Signal()
        pgtbl = Signal(64)
        rts = Signal(6)
        mbits = Signal(6)

        # quadrant select: addr[63]=0 uses the cached pgtbl0 entry,
        # addr[63]=1 (quadrant 3) uses pgtbl3
        with m.If(~l_in.addr[63]):
            comb += pgtbl.eq(r.pgtbl0)
            comb += pt_valid.eq(r.pt0_valid)
        with m.Else():
            comb += pgtbl.eq(r.pgtbl3)
            comb += pt_valid.eq(r.pt3_valid)

        # rts == radix tree size, number of address bits
        # being translated (field is split across bits 5:7 and 61:62)
        comb += rts.eq(Cat(pgtbl[5:8], pgtbl[61:63]))

        # mbits == number of address bits to index top
        # level of tree
        comb += mbits.eq(pgtbl[0:5])

        # set v.shift to rts so that we can use finalmask
        # for the segment check
        comb += v.shift.eq(rts)
        comb += v.mask_size.eq(mbits[0:5])
        comb += v.pgbase.eq(Cat(C(0, 8), pgtbl[8:56]))

        with m.If(l_in.valid):
            # latch the request; "store" is any access that is neither
            # a load nor an instruction-side fetch
            comb += v.addr.eq(l_in.addr)
            comb += v.iside.eq(l_in.iside)
            comb += v.store.eq(~(l_in.load | l_in.iside))
            comb += v.priv.eq(l_in.priv)

            comb += Display("state %d l_in.valid addr %x iside %d store %d "
                            "rts %x mbits %x pt_valid %d",
                            v.state, v.addr, v.iside, v.store,
                            rts, mbits, pt_valid)

            with m.If(l_in.tlbie):
                # Invalidate all iTLB/dTLB entries for
                # tlbie with RB[IS] != 0 or RB[AP] != 0,
                # or for slbia
                comb += v.inval_all.eq(l_in.slbia
                                       | l_in.addr[11]
                                       | l_in.addr[10]
                                       | l_in.addr[7]
                                       | l_in.addr[6]
                                       | l_in.addr[5]
                                       )
                # The RIC field of the tlbie instruction
                # comes across on the sprn bus as bits 2--3.
                # RIC=2 flushes process table caches.
                with m.If(l_in.sprn[3]):
                    comb += v.pt0_valid.eq(0)
                    comb += v.pt3_valid.eq(0)
                comb += v.state.eq(State.DO_TLBIE)
            with m.Else():
                comb += v.valid.eq(1)
                with m.If(~pt_valid):
                    # need to fetch process table entry
                    # set v.shift so we can use finalmask
                    # for generating the process table
                    # entry address
                    comb += v.shift.eq(r.prtbl[0:5])
                    comb += v.state.eq(State.PROC_TBL_READ)

                with m.Elif(mbits == 0):
                    # Use RPDS = 0 to disable radix tree walks
                    comb += v.state.eq(State.RADIX_FINISH)
                    comb += v.invalid.eq(1)
                    if(display_invalid):
                        sync += Display("MMUBUG: Use RPDS = 0 to disable"
                                        " radix tree walks")
                with m.Else():
                    comb += v.state.eq(State.SEGMENT_CHECK)

        # NOTE: placed after the l_in.valid block, so under nmigen's
        # last-assignment-wins semantics an mtspr takes precedence
        with m.If(l_in.mtspr):
            # Move to PID needs to invalidate L1 TLBs
            # and cached pgtbl0 value. Move to PRTBL
            # does that plus invalidating the cached
            # pgtbl3 value as well.
            with m.If(~l_in.sprn[9]):
                comb += v.pid.eq(l_in.rs[0:32])
            with m.Else():
                comb += v.prtbl.eq(l_in.rs)
                comb += v.pt3_valid.eq(0)

            comb += v.pt0_valid.eq(0)
            comb += v.inval_all.eq(1)
            comb += v.state.eq(State.DO_TLBIE)
215
    def proc_tbl_wait(self, m, v, r, data):
        """PROC_TBL_WAIT: a process-table entry arrived from the D-cache.

        Caches the entry (pgtbl0 or pgtbl3 depending on the quadrant of
        the request address) and decodes its RTS/RPDS fields, then moves
        to SEGMENT_CHECK, or to RADIX_FINISH with invalid set if the
        tree-index size (mbits) is zero.
        """
        comb = m.d.comb
        # cache the entry for the quadrant the request came from
        with m.If(r.addr[63]):
            comb += v.pgtbl3.eq(data)
            comb += v.pt3_valid.eq(1)
        with m.Else():
            comb += v.pgtbl0.eq(data)
            comb += v.pt0_valid.eq(1)

        rts = Signal(6)
        mbits = Signal(6)

        # rts == radix tree size, # address bits being translated
        comb += rts.eq(Cat(data[5:8], data[61:63]))

        # mbits == # address bits to index top level of tree
        comb += mbits.eq(data[0:5])

        # set v.shift to rts so that we can use finalmask for the segment check
        comb += v.shift.eq(rts)
        comb += v.mask_size.eq(mbits[0:5])
        comb += v.pgbase.eq(Cat(C(0, 8), data[8:56]))

        with m.If(mbits):
            comb += v.state.eq(State.SEGMENT_CHECK)
        with m.Else():
            # RPDS == 0 disables radix walks: report invalid translation
            comb += v.state.eq(State.RADIX_FINISH)
            comb += v.invalid.eq(1)
            if(display_invalid): m.d.sync += Display("MMUBUG: mbits is invalid")
245
    def radix_read_wait(self, m, v, r, d_in, data):
        """RADIX_READ_WAIT: a tree entry arrived from the D-cache.

        Interprets the 64-bit word as an RTPTE.  A valid leaf entry gets
        permission and R/C checks, then proceeds to RADIX_LOAD_TLB (or
        RADIX_FINISH with perm_err/rc_error).  A valid non-leaf entry
        descends one more tree level via RADIX_LOOKUP (or flags badtree
        on an out-of-range next-level size).  An invalid entry finishes
        with invalid set, generating a DSI.
        """
        comb = m.d.comb
        sync = m.d.sync

        rpte = RTPTE(name="radix_rpte")

        perm_ok = Signal()
        rc_ok = Signal()
        mbits = Signal(6)
        valid = rpte.valid
        eaa = rpte.eaa
        leaf = rpte.leaf
        badtree = Signal()

        comb += Display("RDW %016x done %d "
                        "perm %d rc %d mbits %d shf %d "
                        "valid %d leaf %d bad %d",
                        data, d_in.done, perm_ok, rc_ok,
                        mbits, r.shift, valid, leaf, badtree)

        # set pde and interpret as Radix Tree Page Table Entry (leaf=1 case)
        comb += v.pde.eq(data)
        comb += rpte.eq(data)

        # valid & leaf
        with m.If(valid):
            with m.If(leaf):
                # check permissions and RC bits
                # privileged access, or entry not marked priv-only
                with m.If(r.priv | ~eaa[EAA_PRIV]):
                    with m.If(~r.iside):
                        comb += perm_ok.eq(eaa[EAA_WR] |
                                           (eaa[EAA_RD] & ~r.store))
                    with m.Else():
                        # no IAMR, so no KUEP support for now
                        # deny execute permission if cache inhibited
                        comb += perm_ok.eq(eaa[EAA_EXE] & ~rpte.att[1])

                # Reference must be set; Change must be set for a store
                comb += rc_ok.eq(rpte.r & (rpte.c | ~r.store))
                with m.If(perm_ok & rc_ok):
                    comb += v.state.eq(State.RADIX_LOAD_TLB)
                with m.Else():
                    comb += v.state.eq(State.RADIX_FINISH)
                    comb += v.perm_err.eq(~perm_ok)
                    # permission error takes precedence over RC error
                    comb += v.rc_error.eq(perm_ok)

            # valid & !leaf
            with m.Else():
                comb += mbits.eq(data[0:5])
                # next-level index must be 5..16 bits and must not
                # exceed the address bits remaining (r.shift)
                comb += badtree.eq((mbits < 5) |
                                   (mbits > 16) |
                                   (mbits > r.shift))
                with m.If(badtree):
                    comb += v.state.eq(State.RADIX_FINISH)
                    comb += v.badtree.eq(1)
                with m.Else():
                    comb += v.shift.eq(r.shift - mbits)
                    comb += v.mask_size.eq(mbits[0:5])
                    comb += v.pgbase.eq(Cat(C(0, 8), data[8:56]))
                    comb += v.state.eq(State.RADIX_LOOKUP)

        with m.Else():
            # non-present PTE, generate a DSI
            comb += v.state.eq(State.RADIX_FINISH)
            comb += v.invalid.eq(1)
            if(display_invalid):
                sync += Display("MMUBUG: non-present PTE, generate a DSI")
313
    def segment_check(self, m, v, r, data, finalmask):
        """SEGMENT_CHECK: validate the effective address against the
        radix tree size before starting the walk.

        Flags segerror when address bits above the translated range are
        not a consistent extension of the quadrant bits, badtree when
        the top-level index size is out of range; otherwise proceeds to
        RADIX_LOOKUP.
        """
        comb = m.d.comb

        mbits = Signal(6)
        nonzero = Signal()
        comb += mbits.eq(r.mask_size)
        # address bits still to translate after the top-level index
        # (r.shift currently holds rts; +(31-12) rescales, see finalmask)
        comb += v.shift.eq(r.shift + (31 - 12) - mbits)
        # any address bit set above the range covered by finalmask?
        comb += nonzero.eq((r.addr[31:62] & ~finalmask[0:31]).bool())
        with m.If((r.addr[63] ^ r.addr[62]) | nonzero):
            comb += v.state.eq(State.RADIX_FINISH)
            comb += v.segerror.eq(1)
        with m.Elif((mbits < 5) | (mbits > 16) |
                    (mbits > (r.shift + (31-12)))):
            comb += v.state.eq(State.RADIX_FINISH)
            comb += v.badtree.eq(1)
        with m.Else():
            comb += v.state.eq(State.RADIX_LOOKUP)
331
332 def mmu_0(self, m, r, rin, l_in, l_out, d_out, addrsh, mask):
333 comb = m.d.comb
334 sync = m.d.sync
335
336 # Multiplex internal SPR values back to loadstore1,
337 # selected by l_in.sprn.
338 with m.If(l_in.sprn[9]):
339 comb += l_out.sprval.eq(r.prtbl)
340 with m.Else():
341 comb += l_out.sprval.eq(r.pid)
342
343 with m.If(rin.valid):
344 sync += Display("MMU got tlb miss for %x", rin.addr)
345
346 with m.If(l_out.done):
347 sync += Display("MMU completing op without error")
348
349 with m.If(l_out.err):
350 sync += Display("MMU completing op with err invalid="
351 "%d badtree=%d", l_out.invalid, l_out.badtree)
352
353 with m.If(rin.state == State.RADIX_LOOKUP):
354 sync += Display ("radix lookup shift=%d msize=%d",
355 rin.shift, rin.mask_size)
356
357 with m.If(r.state == State.RADIX_LOOKUP):
358 sync += Display(f"send load addr=%x addrsh=%d mask=%x",
359 d_out.addr, addrsh, mask)
360 sync += r.eq(rin)
361
    def elaborate(self, platform):
        """Build the MMU: register stage, mask generation, the main FSM,
        PTE address calculation, and output connections to loadstore1,
        D-cache and I-cache.
        """
        m = Module()

        comb = m.d.comb
        sync = m.d.sync

        addrsh = Signal(16)     # address-derived page-table index
        mask = Signal(16)       # selects address bits for PTE address
        finalmask = Signal(44)  # selects address bits going into the TLB

        # NOTE(review): addrsh is declared but never assigned anywhere in
        # this module, so it remains 0 in the pg16 calculation below --
        # microwatt's mmu.vhdl derives it from the request address and
        # r.shift; confirm whether that logic is missing here.

        self.rin = rin = RegStage("r_in")
        r = RegStage("r")

        # get access to prtbl and pid for debug / testing purposes ONLY
        # (actually, not needed, because setup_regs() triggers mmu direct)
        # self._prtbl = r.prtbl
        # self._pid = r.pid

        l_in = self.l_in
        l_out = self.l_out
        d_out = self.d_out
        d_in = self.d_in
        i_out = self.i_out

        # register stage (r <= rin) plus SPR readback / debug tracing
        self.mmu_0(m, r, rin, l_in, l_out, d_out, addrsh, mask)

        v = RegStage()          # combinatorial "next value" of r
        dcreq = Signal()        # request strobe to the D-cache
        tlb_load = Signal()     # load a dTLB entry this cycle
        itlb_load = Signal()    # load an iTLB entry this cycle
        tlbie_req = Signal()    # TLB invalidate request
        prtbl_rd = Signal()     # address selects process (not page) table
        effpid = Signal(32)     # effective PID (0 for quadrant 3)
        prtb_adr = Signal(64)   # process table entry address
        pgtb_adr = Signal(64)   # page table entry address
        pte = Signal(64)        # PTE value to load into the TLB
        tlb_data = Signal(64)
        addr = Signal(64)

        # defaults: carry r forward with all one-cycle strobes cleared;
        # the FSM below overrides individual fields
        comb += v.eq(r)
        comb += v.valid.eq(0)
        comb += dcreq.eq(0)
        comb += v.done.eq(0)
        comb += v.err.eq(0)
        comb += v.invalid.eq(0)
        comb += v.badtree.eq(0)
        comb += v.segerror.eq(0)
        comb += v.perm_err.eq(0)
        comb += v.rc_error.eq(0)
        comb += tlb_load.eq(0)
        comb += itlb_load.eq(0)
        comb += tlbie_req.eq(0)
        comb += v.inval_all.eq(0)
        comb += prtbl_rd.eq(0)

        # Radix tree data structures in memory are
        # big-endian, so we need to byte-swap them
        data = byte_reverse(m, "data", d_in.data, 8)

        # generate mask for extracting address fields for PTE addr generation
        m.submodules.pte_mask = pte_mask = Mask(16-5)
        comb += pte_mask.shift.eq(r.mask_size - 5)
        comb += mask.eq(Cat(C(0x1f, 5), pte_mask.mask))

        # generate mask for extracting address bits to go in
        # TLB entry in order to support pages > 4kB
        m.submodules.tlb_mask = tlb_mask = Mask(44)
        comb += tlb_mask.shift.eq(r.shift)
        comb += finalmask.eq(tlb_mask.mask)

        # debug trace of every non-idle cycle
        with m.If(r.state != State.IDLE):
            sync += Display("MMU state %d %016x", r.state, data)

        ##########
        # Main FSM
        ##########

        with m.Switch(r.state):
            with m.Case(State.IDLE):
                self.radix_tree_idle(m, l_in, r, v)

            with m.Case(State.DO_TLBIE):
                comb += dcreq.eq(1)
                comb += tlbie_req.eq(1)
                comb += v.state.eq(State.TLB_WAIT)

            with m.Case(State.TLB_WAIT):
                with m.If(d_in.done):
                    comb += v.state.eq(State.RADIX_FINISH)

            with m.Case(State.PROC_TBL_READ):
                sync += Display(" TBL_READ %016x", prtb_adr)
                comb += dcreq.eq(1)
                comb += prtbl_rd.eq(1)
                comb += v.state.eq(State.PROC_TBL_WAIT)

            with m.Case(State.PROC_TBL_WAIT):
                with m.If(d_in.done):
                    self.proc_tbl_wait(m, v, r, data)

                with m.If(d_in.err):
                    comb += v.state.eq(State.RADIX_FINISH)
                    comb += v.badtree.eq(1)

            with m.Case(State.SEGMENT_CHECK):
                self.segment_check(m, v, r, data, finalmask)

            with m.Case(State.RADIX_LOOKUP):
                sync += Display(" RADIX_LOOKUP")
                comb += dcreq.eq(1)
                comb += v.state.eq(State.RADIX_READ_WAIT)

            with m.Case(State.RADIX_READ_WAIT):
                sync += Display(" READ_WAIT")
                with m.If(d_in.done):
                    self.radix_read_wait(m, v, r, d_in, data)
                with m.If(d_in.err):
                    comb += v.state.eq(State.RADIX_FINISH)
                    comb += v.badtree.eq(1)

            with m.Case(State.RADIX_LOAD_TLB):
                comb += tlb_load.eq(1)
                with m.If(~r.iside):
                    # data-side: hand the PTE to the D-cache TLB
                    comb += dcreq.eq(1)
                    comb += v.state.eq(State.TLB_WAIT)
                with m.Else():
                    # instruction-side: load iTLB, no wait required
                    comb += itlb_load.eq(1)
                    comb += v.state.eq(State.IDLE)

            with m.Case(State.RADIX_FINISH):
                sync += Display(" RADIX_FINISH")
                comb += v.state.eq(State.IDLE)

        # completion/error reporting: done/err become valid one cycle
        # later via the rin -> r register stage
        with m.If((v.state == State.RADIX_FINISH) |
                  ((v.state == State.RADIX_LOAD_TLB) & r.iside)):
            comb += v.err.eq(v.invalid | v.badtree | v.segerror
                             | v.perm_err | v.rc_error)
            comb += v.done.eq(~v.err)

        # quadrant 0 uses the PID; quadrant 3 keeps effpid = 0
        with m.If(~r.addr[63]):
            comb += effpid.eq(r.pid)

        # calculate Process Table Address
        pr24 = Signal(24, reset_less=True)
        comb += pr24.eq(masked(r.prtbl[12:36], effpid[8:32], finalmask))
        comb += prtb_adr.eq(Cat(C(0, 4), effpid[0:8], pr24, r.prtbl[36:56]))

        # calculate Page Table Address
        pg16 = Signal(16, reset_less=True)
        comb += pg16.eq(masked(r.pgbase[3:19], addrsh, mask))
        comb += pgtb_adr.eq(Cat(C(0, 3), pg16, r.pgbase[19:56]))

        # calculate Page Table Entry from Real Page Number (leaf=1, RTPTE)
        rpte = RTPTE(name="rpte")
        comb += rpte.eq(r.pde)
        pd44 = Signal(44, reset_less=True)
        comb += pd44.eq(masked(rpte.rpn, r.addr[12:56], finalmask))
        comb += pte.eq(Cat(r.pde[0:12], pd44))

        # update registers
        comb += rin.eq(v)

        # drive outputs: select the D-cache address by request type
        with m.If(tlbie_req):
            comb += addr.eq(r.addr)
        with m.Elif(tlb_load):
            comb += addr.eq(Cat(C(0, 12), r.addr[12:64]))
            comb += tlb_data.eq(pte)
        with m.Elif(prtbl_rd):
            comb += addr.eq(prtb_adr)
        with m.Else():
            comb += addr.eq(pgtb_adr)

        # connect to other interfaces: LDST, D-Cache, I-Cache
        comb += l_out.done.eq(r.done)
        comb += l_out.err.eq(r.err)
        comb += l_out.invalid.eq(r.invalid)
        comb += l_out.badtree.eq(r.badtree)
        comb += l_out.segerr.eq(r.segerror)
        comb += l_out.perm_error.eq(r.perm_err)
        comb += l_out.rc_error.eq(r.rc_error)

        comb += d_out.valid.eq(dcreq)
        comb += d_out.tlbie.eq(tlbie_req)
        comb += d_out.doall.eq(r.inval_all)
        comb += d_out.tlbld.eq(tlb_load)
        comb += d_out.addr.eq(addr)
        comb += d_out.pte.eq(tlb_data)

        comb += i_out.tlbld.eq(itlb_load)
        comb += i_out.tlbie.eq(tlbie_req)
        comb += i_out.doall.eq(r.inval_all)
        comb += i_out.addr.eq(addr)
        comb += i_out.pte.eq(tlb_data)

        return m
558
559 stop = False
560
def dcache_get(dut):
    """simulator process for getting memory load requests

    Acts as a fake D-cache: waits for d_out.valid, looks the address up
    in a small canned radix-tree memory image, presents the data on
    d_in.data and pulses d_in.done for one cycle.  Sets the global
    ``stop`` flag on a lookup miss so all processes terminate.
    """

    global stop

    def b(x):
        # byte-swap a 64-bit value: the radix tree structures in memory
        # are big-endian, mirroring what byte_reverse() undoes in the MMU
        return int.from_bytes(x.to_bytes(8, byteorder='little'),
                              byteorder='big', signed=False)

    mem = {0x0: 0x000000, # to get mtspr prtbl working

           0x10000: # PARTITION_TABLE_2
                    # PATB_GR=1 PRTB=0x1000 PRTS=0xb
                    b(0x800000000100000b),

           0x30000: # RADIX_ROOT_PTE
                    # V = 1 L = 0 NLB = 0x400 NLS = 9
                    b(0x8000000000040009),

           0x40000: # RADIX_SECOND_LEVEL
                    # V = 1 L = 1 SW = 0 RPN = 0
                    # R = 1 C = 1 ATT = 0 EAA 0x7
                    b(0xc000000000000187),

           0x1000000: # PROCESS_TABLE_3
                    # RTS1 = 0x2 RPDB = 0x300 RTS2 = 0x5 RPDS = 13
                    b(0x40000000000300ad),
          }

    while not stop:
        while True: # wait for dc_valid
            if stop:
                return
            dc_valid = yield (dut.d_out.valid)
            if dc_valid:
                break
            yield
        addr = yield dut.d_out.addr
        if addr not in mem:
            print (" DCACHE LOOKUP FAIL %x" % (addr))
            stop = True
            return

        yield
        data = mem[addr]
        yield dut.d_in.data.eq(data)
        print (" DCACHE GET %x data %x" % (addr, data))
        # one-cycle done pulse back to the MMU
        yield dut.d_in.done.eq(1)
        yield
        yield dut.d_in.done.eq(0)
612
def mmu_wait(dut):
    """Spin until the MMU reports completion: done, or any of its
    error outputs.  Then deassert the request-strobe inputs (the
    request has been latched inside the MMU by this point)."""
    global stop
    finished = False
    while not (stop or finished):
        # sample every completion/error output of the MMU this cycle
        watched = (dut.l_out.done, dut.l_out.err, dut.l_out.badtree,
                   dut.l_out.perm_error, dut.l_out.rc_error,
                   dut.l_out.segerr, dut.l_out.invalid)
        samples = []
        for sig in watched:
            samples.append((yield sig))
        finished = any(samples)
        if not finished:
            yield
    # drop the one-shot request inputs now that the MMU has them
    yield dut.l_in.valid.eq(0)
    yield dut.l_in.mtspr.eq(0)
    yield dut.l_in.load.eq(0)
630
def mmu_sim(dut):
    """Testbench process: write PRTBL via a faked mtspr, then request a
    translation of address 0x10000 and print the outcome."""
    global stop

    # MMU MTSPR set prtbl
    yield dut.l_in.mtspr.eq(1)
    yield dut.l_in.sprn[9].eq(1) # totally fake way to set SPR=prtbl
    yield dut.l_in.rs.eq(0x1000000) # set process table
    yield dut.l_in.valid.eq(1)
    yield from mmu_wait(dut)
    yield
    yield dut.l_in.sprn.eq(0)
    yield dut.l_in.rs.eq(0)
    yield

    # check the SPR write actually landed in the register stage
    prtbl = yield (dut.rin.prtbl)
    print ("prtbl after MTSPR %x" % prtbl)
    assert prtbl == 0x1000000

    #yield dut.rin.prtbl.eq(0x1000000) # manually set process table
    #yield


    # MMU PTE request
    yield dut.l_in.load.eq(1)
    yield dut.l_in.priv.eq(1)
    yield dut.l_in.addr.eq(0x10000)
    yield dut.l_in.valid.eq(1)
    yield from mmu_wait(dut)

    # read back and report the translation results
    addr = yield dut.d_out.addr
    pte = yield dut.d_out.pte
    l_done = yield (dut.l_out.done)
    l_err = yield (dut.l_out.err)
    l_badtree = yield (dut.l_out.badtree)
    print ("translated done %d err %d badtree %d addr %x pte %x" % \
          (l_done, l_err, l_badtree, addr, pte))
    yield
    yield dut.l_in.priv.eq(0)
    yield dut.l_in.addr.eq(0)


    # tell the other simulation processes to shut down
    stop = True
673
674
def test_mmu():
    """Convert the MMU to RTLIL (test_mmu.il), then run the nmigen
    simulation testbench, writing a VCD trace to test_mmu.vcd."""
    dut = MMU()
    vl = rtlil.convert(dut, ports=[])#dut.ports())
    with open("test_mmu.il", "w") as f:
        f.write(vl)

    m = Module()
    m.submodules.mmu = dut

    # nmigen Simulation
    sim = Simulator(m)
    sim.add_clock(1e-6)

    sim.add_sync_process(wrap(mmu_sim(dut)))
    sim.add_sync_process(wrap(dcache_get(dut)))
    with sim.write_vcd('test_mmu.vcd'):
        sim.run()
692
# run the testbench when executed as a script
if __name__ == '__main__':
    test_mmu()