add (and use) PRTBL Record in MMU
[soc.git] / src / soc / experiment / mmu.py
1 # MMU
2 #
3 # License for original copyright mmu.vhdl by microwatt authors: CC4
4 # License for copyrighted modifications made in mmu.py: LGPLv3+
5 #
6 # This derivative work although includes CC4 licensed material is
7 # covered by the LGPLv3+
8
9 """MMU
10
11 based on Anton Blanchard microwatt mmu.vhdl
12
13 """
14 from enum import Enum, unique
15 from nmigen import (C, Module, Signal, Elaboratable, Mux, Cat, Repl, Signal)
16 from nmigen.cli import main
17 from nmigen.cli import rtlil
18 from nmutil.iocontrol import RecordObject
19 from nmutil.byterev import byte_reverse
20 from nmutil.mask import Mask, masked
21 from nmutil.util import Display
22
23 # NOTE: to use cxxsim, export NMIGEN_SIM_MODE=cxxsim from the shell
24 # Also, check out the cxxsim nmigen branch, and latest yosys from git
25 from nmutil.sim_tmp_alternative import Simulator, Settle
26
27 from nmutil.util import wrap
28
29 from soc.experiment.mem_types import (LoadStore1ToMMUType,
30 MMUToLoadStore1Type,
31 MMUToDCacheType,
32 DCacheToMMUType,
33 MMUToICacheType)
34
# Radix Tree Page Table Entry Record, TODO put this somewhere sensible
# v3.0C Book III p1016 section 7.7.10.2
class RTPTE(RecordObject):
    """Radix Tree Page Table Entry (leaf PTE), fields in LSB0 order.

    RecordObject lays fields out starting at the least-significant bit,
    so assigning a 64-bit PTE value onto this Record with eq() slices it
    directly into the fields below (eaa at bits 0:3 ... valid at bit 63).
    The MSB0 bit numbers from the spec are given first in each comment,
    followed by the LSB0 positions actually used here.
    """
    def __init__(self, name=None):
        super().__init__(name=name)
        self.eaa = Signal(4)   # Encoded Access Auth bits 60:63 LSB0 0:3
        self.att = Signal(2)   # Attributes bits 58:59 LSB0 4:5
        self.rs1 = Signal(1)   # Reserved bit 57 LSB0 6
        self.c = Signal(1)     # Change bit 56 LSB0 7
        self.r = Signal(1)     # Reference bit 55 LSB0 8
        self.sw = Signal(3)    # SW bits 1:3 bits 52:54 LSB0 9:11
        self.rpn = Signal(45)  # Real Page Number bits 7:51 LSB0 12:56
        self.rs2 = Signal(4)   # Reserved bit 3:6 LSB0 57-60
        self.sw0 = Signal(1)   # SW bit 0 bit 2 LSB0 61
        self.leaf = Signal(1)  # leaf bit 1 LSB0 62
        self.valid = Signal(1) # valid bit 0 LSB0 63
51
# Bit positions within RTPTE.eaa (Encoded Access Authority).
# and these... which of course are turned round to LSB0 order.
# TODO: sigh. use botchify and put them in openpower.consts
EAA_PRIV = 3 # bit 0 (in MSB0) set ==> problem-state banned (priv=1 only)
EAA_RD = 2   # bit 1 (in MSB0) set ==> loads are permitted
EAA_WR = 1   # bit 2 (in MSB0) set ==> load and stores permitted
EAA_EXE = 0  # bit 3 (in MSB0) set ==> execute permitted

# for debugging: when True, extra sync-domain Display statements report
# why a translation was flagged invalid (see radix_tree_idle and friends)
display_invalid = True
61
# Main FSM states.  A full radix walk goes roughly:
# IDLE -> [PROC_TBL_READ -> PROC_TBL_WAIT ->] SEGMENT_CHECK ->
# (RADIX_LOOKUP -> RADIX_READ_WAIT)* -> RADIX_LOAD_TLB -> TLB_WAIT,
# with RADIX_FINISH used for completion (success or error report).
@unique
class State(Enum):
    IDLE = 0            # zero is default on reset for r.state
    DO_TLBIE = 1        # send TLB-invalidate request to D-cache
    TLB_WAIT = 2        # wait for D-cache done (tlbie or TLB load)
    PROC_TBL_READ = 3   # issue read of process-table entry
    PROC_TBL_WAIT = 4   # wait for process-table entry data
    SEGMENT_CHECK = 5   # validate effective address against tree size
    RADIX_LOOKUP = 6    # issue read of next radix-tree level
    RADIX_READ_WAIT = 7 # wait for, then decode, the PDE/PTE just read
    RADIX_LOAD_TLB = 8  # write the translated entry into d/i-TLB
    RADIX_FINISH = 9    # report done / error back to loadstore1
75
# Process Table Record - near-identical to Page Table Record (same format)
# v3.0C Book III Section 6.7.6.2 p1004
class PRTBL(RecordObject):
    """Process Table Entry, fields in LSB0 order (see RTPTE for layout
    convention).  Differs from PGTBL only in that bits 60 and 63 are
    reserved here rather than Host Secure / Host Radix."""
    def __init__(self, name=None):
        super().__init__(name=name)
        self.rpds = Signal(5)  # Root Page Directory Size  59:63 LSB0 0:4
        self.rts2 = Signal(3)  # Radix Tree Size part 2    56:58 LSB0 5:7
        self.rpdb = Signal(52) # Root Page Directory Base  4:55  LSB0 8:59
        self.rsv2 = Signal(1)  # reserved                  3     LSB0 60
        self.rts1 = Signal(2)  # Radix Tree Size part 1    1:2   LSB0 61:62
        self.rsv1 = Signal(1)  # reserved                  0     LSB0 63
87
88
class RegStage(RecordObject):
    """Complete MMU internal state, latched each cycle (r <= rin).

    A combinatorial copy `v` is derived from `r`, modified by the FSM,
    and written back through `rin` (see MMU.elaborate / MMU.mmu_0).
    """
    def __init__(self, name=None):
        super().__init__(name=name)
        # latched request from loadstore1
        self.valid = Signal()
        self.iside = Signal()       # instruction-side (I-cache) request
        self.store = Signal()       # store (not load, not iside)
        self.priv = Signal()        # privileged-state access
        self.addr = Signal(64)
        self.inval_all = Signal()   # invalidate-all flag for tlbie
        # config SPRs
        self.prtbl = Signal(64)     # process table base SPR
        self.pid = Signal(32)       # current PID SPR
        # internal state
        self.state = Signal(State)  # resets to IDLE
        self.done = Signal()
        self.err = Signal()
        self.pgtbl0 = Signal(64)    # cached process-table entry, quadrant 0
        self.pt0_valid = Signal()
        self.pgtbl3 = Signal(64)    # cached process-table entry, quadrant 3
        self.pt3_valid = Signal()
        self.shift = Signal(6)      # remaining address bits to translate
        self.mask_size = Signal(5)  # bits indexing current tree level
        self.pgbase = Signal(56)    # base address of current tree level
        self.pde = Signal(64)       # last page directory/table entry read
        # error flags, reported via l_out in RADIX_FINISH
        self.invalid = Signal()
        self.badtree = Signal()
        self.segerror = Signal()
        self.perm_err = Signal()
        self.rc_error = Signal()
120
# Page Table Record - note that HR bit is treated as part of rts below
# (near-identical to Process Table Record - same format)
# v3.0C Book III Section 6.7.6.1 p1003
class PGTBL(RecordObject):
    """Page Table Entry, fields in LSB0 order (see RTPTE for layout
    convention).  Same shape as PRTBL except bits 60/63 carry the
    Host Secure and Host Radix flags."""
    def __init__(self, name=None):
        super().__init__(name=name)
        self.rpds = Signal(5)  # Root Page Directory Size  59:63 LSB0 0:4
        self.rts2 = Signal(3)  # Radix Tree Size part 2    56:58 LSB0 5:7
        self.rpdb = Signal(52) # Root Page Directory Base  4:55  LSB0 8:59
        self.s = Signal(1)     # Host Secure               3     LSB0 60
        self.rts1 = Signal(2)  # Radix Tree Size part 1    1:2   LSB0 61:62
        self.hr = Signal(1)    # Host Radix                0     LSB0 63
134
class MMU(Elaboratable):
    """Radix MMU

    Supports 4-level trees as in arch 3.0B, but not the
    two-step translation for guests under a hypervisor
    (i.e. there is no gRA -> hRA translation).

    Interfaces:
    * l_in/l_out: request/response to loadstore1
    * d_out/d_in: request/response to the D-cache (table reads, TLB loads)
    * i_out:      TLB load / invalidate to the I-cache
    """
    def __init__(self):
        self.l_in = LoadStore1ToMMUType("l_in")
        self.l_out = MMUToLoadStore1Type("l_out")
        self.d_out = MMUToDCacheType("d_out")
        self.d_in = DCacheToMMUType("d_in")
        self.i_out = MMUToICacheType("i_out")

    def radix_tree_idle(self, m, l_in, r, v):
        """IDLE state: decode the incoming loadstore1 request.

        Handles tlbie (TLB invalidation), mtspr (PID / PRTBL update) and
        translation requests, selecting the next FSM state in `v`.
        """
        comb = m.d.comb
        sync = m.d.sync

        pt_valid = Signal()
        pgtbl = PGTBL("pgtbl")
        rts = Signal(6)
        mbits = Signal(6)

        # quadrant select: top address bit chooses between the two
        # cached process-table entries (pgtbl0 for user addresses,
        # pgtbl3 for kernel ones)
        with m.If(~l_in.addr[63]):
            comb += pgtbl.eq(r.pgtbl0)
            comb += pt_valid.eq(r.pt0_valid)
        with m.Else():
            comb += pgtbl.eq(r.pgtbl3)
            comb += pt_valid.eq(r.pt3_valid)

        # rts == radix tree size, number of address bits
        # being translated.  takes bits 5:7 and 61:63
        comb += rts.eq(Cat(pgtbl.rts2, pgtbl.rts1, pgtbl.hr))

        # mbits == number of address bits to index top
        # level of tree.  takes bits 0:4
        comb += mbits.eq(pgtbl.rpds)

        # set v.shift to rts so that we can use finalmask
        # for the segment check.
        # note: rpdb (52 bits long) is truncated to 48 bits
        comb += v.shift.eq(rts)
        comb += v.mask_size.eq(mbits[0:5])
        comb += v.pgbase.eq(Cat(C(0, 8), pgtbl.rpdb[:48]))  # bits 8:55

        with m.If(l_in.valid):
            comb += v.addr.eq(l_in.addr)
            comb += v.iside.eq(l_in.iside)
            comb += v.store.eq(~(l_in.load | l_in.iside))
            comb += v.priv.eq(l_in.priv)

            comb += Display("state %d l_in.valid addr %x iside %d store %d "
                            "rts %x mbits %x pt_valid %d",
                            v.state, v.addr, v.iside, v.store,
                            rts, mbits, pt_valid)

            with m.If(l_in.tlbie):
                # Invalidate all iTLB/dTLB entries for
                # tlbie with RB[IS] != 0 or RB[AP] != 0,
                # or for slbia
                comb += v.inval_all.eq(l_in.slbia
                                       | l_in.addr[11]
                                       | l_in.addr[10]
                                       | l_in.addr[7]
                                       | l_in.addr[6]
                                       | l_in.addr[5]
                                       )
                # The RIC field of the tlbie instruction
                # comes across on the sprn bus as bits 2--3.
                # RIC=2 flushes process table caches.
                with m.If(l_in.sprn[3]):
                    comb += v.pt0_valid.eq(0)
                    comb += v.pt3_valid.eq(0)
                comb += v.state.eq(State.DO_TLBIE)
            with m.Else():
                comb += v.valid.eq(1)
                with m.If(~pt_valid):
                    # need to fetch process table entry
                    # set v.shift so we can use finalmask
                    # for generating the process table
                    # entry address
                    prtbl = PRTBL("prtbl")
                    comb += prtbl.eq(r.prtbl)
                    comb += v.shift.eq(prtbl.rpds)
                    comb += v.state.eq(State.PROC_TBL_READ)

                with m.Elif(mbits == 0):
                    # Use RPDS = 0 to disable radix tree walks
                    comb += v.state.eq(State.RADIX_FINISH)
                    comb += v.invalid.eq(1)
                    if display_invalid:
                        sync += Display("MMUBUG: Use RPDS = 0 to disable"
                                        " radix tree walks")
                with m.Else():
                    comb += v.state.eq(State.SEGMENT_CHECK)

        with m.If(l_in.mtspr):
            # Move to PID needs to invalidate L1 TLBs
            # and cached pgtbl0 value.  Move to PRTBL
            # does that plus invalidating the cached
            # pgtbl3 value as well.
            with m.If(~l_in.sprn[9]):
                comb += v.pid.eq(l_in.rs[0:32])
            with m.Else():
                comb += v.prtbl.eq(l_in.rs)
                comb += v.pt3_valid.eq(0)

            comb += v.pt0_valid.eq(0)
            comb += v.inval_all.eq(1)
            comb += v.state.eq(State.DO_TLBIE)

    def proc_tbl_wait(self, m, v, r, data):
        """PROC_TBL_WAIT: capture the process-table entry read from memory.

        Caches the entry in pgtbl0 / pgtbl3 (per address quadrant) and
        extracts tree-size and top-level-index fields from the raw data.
        """
        comb = m.d.comb
        with m.If(r.addr[63]):
            comb += v.pgtbl3.eq(data)
            comb += v.pt3_valid.eq(1)
        with m.Else():
            comb += v.pgtbl0.eq(data)
            comb += v.pt0_valid.eq(1)

        rts = Signal(6)
        mbits = Signal(6)

        # rts == radix tree size, # address bits being translated
        comb += rts.eq(Cat(data[5:8], data[61:63]))

        # mbits == # address bits to index top level of tree
        comb += mbits.eq(data[0:5])

        # set v.shift to rts so that we can use finalmask for the segment check
        comb += v.shift.eq(rts)
        comb += v.mask_size.eq(mbits[0:5])
        comb += v.pgbase.eq(Cat(C(0, 8), data[8:56]))

        with m.If(mbits):
            comb += v.state.eq(State.SEGMENT_CHECK)
        with m.Else():
            # RPDS == 0: radix walks disabled, report as invalid
            comb += v.state.eq(State.RADIX_FINISH)
            comb += v.invalid.eq(1)
            if display_invalid:
                m.d.sync += Display("MMUBUG: mbits is invalid")

    def radix_read_wait(self, m, v, r, d_in, data):
        """RADIX_READ_WAIT: decode the PDE/PTE just read from the tree.

        A valid leaf entry has its permissions and Reference/Change bits
        checked; a valid non-leaf entry descends one more level; an
        invalid entry completes with a DSI-style error.
        """
        comb = m.d.comb
        sync = m.d.sync

        rpte = RTPTE(name="radix_rpte")

        perm_ok = Signal()
        rc_ok = Signal()
        mbits = Signal(6)
        valid = rpte.valid
        eaa = rpte.eaa
        leaf = rpte.leaf
        badtree = Signal()

        comb += Display("RDW %016x done %d "
                        "perm %d rc %d mbits %d shf %d "
                        "valid %d leaf %d bad %d",
                        data, d_in.done, perm_ok, rc_ok,
                        mbits, r.shift, valid, leaf, badtree)

        # set pde and interpret as Radix Tree Page Table Entry (leaf=1 case)
        comb += v.pde.eq(data)
        comb += rpte.eq(data)

        # valid & leaf
        with m.If(valid):
            with m.If(leaf):
                # check permissions and RC bits
                with m.If(r.priv | ~eaa[EAA_PRIV]):
                    with m.If(~r.iside):
                        comb += perm_ok.eq(eaa[EAA_WR] |
                                           (eaa[EAA_RD] & ~r.store))
                    with m.Else():
                        # no IAMR, so no KUEP support for now
                        # deny execute permission if cache inhibited
                        comb += perm_ok.eq(eaa[EAA_EXE] & ~rpte.att[1])

                # stores additionally require the Change bit to be set
                comb += rc_ok.eq(rpte.r & (rpte.c | ~r.store))
                with m.If(perm_ok & rc_ok):
                    comb += v.state.eq(State.RADIX_LOAD_TLB)
                with m.Else():
                    comb += v.state.eq(State.RADIX_FINISH)
                    comb += v.perm_err.eq(~perm_ok)
                    # permission error takes precedence over RC error
                    comb += v.rc_error.eq(perm_ok)

            # valid & !leaf
            with m.Else():
                comb += mbits.eq(data[0:5])
                # next-level size must be sane and not exceed the
                # address bits still left to translate
                comb += badtree.eq((mbits < 5) |
                                   (mbits > 16) |
                                   (mbits > r.shift))
                with m.If(badtree):
                    comb += v.state.eq(State.RADIX_FINISH)
                    comb += v.badtree.eq(1)
                with m.Else():
                    comb += v.shift.eq(r.shift - mbits)
                    comb += v.mask_size.eq(mbits[0:5])
                    comb += v.pgbase.eq(Cat(C(0, 8), data[8:56]))
                    comb += v.state.eq(State.RADIX_LOOKUP)

        with m.Else():
            # non-present PTE, generate a DSI
            comb += v.state.eq(State.RADIX_FINISH)
            comb += v.invalid.eq(1)
            if display_invalid:
                sync += Display("MMUBUG: non-present PTE, generate a DSI")

    def segment_check(self, m, v, r, data, finalmask):
        """SEGMENT_CHECK: verify the effective address fits the tree.

        Bits above the translated range must be a sign-extension of the
        quadrant bit; the top-level index size must also be sane.
        """
        comb = m.d.comb

        mbits = Signal(6)
        nonzero = Signal()
        comb += mbits.eq(r.mask_size)
        comb += v.shift.eq(r.shift + (31 - 12) - mbits)
        comb += nonzero.eq((r.addr[31:62] & ~finalmask[0:31]).bool())
        with m.If((r.addr[63] ^ r.addr[62]) | nonzero):
            comb += v.state.eq(State.RADIX_FINISH)
            comb += v.segerror.eq(1)
        with m.Elif((mbits < 5) | (mbits > 16) |
                    (mbits > (r.shift + (31-12)))):
            comb += v.state.eq(State.RADIX_FINISH)
            comb += v.badtree.eq(1)
        with m.Else():
            comb += v.state.eq(State.RADIX_LOOKUP)

    def mmu_0(self, m, r, rin, l_in, l_out, d_out, addrsh, mask):
        """Register-stage update (r <= rin), SPR readback mux, and
        debug Display statements."""
        comb = m.d.comb
        sync = m.d.sync

        # Multiplex internal SPR values back to loadstore1,
        # selected by l_in.sprn.
        with m.If(l_in.sprn[9]):
            comb += l_out.sprval.eq(r.prtbl)
        with m.Else():
            comb += l_out.sprval.eq(r.pid)

        with m.If(rin.valid):
            sync += Display("MMU got tlb miss for %x", rin.addr)

        with m.If(l_out.done):
            sync += Display("MMU completing op without error")

        with m.If(l_out.err):
            sync += Display("MMU completing op with err invalid="
                            "%d badtree=%d", l_out.invalid, l_out.badtree)

        with m.If(rin.state == State.RADIX_LOOKUP):
            sync += Display("radix lookup shift=%d msize=%d",
                            rin.shift, rin.mask_size)

        with m.If(r.state == State.RADIX_LOOKUP):
            # note: was a spurious f-string here; Display takes a
            # %-style format string plus arguments
            sync += Display("send load addr=%x addrsh=%d mask=%x",
                            d_out.addr, addrsh, mask)
        sync += r.eq(rin)

    def elaborate(self, platform):
        m = Module()

        comb = m.d.comb
        sync = m.d.sync

        addrsh = Signal(16)
        mask = Signal(16)
        finalmask = Signal(44)

        self.rin = rin = RegStage("r_in")
        r = RegStage("r")

        # get access to prtbl and pid for debug / testing purposes ONLY
        # (actually, not needed, because setup_regs() triggers mmu direct)
        # self._prtbl = r.prtbl
        # self._pid = r.pid

        l_in = self.l_in
        l_out = self.l_out
        d_out = self.d_out
        d_in = self.d_in
        i_out = self.i_out

        self.mmu_0(m, r, rin, l_in, l_out, d_out, addrsh, mask)

        # combinatorial next-state copy of r, plus single-cycle strobes
        v = RegStage()
        dcreq = Signal()
        tlb_load = Signal()
        itlb_load = Signal()
        tlbie_req = Signal()
        prtbl_rd = Signal()
        effpid = Signal(32)
        prtb_adr = Signal(64)
        pgtb_adr = Signal(64)
        pte = Signal(64)
        tlb_data = Signal(64)
        addr = Signal(64)

        # defaults: carry state forward, clear all per-cycle flags
        comb += v.eq(r)
        comb += v.valid.eq(0)
        comb += dcreq.eq(0)
        comb += v.done.eq(0)
        comb += v.err.eq(0)
        comb += v.invalid.eq(0)
        comb += v.badtree.eq(0)
        comb += v.segerror.eq(0)
        comb += v.perm_err.eq(0)
        comb += v.rc_error.eq(0)
        comb += tlb_load.eq(0)
        comb += itlb_load.eq(0)
        comb += tlbie_req.eq(0)
        comb += v.inval_all.eq(0)
        comb += prtbl_rd.eq(0)

        # Radix tree data structures in memory are
        # big-endian, so we need to byte-swap them
        data = byte_reverse(m, "data", d_in.data, 8)

        # generate mask for extracting address fields for PTE addr generation
        m.submodules.pte_mask = pte_mask = Mask(16-5)
        comb += pte_mask.shift.eq(r.mask_size - 5)
        comb += mask.eq(Cat(C(0x1f, 5), pte_mask.mask))

        # generate mask for extracting address bits to go in
        # TLB entry in order to support pages > 4kB
        m.submodules.tlb_mask = tlb_mask = Mask(44)
        comb += tlb_mask.shift.eq(r.shift)
        comb += finalmask.eq(tlb_mask.mask)

        with m.If(r.state != State.IDLE):
            sync += Display("MMU state %d %016x", r.state, data)

        ##########
        # Main FSM
        ##########

        with m.Switch(r.state):
            with m.Case(State.IDLE):
                self.radix_tree_idle(m, l_in, r, v)

            with m.Case(State.DO_TLBIE):
                comb += dcreq.eq(1)
                comb += tlbie_req.eq(1)
                comb += v.state.eq(State.TLB_WAIT)

            with m.Case(State.TLB_WAIT):
                with m.If(d_in.done):
                    comb += v.state.eq(State.RADIX_FINISH)

            with m.Case(State.PROC_TBL_READ):
                sync += Display("        TBL_READ %016x", prtb_adr)
                comb += dcreq.eq(1)
                comb += prtbl_rd.eq(1)
                comb += v.state.eq(State.PROC_TBL_WAIT)

            with m.Case(State.PROC_TBL_WAIT):
                with m.If(d_in.done):
                    self.proc_tbl_wait(m, v, r, data)

                with m.If(d_in.err):
                    comb += v.state.eq(State.RADIX_FINISH)
                    comb += v.badtree.eq(1)

            with m.Case(State.SEGMENT_CHECK):
                self.segment_check(m, v, r, data, finalmask)

            with m.Case(State.RADIX_LOOKUP):
                sync += Display("       RADIX_LOOKUP")
                comb += dcreq.eq(1)
                comb += v.state.eq(State.RADIX_READ_WAIT)

            with m.Case(State.RADIX_READ_WAIT):
                sync += Display("    READ_WAIT")
                with m.If(d_in.done):
                    self.radix_read_wait(m, v, r, d_in, data)
                with m.If(d_in.err):
                    comb += v.state.eq(State.RADIX_FINISH)
                    comb += v.badtree.eq(1)

            with m.Case(State.RADIX_LOAD_TLB):
                comb += tlb_load.eq(1)
                with m.If(~r.iside):
                    # data-side: load TLB entry via D-cache request
                    comb += dcreq.eq(1)
                    comb += v.state.eq(State.TLB_WAIT)
                with m.Else():
                    # instruction-side: direct TLB load, no wait needed
                    comb += itlb_load.eq(1)
                    comb += v.state.eq(State.IDLE)

            with m.Case(State.RADIX_FINISH):
                sync += Display("    RADIX_FINISH")
                comb += v.state.eq(State.IDLE)

        # completion/error reporting (iside TLB loads complete directly)
        with m.If((v.state == State.RADIX_FINISH) |
                  ((v.state == State.RADIX_LOAD_TLB) & r.iside)):
            comb += v.err.eq(v.invalid | v.badtree | v.segerror
                             | v.perm_err | v.rc_error)
            comb += v.done.eq(~v.err)

        # quadrant 3 (addr[63] set) uses PID 0; otherwise current PID
        with m.If(~r.addr[63]):
            comb += effpid.eq(r.pid)

        # calculate Process Table Address
        pr24 = Signal(24, reset_less=True)
        prtbla = PRTBL("prtbla")
        comb += prtbla.eq(r.prtbl)
        rpdb = prtbla.rpdb
        comb += pr24.eq(masked(rpdb[4:28], effpid[8:32], finalmask))
        comb += prtb_adr.eq(Cat(C(0, 4), effpid[0:8], pr24, rpdb[28:48]))

        # calculate Page Table Address
        pg16 = Signal(16, reset_less=True)
        comb += pg16.eq(masked(r.pgbase[3:19], addrsh, mask))
        comb += pgtb_adr.eq(Cat(C(0, 3), pg16, r.pgbase[19:56]))

        # calculate Page Table Entry from Real Page Number (leaf=1, RTPTE)
        rpte = RTPTE(name="rpte")
        comb += rpte.eq(r.pde)
        pd44 = Signal(44, reset_less=True)
        comb += pd44.eq(masked(rpte.rpn, r.addr[12:56], finalmask))
        comb += pte.eq(Cat(r.pde[0:12], pd44))

        # update registers
        comb += rin.eq(v)

        # drive outputs: select the address for the current request type
        with m.If(tlbie_req):
            comb += addr.eq(r.addr)
        with m.Elif(tlb_load):
            comb += addr.eq(Cat(C(0, 12), r.addr[12:64]))
            comb += tlb_data.eq(pte)
        with m.Elif(prtbl_rd):
            comb += addr.eq(prtb_adr)
        with m.Else():
            comb += addr.eq(pgtb_adr)

        # connect to other interfaces: LDST, D-Cache, I-Cache
        comb += l_out.done.eq(r.done)
        comb += l_out.err.eq(r.err)
        comb += l_out.invalid.eq(r.invalid)
        comb += l_out.badtree.eq(r.badtree)
        comb += l_out.segerr.eq(r.segerror)
        comb += l_out.perm_error.eq(r.perm_err)
        comb += l_out.rc_error.eq(r.rc_error)

        comb += d_out.valid.eq(dcreq)
        comb += d_out.tlbie.eq(tlbie_req)
        comb += d_out.doall.eq(r.inval_all)
        comb += d_out.tlbld.eq(tlb_load)
        comb += d_out.addr.eq(addr)
        comb += d_out.pte.eq(tlb_data)

        comb += i_out.tlbld.eq(itlb_load)
        comb += i_out.tlbie.eq(tlbie_req)
        comb += i_out.doall.eq(r.inval_all)
        comb += i_out.addr.eq(addr)
        comb += i_out.pte.eq(tlb_data)

        return m
591
# global flag: set True by any simulator process to stop all the others
stop = False
593
def dcache_get(dut):
    """simulator process for getting memory load requests

    Acts as a stand-in for the D-cache: waits for d_out.valid, looks the
    address up in a small fake radix-tree memory, and pulses d_in.done
    with the data for one cycle.  The exact yield ordering models the
    D-cache handshake timing, so it must not be rearranged.
    """

    global stop

    def b(x):
        # byte-swap a 64-bit value: radix tree structures are stored
        # big-endian in memory, and the MMU byte-reverses them on read
        return int.from_bytes(x.to_bytes(8, byteorder='little'),
                              byteorder='big', signed=False)

    mem = {0x0: 0x000000, # to get mtspr prtbl working

           0x10000: # PARTITION_TABLE_2
                    # PATB_GR=1 PRTB=0x1000 PRTS=0xb
           b(0x800000000100000b),

           0x30000: # RADIX_ROOT_PTE
                    # V = 1 L = 0 NLB = 0x400 NLS = 9
           b(0x8000000000040009),

           0x40000: # RADIX_SECOND_LEVEL
                    # V = 1 L = 1 SW = 0 RPN = 0
                    # R = 1 C = 1 ATT = 0 EAA 0x7
           b(0xc000000000000187),

          0x1000000:   # PROCESS_TABLE_3
                       # RTS1 = 0x2 RPDB = 0x300 RTS2 = 0x5 RPDS = 13
           b(0x40000000000300ad),
          }

    while not stop:
        while True: # wait for dc_valid
            if stop:
                return
            dc_valid = yield (dut.d_out.valid)
            if dc_valid:
                break
            yield
        addr = yield dut.d_out.addr
        if addr not in mem:
            # unknown address: flag failure and stop the whole sim
            print ("    DCACHE LOOKUP FAIL %x" % (addr))
            stop = True
            return

        yield
        data = mem[addr]
        yield dut.d_in.data.eq(data)
        print ("    DCACHE GET %x data %x" % (addr, data))
        # one-cycle done pulse completes the D-cache handshake
        yield dut.d_in.done.eq(1)
        yield
        yield dut.d_in.done.eq(0)
645
def mmu_wait(dut):
    """Spin until the MMU reports completion (done) or any error flag,
    then deassert the request inputs so the next request starts clean."""
    global stop
    while not stop: # wait for dc_valid / err
        l_done = yield (dut.l_out.done)
        l_err = yield (dut.l_out.err)
        l_badtree = yield (dut.l_out.badtree)
        l_permerr = yield (dut.l_out.perm_error)
        l_rc_err = yield (dut.l_out.rc_error)
        l_segerr = yield (dut.l_out.segerr)
        l_invalid = yield (dut.l_out.invalid)
        if (l_done or l_err or l_badtree or
            l_permerr or l_rc_err or l_segerr or l_invalid):
            break
        yield
    yield dut.l_in.valid.eq(0) # data already in MMU by now
    yield dut.l_in.mtspr.eq(0) # captured by RegStage(s)
    yield dut.l_in.load.eq(0)  # can reset everything safely
663
def mmu_sim(dut):
    """Driver process: sets the process-table SPR via a fake mtspr,
    then issues one privileged load translation request and reports
    the result.  Sets the global stop flag when finished."""
    global stop

    # MMU MTSPR set prtbl
    yield dut.l_in.mtspr.eq(1)
    yield dut.l_in.sprn[9].eq(1) # totally fake way to set SPR=prtbl
    yield dut.l_in.rs.eq(0x1000000) # set process table
    yield dut.l_in.valid.eq(1)
    yield from mmu_wait(dut)
    yield
    yield dut.l_in.sprn.eq(0)
    yield dut.l_in.rs.eq(0)
    yield

    # confirm the SPR write landed in the register stage
    prtbl = yield (dut.rin.prtbl)
    print ("prtbl after MTSPR %x" % prtbl)
    assert prtbl == 0x1000000

    #yield dut.rin.prtbl.eq(0x1000000) # manually set process table
    #yield


    # MMU PTE request
    yield dut.l_in.load.eq(1)
    yield dut.l_in.priv.eq(1)
    yield dut.l_in.addr.eq(0x10000)
    yield dut.l_in.valid.eq(1)
    yield from mmu_wait(dut)

    # read back the translation result from the D-cache interface
    addr = yield dut.d_out.addr
    pte = yield dut.d_out.pte
    l_done = yield (dut.l_out.done)
    l_err = yield (dut.l_out.err)
    l_badtree = yield (dut.l_out.badtree)
    print ("translated done %d err %d badtree %d addr %x pte %x" % \
               (l_done, l_err, l_badtree, addr, pte))
    yield
    yield dut.l_in.priv.eq(0)
    yield dut.l_in.addr.eq(0)


    stop = True
706
707
def test_mmu():
    """Emit RTLIL for the MMU, then simulate one translation sequence."""
    dut = MMU()

    # write out RTLIL for inspection / synthesis checks
    rtl = rtlil.convert(dut, ports=[])  # dut.ports())
    with open("test_mmu.il", "w") as il_file:
        il_file.write(rtl)

    top = Module()
    top.submodules.mmu = dut

    # nmigen Simulation: driver process plus fake-D-cache responder
    sim = Simulator(top)
    sim.add_clock(1e-6)
    for proc in (mmu_sim(dut), dcache_get(dut)):
        sim.add_sync_process(wrap(proc))
    with sim.write_vcd('test_mmu.vcd'):
        sim.run()
725
# run the MMU unit test when this module is executed directly
if __name__ == '__main__':
    test_mmu()