3 based on Anton Blanchard microwatt mmu.vhdl
from enum import Enum, unique
from nmigen import (Module, Signal, Elaboratable, Mux, Cat, Repl, signed,
                    Const)
from nmigen.cli import main
from nmigen.iocontrol import RecordObject
12 # library ieee; use ieee.std_logic_1164.all;
13 # use ieee.numeric_std.all;
15 # library work; use work.common.all;
18 # start from common.vhdl
19 # type Loadstore1ToMmuType is record
27 # sprn : std_ulogic_vector(9 downto 0);
28 # addr : std_ulogic_vector(63 downto 0);
29 # rs : std_ulogic_vector(63 downto 0);
class LoadStore1ToMmuType(RecordObject):
    """Request from loadstore1 to the MMU (VHDL Loadstore1ToMmuType).

    NOTE(review): reconstructed from a garbled source; only sprn and addr
    were visible.  The remaining fields are inferred from the VHDL record
    comments and from ``l_in.*`` uses later in this file -- confirm each
    against the original.
    """
    def __init__(self):
        super().__init__()
        self.valid = Signal()   # TODO confirm: l_in.valid is read below
        self.tlbie = Signal()   # TODO confirm: l_in.tlbie is read below
        self.slbia = Signal()   # TODO confirm: l_in.slbia is read below
        self.mtspr = Signal()   # TODO confirm: l_in.mtspr is read below
        self.iside = Signal()   # TODO confirm: l_in.iside is read below
        self.load = Signal()    # TODO confirm: l_in.load is read below
        self.priv = Signal()    # TODO confirm: l_in.priv is read below
        self.sprn = Signal(10)  # SPR number for mtspr/mfspr
        self.addr = Signal(64)  # effective address of the request
        self.rs = Signal(64)    # mtspr source operand (l_in.rs below)
45 # type MmuToLoadstore1Type is record
48 # invalid : std_ulogic;
49 # badtree : std_ulogic;
50 # segerr : std_ulogic;
51 # perm_error : std_ulogic;
52 # rc_error : std_ulogic;
53 # sprval : std_ulogic_vector(63 downto 0);
class MmuToLoadStore1Type(RecordObject):
    """Reply from the MMU back to loadstore1 (VHDL MmuToLoadstore1Type).

    NOTE(review): reconstructed from a garbled source.  done/err were not
    visible here but ``l_out.done`` and ``l_out.err`` are driven later in
    this file, so they must exist -- confirm against the original.
    """
    def __init__(self):
        super().__init__()
        self.done = Signal()        # TODO confirm (line missing in source)
        self.err = Signal()         # TODO confirm (line missing in source)
        self.invalid = Signal()     # translation invalid (DSI)
        self.badtree = Signal()     # malformed radix tree
        self.segerr = Signal()      # segment check failed
        self.perm_error = Signal()  # permission denied
        self.rc_error = Signal()    # reference/change bits not set
        self.sprval = Signal(64)    # mfspr readback value
67 # type MmuToDcacheType is record
72 # addr : std_ulogic_vector(63 downto 0);
73 # pte : std_ulogic_vector(63 downto 0);
class MmuToDcacheType(RecordObject):
    """Request from the MMU to the D-cache (TLB load / tlbie / PTE read).

    NOTE(review): reconstructed from a garbled source; only addr was
    visible.  The other fields are inferred from the ``d_out.*`` signals
    driven later in this file -- confirm against the original.
    """
    def __init__(self):
        super().__init__()
        self.valid = Signal()   # TODO confirm: d_out.valid driven below
        self.tlbie = Signal()   # TODO confirm: d_out.tlbie driven below
        self.doall = Signal()   # TODO confirm: d_out.doall driven below
        self.tlbld = Signal()   # TODO confirm: d_out.tlbld driven below
        self.addr = Signal(64)  # request address
        self.pte = Signal(64)   # TODO confirm: d_out.pte driven below
85 # type DcacheToMmuType is record
89 # data : std_ulogic_vector(63 downto 0);
class DcacheToMmuType(RecordObject):
    """Reply from the D-cache to the MMU.

    NOTE(review): reconstructed from a garbled source; only data was
    visible.  done/err are inferred from ``d_in.done`` / ``d_in.err``
    tests later in this file -- confirm against the original.
    """
    def __init__(self):
        super().__init__()
        self.done = Signal()    # TODO confirm (line missing in source)
        self.err = Signal()     # TODO confirm (line missing in source)
        self.data = Signal(64)  # doubleword read from memory
100 # type MmuToIcacheType is record
101 # tlbld : std_ulogic;
102 # tlbie : std_ulogic;
103 # doall : std_ulogic;
104 # addr : std_ulogic_vector(63 downto 0);
105 # pte : std_ulogic_vector(63 downto 0);
class MmuToIcacheType(RecordObject):
    """TLB maintenance interface from the MMU to the I-cache
    (VHDL MmuToIcacheType): iTLB load and invalidation requests.
    """
    def __init__(self):
        super().__init__()
        self.tlbld = Signal()   # load a new iTLB entry
        self.tlbie = Signal()   # invalidate iTLB entry (or all, if doall)
        self.doall = Signal()   # invalidate the whole iTLB
        self.addr = Signal(64)  # effective address for the operation
        self.pte = Signal(64)   # PTE to load into the iTLB
114 # end from common.vhdl
122 # -- Supports 4-level trees as in arch 3.0B, but not the
123 # -- two-step translation
124 # -- for guests under a hypervisor (i.e. there is no gRA -> hRA translation).
139 # architecture behave of mmu is
154 # type reg_stage_t is record
155 # -- latched request from loadstore1
156 # valid : std_ulogic;
157 # iside : std_ulogic;
158 # store : std_ulogic;
160 # addr : std_ulogic_vector(63 downto 0);
161 # inval_all : std_ulogic;
163 # prtbl : std_ulogic_vector(63 downto 0);
164 # pid : std_ulogic_vector(31 downto 0);
169 # pgtbl0 : std_ulogic_vector(63 downto 0);
170 # pt0_valid : std_ulogic;
171 # pgtbl3 : std_ulogic_vector(63 downto 0);
172 # pt3_valid : std_ulogic;
173 # shift : unsigned(5 downto 0);
174 # mask_size : unsigned(4 downto 0);
175 # pgbase : std_ulogic_vector(55 downto 0);
176 # pde : std_ulogic_vector(63 downto 0);
177 # invalid : std_ulogic;
178 # badtree : std_ulogic;
179 # segerror : std_ulogic;
180 # perm_err : std_ulogic;
181 # rc_error : std_ulogic;
class RegStage(RecordObject):
    """Internal MMU state, registered each cycle (VHDL reg_stage_t).

    Fixes over the garbled original:
    * ``super().__init__(self, name=name)`` passed self twice -- with
      ``super()`` the instance is bound automatically.
    * ``self.state = State.IDLE`` stored a bare enum member, but the state
      machine below assigns it with ``.eq()`` and compares it against
      ``State`` values, so it must be a Signal shaped by the State enum
      and reset to IDLE.
    """
    def __init__(self, name=None):
        super().__init__(name=name)
        # latched request from loadstore1
        self.valid = Signal(reset_less=True)
        self.iside = Signal(reset_less=True)
        self.store = Signal(reset_less=True)
        self.priv = Signal(reset_less=True)
        self.addr = Signal(64, reset_less=True)
        self.inval_all = Signal(reset_less=True)
        # config SPRs
        self.prtbl = Signal(64, reset_less=True)
        self.pid = Signal(32, reset_less=True)
        # internal walker state
        # TODO confirm: the State enum is declared on source lines not
        # visible in this chunk
        self.state = Signal(State, reset=State.IDLE)
        self.done = Signal(reset_less=True)
        self.err = Signal(reset_less=True)
        self.pgtbl0 = Signal(64, reset_less=True)
        self.pt0_valid = Signal(reset_less=True)
        self.pgtbl3 = Signal(64, reset_less=True)
        self.pt3_valid = Signal(reset_less=True)
        self.shift = Signal(6, reset_less=True)
        self.mask_size = Signal(5, reset_less=True)
        self.pgbase = Signal(56, reset_less=True)
        self.pde = Signal(64, reset_less=True)
        self.invalid = Signal(reset_less=True)
        self.badtree = Signal(reset_less=True)
        self.segerror = Signal(reset_less=True)
        self.perm_err = Signal(reset_less=True)
        self.rc_error = Signal(reset_less=True)
218 # Supports 4-level trees as in arch 3.0B, but not the
219 # two-step translation for guests under a hypervisor
220 # (i.e. there is no gRA -> hRA translation).
class MMU(Elaboratable):
    """MMU: radix page-table walker, based on microwatt mmu.vhdl.

    Supports 4-level trees as in arch 3.0B, but not the two-step
    translation for guests under a hypervisor (i.e. there is no
    gRA -> hRA translation).

    NOTE(review): reconstructed from a garbled source with many lines
    missing; every gap is marked TODO below.
    """
    def __init__(self):
        # loadstore1 <-> mmu
        self.l_in = LoadStore1ToMmuType()
        self.l_out = MmuToLoadStore1Type()
        # dcache <-> mmu
        self.d_out = MmuToDcacheType()
        self.d_in = DcacheToMmuType()
        # mmu -> icache
        self.i_out = MmuToIcacheType()

        self.addrsh = Signal(16)     # address bits selected for PTE index
        self.mask = Signal(16)       # mask for PTE-address field extraction
        self.finalmask = Signal(44)  # mask for TLB-entry address bits

        # registered state (VHDL "signal r, rin : reg_stage_t")
        self.r = RegStage("r")  # TODO confirm: line missing in source
        self.rin = RegStage()

    def elaborate(self, platform):
        m = Module()
        comb = m.d.comb

        l_in, l_out = self.l_in, self.l_out
        r = self.r

        # Multiplex internal SPR values back to loadstore1, selected by
        # l_in.sprn: PRTBL when sprn(9)=1, else zero-extended PID.
        with m.If(l_in.sprn[9]):
            comb += l_out.sprval.eq(r.prtbl)
        with m.Else():
            comb += l_out.sprval.eq(Cat(r.pid, Const(0x00000000, 32)))

        # NOTE(review): the garbled original turned the VHDL "report"
        # debug statements into print() calls nested under m.If().  Those
        # execute once at elaboration time, not per simulation cycle, and
        # an f-string on a Signal prints its repr, so they cannot report
        # runtime values.  They are removed; the VHDL reports were:
        #   "MMU got tlb miss for <rin.addr>"        (rin.valid)
        #   "MMU completing op without error"         (l_out.done)
        #   "MMU completing op with err ..."          (l_out.err)
        #   "radix lookup shift=... msize=..."        (rin.state=RADIX_LOOKUP)
        #   "send load addr=... addrsh=... mask=..."  (r.state=RADIX_LOOKUP)

        # TODO(review): original lines missing here (r<-rin register
        # update and hookup of the AddrMaskGen/FinalMaskGen/MMU1 logic).
        return m
332 # -- generate mask for extracting address fields for PTE address
334 # addrmaskgen: process(all)
335 # generate mask for extracting address fields for PTE address
class AddrMaskGen(Elaboratable, MMU):
    """Generate the mask for extracting address fields for the PTE
    address (VHDL process "addrmaskgen").

    NOTE(review): reconstructed from a garbled source.  Fixes an
    unbalanced parenthesis in ``mask.eq(Const(0x001F, 16)`` and restores
    the missing loop body (VHDL sets m(i) := '1' below mask_size).
    """
    def __init__(self):
        super().__init__()
        # VHDL "variable m : std_ulogic_vector(15 downto 0)"
        self.msk = Signal(16)

    def elaborate(self, platform):
        m = Module()
        comb = m.d.comb
        mask, msk = self.mask, self.msk
        r = self.r  # TODO confirm: binding not visible in source

        # mask_count has to be >= 5, so bits 0..4 are always set
        comb += msk.eq(Const(0x001F, 16))
        # set bits 5..15 only while below the configured mask size
        for i in range(5, 16):
            with m.If(i < r.mask_size):
                comb += msk[i].eq(1)  # TODO confirm (line missing)
        comb += mask.eq(msk)          # TODO confirm (line missing)
        return m
374 # -- generate mask for extracting address bits to go in
375 # -- TLB entry in order to support pages > 4kB
376 # finalmaskgen: process(all)
377 # generate mask for extracting address bits to go in
378 # TLB entry in order to support pages > 4kB
class FinalMaskGen(Elaboratable, MMU):
    """Generate the mask for extracting the address bits that go in the
    TLB entry, in order to support pages > 4kB (VHDL "finalmaskgen").

    NOTE(review): reconstructed from a garbled source.  Fixes
    ``comb += self.finalmask(mask)`` which *called* the Signal; the VHDL
    intent ("finalmask <= m") is an ``.eq()`` assignment of the local
    variable.
    """
    def __init__(self):
        super().__init__()
        # VHDL "variable m : std_ulogic_vector(43 downto 0)"
        self.msk = Signal(44)

    def elaborate(self, platform):
        m = Module()
        comb = m.d.comb
        r = self.r  # TODO confirm: binding not visible in source
        msk = self.msk

        # set bit i while below the current shift amount
        for i in range(0, 44):
            with m.If(i < r.shift):
                comb += msk[i].eq(1)  # TODO confirm (line missing)
        comb += self.finalmask.eq(msk)
        return m
411 # mmu_1: process(all)
class MMU1(Elaboratable):
    """Main radix-walk state machine (VHDL process "mmu_1").

    NOTE(review): reconstructed from a heavily garbled source; original
    lines are missing throughout and every gap is marked TODO(review).
    Concrete fixes to the visible text:
      * ``l_in.siside``          -> ``l_in.iside``
      * ``pgtbl.eq(r.pt3_valid)``-> ``pgtbl.eq(r.pgtbl3)`` (per VHDL)
      * ``v.shif``               -> ``v.shift``
      * ``r.prtble``             -> ``r.prtbl``
      * ``d_out.tlbld.eeq(...)`` -> ``.eq(...)``
      * ``i_out.tblie``          -> ``i_out.tlbie``
      * consecutive ``m.If()`` where the VHDL has elsif/else chains --
        rewritten as ``m.Elif()``/``m.Else()`` to preserve priority.
    """

    def __init__(self):
        # combinatorial "variables" of the VHDL process
        self.dcreq = Signal()  # TODO confirm: inferred from d_out.valid <= dcreq
        self.tlb_load = Signal()
        self.itlb_load = Signal()
        self.tlbie_req = Signal()
        self.prtbl_rd = Signal()
        self.pt_valid = Signal()
        self.effpid = Signal(32)
        self.prtable_addr = Signal(64)
        self.rts = Signal(6)   # TODO confirm: in VHDL comment, line missing
        self.mbits = Signal(6)
        self.pgtable_addr = Signal(64)
        self.pte = Signal(64)
        self.tlb_data = Signal(64)
        self.nonzero = Signal()
        self.pgtbl = Signal(64)
        self.perm_ok = Signal()
        self.rc_ok = Signal()
        self.addr = Signal(64)
        self.data = Signal(64)

    def elaborate(self, platform):
        m = Module()
        comb = m.d.comb

        # TODO(review): the bindings of r, rin, l_in, l_out, d_in, d_out,
        # i_out, mask, addrsh and finalmask are not visible in the garbled
        # source; assumed to be attributes set up by missing lines.
        r, rin = self.r, self.rin
        l_in, l_out = self.l_in, self.l_out
        d_in, d_out = self.d_in, self.d_out
        i_out = self.i_out
        mask, addrsh = self.mask, self.addrsh
        finalmask = self.finalmask
        v = RegStage()  # VHDL "variable v : reg_stage_t"

        dcreq = self.dcreq
        tlb_load, itlb_load = self.tlb_load, self.itlb_load
        tlbie_req, prtbl_rd = self.tlbie_req, self.prtbl_rd
        pt_valid, effpid = self.pt_valid, self.effpid
        prtable_addr, pgtable_addr = self.prtable_addr, self.pgtable_addr
        rts, mbits = self.rts, self.mbits
        pte, tlb_data = self.pte, self.tlb_data
        nonzero, pgtbl = self.nonzero, self.pgtbl
        perm_ok, rc_ok = self.perm_ok, self.rc_ok
        addr, data = self.addr, self.data

        # per-cycle defaults (VHDL "v := r" followed by clearing pulses)
        comb += v.eq(r)            # TODO confirm (line missing)
        comb += dcreq.eq(0)        # TODO confirm (line missing)
        comb += v.valid.eq(0)
        comb += v.done.eq(0)       # TODO confirm (line missing)
        comb += v.err.eq(0)        # TODO confirm (line missing)
        comb += v.invalid.eq(0)
        comb += v.badtree.eq(0)
        comb += v.segerror.eq(0)
        comb += v.perm_err.eq(0)
        comb += v.rc_error.eq(0)
        comb += tlb_load.eq(0)
        comb += itlb_load.eq(0)
        comb += tlbie_req.eq(0)
        comb += v.inval_all.eq(0)
        comb += prtbl_rd.eq(0)

        # Radix tree data structures in memory are big-endian,
        # so we need to byte-swap them
        for i in range(8):
            comb += data[i * 8:i * 8 + 8].eq(
                        d_in.data[(7 - i) * 8:(7 - i) * 8 + 8])

        with m.Switch(r.state):
            with m.Case(State.IDLE):
                # select the cached page-table root: quadrant 3
                # (addr(63)=1) uses pgtbl3, quadrant 0 uses pgtbl0
                with m.If(~l_in.addr[63]):
                    comb += pgtbl.eq(r.pgtbl0)
                    comb += pt_valid.eq(r.pt0_valid)
                with m.Else():
                    # FIX: source assigned pgtbl from r.pt3_valid
                    comb += pgtbl.eq(r.pgtbl3)
                    comb += pt_valid.eq(r.pt3_valid)
                # rts == radix tree size, number of address bits being
                # translated: rts := '0' & pgtbl(62:61) & pgtbl(7:5)
                comb += rts.eq(Cat(pgtbl[5:8], pgtbl[61:63], Const(0, 1)))
                # mbits == number of address bits to index top level
                comb += mbits.eq(Cat(pgtbl[0:5], Const(0, 1)))
                # set v.shift to rts so that we can use finalmask
                # for the segment check
                comb += v.shift.eq(rts)
                comb += v.mask_size.eq(mbits[0:5])
                # v.pgbase := pgtbl(55:8) & x"00"
                comb += v.pgbase.eq(Cat(Const(0, 8), pgtbl[8:56]))

                with m.If(l_in.valid):
                    comb += v.addr.eq(l_in.addr)
                    comb += v.iside.eq(l_in.iside)
                    # FIX: source read l_in.siside here
                    comb += v.store.eq(~(l_in.load | l_in.iside))
                    comb += v.priv.eq(l_in.priv)  # TODO confirm (missing)
                    with m.If(l_in.tlbie):
                        # Invalidate all iTLB/dTLB entries for tlbie with
                        # RB[IS] != 0 or RB[AP] != 0, or for slbia
                        comb += v.inval_all.eq(l_in.slbia
                                               | l_in.addr[11]
                                               | l_in.addr[10]
                                               | l_in.addr[7]
                                               | l_in.addr[6]
                                               | l_in.addr[5])
                        # The RIC field of the tlbie instruction comes
                        # across on the sprn bus as bits 2--3.
                        # RIC=2 flushes process table caches.
                        with m.If(l_in.sprn[3]):
                            comb += v.pt0_valid.eq(0)
                            comb += v.pt3_valid.eq(0)
                        comb += v.state.eq(State.DO_TLBIE)
                    with m.Else():
                        comb += v.valid.eq(1)
                        with m.If(~pt_valid):
                            # need to fetch process table entry; set
                            # v.shift so we can use finalmask to generate
                            # the process-table entry address
                            comb += v.shift.eq(Cat(r.prtbl[0:5],
                                                   Const(0, 1)))
                            comb += v.state.eq(State.PROC_TBL_READ)
                        with m.Elif(mbits == 0):
                            # Use RPDS = 0 to disable radix tree walks
                            comb += v.state.eq(State.RADIX_FINISH)
                            comb += v.invalid.eq(1)
                        with m.Else():
                            comb += v.state.eq(State.SEGMENT_CHECK)
                with m.If(l_in.mtspr):
                    # Move to PID needs to invalidate L1 TLBs and the
                    # cached pgtbl0 value.  Move to PRTBL does that plus
                    # invalidating the cached pgtbl3 value as well.
                    with m.If(~l_in.sprn[9]):
                        comb += v.pid.eq(l_in.rs[0:32])
                    with m.Else():
                        comb += v.prtbl.eq(l_in.rs)
                        comb += v.pt3_valid.eq(0)
                    comb += v.pt0_valid.eq(0)
                    comb += v.inval_all.eq(1)
                    comb += v.state.eq(State.DO_TLBIE)

            with m.Case(State.DO_TLBIE):
                comb += dcreq.eq(1)  # TODO confirm (line missing)
                comb += tlbie_req.eq(1)
                comb += v.state.eq(State.TLB_WAIT)

            with m.Case(State.TLB_WAIT):
                with m.If(d_in.done):
                    comb += v.state.eq(State.RADIX_FINISH)

            with m.Case(State.PROC_TBL_READ):
                comb += dcreq.eq(1)  # TODO confirm (line missing)
                comb += prtbl_rd.eq(1)
                comb += v.state.eq(State.PROC_TBL_WAIT)

            with m.Case(State.PROC_TBL_WAIT):
                with m.If(d_in.done):
                    with m.If(r.addr[63]):
                        comb += v.pgtbl3.eq(data)
                        comb += v.pt3_valid.eq(1)
                    with m.Else():
                        comb += v.pgtbl0.eq(data)
                        comb += v.pt0_valid.eq(1)
                    # rts == radix tree size, # address bits translated
                    # TODO confirm (lines missing):
                    comb += rts.eq(Cat(data[5:8], data[61:63], Const(0, 1)))
                    # mbits == # address bits to index top level of tree
                    comb += mbits.eq(Cat(data[0:5], Const(0, 1)))
                    with m.If(mbits == 0):  # TODO confirm (line missing)
                        comb += v.state.eq(State.RADIX_FINISH)
                        comb += v.invalid.eq(1)
                    with m.Else():
                        # set v.shift to rts so that we can use finalmask
                        # for the segment check
                        comb += v.shift.eq(rts)
                        comb += v.mask_size.eq(mbits[0:5])
                        # TODO confirm (line missing):
                        comb += v.pgbase.eq(Cat(Const(0, 8), data[8:56]))
                        comb += v.state.eq(State.SEGMENT_CHECK)
                with m.If(d_in.err):
                    comb += v.state.eq(State.RADIX_FINISH)
                    comb += v.badtree.eq(1)

            with m.Case(State.SEGMENT_CHECK):
                # mbits := '0' & r.mask_size
                comb += mbits.eq(Cat(r.mask_size, Const(0, 1)))
                comb += v.shift.eq(r.shift + (31 - 12) - mbits)
                # nonzero if any address bit 61..31 survives ~finalmask
                comb += nonzero.eq((r.addr[31:62]
                                    & ~finalmask[0:31]).bool())
                with m.If((r.addr[63] != r.addr[62]) | nonzero):
                    comb += v.state.eq(State.RADIX_FINISH)
                    comb += v.segerror.eq(1)
                with m.Elif((mbits < 5) | (mbits > 16)
                            | (mbits > (r.shift + (31 - 12)))):
                    comb += v.state.eq(State.RADIX_FINISH)
                    comb += v.badtree.eq(1)
                with m.Else():
                    comb += v.state.eq(State.RADIX_LOOKUP)

            with m.Case(State.RADIX_LOOKUP):
                comb += dcreq.eq(1)  # TODO confirm (line missing)
                comb += v.state.eq(State.RADIX_READ_WAIT)

            with m.Case(State.RADIX_READ_WAIT):
                with m.If(d_in.done):
                    comb += v.pde.eq(data)
                    with m.If(data[63]):      # PDE/PTE valid bit
                        with m.If(data[62]):  # leaf bit: this is a PTE
                            # check permissions and RC bits
                            comb += perm_ok.eq(0)
                            with m.If(r.priv | ~data[3]):
                                with m.If(~r.iside):
                                    # TODO confirm (lines missing):
                                    comb += perm_ok.eq(data[1]
                                                | (data[2] & ~r.store))
                                with m.Else():
                                    # no IAMR, so no KUEP support for
                                    # now; deny execute permission if
                                    # cache inhibited
                                    comb += perm_ok.eq(data[0]
                                                       & ~data[5])
                            # rc_ok := data(8) and (data(7) or not store)
                            comb += rc_ok.eq(data[8]
                                             & (data[7] | (~r.store)))
                            with m.If(perm_ok & rc_ok):
                                # TODO confirm (line missing):
                                comb += v.state.eq(State.RADIX_LOAD_TLB)
                            with m.Else():
                                comb += v.state.eq(State.RADIX_FINISH)
                                comb += v.perm_err.eq(~perm_ok)
                                # permission error takes precedence
                                # over RC error
                                comb += v.rc_error.eq(perm_ok)
                        with m.Else():
                            # next level of the radix tree
                            # TODO confirm (lines missing):
                            comb += mbits.eq(Cat(data[0:5], Const(0, 1)))
                            with m.If((mbits < 5) | (mbits > 16)
                                      | (mbits > r.shift)):
                                comb += v.state.eq(State.RADIX_FINISH)
                                comb += v.badtree.eq(1)
                            with m.Else():
                                # FIX: source had v.shif
                                comb += v.shift.eq(v.shift - mbits)
                                comb += v.mask_size.eq(mbits[0:5])
                                # v.pgbase := data(55:8) & x"00"
                                # TODO confirm (lines missing):
                                comb += v.pgbase.eq(Cat(Const(0, 8),
                                                        data[8:56]))
                                comb += v.state.eq(State.RADIX_LOOKUP)
                    with m.Else():
                        # non-present PTE, generate a DSI
                        comb += v.state.eq(State.RADIX_FINISH)
                        comb += v.invalid.eq(1)
                with m.If(d_in.err):
                    comb += v.state.eq(State.RADIX_FINISH)
                    comb += v.badtree.eq(1)

            with m.Case(State.RADIX_LOAD_TLB):
                comb += tlb_load.eq(1)
                with m.If(~r.iside):
                    comb += dcreq.eq(1)  # TODO confirm (line missing)
                    comb += v.state.eq(State.TLB_WAIT)
                with m.Else():
                    comb += itlb_load.eq(1)
                    comb += v.state.eq(State.IDLE)

            with m.Case(State.RADIX_FINISH):
                comb += v.state.eq(State.IDLE)

        # flag completion / error on the cycle the walk resolves
        with m.If((v.state == State.RADIX_FINISH)
                  | ((v.state == State.RADIX_LOAD_TLB) & r.iside)):
            comb += v.err.eq(v.invalid | v.badtree | v.segerror
                             | v.perm_err | v.rc_error)
            comb += v.done.eq(~v.err)

        # quadrant 3 accesses use PID 0
        with m.If(r.addr[63]):
            comb += effpid.eq(Const(0x00000000, 32))
        with m.Else():
            comb += effpid.eq(r.pid)

        # prtable_addr := x"00" & r.prtbl(55:36) &
        #    ((r.prtbl(35:12) and not finalmask(23:0)) or
        #     (effpid(31:8) and finalmask(23:0))) &
        #    effpid(7:0) & "0000"
        # FIX: source had r.prtble
        comb += prtable_addr.eq(Cat(
                    Const(0b0000, 4), effpid[0:8],
                    (r.prtbl[12:36] & ~finalmask[0:24])
                    | (effpid[8:32] & finalmask[0:24]),
                    r.prtbl[36:56], Const(0x00, 8)))

        # pgtable_addr := x"00" & r.pgbase(55:19) &
        #    ((r.pgbase(18:3) and not mask) or (addrsh and mask)) & "000"
        comb += pgtable_addr.eq(Cat(
                    Const(0b000, 3),
                    (r.pgbase[3:19] & ~mask) | (addrsh & mask),
                    r.pgbase[19:56], Const(0x00, 8)))

        # pte := x"00" & ((r.pde(55:12) and not finalmask) or
        #                 (r.addr(55:12) and finalmask)) & r.pde(11:0)
        comb += pte.eq(Cat(
                    r.pde[0:12],
                    (r.pde[12:56] & ~finalmask)
                    | (r.addr[12:56] & finalmask),
                    Const(0x00, 8)))

        # update registers
        # TODO confirm (lines missing): rin/r handoff not visible
        m.d.sync += r.eq(v)

        # request address / TLB data muxing.  FIX: the source used
        # independent m.If blocks where the VHDL has an if/elsif/else
        # chain; Elif/Else restores the intended priority.
        with m.If(tlbie_req):
            comb += addr.eq(r.addr)
            comb += tlb_data.eq(0)
        with m.Elif(tlb_load):
            # addr := r.addr(63:12) & x"000"
            comb += addr.eq(Cat(Const(0x000, 12), r.addr[12:64]))
            comb += tlb_data.eq(pte)  # TODO confirm (line missing)
        with m.Elif(prtbl_rd):
            comb += addr.eq(prtable_addr)
            comb += tlb_data.eq(0)
        with m.Else():
            comb += addr.eq(pgtable_addr)
            comb += tlb_data.eq(0)

        # drive outputs back to loadstore1
        comb += l_out.done.eq(r.done)
        comb += l_out.err.eq(r.err)
        comb += l_out.invalid.eq(r.invalid)
        comb += l_out.badtree.eq(r.badtree)
        comb += l_out.segerr.eq(r.segerror)
        comb += l_out.perm_error.eq(r.perm_err)
        comb += l_out.rc_error.eq(r.rc_error)

        # drive the dcache request.  FIX: source had .eeq on tlbld
        comb += d_out.valid.eq(dcreq)
        comb += d_out.tlbie.eq(tlbie_req)
        comb += d_out.doall.eq(r.inval_all)
        comb += d_out.tlbld.eq(tlb_load)
        comb += d_out.addr.eq(addr)
        comb += d_out.pte.eq(tlb_data)

        # drive the icache TLB interface.  FIX: source had i_out.tblie
        comb += i_out.tlbld.eq(itlb_load)
        comb += i_out.tlbie.eq(tlbie_req)
        comb += i_out.doall.eq(r.inval_all)
        comb += i_out.addr.eq(addr)
        comb += i_out.pte.eq(tlb_data)

        return m
def mmu_sim(dut):
    """Simulation stimulus generator.

    NOTE(review): reconstructed from fragments.  The read_port /
    write_port accesses look like leftover template code from a memory
    test (the MMU classes above expose no such ports) -- confirm intent.
    FIX: the port objects were used before being created; creation is
    moved ahead of use, and dut is passed in explicitly.
    """
    wp = dut.write_port()  # TODO confirm (lines missing in source)
    rp = dut.read_port()   # TODO confirm (lines missing in source)
    yield wp.waddr.eq(1)
    yield wp.data_i.eq(2)
    yield
    yield rp.raddr.eq(1)
    yield
    data = yield rp.data_o
    yield wp.waddr.eq(5)
    yield rp.raddr.eq(5)
    yield
    yield wp.data_i.eq(6)
    yield
    data = yield rp.data_o
    yield
    data = yield rp.data_o


def test_mmu():
    """Convert the DUT to RTLIL and run the simulation testbench.

    NOTE(review): reconstructed; the dut construction line and the
    rtlil / run_simulation imports are not visible in this chunk --
    confirm against the original file header.
    """
    dut = MMU()  # TODO confirm (line missing in source)
    vl = rtlil.convert(dut, ports=dut.ports())
    with open("test_mmu.il", "w") as f:
        f.write(vl)  # TODO confirm (line missing in source)
    run_simulation(dut, mmu_sim(dut), vcd_name='test_mmu.vcd')


if __name__ == '__main__':
    test_mmu()