// virtual memory configuration

// number of page-table levels: 3 when PTEs are 64-bit, else 2
const reg_t LEVELS = sizeof(pte_t) == sizeof(uint64_t) ? 3 : 2;
// log2 of the page size (13 => 8 KiB pages)
const reg_t PGSHIFT = 13;
const reg_t PGSIZE = 1 << PGSHIFT;
// virtual-address bits consumed per page-table level
// (a page holds PGSIZE / sizeof(pte_t) entries)
const reg_t PTIDXBITS = PGSHIFT - (sizeof(pte_t) == 8 ? 3 : 2);
// total virtual-page-number bits across all levels
const reg_t VPN_BITS = PTIDXBITS * LEVELS;
// physical-page-number bits: everything in a reg_t above the page offset
const reg_t PPN_BITS = 8*sizeof(reg_t) - PGSHIFT;
// total virtual-address bits
const reg_t VA_BITS = VPN_BITS + PGSHIFT;
// page table entry (PTE) fields
// NOTE(review): the original R/W comment labels on the UW/UR and SW/SR
// pairs were crossed; corrected below (values unchanged).
#define PTE_T 0x001 // Entry is a page Table descriptor
#define PTE_E 0x002 // Entry is a page table Entry
#define PTE_R 0x004 // Referenced
#define PTE_D 0x008 // Dirty
#define PTE_UX 0x010 // User eXecute permission
#define PTE_UW 0x020 // User Write permission
#define PTE_UR 0x040 // User Read permission
#define PTE_SX 0x080 // Supervisor eXecute permission
#define PTE_SW 0x100 // Supervisor Write permission
#define PTE_SR 0x200 // Supervisor Read permission
#define PTE_PERM (PTE_SR | PTE_SW | PTE_SX | PTE_UR | PTE_UW | PTE_UX)
#define PTE_PPN_SHIFT 13 // LSB of physical page number in the PTE
// this class implements a processor's port into the virtual memory system.
// an MMU and instruction cache are maintained for simulator performance.

// construct an MMU backed by the host buffer _mem of _memsz bytes;
// translated physical addresses index directly into this buffer
mmu_t(char* _mem, size_t _memsz);
// template for functions that load an aligned value from memory
// expands to load_<type>(): checks alignment (misaligned access raises
// a load-address-misaligned trap), translates, and reads host memory.
// NOTE(review): this chunk elides the macro's brace/closing lines;
// code below is shown as extracted.
#define load_func(type) \
  type##_t load_##type(reg_t addr) { \
    if(unlikely(addr % sizeof(type##_t))) \
      throw trap_load_address_misaligned; \
    reg_t paddr = translate(addr, sizeof(type##_t), false, false); \
    return *(type##_t*)(mem + paddr); \
56 // load value from memory at aligned address; zero extend to register width
62 // load value from memory at aligned address; sign extend to register width
// template for functions that store an aligned value to memory
// expands to store_<type>(): checks alignment (misaligned access raises
// a store-address-misaligned trap), translates, and writes host memory.
// NOTE(review): this chunk elides the macro's brace/closing lines;
// code below is shown as extracted.
#define store_func(type) \
  void store_##type(reg_t addr, type##_t val) { \
    if(unlikely(addr % sizeof(type##_t))) \
      throw trap_store_address_misaligned; \
    reg_t paddr = translate(addr, sizeof(type##_t), true, false); \
    *(type##_t*)(mem + paddr) = val; \
80 // store value to memory at aligned address
// load instruction from memory at aligned address.
// (needed because instruction alignment requirement is variable
// if RVC is supported)
// returns the instruction at the specified address, given the current
// RVC mode. func is set to a pointer to a function that knows how to
// execute the returned instruction.
// NOTE(review): this chunk elides several lines of this function
// (braces, the declaration of `fetch`, the non-RVC path, the returns);
// the code below is shown as extracted.
inline insn_fetch_t load_insn(reg_t addr, bool rvc)
#ifdef RISCV_ENABLE_RVC
// a word-sized fetch at addr%4==2 may straddle a page boundary, so the
// two halfwords are translated independently
if(addr % 4 == 2 && rvc) // fetch across word boundary
reg_t addr_lo = translate(addr, 2, false, true);
fetch.insn.bits = *(uint16_t*)(mem + addr_lo);
fetch.func = get_insn_func(fetch.insn, sr);
// not a compressed instruction: fetch the upper halfword (possibly on
// the next page) and merge it into the high 16 bits
if(!INSN_IS_RVC(fetch.insn.bits))
reg_t addr_hi = translate(addr+2, 2, false, true);
fetch.insn.bits |= (uint32_t)*(uint16_t*)(mem + addr_hi) << 16;
// common path: probe the direct-mapped, virtually-tagged icache
reg_t idx = (addr/sizeof(insn_t)) % ICACHE_ENTRIES;
if (unlikely(icache_tag[idx] != addr))
// icache miss: translate, fetch, decode, then refill the cache slot
reg_t paddr = translate(addr, sizeof(insn_t), false, true);
fetch.insn = *(insn_t*)(mem + paddr);
fetch.func = get_insn_func(fetch.insn, sr);
// NOTE(review): this inner idx is derived from paddr while the tag
// compare above used the addr-derived idx — confirm intended
reg_t idx = (paddr/sizeof(insn_t)) % ICACHE_ENTRIES;
icache_tag[idx] = addr;
icache_data[idx] = fetch.insn;
icache_func[idx] = fetch.func;
// if a tracer watches this range, invalidate the entry so future
// fetches also reach the tracer, then report this fetch
if (tracer.interested_in_range(paddr, paddr + sizeof(insn_t), false, true))
icache_tag[idx] = -1;
tracer.trace(paddr, sizeof(insn_t), false, true);
// icache hit: serve instruction and executor from the cache
// (stray second ';' below is harmless)
fetch.insn = icache_data[idx];;
fetch.func = icache_func[idx];
143 // get the virtual address that caused a fault
144 reg_t
get_badvaddr() { return badvaddr
; }
146 // get/set the page table base register
147 reg_t
get_ptbr() { return ptbr
; }
148 void set_ptbr(reg_t addr
) { ptbr
= addr
& ~(PGSIZE
-1); flush_tlb(); }
149 // keep the MMU in sync with processor mode
150 void set_sr(uint32_t _sr
) { sr
= _sr
; }
// flush the TLB and instruction cache
// NOTE(review): the flush declarations themselves are elided from this
// chunk; only the tracer registration declaration is visible.

// attach a memory tracer; traced regions are reported on icache refill
// (see the tracer calls in load_insn)
void register_memtracer(memtracer_t*);
// attached memory tracers, consulted when the icache refills
memtracer_list_t tracer;

// implement a TLB for simulator performance: direct-mapped, with
// separate tag arrays for fetch, load, and store accesses
static const reg_t TLB_ENTRIES = 256;
// physical page base per entry; translate() adds the in-page offset
reg_t tlb_data[TLB_ENTRIES];
// tags hold the virtual page base (addr & ~(PGSIZE-1)) of each mapping
reg_t tlb_insn_tag[TLB_ENTRIES];
reg_t tlb_load_tag[TLB_ENTRIES];
reg_t tlb_store_tag[TLB_ENTRIES];

// implement an instruction cache for simulator performance
static const reg_t ICACHE_ENTRIES = 256;
insn_t icache_data[ICACHE_ENTRIES]; // cached instruction bits
insn_func_t icache_func[ICACHE_ENTRIES]; // executor for the cached insn
reg_t icache_tag[ICACHE_ENTRIES]; // virtual-address tag (-1 forces a miss)
// finish translation on a TLB miss and update the TLB
reg_t refill_tlb(reg_t addr, reg_t bytes, bool store, bool fetch);

// perform a page table walk for a given virtual address
pte_t walk(reg_t addr);
185 // translate a virtual address to a physical address
186 reg_t
translate(reg_t addr
, reg_t bytes
, bool store
, bool fetch
)
188 reg_t idx
= (addr
>> PGSHIFT
) % TLB_ENTRIES
;
190 reg_t
* tlb_tag
= fetch
? tlb_insn_tag
: store
? tlb_store_tag
:tlb_load_tag
;
191 reg_t expected_tag
= addr
& ~(PGSIZE
-1);
192 if(likely(tlb_tag
[idx
] == expected_tag
))
193 return ((uintptr_t)addr
& (PGSIZE
-1)) + tlb_data
[idx
];
195 return refill_tlb(addr
, bytes
, store
, fetch
);
// grant processor_t direct access to the MMU's private state
friend class processor_t;