// virtual memory configuration

// number of page-table levels: 3 when PTEs are 64-bit, 2 when 32-bit
const reg_t LEVELS = sizeof(pte_t) == sizeof(uint64_t) ? 3 : 2;
// log2 of the page size; 13 => 8 KiB pages
const reg_t PGSHIFT = 13;
const reg_t PGSIZE = 1 << PGSHIFT;
// virtual-page-number index bits consumed per page-table level
// (a page holds PGSIZE/sizeof(pte_t) entries: PTEs are 8 or 4 bytes)
const reg_t PTIDXBITS = PGSHIFT - (sizeof(pte_t) == 8 ? 3 : 2);
// total virtual-page-number bits across all levels
const reg_t VPN_BITS = PTIDXBITS * LEVELS;
// physical-page-number bits representable in a reg_t
const reg_t PPN_BITS = 8*sizeof(reg_t) - PGSHIFT;
// total translated virtual-address bits (VPN plus page offset)
const reg_t VA_BITS = VPN_BITS + PGSHIFT;
// page table entry (PTE) fields
// NOTE(review): the Read/Write permission comments were swapped
// (PTE_UW was annotated "User Read" and vice versa); values unchanged.
#define PTE_T 0x001 // Entry is a page Table descriptor
#define PTE_E 0x002 // Entry is a page table Entry
#define PTE_R 0x004 // Referenced
#define PTE_D 0x008 // Dirty
#define PTE_UX 0x010 // User eXecute permission
#define PTE_UW 0x020 // User Write permission
#define PTE_UR 0x040 // User Read permission
#define PTE_SX 0x080 // Supervisor eXecute permission
#define PTE_SW 0x100 // Supervisor Write permission
#define PTE_SR 0x200 // Supervisor Read permission
#define PTE_PERM (PTE_SR | PTE_SW | PTE_SX | PTE_UR | PTE_UW | PTE_UX)
#define PTE_PPN_SHIFT 13 // LSB of physical page number in the PTE
// this class implements a processor's port into the virtual memory system.
// an MMU and instruction cache are maintained for simulator performance.

// construct an MMU over a host-memory buffer of _memsz bytes at _mem
// (presumably physical addresses index directly into this buffer —
// confirm against the .cc implementation of translate/refill)
mmu_t(char* _mem, size_t _memsz);
// template for functions that load an aligned value from memory.
// expands to e.g. uint64_t load_uint64(reg_t addr): a misaligned
// address throws trap_load_address_misaligned; otherwise the address
// is translated (store=false, fetch=false) and the value read through
// the returned host pointer.
// (no comments inside the macro: a // on a continuation line would
// splice away the following line)
#define load_func(type) \
  type##_t load_##type(reg_t addr) { \
    if(unlikely(addr % sizeof(type##_t))) \
      throw trap_load_address_misaligned; \
    void* paddr = translate(addr, sizeof(type##_t), false, false); \
    return *(type##_t*)paddr; \
// load value from memory at aligned address; zero extend to register width

// load value from memory at aligned address; sign extend to register width
// template for functions that store an aligned value to memory.
// expands to e.g. void store_uint64(reg_t addr, uint64_t val): a
// misaligned address throws trap_store_address_misaligned; otherwise
// the address is translated for write (store=true, fetch=false) and
// the value written through the returned host pointer.
#define store_func(type) \
  void store_##type(reg_t addr, type##_t val) { \
    if(unlikely(addr % sizeof(type##_t))) \
      throw trap_store_address_misaligned; \
    void* paddr = translate(addr, sizeof(type##_t), true, false); \
    *(type##_t*)paddr = val; \
// store value to memory at aligned address
// load instruction from memory at aligned address.
// (needed because instruction alignment requirement is variable
// if RVC is supported)
// returns the instruction at the specified address, given the current
// RVC mode. func is set to a pointer to a function that knows how to
// execute the returned instruction.
insn_t __attribute__((always_inline)) load_insn(reg_t addr, bool rvc,
#ifdef RISCV_ENABLE_RVC
// RVC path: a 4-byte fetch at addr%4==2 may straddle a page boundary,
// so fetch halfword-by-halfword through separate translations
if(addr % 4 == 2 && rvc) // fetch across word boundary
// translate and read the low halfword first
void* addr_lo = translate(addr, 2, false, true);
insn.bits = *(uint16_t*)addr_lo;
// provisionally dispatch on the low halfword (enough for a compressed insn)
*func = processor_t::dispatch_table
  [insn.bits % processor_t::DISPATCH_TABLE_SIZE];
// not compressed: the high halfword is needed to complete the insn
if(!INSN_IS_RVC(insn.bits))
void* addr_hi = translate(addr+2, 2, false, true);
insn.bits |= (uint32_t)*(uint16_t*)addr_hi << 16;
// non-straddling (or non-RVC) path: direct-mapped icache lookup
reg_t idx = (addr/sizeof(insn_t)) % ICACHE_ENTRIES;
insn_t data = icache_data[idx];
*func = icache_func[idx];
// icache hit when the tag matches the full fetch address
if(likely(icache_tag[idx] == addr))
// icache miss: fetch the instruction from memory
// the processor guarantees alignment based upon rvc mode
void* paddr = translate(addr, sizeof(insn_t), false, true);
insn = *(insn_t*)paddr;
*func = processor_t::dispatch_table
  [insn.bits % processor_t::DISPATCH_TABLE_SIZE];
// only cache the fetch if no tracer wants to observe this range;
// otherwise every fetch must go through translate so the tracer sees it
if (!tracer.interested_in_range(addr, addr + sizeof(insn_t), false, true))
icache_tag[idx] = addr;
icache_data[idx] = insn;
icache_func[idx] = *func;
140 // get the virtual address that caused a fault
141 reg_t
get_badvaddr() { return badvaddr
; }
143 // get/set the page table base register
144 reg_t
get_ptbr() { return ptbr
; }
145 void set_ptbr(reg_t addr
) { ptbr
= addr
& ~(PGSIZE
-1); flush_tlb(); }
147 // keep the MMU in sync with processor mode
148 void set_supervisor(bool sup
) { supervisor
= sup
; }
149 void set_vm_enabled(bool en
) { vm_enabled
= en
; }
// flush the TLB and instruction cache

// attach a memory tracer; traced ranges bypass the icache so the
// tracer observes every access (see load_insn's interested_in_range check)
void register_memtracer(memtracer_t*);
// list of attached memory tracers, consulted before caching fetches
memtracer_list_t tracer;

// implement a TLB for simulator performance
static const reg_t TLB_ENTRIES = 256;
// host pointer backing each cached page (page offset is added on a hit)
char* tlb_data[TLB_ENTRIES];
// page-aligned virtual-address tags, kept per access type so that a
// page can be valid for load but not for fetch/store (and vice versa)
reg_t tlb_insn_tag[TLB_ENTRIES];
reg_t tlb_load_tag[TLB_ENTRIES];
reg_t tlb_store_tag[TLB_ENTRIES];

// implement an instruction cache for simulator performance
static const reg_t ICACHE_ENTRIES = 256;
// cached decoded instruction bits per entry
insn_t icache_data[ICACHE_ENTRIES];
// cached dispatch function per entry
insn_func_t icache_func[ICACHE_ENTRIES];
// full fetch address that validates each entry
reg_t icache_tag[ICACHE_ENTRIES];
// finish translation on a TLB miss and update the TLB
void* refill(reg_t addr, reg_t bytes, bool store, bool fetch);

// perform a page table walk for a given virtual address
pte_t walk(reg_t addr);
// translate a virtual address to a physical address.
// returns a host pointer through which the access can be performed;
// fast path is a direct-mapped TLB hit, miss path defers to refill()
void* translate(reg_t addr, reg_t bytes, bool store, bool fetch)
// index the direct-mapped TLB by virtual page number
reg_t idx = (addr >> PGSHIFT) % TLB_ENTRIES;
// pick the tag array for this access type; separate tags per type
// mean a hit implies the needed permission was already validated
reg_t* tlb_tag = fetch ? tlb_insn_tag : store ? tlb_store_tag : tlb_load_tag;
// tags hold the page-aligned virtual address
reg_t expected_tag = addr & ~(PGSIZE-1);
if(likely(tlb_tag[idx] == expected_tag))
// hit: combine the page offset with the cached host page pointer
return ((uintptr_t)addr & (PGSIZE-1)) + tlb_data[idx];
// miss: perform the full translation and update the TLB
return refill(addr, bytes, store, fetch);
// processor_t is granted direct access to MMU internals
friend class processor_t;