14 // virtual memory configuration
16 const reg_t LEVELS
= sizeof(pte_t
) == sizeof(uint64_t) ? 3 : 2;
17 const reg_t PGSHIFT
= 13;
18 const reg_t PGSIZE
= 1 << PGSHIFT
;
19 const reg_t PTIDXBITS
= PGSHIFT
- (sizeof(pte_t
) == 8 ? 3 : 2);
20 const reg_t VPN_BITS
= PTIDXBITS
* LEVELS
;
21 const reg_t PPN_BITS
= 8*sizeof(reg_t
) - PGSHIFT
;
22 const reg_t VA_BITS
= VPN_BITS
+ PGSHIFT
;
// page table entry (PTE) fields
#define PTE_T    0x001 // Entry is a page Table descriptor
#define PTE_E    0x002 // Entry is a page table Entry
#define PTE_R    0x004 // Referenced
#define PTE_D    0x008 // Dirty
#define PTE_UX   0x010 // User eXecute permission
#define PTE_UW   0x020 // User Write permission
#define PTE_UR   0x040 // User Read permission
#define PTE_SX   0x080 // Supervisor eXecute permission
#define PTE_SW   0x100 // Supervisor Write permission
#define PTE_SR   0x200 // Supervisor Read permission
#define PTE_PERM (PTE_SR | PTE_SW | PTE_SX | PTE_UR | PTE_UW | PTE_UX)
#define PTE_PPN_SHIFT 13 // LSB of physical page number in the PTE
38 // this class implements a processor's port into the virtual memory system.
39 // an MMU and instruction cache are maintained for simulator performance.
43 mmu_t(char* _mem
, size_t _memsz
);
// template for functions that load an aligned value from memory.
// a misaligned address raises trap_load_address_misaligned; otherwise the
// virtual address is translated and the value is read from host memory.
// (comments are kept outside the macro: a '//' comment would hide the
// trailing backslash of a continuation line.)
#define load_func(type) \
  type##_t load_##type(reg_t addr) { \
    if(unlikely(addr % sizeof(type##_t))) \
      throw trap_load_address_misaligned; \
    reg_t paddr = translate(addr, sizeof(type##_t), false, false); \
    return *(type##_t*)(mem + paddr); \
  }
// load value from memory at aligned address; zero extend to register width
// load value from memory at aligned address; sign extend to register width
// template for functions that store an aligned value to memory.
// a misaligned address raises trap_store_address_misaligned; otherwise the
// virtual address is translated (store=true) and the value written to host
// memory.
#define store_func(type) \
  void store_##type(reg_t addr, type##_t val) { \
    if(unlikely(addr % sizeof(type##_t))) \
      throw trap_store_address_misaligned; \
    reg_t paddr = translate(addr, sizeof(type##_t), true, false); \
    *(type##_t*)(mem + paddr) = val; \
  }
// store value to memory at aligned address
94 // load instruction from memory at aligned address.
95 // (needed because instruction alignment requirement is variable
96 // if RVC is supported)
97 // returns the instruction at the specified address, given the current
98 // RVC mode. func is set to a pointer to a function that knows how to
99 // execute the returned instruction.
100 inline insn_fetch_t
load_insn(reg_t addr
, bool rvc
)
102 #ifdef RISCV_ENABLE_RVC
103 if(addr
% 4 == 2 && rvc
) // fetch across word boundary
105 reg_t addr_lo
= translate(addr
, 2, false, true);
107 fetch
.insn
.bits
= *(uint16_t*)(mem
+ addr_lo
);
108 size_t dispatch_idx
= fetch
.insn
.bits
% processor_t::DISPATCH_TABLE_SIZE
;
109 fetch
.func
= processor_t::dispatch_table
[dispatch_idx
];
111 if(!INSN_IS_RVC(fetch
.insn
.bits
))
113 reg_t addr_hi
= translate(addr
+2, 2, false, true);
114 fetch
.insn
.bits
|= (uint32_t)*(uint16_t*)(mem
+ addr_hi
) << 16;
121 reg_t idx
= (addr
/sizeof(insn_t
)) % ICACHE_ENTRIES
;
123 if (unlikely(icache_tag
[idx
] != addr
))
125 reg_t paddr
= translate(addr
, sizeof(insn_t
), false, true);
126 fetch
.insn
= *(insn_t
*)(mem
+ paddr
);
127 size_t dispatch_idx
= fetch
.insn
.bits
% processor_t::DISPATCH_TABLE_SIZE
;
128 fetch
.func
= processor_t::dispatch_table
[dispatch_idx
];
130 reg_t idx
= (paddr
/sizeof(insn_t
)) % ICACHE_ENTRIES
;
131 icache_tag
[idx
] = addr
;
132 icache_data
[idx
] = fetch
.insn
;
133 icache_func
[idx
] = fetch
.func
;
135 if (tracer
.interested_in_range(paddr
, paddr
+ sizeof(insn_t
), false, true))
137 icache_tag
[idx
] = -1;
138 tracer
.trace(paddr
, sizeof(insn_t
), false, true);
141 fetch
.insn
= icache_data
[idx
];;
142 fetch
.func
= icache_func
[idx
];
147 // get the virtual address that caused a fault
148 reg_t
get_badvaddr() { return badvaddr
; }
150 // get/set the page table base register
151 reg_t
get_ptbr() { return ptbr
; }
152 void set_ptbr(reg_t addr
) { ptbr
= addr
& ~(PGSIZE
-1); flush_tlb(); }
154 // keep the MMU in sync with processor mode
155 void set_supervisor(bool sup
) { supervisor
= sup
; }
156 void set_vm_enabled(bool en
) { vm_enabled
= en
; }
// flush the TLB and instruction cache
162 void register_memtracer(memtracer_t
*);
171 memtracer_list_t tracer
;
173 // implement a TLB for simulator performance
174 static const reg_t TLB_ENTRIES
= 256;
175 reg_t tlb_data
[TLB_ENTRIES
];
176 reg_t tlb_insn_tag
[TLB_ENTRIES
];
177 reg_t tlb_load_tag
[TLB_ENTRIES
];
178 reg_t tlb_store_tag
[TLB_ENTRIES
];
180 // implement an instruction cache for simulator performance
181 static const reg_t ICACHE_ENTRIES
= 256;
182 insn_t icache_data
[ICACHE_ENTRIES
];
183 insn_func_t icache_func
[ICACHE_ENTRIES
];
184 reg_t icache_tag
[ICACHE_ENTRIES
];
186 // finish translation on a TLB miss and upate the TLB
187 reg_t
refill_tlb(reg_t addr
, reg_t bytes
, bool store
, bool fetch
);
189 // perform a page table walk for a given virtual address
190 pte_t
walk(reg_t addr
);
192 // translate a virtual address to a physical address
193 reg_t
translate(reg_t addr
, reg_t bytes
, bool store
, bool fetch
)
195 reg_t idx
= (addr
>> PGSHIFT
) % TLB_ENTRIES
;
197 reg_t
* tlb_tag
= fetch
? tlb_insn_tag
: store
? tlb_store_tag
:tlb_load_tag
;
198 reg_t expected_tag
= addr
& ~(PGSIZE
-1);
199 if(likely(tlb_tag
[idx
] == expected_tag
))
200 return ((uintptr_t)addr
& (PGSIZE
-1)) + tlb_data
[idx
];
202 return refill_tlb(addr
, bytes
, store
, fetch
);
205 friend class processor_t
;