Add --enable-misaligned option for misaligned ld/st support
riscv/mmu.h
// See LICENSE for license details.

#ifndef _RISCV_MMU_H
#define _RISCV_MMU_H

#include "decode.h"
#include "trap.h"
#include "common.h"
#include "config.h"
#include "sim.h"
#include "processor.h"
#include "memtracer.h"
#include <stdlib.h>
#include <vector>

// virtual memory configuration
#define PGSHIFT 12
const reg_t PGSIZE = 1 << PGSHIFT;
const reg_t PGMASK = ~(PGSIZE-1);

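// a decoded instruction paired with the function that executes it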
struct insn_fetch_t
{
  insn_func_t func;
  insn_t insn;
};

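// one entry of the simulator's instruction cache: the fetch address tag
// plus the predecoded instruction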
struct icache_entry_t {
  reg_t tag;
  reg_t pad;
  insn_fetch_t data;
};

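// records which debug trigger matched a memory access, along with the
// operation, address, and data involved; thrown as an exception when a
// trigger fires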
class trigger_matched_t
{
  public:
    trigger_matched_t(int index,
        trigger_operation_t operation, reg_t address, reg_t data) :
      index(index), operation(operation), address(address), data(data) {}

    int index;
    trigger_operation_t operation;
    reg_t address;
    reg_t data;
};

// this class implements a processor's port into the virtual memory system.
// an MMU and instruction cache are maintained for simulator performance.
class mmu_t
{
public:
  mmu_t(sim_t* sim, processor_t* proc);
  ~mmu_t();

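  // emulate a misaligned load one byte at a time (little-endian) when
  // misaligned accesses are enabled via --enable-misaligned
  // (RISCV_ENABLE_MISALIGNED); otherwise raise a misaligned-load trap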
  inline reg_t misaligned_load(reg_t addr, size_t size)
  {
#ifdef RISCV_ENABLE_MISALIGNED
    reg_t res = 0;
    for (size_t i = 0; i < size; i++)
      res += (reg_t)load_uint8(addr + i) << (i * 8);
    return res;
#else
    throw trap_load_address_misaligned(addr);
#endif
  }

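  // emulate a misaligned store one byte at a time when misaligned accesses
  // are enabled; otherwise raise a misaligned-store trap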
  inline void misaligned_store(reg_t addr, reg_t data, size_t size)
  {
#ifdef RISCV_ENABLE_MISALIGNED
    for (size_t i = 0; i < size; i++)
      store_uint8(addr + i, data >> (i * 8));
#else
    throw trap_store_address_misaligned(addr);
#endif
  }

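  // the load/store templates below take a fast path when the direct-mapped
  // TLB hits; entries tagged with TLB_CHECK_TRIGGERS also consult the debug
  // triggers, and everything else (TLB miss, MMIO, page fault) falls back to
  // the *_slow_path() helpers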
  // template for functions that load an aligned value from memory
  #define load_func(type) \
    inline type##_t load_##type(reg_t addr) { \
      if (unlikely(addr & (sizeof(type##_t)-1))) \
        return misaligned_load(addr, sizeof(type##_t)); \
      reg_t vpn = addr >> PGSHIFT; \
      if (likely(tlb_load_tag[vpn % TLB_ENTRIES] == vpn)) \
        return *(type##_t*)(tlb_data[vpn % TLB_ENTRIES] + addr); \
      if (unlikely(tlb_load_tag[vpn % TLB_ENTRIES] == (vpn | TLB_CHECK_TRIGGERS))) { \
        type##_t data = *(type##_t*)(tlb_data[vpn % TLB_ENTRIES] + addr); \
        if (!matched_trigger) { \
          matched_trigger = trigger_exception(OPERATION_LOAD, addr, data); \
          if (matched_trigger) \
            throw *matched_trigger; \
        } \
        return data; \
      } \
      type##_t res; \
      load_slow_path(addr, sizeof(type##_t), (uint8_t*)&res); \
      return res; \
    }

  // load value from memory at aligned address; zero extend to register width
  load_func(uint8)
  load_func(uint16)
  load_func(uint32)
  load_func(uint64)

  // load value from memory at aligned address; sign extend to register width
  load_func(int8)
  load_func(int16)
  load_func(int32)
  load_func(int64)

  // template for functions that store an aligned value to memory
  #define store_func(type) \
    void store_##type(reg_t addr, type##_t val) { \
      if (unlikely(addr & (sizeof(type##_t)-1))) \
        return misaligned_store(addr, val, sizeof(type##_t)); \
      reg_t vpn = addr >> PGSHIFT; \
      if (likely(tlb_store_tag[vpn % TLB_ENTRIES] == vpn)) \
        *(type##_t*)(tlb_data[vpn % TLB_ENTRIES] + addr) = val; \
      else if (unlikely(tlb_store_tag[vpn % TLB_ENTRIES] == (vpn | TLB_CHECK_TRIGGERS))) { \
        if (!matched_trigger) { \
          matched_trigger = trigger_exception(OPERATION_STORE, addr, val); \
          if (matched_trigger) \
            throw *matched_trigger; \
        } \
        *(type##_t*)(tlb_data[vpn % TLB_ENTRIES] + addr) = val; \
      } \
      else \
        store_slow_path(addr, sizeof(type##_t), (const uint8_t*)&val); \
    }

  // template for functions that perform an atomic memory operation
  #define amo_func(type) \
    template<typename op> \
    type##_t amo_##type(reg_t addr, op f) { \
      if (addr & (sizeof(type##_t)-1)) \
        throw trap_store_address_misaligned(addr); \
      try { \
        auto lhs = load_##type(addr); \
        store_##type(addr, f(lhs)); \
        return lhs; \
      } catch (trap_load_page_fault& t) { \
        /* AMO faults should be reported as store faults */ \
        throw trap_store_page_fault(t.get_badaddr()); \
      } catch (trap_load_access_fault& t) { \
        /* AMO faults should be reported as store faults */ \
        throw trap_store_access_fault(t.get_badaddr()); \
      } \
    }

  // store value to memory at aligned address
  store_func(uint8)
  store_func(uint16)
  store_func(uint32)
  store_func(uint64)

  // perform an atomic memory operation at an aligned address
  amo_func(uint32)
  amo_func(uint64)

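  // number of entries in the direct-mapped instruction cache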
  static const reg_t ICACHE_ENTRIES = 1024;

  inline size_t icache_index(reg_t addr)
  {
    return (addr / PC_ALIGN) % ICACHE_ENTRIES;
  }

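  // decode the instruction at addr and fill the given icache entry;
  // instructions are fetched in 16-bit parcels, so each parcel is translated
  // separately in case it crosses a page boundary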
  inline icache_entry_t* refill_icache(reg_t addr, icache_entry_t* entry)
  {
    const uint16_t* iaddr = translate_insn_addr(addr);
    insn_bits_t insn = *iaddr;
    int length = insn_length(insn);

    if (likely(length == 4)) {
      insn |= (insn_bits_t)*(const int16_t*)translate_insn_addr(addr + 2) << 16;
    } else if (length == 2) {
      insn = (int16_t)insn;
    } else if (length == 6) {
      insn |= (insn_bits_t)*(const int16_t*)translate_insn_addr(addr + 4) << 32;
      insn |= (insn_bits_t)*(const uint16_t*)translate_insn_addr(addr + 2) << 16;
    } else {
      static_assert(sizeof(insn_bits_t) == 8, "insn_bits_t must be uint64_t");
      insn |= (insn_bits_t)*(const int16_t*)translate_insn_addr(addr + 6) << 48;
      insn |= (insn_bits_t)*(const uint16_t*)translate_insn_addr(addr + 4) << 32;
      insn |= (insn_bits_t)*(const uint16_t*)translate_insn_addr(addr + 2) << 16;
    }

    insn_fetch_t fetch = {proc->decode_insn(insn), insn};
    entry->tag = addr;
    entry->data = fetch;

    reg_t paddr = sim->mem_to_addr((char*)iaddr);
    if (tracer.interested_in_range(paddr, paddr + 1, FETCH)) {
      entry->tag = -1;
      tracer.trace(paddr, length, FETCH);
    }
    return entry;
  }

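  // return the icache entry for addr, refilling it on a miss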
  inline icache_entry_t* access_icache(reg_t addr)
  {
    icache_entry_t* entry = &icache[icache_index(addr)];
    if (likely(entry->tag == addr))
      return entry;
    return refill_icache(addr, entry);
  }

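  // fetch and decode the instruction at addr without disturbing the icache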
  inline insn_fetch_t load_insn(reg_t addr)
  {
    icache_entry_t entry;
    return refill_icache(addr, &entry)->data;
  }

  void flush_tlb();
  void flush_icache();

  void register_memtracer(memtracer_t*);

private:
  sim_t* sim;
  processor_t* proc;
  memtracer_list_t tracer;
  uint16_t fetch_temp;

  // implement an instruction cache for simulator performance
  icache_entry_t icache[ICACHE_ENTRIES];

  // implement a TLB for simulator performance
  static const reg_t TLB_ENTRIES = 256;
  // If a TLB tag has TLB_CHECK_TRIGGERS set, then the MMU must check for a
  // trigger match before completing an access.
  static const reg_t TLB_CHECK_TRIGGERS = reg_t(1) << 63;
  char* tlb_data[TLB_ENTRIES];
  reg_t tlb_insn_tag[TLB_ENTRIES];
  reg_t tlb_load_tag[TLB_ENTRIES];
  reg_t tlb_store_tag[TLB_ENTRIES];

  // finish translation on a TLB miss and update the TLB
  void refill_tlb(reg_t vaddr, reg_t paddr, access_type type);
  const char* fill_from_mmio(reg_t vaddr, reg_t paddr);

  // perform a page table walk for a given VA; set referenced/dirty bits
  reg_t walk(reg_t addr, access_type type, reg_t prv);

  // handle uncommon cases: TLB misses, page faults, MMIO
  const uint16_t* fetch_slow_path(reg_t addr);
  void load_slow_path(reg_t addr, reg_t len, uint8_t* bytes);
  void store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes);
  reg_t translate(reg_t addr, access_type type);

  // ITLB lookup
  inline const uint16_t* translate_insn_addr(reg_t addr) {
    reg_t vpn = addr >> PGSHIFT;
    if (likely(tlb_insn_tag[vpn % TLB_ENTRIES] == vpn))
      return (uint16_t*)(tlb_data[vpn % TLB_ENTRIES] + addr);
    if (unlikely(tlb_insn_tag[vpn % TLB_ENTRIES] == (vpn | TLB_CHECK_TRIGGERS))) {
      uint16_t* ptr = (uint16_t*)(tlb_data[vpn % TLB_ENTRIES] + addr);
      int match = proc->trigger_match(OPERATION_EXECUTE, addr, *ptr);
      if (match >= 0)
        throw trigger_matched_t(match, OPERATION_EXECUTE, addr, *ptr);
      return ptr;
    }
    return fetch_slow_path(addr);
  }

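  // check the debug triggers for this access: returns NULL if none match,
  // throws immediately for triggers that fire before the access completes,
  // and otherwise returns a heap-allocated match to be raised afterwards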
  inline trigger_matched_t *trigger_exception(trigger_operation_t operation,
      reg_t address, reg_t data)
  {
    if (!proc) {
      return NULL;
    }
    int match = proc->trigger_match(operation, address, data);
    if (match == -1)
      return NULL;
    if (proc->state.mcontrol[match].timing == 0) {
      throw trigger_matched_t(match, operation, address, data);
    }
    return new trigger_matched_t(match, operation, address, data);
  }

  bool check_triggers_fetch;
  bool check_triggers_load;
  bool check_triggers_store;
  // The exception describing a matched trigger, or NULL.
  trigger_matched_t *matched_trigger;

  friend class processor_t;
};

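// describes an address-translation scheme: the number of page-table levels,
// index bits per level, PTE size in bytes, and the root page-table base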
struct vm_info {
  int levels;
  int idxbits;
  int ptesize;
  reg_t ptbase;
};

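// decode the virtual-memory scheme selected by the current privilege level
// and the sptbr register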
inline vm_info decode_vm_info(int xlen, reg_t prv, reg_t sptbr)
{
  if (prv == PRV_M) {
    return {0, 0, 0, 0};
  } else if (prv <= PRV_S && xlen == 32) {
    switch (get_field(sptbr, SPTBR32_MODE)) {
      case SPTBR_MODE_OFF: return {0, 0, 0, 0};
      case SPTBR_MODE_SV32: return {2, 10, 4, (sptbr & SPTBR32_PPN) << PGSHIFT};
      default: abort();
    }
  } else if (prv <= PRV_S && xlen == 64) {
    switch (get_field(sptbr, SPTBR64_MODE)) {
      case SPTBR_MODE_OFF: return {0, 0, 0, 0};
      case SPTBR_MODE_SV39: return {3, 9, 8, (sptbr & SPTBR64_PPN) << PGSHIFT};
      case SPTBR_MODE_SV48: return {4, 9, 8, (sptbr & SPTBR64_PPN) << PGSHIFT};
      case SPTBR_MODE_SV57: return {5, 9, 8, (sptbr & SPTBR64_PPN) << PGSHIFT};
      case SPTBR_MODE_SV64: return {6, 9, 8, (sptbr & SPTBR64_PPN) << PGSHIFT};
      default: abort();
    }
  } else {
    abort();
  }
}

#endif