riscv-isa-sim: riscv/mmu.h
// See LICENSE for license details.

#ifndef _RISCV_MMU_H
#define _RISCV_MMU_H

#include "decode.h"
#include "trap.h"
#include "common.h"
#include "config.h"
#include "simif.h"
#include "processor.h"
#include "memtracer.h"
#include <stdlib.h>
#include <vector>

// virtual memory configuration
#define PGSHIFT 12
const reg_t PGSIZE = 1 << PGSHIFT;
const reg_t PGMASK = ~(PGSIZE-1);
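// (note) with PGSHIFT == 12, PGSIZE is 4 KiB (0x1000) and PGMASK (~0xfff)
// keeps only the page-aligned portion of an address.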

struct insn_fetch_t
{
  insn_func_t func;
  insn_t insn;
};

struct icache_entry_t {
  reg_t tag;
  struct icache_entry_t* next;
  insn_fetch_t data;
};

struct tlb_entry_t {
  char* host_offset;
  reg_t target_offset;
};
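// (note, inferred from the fast paths below) both fields are biased by the
// virtual address: host_offset + vaddr gives the host pointer backing the
// page, and target_offset + vaddr gives the guest physical address.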

class trigger_matched_t
{
  public:
    trigger_matched_t(int index,
        trigger_operation_t operation, reg_t address, reg_t data) :
      index(index), operation(operation), address(address), data(data) {}

    int index;
    trigger_operation_t operation;
    reg_t address;
    reg_t data;
};

// this class implements a processor's port into the virtual memory system.
// an MMU and instruction cache are maintained for simulator performance.
class mmu_t
{
public:
  mmu_t(simif_t* sim, processor_t* proc);
  ~mmu_t();

  inline reg_t misaligned_load(reg_t addr, size_t size)
  {
#ifdef RISCV_ENABLE_MISALIGNED
    reg_t res = 0;
    for (size_t i = 0; i < size; i++)
      res += (reg_t)load_uint8(addr + i) << (i * 8);
    return res;
#else
    throw trap_load_address_misaligned(addr);
#endif
  }

  inline void misaligned_store(reg_t addr, reg_t data, size_t size)
  {
#ifdef RISCV_ENABLE_MISALIGNED
    for (size_t i = 0; i < size; i++)
      store_uint8(addr + i, data >> (i * 8));
#else
    throw trap_store_address_misaligned(addr);
#endif
  }
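  // (note) when RISCV_ENABLE_MISALIGNED is not defined at build time,
  // misaligned accesses raise an address-misaligned trap instead of being
  // emulated byte-by-byte as above.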

  // template for functions that load an aligned value from memory
  #define load_func(type) \
    inline type##_t load_##type(reg_t addr) { \
      if (unlikely(addr & (sizeof(type##_t)-1))) \
        return misaligned_load(addr, sizeof(type##_t)); \
      reg_t vpn = addr >> PGSHIFT; \
      if (likely(tlb_load_tag[vpn % TLB_ENTRIES] == vpn)) \
        return *(type##_t*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr); \
      if (unlikely(tlb_load_tag[vpn % TLB_ENTRIES] == (vpn | TLB_CHECK_TRIGGERS))) { \
        type##_t data = *(type##_t*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr); \
        if (!matched_trigger) { \
          matched_trigger = trigger_exception(OPERATION_LOAD, addr, data); \
          if (matched_trigger) \
            throw *matched_trigger; \
        } \
        return data; \
      } \
      type##_t res; \
      load_slow_path(addr, sizeof(type##_t), (uint8_t*)&res); \
      return res; \
    }
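  // For example, load_func(uint32) below expands to a member function
  //   inline uint32_t load_uint32(reg_t addr) { ... }
  // with a fast TLB-hit path, a trigger-checking path, and a slow path.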

  // load value from memory at aligned address; zero extend to register width
  load_func(uint8)
  load_func(uint16)
  load_func(uint32)
  load_func(uint64)

  // load value from memory at aligned address; sign extend to register width
  load_func(int8)
  load_func(int16)
  load_func(int32)
  load_func(int64)

  // template for functions that store an aligned value to memory
  #define store_func(type) \
    void store_##type(reg_t addr, type##_t val) { \
      if (unlikely(addr & (sizeof(type##_t)-1))) \
        return misaligned_store(addr, val, sizeof(type##_t)); \
      reg_t vpn = addr >> PGSHIFT; \
      if (likely(tlb_store_tag[vpn % TLB_ENTRIES] == vpn)) \
        *(type##_t*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr) = val; \
      else if (unlikely(tlb_store_tag[vpn % TLB_ENTRIES] == (vpn | TLB_CHECK_TRIGGERS))) { \
        if (!matched_trigger) { \
          matched_trigger = trigger_exception(OPERATION_STORE, addr, val); \
          if (matched_trigger) \
            throw *matched_trigger; \
        } \
        *(type##_t*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr) = val; \
      } \
      else \
        store_slow_path(addr, sizeof(type##_t), (const uint8_t*)&val); \
    }

  // template for functions that perform an atomic memory operation
  #define amo_func(type) \
    template<typename op> \
    type##_t amo_##type(reg_t addr, op f) { \
      if (addr & (sizeof(type##_t)-1)) \
        throw trap_store_address_misaligned(addr); \
      try { \
        auto lhs = load_##type(addr); \
        store_##type(addr, f(lhs)); \
        return lhs; \
      } catch (trap_load_page_fault& t) { \
        /* AMO faults should be reported as store faults */ \
        throw trap_store_page_fault(t.get_tval()); \
      } catch (trap_load_access_fault& t) { \
        /* AMO faults should be reported as store faults */ \
        throw trap_store_access_fault(t.get_tval()); \
      } \
    }
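  // Illustrative use (a sketch of how an AMO handler might call this; names
  // like rhs are hypothetical):
  //   uint32_t old = amo_uint32(addr, [&](uint32_t lhs) { return lhs + rhs; });
  // The functor computes the new memory value from the old one; the old value
  // is returned so it can be written back to the destination register.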

  void store_float128(reg_t addr, float128_t val)
  {
#ifndef RISCV_ENABLE_MISALIGNED
    if (unlikely(addr & (sizeof(float128_t)-1)))
      throw trap_store_address_misaligned(addr);
#endif
    store_uint64(addr, val.v[0]);
    store_uint64(addr + 8, val.v[1]);
  }

  float128_t load_float128(reg_t addr)
  {
#ifndef RISCV_ENABLE_MISALIGNED
    if (unlikely(addr & (sizeof(float128_t)-1)))
      throw trap_load_address_misaligned(addr);
#endif
    return (float128_t){load_uint64(addr), load_uint64(addr + 8)};
  }

  // store value to memory at aligned address
  store_func(uint8)
  store_func(uint16)
  store_func(uint32)
  store_func(uint64)

  // perform an atomic memory operation at an aligned address
  amo_func(uint32)
  amo_func(uint64)

  inline void yield_load_reservation()
  {
    load_reservation_address = (reg_t)-1;
  }

  inline void acquire_load_reservation(reg_t vaddr)
  {
    reg_t paddr = translate(vaddr, LOAD);
    if (auto host_addr = sim->addr_to_mem(paddr))
      load_reservation_address = refill_tlb(vaddr, paddr, host_addr, LOAD).target_offset + vaddr;
    else
      throw trap_load_access_fault(vaddr); // disallow LR to I/O space
  }

  inline bool check_load_reservation(reg_t vaddr)
  {
    reg_t paddr = translate(vaddr, STORE);
    if (auto host_addr = sim->addr_to_mem(paddr))
      return load_reservation_address == refill_tlb(vaddr, paddr, host_addr, STORE).target_offset + vaddr;
    else
      throw trap_store_access_fault(vaddr); // disallow SC to I/O space
  }
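  // Illustrative LR/SC flow (a sketch; the actual instruction semantics live
  // in the per-instruction templates): LR calls acquire_load_reservation(addr)
  // before loading, SC calls check_load_reservation(addr) and performs its
  // store only if that returns true, then yield_load_reservation() clears the
  // reservation.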

  static const reg_t ICACHE_ENTRIES = 1024;

  inline size_t icache_index(reg_t addr)
  {
    return (addr / PC_ALIGN) % ICACHE_ENTRIES;
  }

  inline icache_entry_t* refill_icache(reg_t addr, icache_entry_t* entry)
  {
    auto tlb_entry = translate_insn_addr(addr);
    insn_bits_t insn = *(uint16_t*)(tlb_entry.host_offset + addr);
    int length = insn_length(insn);

    if (likely(length == 4)) {
      insn |= (insn_bits_t)*(const int16_t*)translate_insn_addr_to_host(addr + 2) << 16;
    } else if (length == 2) {
      insn = (int16_t)insn;
    } else if (length == 6) {
      insn |= (insn_bits_t)*(const int16_t*)translate_insn_addr_to_host(addr + 4) << 32;
      insn |= (insn_bits_t)*(const uint16_t*)translate_insn_addr_to_host(addr + 2) << 16;
    } else {
      static_assert(sizeof(insn_bits_t) == 8, "insn_bits_t must be uint64_t");
      insn |= (insn_bits_t)*(const int16_t*)translate_insn_addr_to_host(addr + 6) << 48;
      insn |= (insn_bits_t)*(const uint16_t*)translate_insn_addr_to_host(addr + 4) << 32;
      insn |= (insn_bits_t)*(const uint16_t*)translate_insn_addr_to_host(addr + 2) << 16;
    }

    insn_fetch_t fetch = {proc->decode_insn(insn), insn};
    entry->tag = addr;
    entry->next = &icache[icache_index(addr + length)];
    entry->data = fetch;

    reg_t paddr = tlb_entry.target_offset + addr;
    if (tracer.interested_in_range(paddr, paddr + 1, FETCH)) {
      entry->tag = -1;
      tracer.trace(paddr, length, FETCH);
    }
    return entry;
  }
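  // (note on refill_icache above) entry->next points at the icache slot for
  // the sequentially following instruction, presumably so the fetch loop can
  // chase this pointer for straight-line code instead of recomputing
  // icache_index each time.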

  inline icache_entry_t* access_icache(reg_t addr)
  {
    icache_entry_t* entry = &icache[icache_index(addr)];
    if (likely(entry->tag == addr))
      return entry;
    return refill_icache(addr, entry);
  }

  inline insn_fetch_t load_insn(reg_t addr)
  {
    icache_entry_t entry;
    return refill_icache(addr, &entry)->data;
  }

  void flush_tlb();
  void flush_icache();

  void register_memtracer(memtracer_t*);

  int is_dirty_enabled()
  {
#ifdef RISCV_ENABLE_DIRTY
    return 1;
#else
    return 0;
#endif
  }

  int is_misaligned_enabled()
  {
#ifdef RISCV_ENABLE_MISALIGNED
    return 1;
#else
    return 0;
#endif
  }

private:
  simif_t* sim;
  processor_t* proc;
  memtracer_list_t tracer;
  reg_t load_reservation_address;
  uint16_t fetch_temp;

  // implement an instruction cache for simulator performance
  icache_entry_t icache[ICACHE_ENTRIES];

  // implement a TLB for simulator performance
  static const reg_t TLB_ENTRIES = 256;
  // If a TLB tag has TLB_CHECK_TRIGGERS set, then the MMU must check for a
  // trigger match before completing an access.
  static const reg_t TLB_CHECK_TRIGGERS = reg_t(1) << 63;
  tlb_entry_t tlb_data[TLB_ENTRIES];
  reg_t tlb_insn_tag[TLB_ENTRIES];
  reg_t tlb_load_tag[TLB_ENTRIES];
  reg_t tlb_store_tag[TLB_ENTRIES];
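  // (note) each tag holds the VPN of the cached translation, possibly OR'd
  // with TLB_CHECK_TRIGGERS; a tag matching neither form is treated as a miss
  // and the access falls back to the slow path (flush_tlb presumably
  // invalidates entries by writing a value that cannot match any VPN).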

  // finish translation on a TLB miss and update the TLB
  tlb_entry_t refill_tlb(reg_t vaddr, reg_t paddr, char* host_addr, access_type type);
  const char* fill_from_mmio(reg_t vaddr, reg_t paddr);

  // perform a page table walk for a given VA; set referenced/dirty bits
  reg_t walk(reg_t addr, access_type type, reg_t prv);

  // handle uncommon cases: TLB misses, page faults, MMIO
  tlb_entry_t fetch_slow_path(reg_t addr);
  void load_slow_path(reg_t addr, reg_t len, uint8_t* bytes);
  void store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes);
  reg_t translate(reg_t addr, access_type type);

  // ITLB lookup
  inline tlb_entry_t translate_insn_addr(reg_t addr) {
    reg_t vpn = addr >> PGSHIFT;
    if (likely(tlb_insn_tag[vpn % TLB_ENTRIES] == vpn))
      return tlb_data[vpn % TLB_ENTRIES];
    tlb_entry_t result;
    if (unlikely(tlb_insn_tag[vpn % TLB_ENTRIES] != (vpn | TLB_CHECK_TRIGGERS))) {
      result = fetch_slow_path(addr);
    } else {
      result = tlb_data[vpn % TLB_ENTRIES];
    }
    if (unlikely(tlb_insn_tag[vpn % TLB_ENTRIES] == (vpn | TLB_CHECK_TRIGGERS))) {
      uint16_t* ptr = (uint16_t*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr);
      int match = proc->trigger_match(OPERATION_EXECUTE, addr, *ptr);
      if (match >= 0) {
        throw trigger_matched_t(match, OPERATION_EXECUTE, addr, *ptr);
      }
    }
    return result;
  }

  inline const uint16_t* translate_insn_addr_to_host(reg_t addr) {
    return (uint16_t*)(translate_insn_addr(addr).host_offset + addr);
  }

  inline trigger_matched_t *trigger_exception(trigger_operation_t operation,
      reg_t address, reg_t data)
  {
    if (!proc) {
      return NULL;
    }
    int match = proc->trigger_match(operation, address, data);
    if (match == -1)
      return NULL;
    if (proc->state.mcontrol[match].timing == 0) {
      throw trigger_matched_t(match, operation, address, data);
    }
    return new trigger_matched_t(match, operation, address, data);
  }

  bool check_triggers_fetch;
  bool check_triggers_load;
  bool check_triggers_store;
  // The exception describing a matched trigger, or NULL.
  trigger_matched_t *matched_trigger;

  friend class processor_t;
};

struct vm_info {
  int levels;
  int idxbits;
  int ptesize;
  reg_t ptbase;
};

inline vm_info decode_vm_info(int xlen, reg_t prv, reg_t satp)
{
  if (prv == PRV_M) {
    return {0, 0, 0, 0};
  } else if (prv <= PRV_S && xlen == 32) {
    switch (get_field(satp, SATP32_MODE)) {
      case SATP_MODE_OFF: return {0, 0, 0, 0};
      case SATP_MODE_SV32: return {2, 10, 4, (satp & SATP32_PPN) << PGSHIFT};
      default: abort();
    }
  } else if (prv <= PRV_S && xlen == 64) {
    switch (get_field(satp, SATP64_MODE)) {
      case SATP_MODE_OFF: return {0, 0, 0, 0};
      case SATP_MODE_SV39: return {3, 9, 8, (satp & SATP64_PPN) << PGSHIFT};
      case SATP_MODE_SV48: return {4, 9, 8, (satp & SATP64_PPN) << PGSHIFT};
      case SATP_MODE_SV57: return {5, 9, 8, (satp & SATP64_PPN) << PGSHIFT};
      case SATP_MODE_SV64: return {6, 9, 8, (satp & SATP64_PPN) << PGSHIFT};
      default: abort();
    }
  } else {
    abort();
  }
}
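// Worked example: for Sv39 the table above yields {levels = 3, idxbits = 9,
// ptesize = 8}, i.e. three page-table levels of 9 index bits each plus the
// 12-bit page offset, for 3*9 + 12 = 39 virtual address bits with 8-byte PTEs;
// ptbase is the root page table's physical address taken from the satp PPN field.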

#endif