// See LICENSE for license details.

#ifndef _RISCV_MMU_H
#define _RISCV_MMU_H

#include "decode.h"
#include "trap.h"
#include "common.h"
#include "config.h"
#include "sim.h"
#include "processor.h"
#include "memtracer.h"
#include <stdlib.h>
#include <vector>

// virtual memory configuration
#define PGSHIFT 12
const reg_t PGSIZE = 1 << PGSHIFT;
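// i.e. 4 KiB pages: PGSIZE == 4096, and addr >> PGSHIFT yields a virtual page number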

struct insn_fetch_t
{
  insn_func_t func;
  insn_t insn;
};

struct icache_entry_t {
  reg_t tag;
  reg_t pad;
  insn_fetch_t data;
};
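// (tag + pad + data is 32 bytes on a 64-bit host, assuming an 8-byte insn_t;
// the pad presumably keeps the entry size a power of two for cheap indexing)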

// this class implements a processor's port into the virtual memory system.
// an MMU and instruction cache are maintained for simulator performance.
class mmu_t
{
public:
  mmu_t(sim_t* sim, processor_t* proc);
  ~mmu_t();

  // template for functions that load an aligned value from memory
  #define load_func(type) \
    type##_t load_##type(reg_t addr) __attribute__((always_inline)) { \
      if (addr & (sizeof(type##_t)-1)) \
        throw trap_load_address_misaligned(addr); \
      reg_t vpn = addr >> PGSHIFT; \
      if (likely(tlb_load_tag[vpn % TLB_ENTRIES] == vpn)) \
        return *(type##_t*)(tlb_data[vpn % TLB_ENTRIES] + addr); \
      type##_t res; \
      load_slow_path(addr, sizeof(type##_t), (uint8_t*)&res); \
      return res; \
    }

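  // for illustration, load_func(uint32) above expands to roughly:
  //   uint32_t load_uint32(reg_t addr) {
  //     if (addr & 3) throw trap_load_address_misaligned(addr);
  //     reg_t vpn = addr >> PGSHIFT;
  //     if (likely(tlb_load_tag[vpn % TLB_ENTRIES] == vpn))   // TLB hit
  //       return *(uint32_t*)(tlb_data[vpn % TLB_ENTRIES] + addr);
  //     uint32_t res;                                         // TLB miss
  //     load_slow_path(addr, 4, (uint8_t*)&res);
  //     return res;
  //   }
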
  // load value from memory at aligned address; zero extend to register width
  load_func(uint8)
  load_func(uint16)
  load_func(uint32)
  load_func(uint64)

  // load value from memory at aligned address; sign extend to register width
  load_func(int8)
  load_func(int16)
  load_func(int32)
  load_func(int64)

  // template for functions that store an aligned value to memory
  #define store_func(type) \
    void store_##type(reg_t addr, type##_t val) { \
      if (addr & (sizeof(type##_t)-1)) \
        throw trap_store_address_misaligned(addr); \
      reg_t vpn = addr >> PGSHIFT; \
      if (likely(tlb_store_tag[vpn % TLB_ENTRIES] == vpn)) \
        *(type##_t*)(tlb_data[vpn % TLB_ENTRIES] + addr) = val; \
      else \
        store_slow_path(addr, sizeof(type##_t), (const uint8_t*)&val); \
    }

  // store value to memory at aligned address
  store_func(uint8)
  store_func(uint16)
  store_func(uint32)
  store_func(uint64)
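
  // example (sketch): a 32-bit store followed by a sign-extending load on RV64:
  //   mmu->store_uint32(addr, 0x80000000u);
  //   reg_t x = mmu->load_int32(addr);  // x == 0xffffffff80000000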

  static const reg_t ICACHE_ENTRIES = 1024;

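  // map a PC to its direct-mapped icache slot; PC_ALIGN is the minimum
  // instruction alignment (2 bytes when the compressed extension is supported)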
  inline size_t icache_index(reg_t addr)
  {
    return (addr / PC_ALIGN) % ICACHE_ENTRIES;
  }

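  // decode the instruction at addr and fill its icache entry.
  // instructions are fetched 16 bits at a time, translating each parcel
  // separately, because a 4-, 6-, or 8-byte instruction may straddle a
  // page boundary.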
  inline icache_entry_t* refill_icache(reg_t addr, icache_entry_t* entry)
  {
    const uint16_t* iaddr = translate_insn_addr(addr);
    insn_bits_t insn = *iaddr;
    int length = insn_length(insn);

    if (likely(length == 4)) {
      insn |= (insn_bits_t)*(const int16_t*)translate_insn_addr(addr + 2) << 16;
    } else if (length == 2) {
      insn = (int16_t)insn;
    } else if (length == 6) {
      insn |= (insn_bits_t)*(const int16_t*)translate_insn_addr(addr + 4) << 32;
      insn |= (insn_bits_t)*(const uint16_t*)translate_insn_addr(addr + 2) << 16;
    } else {
      static_assert(sizeof(insn_bits_t) == 8, "insn_bits_t must be uint64_t");
      insn |= (insn_bits_t)*(const int16_t*)translate_insn_addr(addr + 6) << 48;
      insn |= (insn_bits_t)*(const uint16_t*)translate_insn_addr(addr + 4) << 32;
      insn |= (insn_bits_t)*(const uint16_t*)translate_insn_addr(addr + 2) << 16;
    }

    insn_fetch_t fetch = {proc->decode_insn(insn), insn};
    entry->tag = addr;
    entry->data = fetch;

    reg_t paddr = sim->mem_to_addr((char*)iaddr);
    if (tracer.interested_in_range(paddr, paddr + 1, FETCH)) {
      entry->tag = -1;
      tracer.trace(paddr, length, FETCH);
    }
    return entry;
  }

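  // look up addr in the icache; on a tag miss, decode and refill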
  inline icache_entry_t* access_icache(reg_t addr)
  {
    icache_entry_t* entry = &icache[icache_index(addr)];
    if (likely(entry->tag == addr))
      return entry;
    return refill_icache(addr, entry);
  }

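  // fetch the decoded instruction at addr. typical use in the execute
  // loop (sketch; the real loop lives in processor_t):
  //   insn_fetch_t fetch = mmu->load_insn(pc);
  //   pc = fetch.func(proc, fetch.insn, pc);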
  inline insn_fetch_t load_insn(reg_t addr)
  {
    return access_icache(addr)->data;
  }

  void flush_tlb();
  void flush_icache();

  void register_memtracer(memtracer_t*);

private:
  sim_t* sim;
  processor_t* proc;
  memtracer_list_t tracer;
  uint16_t fetch_temp;

  // implement an instruction cache for simulator performance
  icache_entry_t icache[ICACHE_ENTRIES];

  // implement a TLB for simulator performance
  static const reg_t TLB_ENTRIES = 256;
  char* tlb_data[TLB_ENTRIES];
  reg_t tlb_insn_tag[TLB_ENTRIES];
  reg_t tlb_load_tag[TLB_ENTRIES];
  reg_t tlb_store_tag[TLB_ENTRIES];
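
  // a separate tag array per access type means, e.g., a read-only page can
  // hit in the load TLB while stores still take the slow path (and its
  // permission checks)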

  // finish translation on a TLB miss and update the TLB
  void refill_tlb(reg_t vaddr, reg_t paddr, access_type type);

  // perform a page table walk for a given VA; set referenced/dirty bits
  reg_t walk(reg_t addr, access_type type, bool supervisor, bool pum);

  // handle uncommon cases: TLB misses, page faults, MMIO
  const uint16_t* fetch_slow_path(reg_t addr);
  void load_slow_path(reg_t addr, reg_t len, uint8_t* bytes);
  void store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes);
  reg_t translate(reg_t addr, access_type type);

  // ITLB lookup
  const uint16_t* translate_insn_addr(reg_t addr) __attribute__((always_inline)) {
    reg_t vpn = addr >> PGSHIFT;
    if (likely(tlb_insn_tag[vpn % TLB_ENTRIES] == vpn))
      return (uint16_t*)(tlb_data[vpn % TLB_ENTRIES] + addr);
    return fetch_slow_path(addr);
  }

  friend class processor_t;
};

#endif