// riscv-isa-sim — riscv/mmu.cc
// See LICENSE for license details.

#include "mmu.h"
#include "simif.h"
#include "processor.h"

7 mmu_t::mmu_t(simif_t* sim, processor_t* proc)
8 : sim(sim), proc(proc),
9 check_triggers_fetch(false),
10 check_triggers_load(false),
11 check_triggers_store(false),
12 matched_trigger(NULL)
13 {
14 flush_tlb();
15 yield_load_reservation();
16 }
17
18 mmu_t::~mmu_t()
19 {
20 }
21
22 void mmu_t::flush_icache()
23 {
24 for (size_t i = 0; i < ICACHE_ENTRIES; i++)
25 icache[i].tag = -1;
26 }
27
28 void mmu_t::flush_tlb()
29 {
30 memset(tlb_insn_tag, -1, sizeof(tlb_insn_tag));
31 memset(tlb_load_tag, -1, sizeof(tlb_load_tag));
32 memset(tlb_store_tag, -1, sizeof(tlb_store_tag));
33
34 flush_icache();
35 }
36
37 reg_t mmu_t::translate(reg_t addr, access_type type)
38 {
39 if (!proc)
40 return addr;
41
42 reg_t mode = proc->state.prv;
43 if (type != FETCH) {
44 if (!proc->state.dcsr.cause && get_field(proc->state.mstatus, MSTATUS_MPRV))
45 mode = get_field(proc->state.mstatus, MSTATUS_MPP);
46 }
47
48 return walk(addr, type, mode) | (addr & (PGSIZE-1));
49 }
50
51 tlb_entry_t mmu_t::fetch_slow_path(reg_t vaddr)
52 {
53 reg_t paddr = translate(vaddr, FETCH);
54
55 if (auto host_addr = sim->addr_to_mem(paddr)) {
56 return refill_tlb(vaddr, paddr, host_addr, FETCH);
57 } else {
58 if (!sim->mmio_load(paddr, sizeof fetch_temp, (uint8_t*)&fetch_temp))
59 throw trap_instruction_access_fault(vaddr);
60 tlb_entry_t entry = {(char*)&fetch_temp - vaddr, paddr - vaddr};
61 return entry;
62 }
63 }
64
65 reg_t reg_from_bytes(size_t len, const uint8_t* bytes)
66 {
67 switch (len) {
68 case 1:
69 return bytes[0];
70 case 2:
71 return bytes[0] |
72 (((reg_t) bytes[1]) << 8);
73 case 4:
74 return bytes[0] |
75 (((reg_t) bytes[1]) << 8) |
76 (((reg_t) bytes[2]) << 16) |
77 (((reg_t) bytes[3]) << 24);
78 case 8:
79 return bytes[0] |
80 (((reg_t) bytes[1]) << 8) |
81 (((reg_t) bytes[2]) << 16) |
82 (((reg_t) bytes[3]) << 24) |
83 (((reg_t) bytes[4]) << 32) |
84 (((reg_t) bytes[5]) << 40) |
85 (((reg_t) bytes[6]) << 48) |
86 (((reg_t) bytes[7]) << 56);
87 }
88 abort();
89 }
90
91 void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes)
92 {
93 reg_t paddr = translate(addr, LOAD);
94
95 if (auto host_addr = sim->addr_to_mem(paddr)) {
96 memcpy(bytes, host_addr, len);
97 if (tracer.interested_in_range(paddr, paddr + PGSIZE, LOAD))
98 tracer.trace(paddr, len, LOAD);
99 else
100 refill_tlb(addr, paddr, host_addr, LOAD);
101 } else if (!sim->mmio_load(paddr, len, bytes)) {
102 throw trap_load_access_fault(addr);
103 }
104
105 if (!matched_trigger) {
106 reg_t data = reg_from_bytes(len, bytes);
107 matched_trigger = trigger_exception(OPERATION_LOAD, addr, data);
108 if (matched_trigger)
109 throw *matched_trigger;
110 }
111 }
112
113 void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes)
114 {
115 reg_t paddr = translate(addr, STORE);
116
117 if (!matched_trigger) {
118 reg_t data = reg_from_bytes(len, bytes);
119 matched_trigger = trigger_exception(OPERATION_STORE, addr, data);
120 if (matched_trigger)
121 throw *matched_trigger;
122 }
123
124 if (auto host_addr = sim->addr_to_mem(paddr)) {
125 memcpy(host_addr, bytes, len);
126 if (tracer.interested_in_range(paddr, paddr + PGSIZE, STORE))
127 tracer.trace(paddr, len, STORE);
128 else
129 refill_tlb(addr, paddr, host_addr, STORE);
130 } else if (!sim->mmio_store(paddr, len, bytes)) {
131 throw trap_store_access_fault(addr);
132 }
133 }
134
135 tlb_entry_t mmu_t::refill_tlb(reg_t vaddr, reg_t paddr, char* host_addr, access_type type)
136 {
137 reg_t idx = (vaddr >> PGSHIFT) % TLB_ENTRIES;
138 reg_t expected_tag = vaddr >> PGSHIFT;
139
140 if ((tlb_load_tag[idx] & ~TLB_CHECK_TRIGGERS) != expected_tag)
141 tlb_load_tag[idx] = -1;
142 if ((tlb_store_tag[idx] & ~TLB_CHECK_TRIGGERS) != expected_tag)
143 tlb_store_tag[idx] = -1;
144 if ((tlb_insn_tag[idx] & ~TLB_CHECK_TRIGGERS) != expected_tag)
145 tlb_insn_tag[idx] = -1;
146
147 if ((check_triggers_fetch && type == FETCH) ||
148 (check_triggers_load && type == LOAD) ||
149 (check_triggers_store && type == STORE))
150 expected_tag |= TLB_CHECK_TRIGGERS;
151
152 if (type == FETCH) tlb_insn_tag[idx] = expected_tag;
153 else if (type == STORE) tlb_store_tag[idx] = expected_tag;
154 else tlb_load_tag[idx] = expected_tag;
155
156 tlb_entry_t entry = {host_addr - vaddr, paddr - vaddr};
157 tlb_data[idx] = entry;
158 return entry;
159 }
160
// Walk the page tables to translate 'addr' at privilege 'mode' for the
// given access type.  Returns the physical page base (page offset is
// OR'd back in by translate()).  Throws a page fault when the walk hits
// an invalid or forbidden PTE, or an access fault when a PTE's own
// physical address is not backed by memory.
reg_t mmu_t::walk(reg_t addr, access_type type, reg_t mode)
{
  vm_info vm = decode_vm_info(proc->max_xlen, mode, proc->get_state()->satp);
  if (vm.levels == 0)
    return addr & ((reg_t(2) << (proc->xlen-1))-1); // zero-extend from xlen

  bool s_mode = mode == PRV_S;
  bool sum = get_field(proc->state.mstatus, MSTATUS_SUM);
  bool mxr = get_field(proc->state.mstatus, MSTATUS_MXR);

  // verify bits xlen-1:va_bits-1 are all equal
  int va_bits = PGSHIFT + vm.levels * vm.idxbits;
  reg_t mask = (reg_t(1) << (proc->xlen - (va_bits-1))) - 1;
  reg_t masked_msbs = (addr >> (va_bits-1)) & mask;
  if (masked_msbs != 0 && masked_msbs != mask)
    vm.levels = 0; // non-canonical address: skip the loop, fall through to fail

  reg_t base = vm.ptbase;
  for (int i = vm.levels - 1; i >= 0; i--) {
    int ptshift = i * vm.idxbits;
    // VPN slice for this level selects the PTE within the current table.
    reg_t idx = (addr >> (PGSHIFT + ptshift)) & ((1 << vm.idxbits) - 1);

    // check that physical address of PTE is legal
    auto ppte = sim->addr_to_mem(base + idx * vm.ptesize);
    if (!ppte)
      goto fail_access;

    reg_t pte = vm.ptesize == 4 ? *(uint32_t*)ppte : *(uint64_t*)ppte;
    reg_t ppn = pte >> PTE_PPN_SHIFT;

    if (PTE_TABLE(pte)) { // next level of page table
      base = ppn << PGSHIFT;
    } else if ((pte & PTE_U) ? s_mode && (type == FETCH || !sum) : !s_mode) {
      // privilege mismatch: U-page from S-mode (never for fetch, and only
      // with SUM for data), or S-page from U-mode -> page fault
      break;
    } else if (!(pte & PTE_V) || (!(pte & PTE_R) && (pte & PTE_W))) {
      // invalid PTE, or the reserved write-without-read encoding
      break;
    } else if (type == FETCH ? !(pte & PTE_X) :
               type == LOAD ? !(pte & PTE_R) && !(mxr && (pte & PTE_X)) :
               !((pte & PTE_R) && (pte & PTE_W))) {
      // permission check: X for fetch; R (or X when MXR) for load; R+W for store
      break;
    } else if ((ppn & ((reg_t(1) << ptshift) - 1)) != 0) {
      // misaligned superpage: low PPN bits must be zero at this level
      break;
    } else {
      reg_t ad = PTE_A | ((type == STORE) * PTE_D);
#ifdef RISCV_ENABLE_DIRTY
      // set accessed and possibly dirty bits.
      *(uint32_t*)ppte |= ad;
#else
      // take exception if access or possibly dirty bit is not set.
      if ((pte & ad) != ad)
        break;
#endif
      // for superpage mappings, make a fake leaf PTE for the TLB's benefit.
      reg_t vpn = addr >> PGSHIFT;
      reg_t value = (ppn | (vpn & ((reg_t(1) << ptshift) - 1))) << PGSHIFT;
      return value;
    }
  }

// reached by 'break' above, or when the loop never runs: page fault
fail:
  switch (type) {
    case FETCH: throw trap_instruction_page_fault(addr);
    case LOAD: throw trap_load_page_fault(addr);
    case STORE: throw trap_store_page_fault(addr);
    default: abort();
  }

// PTE itself lives at an unbacked physical address: access fault
fail_access:
  switch (type) {
    case FETCH: throw trap_instruction_access_fault(addr);
    case LOAD: throw trap_load_access_fault(addr);
    case STORE: throw trap_store_access_fault(addr);
    default: abort();
  }
}
236
237 void mmu_t::register_memtracer(memtracer_t* t)
238 {
239 flush_tlb();
240 tracer.hook(t);
241 }