// Source: riscv-isa-sim (spike), riscv/mmu.cc
// Upstream commit: "Put simif_t declaration in its own file." (#209)
1 // See LICENSE for license details.
2
3 #include "mmu.h"
4 #include "simif.h"
5 #include "processor.h"
6
7 mmu_t::mmu_t(simif_t* sim, processor_t* proc)
8 : sim(sim), proc(proc),
9 check_triggers_fetch(false),
10 check_triggers_load(false),
11 check_triggers_store(false),
12 matched_trigger(NULL)
13 {
14 flush_tlb();
15 }
16
17 mmu_t::~mmu_t()
18 {
19 }
20
21 void mmu_t::flush_icache()
22 {
23 for (size_t i = 0; i < ICACHE_ENTRIES; i++)
24 icache[i].tag = -1;
25 }
26
27 void mmu_t::flush_tlb()
28 {
29 memset(tlb_insn_tag, -1, sizeof(tlb_insn_tag));
30 memset(tlb_load_tag, -1, sizeof(tlb_load_tag));
31 memset(tlb_store_tag, -1, sizeof(tlb_store_tag));
32
33 flush_icache();
34 }
35
36 reg_t mmu_t::translate(reg_t addr, access_type type)
37 {
38 if (!proc)
39 return addr;
40
41 reg_t mode = proc->state.prv;
42 if (type != FETCH) {
43 if (!proc->state.dcsr.cause && get_field(proc->state.mstatus, MSTATUS_MPRV))
44 mode = get_field(proc->state.mstatus, MSTATUS_MPP);
45 }
46
47 return walk(addr, type, mode) | (addr & (PGSIZE-1));
48 }
49
50 tlb_entry_t mmu_t::fetch_slow_path(reg_t vaddr)
51 {
52 reg_t paddr = translate(vaddr, FETCH);
53
54 if (auto host_addr = sim->addr_to_mem(paddr)) {
55 return refill_tlb(vaddr, paddr, host_addr, FETCH);
56 } else {
57 if (!sim->mmio_load(paddr, sizeof fetch_temp, (uint8_t*)&fetch_temp))
58 throw trap_instruction_access_fault(vaddr);
59 tlb_entry_t entry = {(char*)&fetch_temp - vaddr, paddr - vaddr};
60 return entry;
61 }
62 }
63
64 reg_t reg_from_bytes(size_t len, const uint8_t* bytes)
65 {
66 switch (len) {
67 case 1:
68 return bytes[0];
69 case 2:
70 return bytes[0] |
71 (((reg_t) bytes[1]) << 8);
72 case 4:
73 return bytes[0] |
74 (((reg_t) bytes[1]) << 8) |
75 (((reg_t) bytes[2]) << 16) |
76 (((reg_t) bytes[3]) << 24);
77 case 8:
78 return bytes[0] |
79 (((reg_t) bytes[1]) << 8) |
80 (((reg_t) bytes[2]) << 16) |
81 (((reg_t) bytes[3]) << 24) |
82 (((reg_t) bytes[4]) << 32) |
83 (((reg_t) bytes[5]) << 40) |
84 (((reg_t) bytes[6]) << 48) |
85 (((reg_t) bytes[7]) << 56);
86 }
87 abort();
88 }
89
// Slow path for loads, used on TLB miss or when the TLB entry carries
// TLB_CHECK_TRIGGERS.  Statement order matters: the memory/MMIO access
// runs first because load triggers compare against the loaded data value.
void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes)
{
  reg_t paddr = translate(addr, LOAD);

  if (auto host_addr = sim->addr_to_mem(paddr)) {
    memcpy(bytes, host_addr, len);
    // When a tracer is watching this page, skip the TLB refill so future
    // accesses keep coming through this slow path and get traced.
    if (tracer.interested_in_range(paddr, paddr + PGSIZE, LOAD))
      tracer.trace(paddr, len, LOAD);
    else
      refill_tlb(addr, paddr, host_addr, LOAD);
  } else if (!sim->mmio_load(paddr, len, bytes)) {
    throw trap_load_access_fault(addr);
  }

  // matched_trigger is left set while the trap propagates; presumably the
  // retried access then skips re-firing the same trigger -- confirm against
  // the trigger-handling code in processor.cc.
  if (!matched_trigger) {
    reg_t data = reg_from_bytes(len, bytes);
    matched_trigger = trigger_exception(OPERATION_LOAD, addr, data);
    if (matched_trigger)
      throw *matched_trigger;
  }
}
111
// Slow path for stores.  Unlike loads, the trigger check precedes the
// memory access, so a firing trigger prevents the store from happening.
void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes)
{
  reg_t paddr = translate(addr, STORE);

  // matched_trigger is left set while the trap propagates; presumably the
  // retried store then skips re-firing the same trigger -- confirm against
  // the trigger-handling code in processor.cc.
  if (!matched_trigger) {
    reg_t data = reg_from_bytes(len, bytes);
    matched_trigger = trigger_exception(OPERATION_STORE, addr, data);
    if (matched_trigger)
      throw *matched_trigger;
  }

  if (auto host_addr = sim->addr_to_mem(paddr)) {
    memcpy(host_addr, bytes, len);
    // When a tracer is watching this page, skip the TLB refill so future
    // stores keep coming through this slow path and get traced.
    if (tracer.interested_in_range(paddr, paddr + PGSIZE, STORE))
      tracer.trace(paddr, len, STORE);
    else
      refill_tlb(addr, paddr, host_addr, STORE);
  } else if (!sim->mmio_store(paddr, len, bytes)) {
    throw trap_store_access_fault(addr);
  }
}
133
134 tlb_entry_t mmu_t::refill_tlb(reg_t vaddr, reg_t paddr, char* host_addr, access_type type)
135 {
136 reg_t idx = (vaddr >> PGSHIFT) % TLB_ENTRIES;
137 reg_t expected_tag = vaddr >> PGSHIFT;
138
139 if ((tlb_load_tag[idx] & ~TLB_CHECK_TRIGGERS) != expected_tag)
140 tlb_load_tag[idx] = -1;
141 if ((tlb_store_tag[idx] & ~TLB_CHECK_TRIGGERS) != expected_tag)
142 tlb_store_tag[idx] = -1;
143 if ((tlb_insn_tag[idx] & ~TLB_CHECK_TRIGGERS) != expected_tag)
144 tlb_insn_tag[idx] = -1;
145
146 if ((check_triggers_fetch && type == FETCH) ||
147 (check_triggers_load && type == LOAD) ||
148 (check_triggers_store && type == STORE))
149 expected_tag |= TLB_CHECK_TRIGGERS;
150
151 if (type == FETCH) tlb_insn_tag[idx] = expected_tag;
152 else if (type == STORE) tlb_store_tag[idx] = expected_tag;
153 else tlb_load_tag[idx] = expected_tag;
154
155 tlb_entry_t entry = {host_addr - vaddr, paddr - vaddr};
156 tlb_data[idx] = entry;
157 return entry;
158 }
159
// Perform a page-table walk for `addr` with the given access type and
// effective privilege mode.  Returns the physical address of the page
// (page offset excluded); throws a page-fault trap on a failed check and
// an access-fault trap when a PTE address is not backed by memory.
reg_t mmu_t::walk(reg_t addr, access_type type, reg_t mode)
{
  vm_info vm = decode_vm_info(proc->max_xlen, mode, proc->get_state()->satp);
  if (vm.levels == 0)
    return addr & ((reg_t(2) << (proc->xlen-1))-1); // zero-extend from xlen

  bool s_mode = mode == PRV_S;
  bool sum = get_field(proc->state.mstatus, MSTATUS_SUM);
  bool mxr = get_field(proc->state.mstatus, MSTATUS_MXR);

  // verify bits xlen-1:va_bits-1 are all equal
  int va_bits = PGSHIFT + vm.levels * vm.idxbits;
  reg_t mask = (reg_t(1) << (proc->xlen - (va_bits-1))) - 1;
  reg_t masked_msbs = (addr >> (va_bits-1)) & mask;
  if (masked_msbs != 0 && masked_msbs != mask)
    vm.levels = 0; // non-canonical address: skip the loop, fall into fail:

  reg_t base = vm.ptbase;
  for (int i = vm.levels - 1; i >= 0; i--) {
    int ptshift = i * vm.idxbits;
    reg_t idx = (addr >> (PGSHIFT + ptshift)) & ((1 << vm.idxbits) - 1);

    // check that physical address of PTE is legal
    auto ppte = sim->addr_to_mem(base + idx * vm.ptesize);
    if (!ppte)
      goto fail_access;

    reg_t pte = vm.ptesize == 4 ? *(uint32_t*)ppte : *(uint64_t*)ppte;
    reg_t ppn = pte >> PTE_PPN_SHIFT;

    if (PTE_TABLE(pte)) { // next level of page table
      base = ppn << PGSHIFT;
    } else if ((pte & PTE_U) ? s_mode && (type == FETCH || !sum) : !s_mode) {
      // privilege mismatch: U-mode page from S mode (never for fetch, and
      // only with SUM for data), or S-mode page from U mode
      break;
    } else if (!(pte & PTE_V) || (!(pte & PTE_R) && (pte & PTE_W))) {
      // invalid PTE, or the reserved write-without-read encoding
      break;
    } else if (type == FETCH ? !(pte & PTE_X) :
               type == LOAD ? !(pte & PTE_R) && !(mxr && (pte & PTE_X)) :
               !((pte & PTE_R) && (pte & PTE_W))) {
      // permission check per access type; MXR lets loads read X-only pages
      break;
    } else if ((ppn & ((reg_t(1) << ptshift) - 1)) != 0) {
      // misaligned superpage: low PPN bits must be zero at this level
      break;
    } else {
      reg_t ad = PTE_A | ((type == STORE) * PTE_D);
#ifdef RISCV_ENABLE_DIRTY
      // set accessed and possibly dirty bits.
      // NOTE(review): 32-bit RMW even when ptesize is 8 -- fine only if
      // the A/D bits live in the low 32 bits of the PTE; confirm against
      // the PTE bit definitions in encoding.h.
      *(uint32_t*)ppte |= ad;
#else
      // take exception if access or possibly dirty bit is not set.
      if ((pte & ad) != ad)
        break;
#endif
      // for superpage mappings, make a fake leaf PTE for the TLB's benefit.
      reg_t vpn = addr >> PGSHIFT;
      reg_t value = (ppn | (vpn & ((reg_t(1) << ptshift) - 1))) << PGSHIFT;
      return value;
    }
  }

fail:
  // reached by falling out of the loop or any `break` above: page fault
  switch (type) {
    case FETCH: throw trap_instruction_page_fault(addr);
    case LOAD: throw trap_load_page_fault(addr);
    case STORE: throw trap_store_page_fault(addr);
    default: abort();
  }

fail_access:
  // PTE address not backed by simulator memory: access fault
  switch (type) {
    case FETCH: throw trap_instruction_access_fault(addr);
    case LOAD: throw trap_load_access_fault(addr);
    case STORE: throw trap_store_access_fault(addr);
    default: abort();
  }
}
235
// Attach a memory tracer.  The TLB is flushed first so subsequent accesses
// take the slow path, where tracer.interested_in_range() is consulted.
void mmu_t::register_memtracer(memtracer_t* t)
{
  flush_tlb();
  tracer.hook(t);
}