// Source provenance: commit 0b28f2ff803be0f0eeb6ef9b6a1e3465463950da
// riscv-isa-sim.git / riscv/mmu.cc
1 // See LICENSE for license details.
2
3 #include "mmu.h"
4 #include "sim.h"
5 #include "processor.h"
6
// Construct an MMU bound to a simulator and (optionally) a processor.
// Debug-trigger checking for all three access types starts disabled, no
// trigger match is pending, and all cached translations are cleared.
mmu_t::mmu_t(sim_t* sim, processor_t* proc)
 : sim(sim), proc(proc),
  check_triggers_fetch(false),
  check_triggers_load(false),
  check_triggers_store(false),
  matched_trigger(NULL)
{
  // Start empty; flush_tlb() also flushes the instruction cache.
  flush_tlb();
}
16
17 mmu_t::~mmu_t()
18 {
19 }
20
21 void mmu_t::flush_icache()
22 {
23 for (size_t i = 0; i < ICACHE_ENTRIES; i++)
24 icache[i].tag = -1;
25 }
26
27 void mmu_t::flush_tlb()
28 {
29 memset(tlb_insn_tag, -1, sizeof(tlb_insn_tag));
30 memset(tlb_load_tag, -1, sizeof(tlb_load_tag));
31 memset(tlb_store_tag, -1, sizeof(tlb_store_tag));
32
33 flush_icache();
34 }
35
36 reg_t mmu_t::translate(reg_t addr, access_type type)
37 {
38 if (!proc)
39 return addr;
40
41 reg_t mode = proc->state.prv;
42 if (type != FETCH) {
43 if (!proc->state.dcsr.cause && get_field(proc->state.mstatus, MSTATUS_MPRV))
44 mode = get_field(proc->state.mstatus, MSTATUS_MPP);
45 }
46
47 return walk(addr, type, mode) | (addr & (PGSIZE-1));
48 }
49
50 const uint16_t* mmu_t::fetch_slow_path(reg_t vaddr)
51 {
52 reg_t paddr = translate(vaddr, FETCH);
53
54 if (sim->addr_is_mem(paddr)) {
55 refill_tlb(vaddr, paddr, FETCH);
56 return (const uint16_t*)sim->addr_to_mem(paddr);
57 } else {
58 if (!sim->mmio_load(paddr, sizeof fetch_temp, (uint8_t*)&fetch_temp))
59 throw trap_instruction_access_fault(vaddr);
60 return &fetch_temp;
61 }
62 }
63
64 reg_t reg_from_bytes(size_t len, const uint8_t* bytes)
65 {
66 switch (len) {
67 case 1:
68 return bytes[0];
69 case 2:
70 return bytes[0] |
71 (((reg_t) bytes[1]) << 8);
72 case 4:
73 return bytes[0] |
74 (((reg_t) bytes[1]) << 8) |
75 (((reg_t) bytes[2]) << 16) |
76 (((reg_t) bytes[3]) << 24);
77 case 8:
78 return bytes[0] |
79 (((reg_t) bytes[1]) << 8) |
80 (((reg_t) bytes[2]) << 16) |
81 (((reg_t) bytes[3]) << 24) |
82 (((reg_t) bytes[4]) << 32) |
83 (((reg_t) bytes[5]) << 40) |
84 (((reg_t) bytes[6]) << 48) |
85 (((reg_t) bytes[7]) << 56);
86 }
87 abort();
88 }
89
90 void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes)
91 {
92 reg_t paddr = translate(addr, LOAD);
93
94 if (sim->addr_is_mem(paddr)) {
95 memcpy(bytes, sim->addr_to_mem(paddr), len);
96 if (tracer.interested_in_range(paddr, paddr + PGSIZE, LOAD))
97 tracer.trace(paddr, len, LOAD);
98 else
99 refill_tlb(addr, paddr, LOAD);
100 } else if (!sim->mmio_load(paddr, len, bytes)) {
101 throw trap_load_access_fault(addr);
102 }
103
104 if (!matched_trigger) {
105 reg_t data = reg_from_bytes(len, bytes);
106 matched_trigger = trigger_exception(OPERATION_LOAD, addr, data);
107 if (matched_trigger)
108 throw *matched_trigger;
109 }
110 }
111
112 void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes)
113 {
114 reg_t paddr = translate(addr, STORE);
115
116 if (!matched_trigger) {
117 reg_t data = reg_from_bytes(len, bytes);
118 matched_trigger = trigger_exception(OPERATION_STORE, addr, data);
119 if (matched_trigger)
120 throw *matched_trigger;
121 }
122
123 if (sim->addr_is_mem(paddr)) {
124 memcpy(sim->addr_to_mem(paddr), bytes, len);
125 if (tracer.interested_in_range(paddr, paddr + PGSIZE, STORE))
126 tracer.trace(paddr, len, STORE);
127 else
128 refill_tlb(addr, paddr, STORE);
129 } else if (!sim->mmio_store(paddr, len, bytes)) {
130 throw trap_store_access_fault(addr);
131 }
132 }
133
134 void mmu_t::refill_tlb(reg_t vaddr, reg_t paddr, access_type type)
135 {
136 reg_t idx = (vaddr >> PGSHIFT) % TLB_ENTRIES;
137 reg_t expected_tag = vaddr >> PGSHIFT;
138
139 if ((tlb_load_tag[idx] & ~TLB_CHECK_TRIGGERS) != expected_tag)
140 tlb_load_tag[idx] = -1;
141 if ((tlb_store_tag[idx] & ~TLB_CHECK_TRIGGERS) != expected_tag)
142 tlb_store_tag[idx] = -1;
143 if ((tlb_insn_tag[idx] & ~TLB_CHECK_TRIGGERS) != expected_tag)
144 tlb_insn_tag[idx] = -1;
145
146 if ((check_triggers_fetch && type == FETCH) ||
147 (check_triggers_load && type == LOAD) ||
148 (check_triggers_store && type == STORE))
149 expected_tag |= TLB_CHECK_TRIGGERS;
150
151 if (type == FETCH) tlb_insn_tag[idx] = expected_tag;
152 else if (type == STORE) tlb_store_tag[idx] = expected_tag;
153 else tlb_load_tag[idx] = expected_tag;
154
155 tlb_data[idx] = sim->addr_to_mem(paddr) - vaddr;
156 }
157
158 reg_t mmu_t::walk(reg_t addr, access_type type, reg_t mode)
159 {
160 vm_info vm = decode_vm_info(proc->max_xlen, mode, proc->get_state()->sptbr);
161 if (vm.levels == 0)
162 return addr & ((reg_t(2) << (proc->xlen-1))-1); // zero-extend from xlen
163
164 bool supervisor = mode == PRV_S;
165 bool sum = get_field(proc->state.mstatus, MSTATUS_SUM);
166 bool mxr = get_field(proc->state.mstatus, MSTATUS_MXR);
167
168 // verify bits xlen-1:va_bits-1 are all equal
169 int va_bits = PGSHIFT + vm.levels * vm.idxbits;
170 reg_t mask = (reg_t(1) << (proc->xlen - (va_bits-1))) - 1;
171 reg_t masked_msbs = (addr >> (va_bits-1)) & mask;
172 if (masked_msbs != 0 && masked_msbs != mask)
173 vm.levels = 0;
174
175 reg_t base = vm.ptbase;
176 for (int i = vm.levels - 1; i >= 0; i--) {
177 int ptshift = i * vm.idxbits;
178 reg_t idx = (addr >> (PGSHIFT + ptshift)) & ((1 << vm.idxbits) - 1);
179
180 // check that physical address of PTE is legal
181 reg_t pte_addr = base + idx * vm.ptesize;
182 if (!sim->addr_is_mem(pte_addr))
183 break;
184
185 void* ppte = sim->addr_to_mem(pte_addr);
186 reg_t pte = vm.ptesize == 4 ? *(uint32_t*)ppte : *(uint64_t*)ppte;
187 reg_t ppn = pte >> PTE_PPN_SHIFT;
188
189 if (PTE_TABLE(pte)) { // next level of page table
190 base = ppn << PGSHIFT;
191 } else if ((pte & PTE_U) ? supervisor && !sum : !supervisor) {
192 break;
193 } else if (!(pte & PTE_V) || (!(pte & PTE_R) && (pte & PTE_W))) {
194 break;
195 } else if (type == FETCH ? !(pte & PTE_X) :
196 type == LOAD ? !(pte & PTE_R) && !(mxr && (pte & PTE_X)) :
197 !((pte & PTE_R) && (pte & PTE_W))) {
198 break;
199 } else {
200 reg_t ad = PTE_A | ((type == STORE) * PTE_D);
201 #ifdef RISCV_ENABLE_DIRTY
202 // set accessed and possibly dirty bits.
203 *(uint32_t*)ppte |= ad;
204 #else
205 // take exception if access or possibly dirty bit is not set.
206 if ((pte & ad) != ad)
207 break;
208 #endif
209 // for superpage mappings, make a fake leaf PTE for the TLB's benefit.
210 reg_t vpn = addr >> PGSHIFT;
211 reg_t value = (ppn | (vpn & ((reg_t(1) << ptshift) - 1))) << PGSHIFT;
212 return value;
213 }
214 }
215
216 fail:
217 switch (type) {
218 case FETCH: throw trap_instruction_access_fault(addr);
219 case LOAD: throw trap_load_access_fault(addr);
220 case STORE: throw trap_store_access_fault(addr);
221 default: abort();
222 }
223 }
224
// Attach a memory tracer.  The TLB is flushed first so that subsequent
// accesses take the slow path, where tracer.interested_in_range() decides
// whether each page's accesses are traced or re-cached.
void mmu_t::register_memtracer(memtracer_t* t)
{
  flush_tlb();
  tracer.hook(t);
}