Don't set dirty/referenced bits w/o permission
[riscv-isa-sim.git] / riscv / mmu.h
// See LICENSE for license details.

#ifndef _RISCV_MMU_H
#define _RISCV_MMU_H

#include "decode.h"
#include "trap.h"
#include "common.h"
#include "config.h"
#include "processor.h"
#include "memtracer.h"
#include <vector>

// virtual memory configuration
typedef reg_t pte_t;
const reg_t LEVELS = sizeof(pte_t) == 8 ? 3 : 2;
const reg_t PTIDXBITS = 10;
const reg_t PGSHIFT = PTIDXBITS + (sizeof(pte_t) == 8 ? 3 : 2);
const reg_t PGSIZE = 1 << PGSHIFT;
const reg_t VPN_BITS = PTIDXBITS * LEVELS;
const reg_t PPN_BITS = 8*sizeof(reg_t) - PGSHIFT;
const reg_t VA_BITS = VPN_BITS + PGSHIFT;
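
// for reference, these derive as follows: with 8-byte PTEs, LEVELS=3,
// PGSHIFT=13 (8 KiB pages), VPN_BITS=30, VA_BITS=43, PPN_BITS=51; with
// 4-byte PTEs, LEVELS=2, PGSHIFT=12 (4 KiB pages), VPN_BITS=20, VA_BITS=32.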

struct insn_fetch_t
{
  insn_func_t func;
  insn_t insn;
};

struct icache_entry_t {
  reg_t tag;
  reg_t pad;
  insn_fetch_t data;
};

// this class implements a processor's port into the virtual memory system.
// an MMU and instruction cache are maintained for simulator performance.
class mmu_t
{
public:
  mmu_t(char* _mem, size_t _memsz);
  ~mmu_t();

  // template for functions that load an aligned value from memory
  #define load_func(type) \
    type##_t load_##type(reg_t addr) __attribute__((always_inline)) { \
      void* paddr = translate(addr, sizeof(type##_t), false, false); \
      return *(type##_t*)paddr; \
    }

  // load value from memory at aligned address; zero extend to register width
  load_func(uint8)
  load_func(uint16)
  load_func(uint32)
  load_func(uint64)

  // load value from memory at aligned address; sign extend to register width
  load_func(int8)
  load_func(int16)
  load_func(int32)
  load_func(int64)
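
  // usage sketch (illustrative, not from this file): the signed variants
  // sign-extend to the full register width, the unsigned ones zero-extend;
  // for a byte 0xff in memory:
  //   int64_t  s = mmu->load_int8(addr);  // s == -1
  //   uint64_t u = mmu->load_uint8(addr); // u == 0xff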

  // template for functions that store an aligned value to memory
  #define store_func(type) \
    void store_##type(reg_t addr, type##_t val) { \
      void* paddr = translate(addr, sizeof(type##_t), true, false); \
      *(type##_t*)paddr = val; \
    }

  // store value to memory at aligned address
  store_func(uint8)
  store_func(uint16)
  store_func(uint32)
  store_func(uint64)
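
  // note: loads and stores both funnel through translate(), so a misaligned
  // access raises the matching trap instead of touching memory; e.g. a
  // store_uint32 to an address with (addr & 3) != 0 throws
  // trap_store_address_misaligned (see translate() below).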

  static const reg_t ICACHE_ENTRIES = 1024;

  inline size_t icache_index(reg_t addr)
  {
    // for instruction sizes != 4, this hash still works but is suboptimal
    return (addr / 4) % ICACHE_ENTRIES;
  }
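
  // example: with 1024 entries hashed on 4-byte quanta, addresses
  // ICACHE_ENTRIES*4 = 4 KiB apart alias to the same slot, e.g.
  // icache_index(0x1000) == icache_index(0x2000) == 0.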

  // load instruction from memory at aligned address.
  icache_entry_t* access_icache(reg_t addr) __attribute__((always_inline))
  {
    reg_t idx = icache_index(addr);
    icache_entry_t* entry = &icache[idx];
    if (likely(entry->tag == addr))
      return entry;

    bool rvc = false; // set this dynamically once RVC is re-implemented
    char* iaddr = (char*)translate(addr, rvc ? 2 : 4, false, true);
    insn_bits_t insn = *(uint16_t*)iaddr;

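    // RISC-V encodes an instruction's length in the low bits of its first
    // 16-bit parcel, so any remaining parcels are fetched one halfword at a
    // time, re-translating wherever they might cross a page boundary.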
    if (unlikely(insn_length(insn) == 2)) {
      insn = (int16_t)insn;
    } else if (likely(insn_length(insn) == 4)) {
      if (likely((addr & (PGSIZE-1)) < PGSIZE-2))
        insn |= (insn_bits_t)*(int16_t*)(iaddr + 2) << 16;
      else
        insn |= (insn_bits_t)*(int16_t*)translate(addr + 2, 2, false, true) << 16;
    } else if (insn_length(insn) == 6) {
      insn |= (insn_bits_t)*(int16_t*)translate(addr + 4, 2, false, true) << 32;
      insn |= (insn_bits_t)*(uint16_t*)translate(addr + 2, 2, false, true) << 16;
    } else {
      static_assert(sizeof(insn_bits_t) == 8, "insn_bits_t must be uint64_t");
      insn |= (insn_bits_t)*(int16_t*)translate(addr + 6, 2, false, true) << 48;
      insn |= (insn_bits_t)*(uint16_t*)translate(addr + 4, 2, false, true) << 32;
      insn |= (insn_bits_t)*(uint16_t*)translate(addr + 2, 2, false, true) << 16;
    }

    insn_fetch_t fetch = {proc->decode_insn(insn), insn};
    icache[idx].tag = addr;
    icache[idx].data = fetch;

    reg_t paddr = iaddr - mem;
    if (!tracer.empty() && tracer.interested_in_range(paddr, paddr + 1, false, true))
    {
      icache[idx].tag = -1;
      tracer.trace(paddr, 1, false, true);
    }
    return &icache[idx];
  }

  inline insn_fetch_t load_insn(reg_t addr)
  {
    return access_icache(addr)->data;
  }

  void set_processor(processor_t* p) { proc = p; flush_tlb(); }

  void flush_tlb();
  void flush_icache();

  void register_memtracer(memtracer_t*);
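
  // typical wiring, sketched (exact call sites in the simulator may differ):
  //   mmu_t mmu(mem, memsz);
  //   mmu.set_processor(&proc);           // binds the decoder; flushes the TLB
  //   insn_fetch_t f = mmu.load_insn(pc); // decoded handler plus raw bits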

private:
  char* mem;
  size_t memsz;
  processor_t* proc;
  memtracer_list_t tracer;

  // implement an instruction cache for simulator performance
  icache_entry_t icache[ICACHE_ENTRIES];

  // implement a TLB for simulator performance
  static const reg_t TLB_ENTRIES = 256;
  char* tlb_data[TLB_ENTRIES];
  reg_t tlb_insn_tag[TLB_ENTRIES];
  reg_t tlb_load_tag[TLB_ENTRIES];
  reg_t tlb_store_tag[TLB_ENTRIES];
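
  // keeping separate load/store/fetch tags means a page can hit for loads
  // while stores still miss into refill_tlb(), which is presumably where
  // permission checks (and thus referenced/dirty-bit updates) belong.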

  // finish translation on a TLB miss and update the TLB
  void* refill_tlb(reg_t addr, reg_t bytes, bool store, bool fetch);

  // perform a page table walk for a given VA; set referenced/dirty bits
  // only if the access is permitted
  pte_t walk(reg_t addr, reg_t perm);

  // translate a virtual address to a physical address
  void* translate(reg_t addr, reg_t bytes, bool store, bool fetch)
    __attribute__((always_inline))
  {
    reg_t idx = (addr >> PGSHIFT) % TLB_ENTRIES;
    reg_t expected_tag = addr >> PGSHIFT;
    reg_t* tags = fetch ? tlb_insn_tag : store ? tlb_store_tag : tlb_load_tag;
    reg_t tag = tags[idx];
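    // tlb_data[idx] presumably holds (host page base - virtual page base),
    // so adding the full virtual address yields the host pointer directly.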
    void* data = tlb_data[idx] + addr;

    if (unlikely(addr & (bytes-1)))
      store ? throw trap_store_address_misaligned(addr) :
      fetch ? throw trap_instruction_address_misaligned(addr) :
      throw trap_load_address_misaligned(addr);

    if (likely(tag == expected_tag))
      return data;

    return refill_tlb(addr, bytes, store, fetch);
  }

  friend class processor_t;
};

#endif