#ifndef _RISCV_MMU_H
#define _RISCV_MMU_H

#include "decode.h"
#include "trap.h"
#include "common.h"
#include "config.h"
#include "processor.h"
#include "memtracer.h"
#include <vector>

class processor_t;

// virtual memory configuration
typedef reg_t pte_t;
const reg_t LEVELS = sizeof(pte_t) == sizeof(uint64_t) ? 3 : 2;
const reg_t PGSHIFT = 13;
const reg_t PGSIZE = 1 << PGSHIFT;
const reg_t PTIDXBITS = PGSHIFT - (sizeof(pte_t) == 8 ? 3 : 2);
const reg_t VPN_BITS = PTIDXBITS * LEVELS;
const reg_t PPN_BITS = 8*sizeof(reg_t) - PGSHIFT;
const reg_t VA_BITS = VPN_BITS + PGSHIFT;
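// For illustration: with a 64-bit pte_t the definitions above work out to
// LEVELS = 3, PGSIZE = 8192, PTIDXBITS = 10, VPN_BITS = 30, PPN_BITS = 51 and
// VA_BITS = 43; with a 32-bit pte_t they give LEVELS = 2, PTIDXBITS = 11,
// VPN_BITS = 22 and VA_BITS = 35.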

// page table entry (PTE) fields
#define PTE_T 0x001 // Entry is a page Table descriptor
#define PTE_E 0x002 // Entry is a page table Entry
#define PTE_R 0x004 // Referenced
#define PTE_D 0x008 // Dirty
#define PTE_UX 0x010 // User eXecute permission
#define PTE_UW 0x020 // User Write permission
#define PTE_UR 0x040 // User Read permission
#define PTE_SX 0x080 // Supervisor eXecute permission
#define PTE_SW 0x100 // Supervisor Write permission
#define PTE_SR 0x200 // Supervisor Read permission
#define PTE_PERM (PTE_SR | PTE_SW | PTE_SX | PTE_UR | PTE_UW | PTE_UX)
#define PTE_PPN_SHIFT 13 // LSB of physical page number in the PTE
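// Illustrative permission check (a sketch only; the actual check happens in
// the MMU implementation behind refill_tlb()/walk(), not in this header):
//   bool can_load  = pte & (supervisor ? PTE_SR : PTE_UR);
//   bool can_store = pte & (supervisor ? PTE_SW : PTE_UW);
//   bool can_fetch = pte & (supervisor ? PTE_SX : PTE_UX);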

// this class implements a processor's port into the virtual memory system.
// an MMU and instruction cache are maintained for simulator performance.
class mmu_t
{
public:
  mmu_t(char* _mem, size_t _memsz);
  ~mmu_t();

  // template for functions that load an aligned value from memory
  #define load_func(type) \
    type##_t load_##type(reg_t addr) { \
      if(unlikely(addr % sizeof(type##_t))) \
      { \
        badvaddr = addr; \
        throw trap_load_address_misaligned; \
      } \
      void* paddr = translate(addr, sizeof(type##_t), false, false); \
      return *(type##_t*)paddr; \
    }
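  // For reference, load_func(uint32) expands to roughly:
  //   uint32_t load_uint32(reg_t addr) {
  //     if(unlikely(addr % sizeof(uint32_t))) {
  //       badvaddr = addr;
  //       throw trap_load_address_misaligned;
  //     }
  //     void* paddr = translate(addr, sizeof(uint32_t), false, false);
  //     return *(uint32_t*)paddr;
  //   }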

  // load value from memory at aligned address; zero extend to register width
  load_func(uint8)
  load_func(uint16)
  load_func(uint32)
  load_func(uint64)

  // load value from memory at aligned address; sign extend to register width
  load_func(int8)
  load_func(int16)
  load_func(int32)
  load_func(int64)
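  // Example use (a sketch; mmu and addr are illustrative names, and the
  // destination is assumed to be a reg_t-wide register):
  //   reg_t rd_zext = mmu.load_uint32(addr);           // zero-extended
  //   reg_t rd_sext = (int64_t)mmu.load_int32(addr);   // sign-extended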

  // template for functions that store an aligned value to memory
  #define store_func(type) \
    void store_##type(reg_t addr, type##_t val) { \
      if(unlikely(addr % sizeof(type##_t))) \
      { \
        badvaddr = addr; \
        throw trap_store_address_misaligned; \
      } \
      void* paddr = translate(addr, sizeof(type##_t), true, false); \
      *(type##_t*)paddr = val; \
    }

  // store value to memory at aligned address
  store_func(uint8)
  store_func(uint16)
  store_func(uint32)
  store_func(uint64)
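  // Example use (sketch; names are illustrative): a 32-bit store truncates the
  // source register, and a misaligned address raises the trap thrown above.
  //   mmu.store_uint32(addr, (uint32_t)rs2_value);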

  struct insn_fetch_t
  {
    insn_t insn;
    insn_func_t func;
  };

  // load instruction from memory at aligned address.
  // (needed because the instruction alignment requirement varies
  // when RVC is supported)
  // returns the instruction at the specified address, given the current
  // RVC mode. func is set to a pointer to a function that knows how to
  // execute the returned instruction.
  inline insn_fetch_t load_insn(reg_t addr, bool rvc)
  {
    #ifdef RISCV_ENABLE_RVC
    if(addr % 4 == 2 && rvc) // fetch across word boundary
    {
      void* addr_lo = translate(addr, 2, false, true);
      insn_fetch_t fetch;
      fetch.insn.bits = *(uint16_t*)addr_lo;
      size_t dispatch_idx = fetch.insn.bits % processor_t::DISPATCH_TABLE_SIZE;
      fetch.func = processor_t::dispatch_table[dispatch_idx];

      if(!INSN_IS_RVC(fetch.insn.bits))
      {
        void* addr_hi = translate(addr+2, 2, false, true);
        fetch.insn.bits |= (uint32_t)*(uint16_t*)addr_hi << 16;
      }
      return fetch;
    }
    else
    #endif
    {
      reg_t idx = (addr/sizeof(insn_t)) % ICACHE_ENTRIES;
      insn_fetch_t fetch;
      if (unlikely(icache_tag[idx] != addr))
      {
        void* paddr = translate(addr, sizeof(insn_t), false, true);
        fetch.insn = *(insn_t*)paddr;
        size_t dispatch_idx = fetch.insn.bits % processor_t::DISPATCH_TABLE_SIZE;
        fetch.func = processor_t::dispatch_table[dispatch_idx];

        icache_tag[idx] = addr;
        icache_data[idx] = fetch.insn;
        icache_func[idx] = fetch.func;

        if (tracer.interested_in_range(addr, addr + sizeof(insn_t), false, true))
          icache_tag[idx] = -1;
      }
      fetch.insn = icache_data[idx];
      fetch.func = icache_func[idx];
      return fetch;
    }
  }
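  // Illustrative fetch-execute loop (a sketch only; the real loop lives in
  // processor_t, and the exact insn_func_t signature is defined in decode.h.
  // Here it is assumed to take the processor, the instruction, and the current
  // pc, and to return the next pc):
  //   while (running) {
  //     mmu_t::insn_fetch_t fetch = mmu->load_insn(pc, rvc_enabled);
  //     pc = fetch.func(proc, fetch.insn, pc);
  //   }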

  // get the virtual address that caused a fault
  reg_t get_badvaddr() { return badvaddr; }

  // get/set the page table base register
  reg_t get_ptbr() { return ptbr; }
  void set_ptbr(reg_t addr) { ptbr = addr & ~(PGSIZE-1); flush_tlb(); }
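  // Example (sketch): set_ptbr(0x80003456) records 0x80002000, since the base
  // is forced down to a PGSIZE (8 KiB) boundary, and flushes the TLB.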

  // keep the MMU in sync with processor mode
  void set_supervisor(bool sup) { supervisor = sup; }
  void set_vm_enabled(bool en) { vm_enabled = en; }

  // flush the TLB and instruction cache
  void flush_tlb();
  void flush_icache();

  void register_memtracer(memtracer_t*);

private:
  char* mem;
  size_t memsz;
  reg_t badvaddr;
  reg_t ptbr;
  bool supervisor;
  bool vm_enabled;
  memtracer_list_t tracer;

  // implement a TLB for simulator performance
  static const reg_t TLB_ENTRIES = 256;
  char* tlb_data[TLB_ENTRIES];
  reg_t tlb_insn_tag[TLB_ENTRIES];
  reg_t tlb_load_tag[TLB_ENTRIES];
  reg_t tlb_store_tag[TLB_ENTRIES];
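  // Note: separate instruction/load/store tags are kept, presumably so that a
  // page can hit in the TLB for one access type without implying permission
  // for the others; translate() below selects the tag array by access type.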

  // implement an instruction cache for simulator performance
  static const reg_t ICACHE_ENTRIES = 256;
  insn_t icache_data[ICACHE_ENTRIES];
  insn_func_t icache_func[ICACHE_ENTRIES];
  reg_t icache_tag[ICACHE_ENTRIES];

  // finish translation on a TLB miss and update the TLB
  void* refill_tlb(reg_t addr, reg_t bytes, bool store, bool fetch);

  // perform a page table walk for a given virtual address
  pte_t walk(reg_t addr);

  // translate a virtual address to a physical address
  void* translate(reg_t addr, reg_t bytes, bool store, bool fetch)
  {
    reg_t idx = (addr >> PGSHIFT) % TLB_ENTRIES;

    reg_t* tlb_tag = fetch ? tlb_insn_tag : store ? tlb_store_tag : tlb_load_tag;
    reg_t expected_tag = addr & ~(PGSIZE-1);
    if(likely(tlb_tag[idx] == expected_tag))
      return ((uintptr_t)addr & (PGSIZE-1)) + tlb_data[idx];

    return refill_tlb(addr, bytes, store, fetch);
  }
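  // Worked example (sketch): for a load at addr = 0x40002468 with PGSIZE = 8192,
  //   idx          = (addr >> 13) % 256 = 0x20001 % 256 = 1
  //   expected_tag = addr & ~(reg_t)0x1FFF = 0x40002000
  // so a hit returns tlb_data[1] + 0x468; a miss falls through to refill_tlb().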

  friend class processor_t;
};

#endif