#ifndef _RISCV_MMU_H
#define _RISCV_MMU_H

#include "decode.h"
#include "trap.h"
#include "common.h"
#include "config.h"
#include "processor.h"
#include "memtracer.h"
#include <vector>

// virtual memory configuration
typedef reg_t pte_t;
const reg_t LEVELS = sizeof(pte_t) == sizeof(uint64_t) ? 3 : 2;
const reg_t PGSHIFT = 13;
const reg_t PGSIZE = 1 << PGSHIFT;
const reg_t PTIDXBITS = PGSHIFT - (sizeof(pte_t) == 8 ? 3 : 2);
const reg_t VPN_BITS = PTIDXBITS * LEVELS;
const reg_t PPN_BITS = 8*sizeof(reg_t) - PGSHIFT;
const reg_t VA_BITS = VPN_BITS + PGSHIFT;
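// With PGSHIFT = 13, pages are 8 KiB. When pte_t is 8 bytes this works out to
// PTIDXBITS = 10, VPN_BITS = 30, and VA_BITS = 43; when pte_t is 4 bytes it
// gives PTIDXBITS = 11, VPN_BITS = 22, and VA_BITS = 35.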

// page table entry (PTE) fields
#define PTE_T 0x001 // Entry is a page Table descriptor
#define PTE_E 0x002 // Entry is a page table Entry
#define PTE_R 0x004 // Referenced
#define PTE_D 0x008 // Dirty
#define PTE_UX 0x010 // User eXecute permission
#define PTE_UW 0x020 // User Write permission
#define PTE_UR 0x040 // User Read permission
#define PTE_SX 0x080 // Supervisor eXecute permission
#define PTE_SW 0x100 // Supervisor Write permission
#define PTE_SR 0x200 // Supervisor Read permission
#define PTE_PERM (PTE_SR | PTE_SW | PTE_SX | PTE_UR | PTE_UW | PTE_UX)
#define PTE_PPN_SHIFT 13 // LSB of physical page number in the PTE
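// A PTE thus keeps its type and permission flags in bits [9:0] and its
// physical page number starting at bit PTE_PPN_SHIFT, so a leaf mapping's
// physical page base can be recovered as (pte >> PTE_PPN_SHIFT) << PGSHIFT.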

// this class implements a processor's port into the virtual memory system.
// a TLB and instruction cache are maintained for simulator performance.
class mmu_t
{
public:
  mmu_t(char* _mem, size_t _memsz);
  ~mmu_t();

  // template for functions that load an aligned value from memory
  #define load_func(type) \
    type##_t load_##type(reg_t addr) { \
      if(unlikely(addr % sizeof(type##_t))) \
      { \
        badvaddr = addr; \
        throw trap_load_address_misaligned; \
      } \
      reg_t paddr = translate(addr, sizeof(type##_t), false, false); \
      return *(type##_t*)(mem + paddr); \
    }
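  // For example, load_func(uint16) expands to a member function
  //   uint16_t load_uint16(reg_t addr)
  // that raises trap_load_address_misaligned on a misaligned address and
  // otherwise returns the 16-bit value at the translated physical address.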

  // load value from memory at aligned address; zero extend to register width
  load_func(uint8)
  load_func(uint16)
  load_func(uint32)
  load_func(uint64)

  // load value from memory at aligned address; sign extend to register width
  load_func(int8)
  load_func(int16)
  load_func(int32)
  load_func(int64)

  // template for functions that store an aligned value to memory
  #define store_func(type) \
    void store_##type(reg_t addr, type##_t val) { \
      if(unlikely(addr % sizeof(type##_t))) \
      { \
        badvaddr = addr; \
        throw trap_store_address_misaligned; \
      } \
      reg_t paddr = translate(addr, sizeof(type##_t), true, false); \
      *(type##_t*)(mem + paddr) = val; \
    }
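  // Likewise, store_func(uint32) expands to
  //   void store_uint32(reg_t addr, uint32_t val)
  // which raises trap_store_address_misaligned on a misaligned address and
  // otherwise writes val at the translated physical address.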

  // store value to memory at aligned address
  store_func(uint8)
  store_func(uint16)
  store_func(uint32)
  store_func(uint64)

  struct insn_fetch_t
  {
    insn_t insn;
    insn_func_t func;
  };

  // load instruction from memory at aligned address.
  // (needed because instruction alignment requirement is variable
  // if RVC is supported)
  // returns the instruction at the specified address, given the current
  // RVC mode. func is set to a pointer to a function that knows how to
  // execute the returned instruction.
  inline insn_fetch_t load_insn(reg_t addr, bool rvc)
  {
#ifdef RISCV_ENABLE_RVC
    if(addr % 4 == 2 && rvc) // fetch across word boundary
    {
      reg_t addr_lo = translate(addr, 2, false, true);
      insn_fetch_t fetch;
      fetch.insn.bits = *(uint16_t*)(mem + addr_lo);
      fetch.func = get_insn_func(fetch.insn, sr);

      if(!INSN_IS_RVC(fetch.insn.bits))
      {
        reg_t addr_hi = translate(addr+2, 2, false, true);
        fetch.insn.bits |= (uint32_t)*(uint16_t*)(mem + addr_hi) << 16;
      }
      return fetch;
    }
    else
#endif
    {
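      // common case: direct-mapped instruction cache lookup keyed by the
      // word-granular virtual address; misses translate the address, decode
      // the instruction, and refill the cache entry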
      reg_t idx = (addr/sizeof(insn_t)) % ICACHE_ENTRIES;
      insn_fetch_t fetch;
      if (unlikely(icache_tag[idx] != addr))
      {
        reg_t paddr = translate(addr, sizeof(insn_t), false, true);
        fetch.insn = *(insn_t*)(mem + paddr);
        fetch.func = get_insn_func(fetch.insn, sr);

        icache_tag[idx] = addr;
        icache_data[idx] = fetch.insn;
        icache_func[idx] = fetch.func;

        if (tracer.interested_in_range(paddr, paddr + sizeof(insn_t), false, true))
        {
          icache_tag[idx] = -1;
          tracer.trace(paddr, sizeof(insn_t), false, true);
        }
      }
      fetch.insn = icache_data[idx];
      fetch.func = icache_func[idx];
      return fetch;
    }
  }

  // get the virtual address that caused a fault
  reg_t get_badvaddr() { return badvaddr; }

  // get/set the page table base register
  reg_t get_ptbr() { return ptbr; }
  void set_ptbr(reg_t addr) { ptbr = addr & ~(PGSIZE-1); flush_tlb(); }
  // keep the MMU in sync with processor mode
  void set_sr(uint32_t _sr) { sr = _sr; }

  // flush the TLB and instruction cache
  void flush_tlb();
  void flush_icache();

  void register_memtracer(memtracer_t*);

private:
  char* mem;
  size_t memsz;
  reg_t badvaddr;
  reg_t ptbr;
  uint32_t sr;
  memtracer_list_t tracer;

  // implement a TLB for simulator performance
  static const reg_t TLB_ENTRIES = 256;
  reg_t tlb_data[TLB_ENTRIES];
  reg_t tlb_insn_tag[TLB_ENTRIES];
  reg_t tlb_load_tag[TLB_ENTRIES];
  reg_t tlb_store_tag[TLB_ENTRIES];

  // implement an instruction cache for simulator performance
  static const reg_t ICACHE_ENTRIES = 256;
  insn_t icache_data[ICACHE_ENTRIES];
  insn_func_t icache_func[ICACHE_ENTRIES];
  reg_t icache_tag[ICACHE_ENTRIES];

  // finish translation on a TLB miss and update the TLB
  reg_t refill_tlb(reg_t addr, reg_t bytes, bool store, bool fetch);

  // perform a page table walk for a given virtual address
  pte_t walk(reg_t addr);

  // translate a virtual address to a physical address
  reg_t translate(reg_t addr, reg_t bytes, bool store, bool fetch)
  {
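    // direct-mapped TLB: each tag holds a virtual page base address and the
    // corresponding data entry holds that page's physical base, so a hit just
    // adds the page offset; a miss falls through to refill_tlb()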
    reg_t idx = (addr >> PGSHIFT) % TLB_ENTRIES;

    reg_t* tlb_tag = fetch ? tlb_insn_tag : store ? tlb_store_tag : tlb_load_tag;
    reg_t expected_tag = addr & ~(PGSIZE-1);
    if(likely(tlb_tag[idx] == expected_tag))
      return ((uintptr_t)addr & (PGSIZE-1)) + tlb_data[idx];

    return refill_tlb(addr, bytes, store, fetch);
  }

  friend class processor_t;
};

#endif