d6f446bb294c5b91a7b8c5d4a437ed63c6871050
[riscv-isa-sim.git] / riscv / mmu.h
1 // See LICENSE for license details.
2
3 #ifndef _RISCV_MMU_H
4 #define _RISCV_MMU_H
5
6 #include "decode.h"
7 #include "trap.h"
8 #include "common.h"
9 #include "config.h"
10 #include "processor.h"
11 #include "memtracer.h"
12 #include <vector>
13
// virtual memory configuration
typedef reg_t pte_t;
// number of page-table levels: 3 when PTEs are 8 bytes, 2 when 4 bytes
const reg_t LEVELS = sizeof(pte_t) == 8 ? 3 : 2;
// log2 of the page size (4 KiB pages)
const reg_t PGSHIFT = 12;
// index bits per page-table level: one page holds PGSIZE/sizeof(pte_t) entries
const reg_t PTIDXBITS = PGSHIFT - (sizeof(pte_t) == 8 ? 3 : 2);
const reg_t PGSIZE = 1 << PGSHIFT;
// total virtual-page-number bits across all levels
const reg_t VPN_BITS = PTIDXBITS * LEVELS;
// total bits in a virtual address (VPN plus page offset)
const reg_t VA_BITS = VPN_BITS + PGSHIFT;
22
// one decoded instruction: the execute function selected by decode,
// paired with the raw instruction bits it was decoded from
struct insn_fetch_t
{
  insn_func_t func; // execute function (produced by processor_t::decode_insn)
  insn_t insn;      // the instruction bits themselves
};
28
// one entry of the simulator's instruction cache, tagged by virtual address
struct icache_entry_t {
  reg_t tag;         // virtual address of the cached instruction; -1 marks the entry invalid
  reg_t pad;         // padding — presumably to size/align the entry; TODO confirm
  insn_fetch_t data; // decoded instruction for this address
};
34
35 // this class implements a processor's port into the virtual memory system.
36 // an MMU and instruction cache are maintained for simulator performance.
// this class implements a processor's port into the virtual memory system.
// an MMU and instruction cache are maintained for simulator performance.
class mmu_t
{
public:
  // _mem: base of the simulated physical memory; _memsz: its size in bytes
  mmu_t(char* _mem, size_t _memsz);
  ~mmu_t();

  // template for functions that load an aligned value from memory
  #define load_func(type) \
    type##_t load_##type(reg_t addr) __attribute__((always_inline)) { \
      void* paddr = translate(addr, sizeof(type##_t), false, false); \
      return *(type##_t*)paddr; \
    }

  // load value from memory at aligned address; zero extend to register width
  load_func(uint8)
  load_func(uint16)
  load_func(uint32)
  load_func(uint64)

  // load value from memory at aligned address; sign extend to register width
  load_func(int8)
  load_func(int16)
  load_func(int32)
  load_func(int64)

  // template for functions that store an aligned value to memory
  #define store_func(type) \
    void store_##type(reg_t addr, type##_t val) { \
      void* paddr = translate(addr, sizeof(type##_t), true, false); \
      *(type##_t*)paddr = val; \
    }

  // store value to memory at aligned address
  store_func(uint8)
  store_func(uint16)
  store_func(uint32)
  store_func(uint64)

  static const reg_t ICACHE_ENTRIES = 1024;

  // map a fetch address to its direct-mapped icache slot
  inline size_t icache_index(reg_t addr)
  {
    // for instruction sizes != 4, this hash still works but is suboptimal
    return (addr / 4) % ICACHE_ENTRIES;
  }

  // load instruction from memory at aligned address.
  // on a tag hit, return the cached decode; on a miss, fetch the
  // instruction 16 bits at a time — re-translating each halfword that may
  // lie on a different page, since adjacent virtual pages need not map to
  // adjacent physical memory — then decode it and fill the entry.
  icache_entry_t* access_icache(reg_t addr) __attribute__((always_inline))
  {
    reg_t idx = icache_index(addr);
    icache_entry_t* entry = &icache[idx];
    if (likely(entry->tag == addr))
      return entry;

    // miss: translate the fetch address and read the first halfword,
    // which determines the instruction's length
    char* iaddr = (char*)translate(addr, 1, false, true);
    insn_bits_t insn = *(uint16_t*)iaddr;

    if (likely(insn_length(insn) == 4)) {
      // 32-bit instruction: the second halfword is read directly unless it
      // would cross a page boundary, in which case it is translated anew.
      // the most-significant halfword is read as int16_t, sign-extending
      // the instruction's top bits through insn_bits_t.
      if (likely(addr % PGSIZE < PGSIZE-2))
        insn |= (insn_bits_t)*(int16_t*)(iaddr + 2) << 16;
      else
        insn |= (insn_bits_t)*(int16_t*)translate(addr + 2, 1, false, true) << 16;
    } else if (insn_length(insn) == 2) {
      // 16-bit instruction: sign-extend the single halfword
      insn = (int16_t)insn;
    } else if (insn_length(insn) == 6) {
      // 48-bit instruction: top halfword sign-extended, middle zero-extended
      insn |= (insn_bits_t)*(int16_t*)translate(addr + 4, 1, false, true) << 32;
      insn |= (insn_bits_t)*(uint16_t*)translate(addr + 2, 1, false, true) << 16;
    } else {
      // 64-bit instruction: requires the full width of insn_bits_t
      static_assert(sizeof(insn_bits_t) == 8, "insn_bits_t must be uint64_t");
      insn |= (insn_bits_t)*(int16_t*)translate(addr + 6, 1, false, true) << 48;
      insn |= (insn_bits_t)*(uint16_t*)translate(addr + 4, 1, false, true) << 32;
      insn |= (insn_bits_t)*(uint16_t*)translate(addr + 2, 1, false, true) << 16;
    }

    insn_fetch_t fetch = {proc->decode_insn(insn), insn};
    icache[idx].tag = addr;
    icache[idx].data = fetch;

    // if a tracer is watching this physical address, invalidate the entry
    // (tag = -1) so subsequent fetches of it also take this slow path and
    // get traced
    reg_t paddr = iaddr - mem;
    if (!tracer.empty() && tracer.interested_in_range(paddr, paddr + 1, false, true))
    {
      icache[idx].tag = -1;
      tracer.trace(paddr, 1, false, true);
    }
    return &icache[idx];
  }

  // fetch and decode the instruction at addr (icache fast path)
  inline insn_fetch_t load_insn(reg_t addr)
  {
    return access_icache(addr)->data;
  }

  // attach this MMU to a processor; flush the TLB since cached
  // translations may not be valid for the new processor's state
  void set_processor(processor_t* p) { proc = p; flush_tlb(); }

  void flush_tlb();
  void flush_icache();

  void register_memtracer(memtracer_t*);

private:
  char* mem;               // base of the simulated physical memory
  size_t memsz;            // size of the simulated physical memory in bytes
  processor_t* proc;       // processor this MMU serves (set via set_processor)
  memtracer_list_t tracer; // registered memory-access tracers

  // implement an instruction cache for simulator performance
  icache_entry_t icache[ICACHE_ENTRIES];

  // implement a TLB for simulator performance.
  // tags are kept in separate arrays per access type (fetch/load/store).
  // tlb_data[idx] is biased so that tlb_data[idx] + vaddr yields the host
  // address directly (see translate()).
  static const reg_t TLB_ENTRIES = 256;
  char* tlb_data[TLB_ENTRIES];
  reg_t tlb_insn_tag[TLB_ENTRIES];
  reg_t tlb_load_tag[TLB_ENTRIES];
  reg_t tlb_store_tag[TLB_ENTRIES];

  // finish translation on a TLB miss and update the TLB
  void* refill_tlb(reg_t addr, reg_t bytes, bool store, bool fetch);

  // perform a page table walk for a given VA; set referenced/dirty bits
  pte_t walk(reg_t addr, bool supervisor, bool store, bool fetch);

  // translate a virtual address to a physical address.
  // throws the trap matching the access type if addr is misaligned
  // (the check assumes bytes is a power of two); on a TLB hit returns
  // the precomputed host pointer, otherwise falls back to refill_tlb.
  void* translate(reg_t addr, reg_t bytes, bool store, bool fetch)
    __attribute__((always_inline))
  {
    reg_t idx = (addr >> PGSHIFT) % TLB_ENTRIES;
    reg_t expected_tag = addr >> PGSHIFT;
    // select the tag array for this access type
    reg_t* tags = fetch ? tlb_insn_tag : store ? tlb_store_tag :tlb_load_tag;
    reg_t tag = tags[idx];
    void* data = tlb_data[idx] + addr;

    if (unlikely(addr & (bytes-1)))
      store ? throw trap_store_address_misaligned(addr) :
      fetch ? throw trap_instruction_address_misaligned(addr) :
      throw trap_load_address_misaligned(addr);

    if (likely(tag == expected_tag))
      return data;

    return refill_tlb(addr, bytes, store, fetch);
  }

  friend class processor_t;
};
181
182 #endif