Support 2/4/6/8-byte instructions
[riscv-isa-sim.git] / riscv / mmu.h
1 // See LICENSE for license details.
2
3 #ifndef _RISCV_MMU_H
4 #define _RISCV_MMU_H
5
6 #include "decode.h"
7 #include "icache.h"
8 #include "trap.h"
9 #include "common.h"
10 #include "config.h"
11 #include "processor.h"
12 #include "memtracer.h"
13 #include <vector>
14
// virtual memory configuration
typedef reg_t pte_t;  // page-table entry is register-width
// 3-level page table when PTEs are 8 bytes (RV64), 2-level when 4 bytes (RV32)
const reg_t LEVELS = sizeof(pte_t) == 8 ? 3 : 2;
const reg_t PTIDXBITS = 10;  // page-table index bits per level (1024 PTEs/page)
// page-offset bits = index bits + log2(PTE size), i.e. 13 on RV64, 12 on RV32
const reg_t PGSHIFT = PTIDXBITS + (sizeof(pte_t) == 8 ? 3 : 2);
const reg_t PGSIZE = 1 << PGSHIFT;
const reg_t VPN_BITS = PTIDXBITS * LEVELS;         // virtual page number width
const reg_t PPN_BITS = 8*sizeof(reg_t) - PGSHIFT;  // physical page number width
const reg_t VA_BITS = VPN_BITS + PGSHIFT;          // total virtual address width
24
// one predecoded instruction: its raw bits plus the decoded handler,
// as produced by proc->decode_insn() in access_icache below
struct insn_fetch_t
{
  insn_func_t func;  // decoded execute function for this instruction
  insn_t insn;       // raw instruction bits
};
30
// one slot of the direct-mapped simulator instruction cache
struct icache_entry_t {
  reg_t tag;          // fetch address this entry caches; -1 marks it invalid
  reg_t pad;          // unused — presumably sizes the entry for alignment; confirm
  insn_fetch_t data;  // cached predecoded instruction
};
36
// this class implements a processor's port into the virtual memory system.
// an MMU and instruction cache are maintained for simulator performance.
class mmu_t
{
public:
  // _mem: base of the simulated physical memory; _memsz: its size in bytes
  mmu_t(char* _mem, size_t _memsz);
  ~mmu_t();

  // template for functions that load an aligned value from memory
  // (expands into the load_uint8 ... load_int64 members below)
  #define load_func(type) \
    type##_t load_##type(reg_t addr) __attribute__((always_inline)) { \
      void* paddr = translate(addr, sizeof(type##_t), false, false); \
      return *(type##_t*)paddr; \
    }

  // load value from memory at aligned address; zero extend to register width
  load_func(uint8)
  load_func(uint16)
  load_func(uint32)
  load_func(uint64)

  // load value from memory at aligned address; sign extend to register width
  load_func(int8)
  load_func(int16)
  load_func(int32)
  load_func(int64)

  // template for functions that store an aligned value to memory
  #define store_func(type) \
    void store_##type(reg_t addr, type##_t val) { \
      void* paddr = translate(addr, sizeof(type##_t), true, false); \
      *(type##_t*)paddr = val; \
    }

  // store value to memory at aligned address
  store_func(uint8)
  store_func(uint16)
  store_func(uint32)
  store_func(uint64)

  // map a fetch address to a direct-mapped icache slot
  inline size_t icache_index(reg_t addr)
  {
    // for instruction sizes != 4, this hash still works but is suboptimal
    return (addr / 4) % ICACHE_SIZE;
  }

  // load instruction from memory at aligned address.
  // Instructions are 2/4/6/8 bytes (length decoded from the first halfword
  // by insn_length); each 16-bit parcel is translated separately so a fetch
  // that straddles a page boundary is handled correctly.
  icache_entry_t* access_icache(reg_t addr) __attribute__((always_inline))
  {
    reg_t idx = icache_index(addr);
    icache_entry_t* entry = &icache[idx];
    if (likely(entry->tag == addr))  // icache hit
      return entry;

    // miss: translate the PC and read the first (lowest) halfword
    char* iaddr = (char*)translate(addr, 2, false, true);
    insn_bits_t insn = *(uint16_t*)iaddr;

    if (unlikely(insn_length(insn) == 2)) {
      insn = (int16_t)insn;  // 2-byte insn: sign-extend the single parcel
    } else if (likely(insn_length(insn) == 4)) {
      // fast path: if the second parcel is on the same page, reuse iaddr
      // rather than translating again
      if (likely((addr & (PGSIZE-1)) < PGSIZE-2))
        insn |= (insn_bits_t)*(int16_t*)(iaddr + 2) << 16;
      else
        insn |= (insn_bits_t)*(int16_t*)translate(addr + 2, 2, false, true) << 16;
    } else if (insn_length(insn) == 6) {
      // topmost parcel is read as int16_t so its sign extends through the
      // upper bits; lower parcels are zero-extended
      insn |= (insn_bits_t)*(int16_t*)translate(addr + 4, 2, false, true) << 32;
      insn |= (insn_bits_t)*(uint16_t*)translate(addr + 2, 2, false, true) << 16;
    } else {
      // 8-byte instruction needs the full width of insn_bits_t
      static_assert(sizeof(insn_bits_t) == 8, "insn_bits_t must be uint64_t");
      insn |= (insn_bits_t)*(int16_t*)translate(addr + 6, 2, false, true) << 48;
      insn |= (insn_bits_t)*(uint16_t*)translate(addr + 4, 2, false, true) << 32;
      insn |= (insn_bits_t)*(uint16_t*)translate(addr + 2, 2, false, true) << 16;
    }

    // predecode and install in the cache slot
    insn_fetch_t fetch = {proc->decode_insn(insn), insn};
    icache[idx].tag = addr;
    icache[idx].data = fetch;

    // if a memtracer is watching this physical address, poison the tag so
    // the entry never hits and every fetch of it reaches the tracer
    reg_t paddr = iaddr - mem;
    if (!tracer.empty() && tracer.interested_in_range(paddr, paddr + 1, false, true))
    {
      icache[idx].tag = -1;
      tracer.trace(paddr, 1, false, true);
    }
    return &icache[idx];
  }

  // convenience wrapper: fetch (and predecode) the instruction at addr
  inline insn_fetch_t load_insn(reg_t addr)
  {
    return access_icache(addr)->data;
  }

  // attach this MMU to a processor; the TLB is flushed because cached
  // translations may not be valid for the new processor's state
  void set_processor(processor_t* p) { proc = p; flush_tlb(); }

  void flush_tlb();
  void flush_icache();

  void register_memtracer(memtracer_t*);

private:
  char* mem;             // base of simulated physical memory
  size_t memsz;          // size of simulated physical memory in bytes
  processor_t* proc;     // processor this MMU serves
  memtracer_list_t tracer;

  // implement an instruction cache for simulator performance
  icache_entry_t icache[ICACHE_SIZE];

  // implement a TLB for simulator performance; separate tag arrays per
  // access type (fetch/load/store) encode the page permissions
  static const reg_t TLB_ENTRIES = 256;
  char* tlb_data[TLB_ENTRIES];
  reg_t tlb_insn_tag[TLB_ENTRIES];
  reg_t tlb_load_tag[TLB_ENTRIES];
  reg_t tlb_store_tag[TLB_ENTRIES];

  // finish translation on a TLB miss and update the TLB
  void* refill_tlb(reg_t addr, reg_t bytes, bool store, bool fetch);

  // perform a page table walk for a given virtual address
  pte_t walk(reg_t addr);

  // translate a virtual address to a physical address
  void* translate(reg_t addr, reg_t bytes, bool store, bool fetch)
    __attribute__((always_inline))
  {
    reg_t idx = (addr >> PGSHIFT) % TLB_ENTRIES;
    reg_t expected_tag = addr >> PGSHIFT;
    reg_t* tags = fetch ? tlb_insn_tag : store ? tlb_store_tag :tlb_load_tag;
    reg_t tag = tags[idx];
    // tlb_data[idx] + addr yields the host pointer directly, so tlb_data
    // presumably stores (host base - virtual page base) — set in refill_tlb
    void* data = tlb_data[idx] + addr;

    // NOTE(review): a misaligned *fetch* also throws the load-misaligned
    // trap here (only store/load traps exist on this path) — confirm intended
    if (unlikely(addr & (bytes-1)))
      store ? throw trap_store_address_misaligned(addr) : throw trap_load_address_misaligned(addr);

    if (likely(tag == expected_tag))
      return data;

    return refill_tlb(addr, bytes, store, fetch);
  }

  friend class processor_t;
};
179
180 #endif