implement PUM functionality
[riscv-isa-sim.git] / riscv / mmu.h
// See LICENSE for license details.

#ifndef _RISCV_MMU_H
#define _RISCV_MMU_H

#include "decode.h"
#include "trap.h"
#include "common.h"
#include "config.h"
#include "processor.h"
#include "memtracer.h"
#include <stdlib.h>
#include <vector>

// virtual memory configuration
#define PGSHIFT 12
const reg_t PGSIZE = 1 << PGSHIFT;

struct insn_fetch_t
{
  insn_func_t func;
  insn_t insn;
};

struct icache_entry_t {
  reg_t tag;
  reg_t pad;
  insn_fetch_t data;
};

// this class implements a processor's port into the virtual memory system.
// an MMU and instruction cache are maintained for simulator performance.
class mmu_t
{
public:
  mmu_t(char* _mem, size_t _memsz);
  ~mmu_t();

  // template for functions that load an aligned value from memory
  #define load_func(type) \
    type##_t load_##type(reg_t addr) __attribute__((always_inline)) { \
      if (addr & (sizeof(type##_t)-1)) \
        throw trap_load_address_misaligned(addr); \
      reg_t vpn = addr >> PGSHIFT; \
      if (likely(tlb_load_tag[vpn % TLB_ENTRIES] == vpn)) \
        return *(type##_t*)(tlb_data[vpn % TLB_ENTRIES] + addr); \
      type##_t res; \
      load_slow_path(addr, sizeof(type##_t), (uint8_t*)&res); \
      return res; \
    }

  // load value from memory at aligned address; zero extend to register width
  load_func(uint8)
  load_func(uint16)
  load_func(uint32)
  load_func(uint64)

  // load value from memory at aligned address; sign extend to register width
  load_func(int8)
  load_func(int16)
  load_func(int32)
  load_func(int64)
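
  // For illustration only (not part of the interface): load_func(uint32)
  // expands to roughly the following inline member, which tries the
  // direct-mapped TLB first and falls back to load_slow_path() on a miss:
  //
  //   uint32_t load_uint32(reg_t addr) {
  //     if (addr & (sizeof(uint32_t)-1))
  //       throw trap_load_address_misaligned(addr);
  //     reg_t vpn = addr >> PGSHIFT;
  //     if (likely(tlb_load_tag[vpn % TLB_ENTRIES] == vpn))
  //       return *(uint32_t*)(tlb_data[vpn % TLB_ENTRIES] + addr);
  //     uint32_t res;
  //     load_slow_path(addr, sizeof(uint32_t), (uint8_t*)&res);
  //     return res;
  //   }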

  // template for functions that store an aligned value to memory
  #define store_func(type) \
    void store_##type(reg_t addr, type##_t val) { \
      if (addr & (sizeof(type##_t)-1)) \
        throw trap_store_address_misaligned(addr); \
      reg_t vpn = addr >> PGSHIFT; \
      if (likely(tlb_store_tag[vpn % TLB_ENTRIES] == vpn)) \
        *(type##_t*)(tlb_data[vpn % TLB_ENTRIES] + addr) = val; \
      else \
        store_slow_path(addr, sizeof(type##_t), (const uint8_t*)&val); \
    }

  // store value to memory at aligned address
  store_func(uint8)
  store_func(uint16)
  store_func(uint32)
  store_func(uint64)
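
  // Usage sketch (hypothetical caller, assuming a handle "mmu" of type
  // mmu_t*): an SW instruction implementation would compute the effective
  // address and call
  //
  //   mmu->store_uint32(base + offset, value);
  //
  // Misaligned or faulting accesses unwind via the thrown trap object
  // rather than an error return, so callers need no explicit checks.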

  static const reg_t ICACHE_ENTRIES = 1024;

  inline size_t icache_index(reg_t addr)
  {
    return (addr / PC_ALIGN) % ICACHE_ENTRIES;
  }
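
  // The icache is direct-mapped on the PC.  Worked example (assuming
  // PC_ALIGN == 2, as defined in decode.h when compressed instructions
  // are supported): addresses 0x1000 and 0x1800 collide, since
  // (0x1000/2) % 1024 == (0x1800/2) % 1024 == 0.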

  inline icache_entry_t* refill_icache(reg_t addr, icache_entry_t* entry)
  {
    // fetch the first 16-bit parcel and size the instruction from it;
    // parcels beyond the first go through translate_insn_addr(), since
    // they may lie on a different page.  the most significant parcel is
    // read as int16_t so the assembled value is sign-extended.
    const uint16_t* iaddr = translate_insn_addr(addr);
    insn_bits_t insn = *iaddr;
    int length = insn_length(insn);

    if (likely(length == 4)) {
      if (likely(addr % PGSIZE < PGSIZE-2))
        insn |= (insn_bits_t)*(const int16_t*)(iaddr + 1) << 16;
      else
        insn |= (insn_bits_t)*(const int16_t*)translate_insn_addr(addr + 2) << 16;
    } else if (length == 2) {
      insn = (int16_t)insn;
    } else if (length == 6) {
      insn |= (insn_bits_t)*(const int16_t*)translate_insn_addr(addr + 4) << 32;
      insn |= (insn_bits_t)*(const uint16_t*)translate_insn_addr(addr + 2) << 16;
    } else {
      static_assert(sizeof(insn_bits_t) == 8, "insn_bits_t must be uint64_t");
      insn |= (insn_bits_t)*(const int16_t*)translate_insn_addr(addr + 6) << 48;
      insn |= (insn_bits_t)*(const uint16_t*)translate_insn_addr(addr + 4) << 32;
      insn |= (insn_bits_t)*(const uint16_t*)translate_insn_addr(addr + 2) << 16;
    }

    insn_fetch_t fetch = {proc->decode_insn(insn), insn};
    entry->tag = addr;
    entry->data = fetch;

    // if a tracer claims this address, leave the entry invalid so every
    // subsequent fetch of it is traced
    reg_t paddr = (const char*)iaddr - mem;
    if (tracer.interested_in_range(paddr, paddr + 1, FETCH)) {
      entry->tag = -1;
      tracer.trace(paddr, length, FETCH);
    }
    return entry;
  }
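
  // insn_length() comes from decode.h; per the RISC-V base encoding the
  // length is implied by the low opcode bits alone, so one parcel suffices
  // to size the fetch.  A sketch of the rule (not the verbatim definition):
  //
  //   inline int insn_length(insn_bits_t bits) {
  //     if ((bits & 0x3) != 0x3)   return 2; // compressed
  //     if ((bits & 0x1f) != 0x1f) return 4; // standard 32-bit
  //     if ((bits & 0x3f) != 0x3f) return 6; // 48-bit
  //     return 8;                            // 64-bit
  //   }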

  inline icache_entry_t* access_icache(reg_t addr)
  {
    icache_entry_t* entry = &icache[icache_index(addr)];
    if (likely(entry->tag == addr))
      return entry;
    return refill_icache(addr, entry);
  }

  inline insn_fetch_t load_insn(reg_t addr)
  {
    return access_icache(addr)->data;
  }
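
  // Usage sketch (a hypothetical interpreter loop, not the actual
  // processor_t internals): fetch through the icache and dispatch via the
  // pre-decoded function pointer, which returns the next PC:
  //
  //   reg_t pc = start_pc;  // "start_pc" is illustrative
  //   for (;;) {
  //     insn_fetch_t fetch = mmu->load_insn(pc);
  //     pc = fetch.func(proc, fetch.insn, pc);
  //   }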

  void set_processor(processor_t* p) { proc = p; flush_tlb(); }

  void flush_tlb();
  void flush_icache();

  void register_memtracer(memtracer_t*);

private:
  char* mem;
  size_t memsz;
  processor_t* proc;
  memtracer_list_t tracer;

  // implement an instruction cache for simulator performance
  icache_entry_t icache[ICACHE_ENTRIES];

  // implement a TLB for simulator performance
  static const reg_t TLB_ENTRIES = 256;
  char* tlb_data[TLB_ENTRIES];
  reg_t tlb_insn_tag[TLB_ENTRIES];
  reg_t tlb_load_tag[TLB_ENTRIES];
  reg_t tlb_store_tag[TLB_ENTRIES];
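
  // Note on tlb_data: the fast paths above add the *full* virtual address
  // to tlb_data[idx], so each entry must hold the host pointer to the page
  // minus the page's virtual base.  A sketch of what refill_tlb()
  // presumably stores (see mmu.cc for the real thing):
  //
  //   reg_t idx = (vaddr >> PGSHIFT) % TLB_ENTRIES;
  //   tlb_data[idx] = mem + paddr - vaddr; // tlb_data[idx] + vaddr
  //                                        //   == mem + paddr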

  // finish translation on a TLB miss and update the TLB
  void refill_tlb(reg_t vaddr, reg_t paddr, access_type type);

  // perform a page table walk for a given VA; set referenced/dirty bits.
  // when pum (Protect User Memory, sstatus.PUM) is set, supervisor
  // accesses to user pages fault
  reg_t walk(reg_t addr, access_type type, bool supervisor, bool pum);

  // handle uncommon cases: TLB misses, page faults, MMIO
  const uint16_t* fetch_slow_path(reg_t addr);
  void load_slow_path(reg_t addr, reg_t len, uint8_t* bytes);
  void store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes);
  reg_t translate(reg_t addr, access_type type);

  // ITLB lookup
  const uint16_t* translate_insn_addr(reg_t addr) __attribute__((always_inline)) {
    reg_t vpn = addr >> PGSHIFT;
    if (likely(tlb_insn_tag[vpn % TLB_ENTRIES] == vpn))
      return (uint16_t*)(tlb_data[vpn % TLB_ENTRIES] + addr);
    return fetch_slow_path(addr);
  }

  friend class processor_t;
};

#endif