Use enum instead of two bools to denote memory access type
[riscv-isa-sim.git] / riscv/mmu.h
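The memory access paths below now carry a single access_type value (LOAD, STORE, or FETCH) rather than a pair of booleans. The enum itself is not defined in this header; the following is only a hedged sketch of the shared definition it relies on, presumably provided by one of the included headers such as memtracer.h:

  enum access_type {
    LOAD,
    STORE,
    FETCH,
  };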
// See LICENSE for license details.

#ifndef _RISCV_MMU_H
#define _RISCV_MMU_H

#include "decode.h"
#include "trap.h"
#include "common.h"
#include "config.h"
#include "processor.h"
#include "memtracer.h"
#include <stdlib.h>
#include <vector>

// virtual memory configuration
#define PGSHIFT 12
const reg_t PGSIZE = 1 << PGSHIFT;

struct insn_fetch_t
{
  insn_func_t func;
  insn_t insn;
};

struct icache_entry_t {
  reg_t tag;
  reg_t pad;
  insn_fetch_t data;
};

// this class implements a processor's port into the virtual memory system.
// an MMU and instruction cache are maintained for simulator performance.
class mmu_t
{
public:
  mmu_t(char* _mem, size_t _memsz);
  ~mmu_t();

  // template for functions that load an aligned value from memory
  #define load_func(type) \
    type##_t load_##type(reg_t addr) __attribute__((always_inline)) { \
      void* paddr = translate(addr, sizeof(type##_t), LOAD); \
      return *(type##_t*)paddr; \
    }

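  // For illustration only (not part of the header): under the macro above,
  // load_func(uint32) expands to roughly the following member function; the
  // unsigned return type is what provides the zero extension noted below.
  //
  //   uint32_t load_uint32(reg_t addr) {
  //     void* paddr = translate(addr, sizeof(uint32_t), LOAD);
  //     return *(uint32_t*)paddr;
  //   }
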
  // load value from memory at aligned address; zero extend to register width
  load_func(uint8)
  load_func(uint16)
  load_func(uint32)
  load_func(uint64)

  // load value from memory at aligned address; sign extend to register width
  load_func(int8)
  load_func(int16)
  load_func(int32)
  load_func(int64)

  // template for functions that store an aligned value to memory
  #define store_func(type) \
    void store_##type(reg_t addr, type##_t val) { \
      void* paddr = translate(addr, sizeof(type##_t), STORE); \
      *(type##_t*)paddr = val; \
    }

  // store value to memory at aligned address
  store_func(uint8)
  store_func(uint16)
  store_func(uint32)
  store_func(uint64)

  static const reg_t ICACHE_ENTRIES = 1024;

  inline size_t icache_index(reg_t addr)
  {
    return (addr / PC_ALIGN) % ICACHE_ENTRIES;
  }

  inline icache_entry_t* refill_icache(reg_t addr, icache_entry_t* entry)
  {
    char* iaddr = (char*)translate(addr, 1, FETCH);
    insn_bits_t insn = *(uint16_t*)iaddr;
    int length = insn_length(insn);

    if (likely(length == 4)) {
      if (likely(addr % PGSIZE < PGSIZE-2))
        insn |= (insn_bits_t)*(int16_t*)(iaddr + 2) << 16;
      else
        insn |= (insn_bits_t)*(int16_t*)translate(addr + 2, 1, FETCH) << 16;
    } else if (length == 2) {
      insn = (int16_t)insn;
    } else if (length == 6) {
      insn |= (insn_bits_t)*(int16_t*)translate(addr + 4, 1, FETCH) << 32;
      insn |= (insn_bits_t)*(uint16_t*)translate(addr + 2, 1, FETCH) << 16;
    } else {
      static_assert(sizeof(insn_bits_t) == 8, "insn_bits_t must be uint64_t");
      insn |= (insn_bits_t)*(int16_t*)translate(addr + 6, 1, FETCH) << 48;
      insn |= (insn_bits_t)*(uint16_t*)translate(addr + 4, 1, FETCH) << 32;
      insn |= (insn_bits_t)*(uint16_t*)translate(addr + 2, 1, FETCH) << 16;
    }

    insn_fetch_t fetch = {proc->decode_insn(insn), insn};
    entry->tag = addr;
    entry->data = fetch;

    reg_t paddr = iaddr - mem;
    if (tracer.interested_in_range(paddr, paddr + 1, FETCH)) {
      entry->tag = -1;
      tracer.trace(paddr, length, FETCH);
    }
    return entry;
  }

  inline icache_entry_t* access_icache(reg_t addr)
  {
    icache_entry_t* entry = &icache[icache_index(addr)];
    if (likely(entry->tag == addr))
      return entry;
    return refill_icache(addr, entry);
  }

  inline insn_fetch_t load_insn(reg_t addr)
  {
    return access_icache(addr)->data;
  }

  void set_processor(processor_t* p) { proc = p; flush_tlb(); }

  void flush_tlb();
  void flush_icache();

  void register_memtracer(memtracer_t*);

private:
  char* mem;
  size_t memsz;
  processor_t* proc;
  memtracer_list_t tracer;

  // implement an instruction cache for simulator performance
  icache_entry_t icache[ICACHE_ENTRIES];

  // implement a TLB for simulator performance
  static const reg_t TLB_ENTRIES = 256;
  char* tlb_data[TLB_ENTRIES];
  reg_t tlb_insn_tag[TLB_ENTRIES];
  reg_t tlb_load_tag[TLB_ENTRIES];
  reg_t tlb_store_tag[TLB_ENTRIES];

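  // each access type gets its own tag array, so a page can hit in the TLB
  // for one kind of access (say, loads) while stores or fetches still take
  // the refill path; translate() below adds the full virtual address to
  // tlb_data[idx], so each data entry evidently holds a host-pointer offset
  // for the page rather than a plain base address
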
  // finish translation on a TLB miss and update the TLB
  void* refill_tlb(reg_t addr, reg_t bytes, access_type type);

  // perform a page table walk for a given VA; set referenced/dirty bits
  reg_t walk(reg_t addr, bool supervisor, access_type type);

  // translate a virtual address to a physical address
  void* translate(reg_t addr, reg_t bytes, access_type type)
    __attribute__((always_inline))
  {
    reg_t idx = (addr >> PGSHIFT) % TLB_ENTRIES;
    reg_t expected_tag = addr >> PGSHIFT;
    reg_t* tags = type == FETCH ? tlb_insn_tag :
                  type == STORE ? tlb_store_tag :
                  tlb_load_tag;
    reg_t tag = tags[idx];
    void* data = tlb_data[idx] + addr;

    if (unlikely(addr & (bytes-1)))
      type == FETCH ? throw trap_instruction_address_misaligned(addr) :
      type == STORE ? throw trap_store_address_misaligned(addr) :
      /* LOAD */ throw trap_load_address_misaligned(addr);

    if (likely(tag == expected_tag))
      return data;

    return refill_tlb(addr, bytes, type);
  }

  friend class processor_t;
};

#endif
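
For context, a rough sketch of how a processor-side caller could drive this interface, using only members declared above (the surrounding names such as sim_mem, sim_memsz, proc, pc, and addr are illustrative; the real call sites live in the processor and instruction implementations, not in this header):

  mmu_t mmu(sim_mem, sim_memsz);   // simulated physical memory buffer
  mmu.set_processor(&proc);        // also flushes the TLB

  insn_fetch_t fetch = mmu.load_insn(pc);  // icache/TLB-backed FETCH
  // fetch.func is the decoded handler; fetch.insn holds the raw bits

  uint64_t v = mmu.load_uint64(addr);      // aligned LOAD, zero-extended
  mmu.store_uint32(addr + 8, 0x1234);      // aligned STORE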