[riscv-isa-sim.git] / riscv / mmu.h (as of "Support setting ISA/subsets with --isa flag")
// See LICENSE for license details.

#ifndef _RISCV_MMU_H
#define _RISCV_MMU_H

#include "decode.h"
#include "trap.h"
#include "common.h"
#include "config.h"
#include "processor.h"
#include "memtracer.h"
#include <vector>

// virtual memory configuration
#define PGSHIFT 12
const reg_t PGSIZE = 1 << PGSHIFT;

struct insn_fetch_t
{
  insn_func_t func;
  insn_t insn;
};

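// one slot of the software instruction cache: `tag` holds the instruction's
// virtual address and `data` the predecoded result. (The `pad` field is not
// referenced in this header; it presumably rounds the entry up to a
// power-of-two size so indexing stays cheap.)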
struct icache_entry_t {
  reg_t tag;
  reg_t pad;
  insn_fetch_t data;
};

// this class implements a processor's port into the virtual memory system.
// an MMU and instruction cache are maintained for simulator performance.
class mmu_t
{
public:
  mmu_t(char* _mem, size_t _memsz);
  ~mmu_t();

  // template for functions that load an aligned value from memory
  #define load_func(type) \
    type##_t load_##type(reg_t addr) __attribute__((always_inline)) { \
      void* paddr = translate(addr, sizeof(type##_t), false, false); \
      return *(type##_t*)paddr; \
    }
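  // For reference, load_func(uint32) below expands to roughly:
  //
  //   uint32_t load_uint32(reg_t addr) __attribute__((always_inline)) {
  //     void* paddr = translate(addr, sizeof(uint32_t), false, false);
  //     return *(uint32_t*)paddr;
  //   }
  //
  // i.e. an aligned 4-byte load through the TLB fast path in translate().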

  // load value from memory at aligned address; zero extend to register width
  load_func(uint8)
  load_func(uint16)
  load_func(uint32)
  load_func(uint64)

  // load value from memory at aligned address; sign extend to register width
  load_func(int8)
  load_func(int16)
  load_func(int32)
  load_func(int64)

  // template for functions that store an aligned value to memory
  #define store_func(type) \
    void store_##type(reg_t addr, type##_t val) { \
      void* paddr = translate(addr, sizeof(type##_t), true, false); \
      *(type##_t*)paddr = val; \
    }
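  // store_func expands analogously to load_func above, but passes `store = true`
  // to translate(), so stores are checked against the store-permission TLB tags.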

  // store value to memory at aligned address
  store_func(uint8)
  store_func(uint16)
  store_func(uint32)
  store_func(uint64)

  static const reg_t ICACHE_ENTRIES = 1024;

  inline size_t icache_index(reg_t addr)
  {
    // for instruction sizes != 4, this hash still works but is suboptimal
    return (addr / 4) % ICACHE_ENTRIES;
  }
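  // icache_index above direct-maps a 4 KiB window of PCs (1024 entries at a
  // 4-byte stride): for example, addresses 0x8000 and 0x9000 hash to the same
  // slot and will evict each other.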

  // load instruction from memory at aligned address.
  icache_entry_t* access_icache(reg_t addr) __attribute__((always_inline))
  {
    reg_t idx = icache_index(addr);
    icache_entry_t* entry = &icache[idx];
    if (likely(entry->tag == addr))
      return entry;

    char* iaddr = (char*)translate(addr, 1, false, true);
    insn_bits_t insn = *(uint16_t*)iaddr;

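    // on a miss, refetch: RISC-V instructions are a multiple of 16 bits long,
    // so the remaining parcels are read 16 bits at a time. Parcels that might
    // lie on a different page are translated separately rather than read
    // relative to iaddr.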
    if (likely(insn_length(insn) == 4)) {
      if (likely(addr % PGSIZE < PGSIZE-2))
        insn |= (insn_bits_t)*(int16_t*)(iaddr + 2) << 16;
      else
        insn |= (insn_bits_t)*(int16_t*)translate(addr + 2, 1, false, true) << 16;
    } else if (insn_length(insn) == 2) {
      insn = (int16_t)insn;
    } else if (insn_length(insn) == 6) {
      insn |= (insn_bits_t)*(int16_t*)translate(addr + 4, 1, false, true) << 32;
      insn |= (insn_bits_t)*(uint16_t*)translate(addr + 2, 1, false, true) << 16;
    } else {
      static_assert(sizeof(insn_bits_t) == 8, "insn_bits_t must be uint64_t");
      insn |= (insn_bits_t)*(int16_t*)translate(addr + 6, 1, false, true) << 48;
      insn |= (insn_bits_t)*(uint16_t*)translate(addr + 4, 1, false, true) << 32;
      insn |= (insn_bits_t)*(uint16_t*)translate(addr + 2, 1, false, true) << 16;
    }

    insn_fetch_t fetch = {proc->decode_insn(insn), insn};
    icache[idx].tag = addr;
    icache[idx].data = fetch;

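    // compute the physical address of the fetch; if a memory tracer has
    // claimed it, leave the entry's tag invalid so every later fetch of this
    // PC misses here and is reported to the tracer again.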
    reg_t paddr = iaddr - mem;
    if (!tracer.empty() && tracer.interested_in_range(paddr, paddr + 1, false, true))
    {
      icache[idx].tag = -1;
      tracer.trace(paddr, 1, false, true);
    }
    return &icache[idx];
  }

  inline insn_fetch_t load_insn(reg_t addr)
  {
    return access_icache(addr)->data;
  }

  void set_processor(processor_t* p) { proc = p; flush_tlb(); }

  void flush_tlb();
  void flush_icache();

  void register_memtracer(memtracer_t*);

private:
  char* mem;
  size_t memsz;
  processor_t* proc;
  memtracer_list_t tracer;

  // implement an instruction cache for simulator performance
  icache_entry_t icache[ICACHE_ENTRIES];

  // implement a TLB for simulator performance
  static const reg_t TLB_ENTRIES = 256;
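  // direct-mapped, with a separate tag array per access type: entry
  // (vaddr >> PGSHIFT) % TLB_ENTRIES hits when its tag equals the virtual page
  // number. tlb_data[i] stores the host pointer to the backing page minus the
  // page's virtual base, so tlb_data[i] + vaddr yields the host address
  // directly (see translate() below).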
  char* tlb_data[TLB_ENTRIES];
  reg_t tlb_insn_tag[TLB_ENTRIES];
  reg_t tlb_load_tag[TLB_ENTRIES];
  reg_t tlb_store_tag[TLB_ENTRIES];

  // finish translation on a TLB miss and update the TLB
  void* refill_tlb(reg_t addr, reg_t bytes, bool store, bool fetch);

  // perform a page table walk for a given VA; set referenced/dirty bits
  reg_t walk(reg_t addr, bool supervisor, bool store, bool fetch);

  // translate a virtual address to a physical address
  void* translate(reg_t addr, reg_t bytes, bool store, bool fetch)
    __attribute__((always_inline))
  {
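    // fast path: both the index and the expected tag are the virtual page
    // number, and keeping a tag array per access type (fetch/load/store)
    // effectively serves as a permission check.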
    reg_t idx = (addr >> PGSHIFT) % TLB_ENTRIES;
    reg_t expected_tag = addr >> PGSHIFT;
    reg_t* tags = fetch ? tlb_insn_tag : store ? tlb_store_tag : tlb_load_tag;
    reg_t tag = tags[idx];
    void* data = tlb_data[idx] + addr;

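    // misaligned accesses are not emulated; they raise the misaligned-address
    // trap for the access type before the TLB is even consulted.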
    if (unlikely(addr & (bytes-1)))
      store ? throw trap_store_address_misaligned(addr) :
      fetch ? throw trap_instruction_address_misaligned(addr) :
      throw trap_load_address_misaligned(addr);

    if (likely(tag == expected_tag))
      return data;

    return refill_tlb(addr, bytes, store, fetch);
  }

  friend class processor_t;
};

#endif
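
// Usage sketch (not part of the original header). The fetch/execute loop below
// assumes an insn_func_t that takes (processor_t*, insn_t, reg_t pc) and
// returns the next pc; that signature, and the names `proc`, `pc`, and
// `running`, are illustrative assumptions, not defined in this file:
//
//   mmu_t mmu(mem, memsz);                      // `mem`/`memsz` back guest physical memory
//   mmu.set_processor(&proc);                   // attaches the decoder; flushes the TLB
//   while (running) {
//     insn_fetch_t fetch = mmu.load_insn(pc);   // icache hit, or translate+decode on miss
//     pc = fetch.func(&proc, fetch.insn, pc);   // assumed dispatch convention
//   }
//   mmu.flush_tlb();                            // e.g. after the guest rewrites its page tables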