Don't use I$ in debug mode
[riscv-isa-sim.git] / riscv / mmu.h
// See LICENSE for license details.

#ifndef _RISCV_MMU_H
#define _RISCV_MMU_H

#include "decode.h"
#include "trap.h"
#include "common.h"
#include "config.h"
#include "sim.h"
#include "processor.h"
#include "memtracer.h"
#include <stdlib.h>
#include <vector>

// virtual memory configuration
#define PGSHIFT 12
const reg_t PGSIZE = 1 << PGSHIFT;
const reg_t PGMASK = ~(PGSIZE-1);
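// With PGSHIFT = 12, these give 4 KiB pages: PGSIZE = 0x1000 and
// PGMASK = ~0xfff, so (addr & PGMASK) is the page base and (addr & ~PGMASK)
// the offset within the page.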

struct insn_fetch_t
{
  insn_func_t func;
  insn_t insn;
};

struct icache_entry_t {
  reg_t tag;
  reg_t pad; // pads the entry to a power-of-two size, presumably to keep indexing cheap
  insn_fetch_t data;
};

// this class implements a processor's port into the virtual memory system.
// an MMU and instruction cache are maintained for simulator performance.
class mmu_t
{
public:
  mmu_t(sim_t* sim, processor_t* proc);
  ~mmu_t();

  // template for functions that load an aligned value from memory
  #define load_func(type) \
    inline type##_t load_##type(reg_t addr) { \
      if (addr & (sizeof(type##_t)-1)) \
        throw trap_load_address_misaligned(addr); \
      reg_t vpn = addr >> PGSHIFT; \
      if (likely(tlb_load_tag[vpn % TLB_ENTRIES] == vpn)) \
        return *(type##_t*)(tlb_data[vpn % TLB_ENTRIES] + addr); \
      type##_t res; \
      load_slow_path(addr, sizeof(type##_t), (uint8_t*)&res); \
      return res; \
    }
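
  // As an illustration, load_func(uint32) expands to roughly the following.
  // The fast path hits the direct-mapped TLB and reads simulated memory
  // through a host pointer; misses, MMIO, and page faults take load_slow_path:
  //
  //   inline uint32_t load_uint32(reg_t addr) {
  //     if (addr & 3)
  //       throw trap_load_address_misaligned(addr);
  //     reg_t vpn = addr >> PGSHIFT;
  //     if (likely(tlb_load_tag[vpn % TLB_ENTRIES] == vpn))
  //       return *(uint32_t*)(tlb_data[vpn % TLB_ENTRIES] + addr);
  //     uint32_t res;
  //     load_slow_path(addr, sizeof(uint32_t), (uint8_t*)&res);
  //     return res;
  //   }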

  // load value from memory at aligned address; zero extend to register width
  load_func(uint8)
  load_func(uint16)
  load_func(uint32)
  load_func(uint64)

  // load value from memory at aligned address; sign extend to register width
  load_func(int8)
  load_func(int16)
  load_func(int32)
  load_func(int64)

  // template for functions that store an aligned value to memory
  #define store_func(type) \
    void store_##type(reg_t addr, type##_t val) { \
      if (addr & (sizeof(type##_t)-1)) \
        throw trap_store_address_misaligned(addr); \
      reg_t vpn = addr >> PGSHIFT; \
      if (likely(tlb_store_tag[vpn % TLB_ENTRIES] == vpn)) \
        *(type##_t*)(tlb_data[vpn % TLB_ENTRIES] + addr) = val; \
      else \
        store_slow_path(addr, sizeof(type##_t), (const uint8_t*)&val); \
    }
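
  // Example usage (hypothetical address and values, assuming the address maps
  // to ordinary RAM): stores through the fast path write simulated memory
  // directly, so a subsequent load observes them immediately.
  //
  //   mmu->store_uint32(0x80001000, 0xdeadbeef);
  //   assert(mmu->load_uint32(0x80001000) == 0xdeadbeef);
  //   assert(mmu->load_uint16(0x80001000) == 0xbeef); // RISC-V is little-endian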

  // store value to memory at aligned address
  store_func(uint8)
  store_func(uint16)
  store_func(uint32)
  store_func(uint64)

  static const reg_t ICACHE_ENTRIES = 1024;

  inline size_t icache_index(reg_t addr)
  {
    return (addr / PC_ALIGN) % ICACHE_ENTRIES;
  }
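
  // The icache is direct-mapped on the PC. Assuming PC_ALIGN == 2 (the
  // minimum instruction alignment with the compressed extension), adjacent
  // 16-bit parcels map to adjacent entries, and addresses
  // PC_ALIGN * ICACHE_ENTRIES == 0x800 bytes apart alias to the same entry;
  // for example, icache_index(0x8000) == icache_index(0x8800).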

  inline icache_entry_t* refill_icache(reg_t addr, icache_entry_t* entry)
  {
    const uint16_t* iaddr = translate_insn_addr(addr);
    insn_bits_t insn = *iaddr;
    int length = insn_length(insn);

    // assemble the instruction from 16-bit parcels, translating each parcel's
    // address separately in case the instruction straddles a page boundary
    if (likely(length == 4)) {
      insn |= (insn_bits_t)*(const int16_t*)translate_insn_addr(addr + 2) << 16;
    } else if (length == 2) {
      insn = (int16_t)insn;
    } else if (length == 6) {
      insn |= (insn_bits_t)*(const int16_t*)translate_insn_addr(addr + 4) << 32;
      insn |= (insn_bits_t)*(const uint16_t*)translate_insn_addr(addr + 2) << 16;
    } else {
      static_assert(sizeof(insn_bits_t) == 8, "insn_bits_t must be uint64_t");
      insn |= (insn_bits_t)*(const int16_t*)translate_insn_addr(addr + 6) << 48;
      insn |= (insn_bits_t)*(const uint16_t*)translate_insn_addr(addr + 4) << 32;
      insn |= (insn_bits_t)*(const uint16_t*)translate_insn_addr(addr + 2) << 16;
    }

    insn_fetch_t fetch = {proc->decode_insn(insn), insn};
    entry->tag = addr;
    entry->data = fetch;

    reg_t paddr = sim->mem_to_addr((char*)iaddr);
    if (tracer.interested_in_range(paddr, paddr + 1, FETCH)) {
      // leave the entry invalid so fetches from traced regions always take
      // the refill path and remain visible to the tracer
      entry->tag = -1;
      tracer.trace(paddr, length, FETCH);
    }
    return entry;
  }

  inline icache_entry_t* access_icache(reg_t addr)
  {
    icache_entry_t* entry = &icache[icache_index(addr)];
    if (likely(entry->tag == addr))
      return entry;
    return refill_icache(addr, entry);
  }

  // fetch an instruction without touching the icache: refilling a stack-local
  // entry means debug-mode fetches neither trust nor pollute cached entries
  inline insn_fetch_t load_insn(reg_t addr)
  {
    icache_entry_t entry;
    return refill_icache(addr, &entry)->data;
  }
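
  // Sketch of the two fetch paths (assumed caller behavior, not part of this
  // header): the execution loop fetches through the icache,
  //
  //   insn_fetch_t fetch = mmu->access_icache(pc)->data;
  //   pc = fetch.func(proc, fetch.insn, pc);
  //
  // while a debugger reads instructions with mmu->load_insn(pc), leaving the
  // icache untouched.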

  void flush_tlb();
  void flush_icache();

  void register_memtracer(memtracer_t*);

private:
  sim_t* sim;
  processor_t* proc;
  memtracer_list_t tracer;
  uint16_t fetch_temp;

  // implement an instruction cache for simulator performance
  icache_entry_t icache[ICACHE_ENTRIES];

  // implement a direct-mapped TLB for simulator performance. tags hold
  // virtual page numbers; tlb_data holds host pointers biased by the virtual
  // page base, so tlb_data[vpn % TLB_ENTRIES] + vaddr is the host address.
  // separate insn/load/store tags let a page hit for one access type but
  // miss for another.
  static const reg_t TLB_ENTRIES = 256;
  char* tlb_data[TLB_ENTRIES];
  reg_t tlb_insn_tag[TLB_ENTRIES];
  reg_t tlb_load_tag[TLB_ENTRIES];
  reg_t tlb_store_tag[TLB_ENTRIES];

  // finish translation on a TLB miss and update the TLB
  void refill_tlb(reg_t vaddr, reg_t paddr, access_type type);
  const char* fill_from_mmio(reg_t vaddr, reg_t paddr);

  // perform a page table walk for a given VA; set referenced/dirty bits.
  // 'pum' reflects mstatus.PUM (Protect User Memory): when set, supervisor
  // accesses to user-accessible pages fault.
  reg_t walk(reg_t addr, access_type type, bool supervisor, bool pum);

  // handle uncommon cases: TLB misses, page faults, MMIO
  const uint16_t* fetch_slow_path(reg_t addr);
  void load_slow_path(reg_t addr, reg_t len, uint8_t* bytes);
  void store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes);
  reg_t translate(reg_t addr, access_type type);

  // ITLB lookup
  inline const uint16_t* translate_insn_addr(reg_t addr) {
    reg_t vpn = addr >> PGSHIFT;
    if (likely(tlb_insn_tag[vpn % TLB_ENTRIES] == vpn))
      return (uint16_t*)(tlb_data[vpn % TLB_ENTRIES] + addr);
    return fetch_slow_path(addr);
  }

  friend class processor_t;
};

#endif