9e1218cc0debbb46eafb2dc296f7947682527d46
1 // See LICENSE for license details.
#include "processor.h"
#include "memtracer.h"
#include <cstring>
// virtual memory configuration

// number of page-table levels: 3 when PTEs are 64-bit, otherwise 2
const reg_t LEVELS = sizeof(pte_t) == sizeof(uint64_t) ? 3 : 2;
// log2 of the page size (13 -> 8 KiB pages)
const reg_t PGSHIFT = 13;
const reg_t PGSIZE = 1 << PGSHIFT;
// bits of virtual page number resolved by one page-table level
// (a page holds PGSIZE/sizeof(pte_t) entries)
const reg_t PTIDXBITS = PGSHIFT - (sizeof(pte_t) == 8 ? 3 : 2);
// total virtual page number bits across all levels
const reg_t VPN_BITS = PTIDXBITS * LEVELS;
// physical page number bits in a physical address
const reg_t PPN_BITS = 8*sizeof(reg_t) - PGSHIFT;
// total virtual address bits
const reg_t VA_BITS = VPN_BITS + PGSHIFT;
// page table entry (PTE) fields
// NOTE: the Read/Write comments on the UR/UW and SR/SW bits were previously
// swapped; the names (R = read, W = write) are authoritative.
#define PTE_T    0x001 // Entry is a page Table descriptor
#define PTE_E    0x002 // Entry is a page table Entry
#define PTE_R    0x004 // Referenced
#define PTE_D    0x008 // Dirty
#define PTE_UX   0x010 // User eXecute permission
#define PTE_UW   0x020 // User Write permission
#define PTE_UR   0x040 // User Read permission
#define PTE_SX   0x080 // Supervisor eXecute permission
#define PTE_SW   0x100 // Supervisor Write permission
#define PTE_SR   0x200 // Supervisor Read permission
#define PTE_PERM (PTE_SR | PTE_SW | PTE_SX | PTE_UR | PTE_UW | PTE_UX)
#define PTE_PPN_SHIFT 13 // LSB of physical page number in the PTE
// this class implements a processor's port into the virtual memory system.
// an MMU and instruction cache are maintained for simulator performance.

  // construct an MMU backed by the host buffer _mem, which holds the
  // simulated machine's physical memory of _memsz bytes (defined out of line)
  mmu_t(char* _mem, size_t _memsz);
// template for functions that load an aligned value from memory.
// load_<type>() translates the virtual address and reads the value;
// load_reserved_<type>() additionally records the address for a later
// store-conditional (LR semantics).
// The value is read with memcpy rather than *(type##_t*)(mem + paddr):
// dereferencing a char* buffer through an unrelated pointer type is
// undefined behavior under the strict-aliasing rules.
#define load_func(type) \
  type##_t load_##type(reg_t addr) { \
    if(unlikely(addr % sizeof(type##_t))) \
      throw trap_load_address_misaligned; \
    reg_t paddr = translate(addr, sizeof(type##_t), false, false); \
    type##_t res; \
    memcpy(&res, mem + paddr, sizeof(type##_t)); \
    return res; \
  } \
  type##_t load_reserved_##type(reg_t addr) { \
    load_reservation = addr; \
    return load_##type(addr); \
  }
62 // load value from memory at aligned address; zero extend to register width
68 // load value from memory at aligned address; sign extend to register width
// template for functions that store an aligned value to memory.
// store_<type>() translates the virtual address and writes the value;
// store_conditional_<type>() only stores if the address matches the
// outstanding LR reservation, returning 0 on success and 1 on failure
// (RISC-V SC convention).
// The value is written with memcpy rather than *(type##_t*)(mem + paddr) = val:
// writing through an unrelated pointer type into the char* buffer is
// undefined behavior under the strict-aliasing rules.
#define store_func(type) \
  void store_##type(reg_t addr, type##_t val) { \
    if(unlikely(addr % sizeof(type##_t))) \
      throw trap_store_address_misaligned; \
    reg_t paddr = translate(addr, sizeof(type##_t), true, false); \
    memcpy(mem + paddr, &val, sizeof(type##_t)); \
  } \
  reg_t store_conditional_##type(reg_t addr, type##_t val) { \
    if (addr == load_reservation) { \
      store_##type(addr, val); \
      return 0; \
    } \
    else return 1; \
  }
92 // store value to memory at aligned address
104 // load instruction from memory at aligned address.
105 // (needed because instruction alignment requirement is variable
106 // if RVC is supported)
107 // returns the instruction at the specified address, given the current
108 // RVC mode. func is set to a pointer to a function that knows how to
109 // execute the returned instruction.
110 inline insn_fetch_t
load_insn(reg_t addr
, bool rvc
)
112 #ifdef RISCV_ENABLE_RVC
113 if(addr
% 4 == 2 && rvc
) // fetch across word boundary
115 reg_t addr_lo
= translate(addr
, 2, false, true);
117 fetch
.insn
.bits
= *(uint16_t*)(mem
+ addr_lo
);
118 fetch
.func
= proc
->decode_insn(fetch
.insn
);
120 if(!INSN_IS_RVC(fetch
.insn
.bits
))
122 reg_t addr_hi
= translate(addr
+2, 2, false, true);
123 fetch
.insn
.bits
|= (uint32_t)*(uint16_t*)(mem
+ addr_hi
) << 16;
130 reg_t idx
= (addr
/sizeof(insn_t::itype
)) % ICACHE_ENTRIES
;
132 if (unlikely(icache_tag
[idx
] != addr
))
134 reg_t paddr
= translate(addr
, sizeof(insn_t::itype
), false, true);
135 fetch
.insn
.itype
= *(decltype(insn_t::itype
)*)(mem
+ paddr
);
136 fetch
.func
= proc
->decode_insn(fetch
.insn
);
138 reg_t idx
= (paddr
/sizeof(insn_t::itype
)) % ICACHE_ENTRIES
;
139 icache_tag
[idx
] = addr
;
140 icache_data
[idx
] = fetch
.insn
;
141 icache_func
[idx
] = fetch
.func
;
143 if (tracer
.interested_in_range(paddr
, paddr
+ sizeof(insn_t::itype
), false, true))
145 icache_tag
[idx
] = -1;
146 tracer
.trace(paddr
, sizeof(insn_t::itype
), false, true);
149 fetch
.insn
= icache_data
[idx
];
150 fetch
.func
= icache_func
[idx
];
155 reg_t
get_badvaddr() { return badvaddr
; }
156 reg_t
get_ptbr() { return ptbr
; }
157 void set_ptbr(reg_t addr
) { ptbr
= addr
& ~(PGSIZE
-1); flush_tlb(); }
158 void set_processor(processor_t
* p
) { proc
= p
; flush_tlb(); }
162 void yield_load_reservation() { load_reservation
= -1; }
164 void register_memtracer(memtracer_t
*);
  reg_t load_reservation;   // address of the outstanding LR, or -1 when none

  memtracer_list_t tracer;  // registered memory tracers

  // implement an instruction cache for simulator performance
  static const reg_t ICACHE_ENTRIES = 256;
  insn_t icache_data[ICACHE_ENTRIES];      // cached instruction words
  insn_func_t icache_func[ICACHE_ENTRIES]; // cached execute-function pointers

  // implement a TLB for simulator performance
  static const reg_t TLB_ENTRIES = 256;
  reg_t tlb_data[TLB_ENTRIES];      // physical page base for each cached entry
  reg_t tlb_insn_tag[TLB_ENTRIES];  // virtual-page tags for fetch accesses
  reg_t tlb_load_tag[TLB_ENTRIES];  // virtual-page tags for load accesses
  reg_t tlb_store_tag[TLB_ENTRIES]; // virtual-page tags for store accesses
  reg_t icache_tag[ICACHE_ENTRIES]; // vaddr tag per icache entry (-1 = invalid)
  // finish translation on a TLB miss and update the TLB
  reg_t refill_tlb(reg_t addr, reg_t bytes, bool store, bool fetch);

  // perform a page table walk for a given virtual address
  pte_t walk(reg_t addr);
194 // translate a virtual address to a physical address
195 reg_t
translate(reg_t addr
, reg_t bytes
, bool store
, bool fetch
)
197 reg_t idx
= (addr
>> PGSHIFT
) % TLB_ENTRIES
;
199 reg_t
* tlb_tag
= fetch
? tlb_insn_tag
: store
? tlb_store_tag
:tlb_load_tag
;
200 reg_t expected_tag
= addr
& ~(PGSIZE
-1);
201 if(likely(tlb_tag
[idx
] == expected_tag
))
202 return ((uintptr_t)addr
& (PGSIZE
-1)) + tlb_data
[idx
];
204 return refill_tlb(addr
, bytes
, store
, fetch
);
  // processor_t manipulates MMU internals directly
  friend class processor_t;