// virtual memory configuration
typedef reg_t pte_t;
const reg_t LEVELS = sizeof(pte_t) == 8 ? 3 : 2;
// fix the page size at 4 KiB (PGSHIFT = 12) and derive the per-level
// index width from it: a page holds PGSIZE/sizeof(pte_t) PTEs, i.e.
// 2^(12-3) entries on RV64 and 2^(12-2) entries on RV32.
const reg_t PGSHIFT = 12;
const reg_t PTIDXBITS = PGSHIFT - (sizeof(pte_t) == 8 ? 3 : 2);
const reg_t PGSIZE = 1 << PGSHIFT;
const reg_t VPN_BITS = PTIDXBITS * LEVELS;
const reg_t VA_BITS = VPN_BITS + PGSHIFT;
+struct insn_fetch_t
+{
+ insn_func_t func;
+ insn_t insn;
+};
+
+struct icache_entry_t {
+ reg_t tag;
+ reg_t pad;
+ insn_fetch_t data;
+};
+
// this class implements a processor's port into the virtual memory system.
// an MMU and instruction cache are maintained for simulator performance.
class mmu_t
// template for functions that load an aligned value from memory.
// alignment is checked inside translate(), which raises the
// appropriate misaligned-address trap before the access.
#define load_func(type) \
  type##_t load_##type(reg_t addr) __attribute__((always_inline)) { \
    void* paddr = translate(addr, sizeof(type##_t), false, false); \
    return *(type##_t*)paddr; \
  }
// template for functions that store an aligned value to memory.
// alignment is checked inside translate(), which raises the
// appropriate misaligned-address trap before the access.
#define store_func(type) \
  void store_##type(reg_t addr, type##_t val) { \
    void* paddr = translate(addr, sizeof(type##_t), true, false); \
    *(type##_t*)paddr = val; \
  }

store_func(uint32)
store_func(uint64)
- struct insn_fetch_t
+ static const reg_t ICACHE_ENTRIES = 1024;
+
+ inline size_t icache_index(reg_t addr)
{
- insn_func_t func;
- union {
- insn_t insn;
- uint_fast32_t pad;
- } insn;
- };
+ // for instruction sizes != 4, this hash still works but is suboptimal
+ return (addr / 4) % ICACHE_ENTRIES;
+ }
// load instruction from memory at aligned address.
- inline insn_fetch_t load_insn(reg_t addr)
+ icache_entry_t* access_icache(reg_t addr) __attribute__((always_inline))
{
- reg_t offset = addr & (sizeof(insn_t) * (ICACHE_ENTRIES-1));
- offset *= sizeof(icache_entry_t) / sizeof(insn_t);
- icache_entry_t* entry = (icache_entry_t*)((char*)icache + offset);
- insn_fetch_t data = entry->data;
+ reg_t idx = icache_index(addr);
+ icache_entry_t* entry = &icache[idx];
if (likely(entry->tag == addr))
- return data;
-
- void* iaddr = translate(addr, sizeof(insn_t), false, true);
- insn_fetch_t fetch;
- fetch.insn.pad = *(decltype(fetch.insn.insn.bits())*)iaddr;
- fetch.func = proc->decode_insn(fetch.insn.insn);
+ return entry;
+
+ char* iaddr = (char*)translate(addr, 1, false, true);
+ insn_bits_t insn = *(uint16_t*)iaddr;
+
+ if (likely(insn_length(insn) == 4)) {
+ if (likely(addr % PGSIZE < PGSIZE-2))
+ insn |= (insn_bits_t)*(int16_t*)(iaddr + 2) << 16;
+ else
+ insn |= (insn_bits_t)*(int16_t*)translate(addr + 2, 1, false, true) << 16;
+ } else if (insn_length(insn) == 2) {
+ insn = (int16_t)insn;
+ } else if (insn_length(insn) == 6) {
+ insn |= (insn_bits_t)*(int16_t*)translate(addr + 4, 1, false, true) << 32;
+ insn |= (insn_bits_t)*(uint16_t*)translate(addr + 2, 1, false, true) << 16;
+ } else {
+ static_assert(sizeof(insn_bits_t) == 8, "insn_bits_t must be uint64_t");
+ insn |= (insn_bits_t)*(int16_t*)translate(addr + 6, 1, false, true) << 48;
+ insn |= (insn_bits_t)*(uint16_t*)translate(addr + 4, 1, false, true) << 32;
+ insn |= (insn_bits_t)*(uint16_t*)translate(addr + 2, 1, false, true) << 16;
+ }
- entry->tag = addr;
- entry->data = fetch;
+ insn_fetch_t fetch = {proc->decode_insn(insn), insn};
+ icache[idx].tag = addr;
+ icache[idx].data = fetch;
- reg_t paddr = (char*)iaddr - mem;
- if (!tracer.empty() && tracer.interested_in_range(paddr, paddr + sizeof(insn_t), false, true))
+ reg_t paddr = iaddr - mem;
+ if (!tracer.empty() && tracer.interested_in_range(paddr, paddr + 1, false, true))
{
- entry->tag = -1;
- tracer.trace(paddr, sizeof(insn_t), false, true);
+ icache[idx].tag = -1;
+ tracer.trace(paddr, 1, false, true);
}
- return entry->data;
+ return &icache[idx];
+ }
+
+ inline insn_fetch_t load_insn(reg_t addr)
+ {
+ return access_icache(addr)->data;
}
// attach this MMU to a processor; flush the TLB since any cached
// translations belonged to the previous processor's context
void set_processor(processor_t* p) { proc = p; flush_tlb(); }
memtracer_list_t tracer;

// implement an instruction cache for simulator performance
icache_entry_t icache[ICACHE_ENTRIES];
// implement a TLB for simulator performance
// finish translation on a TLB miss and upate the TLB
void* refill_tlb(reg_t addr, reg_t bytes, bool store, bool fetch);
- // perform a page table walk for a given virtual address
- pte_t walk(reg_t addr);
+ // perform a page table walk for a given VA; set referenced/dirty bits
+ pte_t walk(reg_t addr, bool supervisor, bool store, bool fetch);
// translate a virtual address to a physical address.
// the misalignment check happens before the TLB lookup, so alignment
// is enforced on hits as well as misses; the trap type depends on the
// access kind (store / fetch / load).
void* translate(reg_t addr, reg_t bytes, bool store, bool fetch)
  __attribute__((always_inline))
{
  reg_t idx = (addr >> PGSHIFT) % TLB_ENTRIES;
  reg_t expected_tag = addr >> PGSHIFT; // TLB tags hold the full VPN
  reg_t* tags = fetch ? tlb_insn_tag : store ? tlb_store_tag : tlb_load_tag;
  reg_t tag = tags[idx];
  void* data = tlb_data[idx] + addr;

  if (unlikely(addr & (bytes-1)))
    store ? throw trap_store_address_misaligned(addr) :
    fetch ? throw trap_instruction_address_misaligned(addr) :
    throw trap_load_address_misaligned(addr);

  if (likely(tag == expected_tag))
    return data;

  return refill_tlb(addr, bytes, store, fetch);