Update to new privileged spec
riscv-isa-sim: riscv/mmu.cc
// See LICENSE for license details.

#include "mmu.h"
#include "sim.h"
#include "processor.h"
#include <cstring>

mmu_t::mmu_t(char* _mem, size_t _memsz)
  : mem(_mem), memsz(_memsz), proc(NULL)
{
  flush_tlb();
}

mmu_t::~mmu_t()
{
}

void mmu_t::flush_icache()
{
  for (size_t i = 0; i < ICACHE_ENTRIES; i++)
    icache[i].tag = -1;
}

void mmu_t::flush_tlb()
{
  memset(tlb_insn_tag, -1, sizeof(tlb_insn_tag));
  memset(tlb_load_tag, -1, sizeof(tlb_load_tag));
  memset(tlb_store_tag, -1, sizeof(tlb_store_tag));

  flush_icache();
}

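// translate addr for the given access type, refilling the TLB on success;
// returns a pointer to the backing host memory, or throws an access fault.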
void* mmu_t::refill_tlb(reg_t addr, reg_t bytes, bool store, bool fetch)
{
  reg_t idx = (addr >> PGSHIFT) % TLB_ENTRIES;
  reg_t expected_tag = addr >> PGSHIFT;

  reg_t pte = 0;
  reg_t mstatus = proc ? proc->state.mstatus : 0;

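  // decode the mstatus fields that affect translation: VM mode, current
  // privilege level, and MPRV, the effective privilege for data accesses
  // while in M-mode.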
  bool vm_disabled = get_field(mstatus, MSTATUS_VM) == VM_MBARE;
  bool mode_m = get_field(mstatus, MSTATUS_PRV) == PRV_M;
  bool mode_s = get_field(mstatus, MSTATUS_PRV) == PRV_S;
  bool mprv_m = get_field(mstatus, MSTATUS_MPRV) == PRV_M;
  bool mprv_s = get_field(mstatus, MSTATUS_MPRV) == PRV_S;

  if (vm_disabled || (mode_m && (mprv_m || fetch))) {
    // virtual memory is disabled. merely check legality of physical address.
    if (addr < memsz) {
      // produce a fake PTE for the TLB's benefit.
      pte = PTE_V | PTE_UX | PTE_SX | ((addr >> PGSHIFT) << PGSHIFT);
      if (vm_disabled || !(mode_m && !mprv_m))
        pte |= PTE_UR | PTE_SR | PTE_UW | PTE_SW;
    }
  } else {
    pte = walk(addr);
  }

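  // for supervisor-privileged accesses, shift the PTE_S* permission bits down
  // into the PTE_U* positions so one check covers both privilege levels.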
  reg_t pte_perm = pte & PTE_PERM;
  if (mode_s || (mode_m && mprv_s && !fetch))
    pte_perm = (pte_perm/(PTE_SX/PTE_UX)) & PTE_PERM;
  pte_perm |= pte & PTE_V;

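  // require the PTE to be valid and to grant the permission this access needs.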
  reg_t perm = (fetch ? PTE_UX : store ? PTE_UW : PTE_UR) | PTE_V;
  if (unlikely((pte_perm & perm) != perm))
  {
    if (fetch)
      throw trap_instruction_access_fault(addr);
    if (store)
      throw trap_store_access_fault(addr);
    throw trap_load_access_fault(addr);
  }

  reg_t pgoff = addr & (PGSIZE-1);
  reg_t pgbase = pte >> PGSHIFT << PGSHIFT;
  reg_t paddr = pgbase + pgoff;

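  // if a tracer claims this page, don't cache the translation, so every access
  // re-enters refill_tlb and can be traced; otherwise fill the TLB entry,
  // using a tag of -1 for any access type the PTE doesn't permit.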
  if (unlikely(tracer.interested_in_range(pgbase, pgbase + PGSIZE, store, fetch)))
    tracer.trace(paddr, bytes, store, fetch);
  else
  {
    tlb_load_tag[idx] = (pte_perm & PTE_UR) ? expected_tag : -1;
    tlb_store_tag[idx] = (pte_perm & PTE_UW) ? expected_tag : -1;
    tlb_insn_tag[idx] = (pte_perm & PTE_UX) ? expected_tag : -1;
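    // bias the host page pointer by the virtual page base so that
    // tlb_data[idx] + addr yields the host address of the access.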
    tlb_data[idx] = mem + pgbase - (addr & ~(PGSIZE-1));
  }

  return mem + paddr;
}

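// walk the page table rooted at sptbr; returns the leaf PTE (with superpage
// offset bits folded in), or 0 if translation fails.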
pte_t mmu_t::walk(reg_t addr)
{
  reg_t msb_mask = -(reg_t(1) << (VA_BITS-1));
  if ((addr & msb_mask) != 0 && (addr & msb_mask) != msb_mask)
    return 0; // address isn't properly sign-extended

  reg_t base = proc->get_state()->sptbr;
  reg_t ptd;

  int ptshift = (LEVELS-1)*PTIDXBITS;
  for (reg_t i = 0; i < LEVELS; i++, ptshift -= PTIDXBITS) {
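    // extract this level's page-table index from the virtual address.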
    reg_t idx = (addr >> (PGSHIFT+ptshift)) & ((1<<PTIDXBITS)-1);

    // check that physical address of PTE is legal
    reg_t pte_addr = base + idx*sizeof(pte_t);
    if (pte_addr >= memsz)
      return 0;

    ptd = *(pte_t*)(mem+pte_addr);

    if (!(ptd & PTE_V)) { // invalid mapping
      return 0;
    } else if (ptd & PTE_T) { // next level of page table
      base = (ptd >> PGSHIFT) << PGSHIFT;
    } else {
      // we've found the PTE.
      // for superpage mappings, make a fake leaf PTE for the TLB's benefit.
      reg_t vpn = addr >> PGSHIFT;
      ptd |= (vpn & ((1<<(ptshift))-1)) << PGSHIFT;

      // check that physical address is legal
      if (((ptd >> PGSHIFT) << PGSHIFT) >= memsz)
        return 0;

      return ptd;
    }
  }
  return 0;
}

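// install a memory tracer and flush the TLB so subsequent accesses are
// re-translated and become visible to it.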
void mmu_t::register_memtracer(memtracer_t* t)
{
  flush_tlb();
  tracer.hook(t);
}