Don't set dirty/referenced bits w/o permission
[riscv-isa-sim.git] / riscv / mmu.cc
// See LICENSE for license details.

#include "mmu.h"
#include "sim.h"
#include "processor.h"

mmu_t::mmu_t(char* _mem, size_t _memsz)
  : mem(_mem), memsz(_memsz), proc(NULL)
{
  flush_tlb();
}

mmu_t::~mmu_t()
{
}

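// invalidate every icache entry by giving it the unmatchable tag -1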
void mmu_t::flush_icache()
{
  for (size_t i = 0; i < ICACHE_ENTRIES; i++)
    icache[i].tag = -1;
}

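// invalidate all TLB ways (instruction, load, store); the icache holds
// fetches made through the old translations, so it must be flushed as well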
void mmu_t::flush_tlb()
{
  memset(tlb_insn_tag, -1, sizeof(tlb_insn_tag));
  memset(tlb_load_tag, -1, sizeof(tlb_load_tag));
  memset(tlb_store_tag, -1, sizeof(tlb_store_tag));

  flush_icache();
}

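// slow path for a TLB miss: translate addr under the current privilege mode,
// check permissions, install the translation in the TLB unless a tracer is
// watching the page, and return a host pointer to the physical address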
void* mmu_t::refill_tlb(reg_t addr, reg_t bytes, bool store, bool fetch)
{
  reg_t idx = (addr >> PGSHIFT) % TLB_ENTRIES;
  reg_t expected_tag = addr >> PGSHIFT;

  reg_t pte = 0;
  reg_t mstatus = proc ? proc->state.mstatus : 0;

  bool vm_disabled = get_field(mstatus, MSTATUS_VM) == VM_MBARE;
  bool mode_m = get_field(mstatus, MSTATUS_PRV) == PRV_M;
  bool mode_s = get_field(mstatus, MSTATUS_PRV) == PRV_S;
  bool mprv_m = get_field(mstatus, MSTATUS_MPRV) == PRV_M;
  bool mprv_s = get_field(mstatus, MSTATUS_MPRV) == PRV_S;

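  // pick the PTE permission bit this access needs: the supervisor or user
  // copy, depending on the effective privilege (MPRV redirects loads and
  // stores in M-mode, but never instruction fetches)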
  reg_t want_perm = store ? (mode_s || (mode_m && mprv_s) ? PTE_SW : PTE_UW) :
                   !fetch ? (mode_s || (mode_m && mprv_s) ? PTE_SR : PTE_UR) :
                            (mode_s ? PTE_SX : PTE_UX);

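  // M-mode fetches are never translated, and M-mode loads/stores bypass
  // translation unless MPRV redirects them to a lower privilege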
  if (vm_disabled || (mode_m && (mprv_m || fetch))) {
    // translation is off or bypassed; merely check legality of the physical address.
    if (addr < memsz) {
      // produce a fake PTE for the TLB's benefit.
      pte = PTE_V | PTE_UX | PTE_SX | ((addr >> PGSHIFT) << PGSHIFT);
      if (vm_disabled || !(mode_m && !mprv_m))
        pte |= PTE_UR | PTE_SR | PTE_UW | PTE_SW;
    }
  } else {
    pte = walk(addr, want_perm);
  }

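  // fault if translation failed or the PTE lacks the required permission; in
  // that case walk() will not have set the referenced/dirty bits either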
  if (!(pte & PTE_V) || !(pte & want_perm)) {
    if (fetch)
      throw trap_instruction_access_fault(addr);
    if (store)
      throw trap_store_access_fault(addr);
    throw trap_load_access_fault(addr);
  }

  reg_t pgoff = addr & (PGSIZE-1);
  reg_t pgbase = pte >> PGSHIFT << PGSHIFT;
  reg_t paddr = pgbase + pgoff;

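  // if a tracer has claimed this page, report the access and skip the TLB
  // fill so later accesses also reach the tracer; otherwise install the
  // translation.  the store way is only validated on a store, so the first
  // write to a page still comes back through walk() and sets the dirty bit.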
  if (unlikely(tracer.interested_in_range(pgbase, pgbase + PGSIZE, store, fetch)))
    tracer.trace(paddr, bytes, store, fetch);
  else
  {
    tlb_load_tag[idx] = (pte & (PTE_UR|PTE_SR)) ? expected_tag : -1;
    tlb_store_tag[idx] = (pte & (PTE_UW|PTE_SW)) && store ? expected_tag : -1;
    tlb_insn_tag[idx] = (pte & (PTE_UX|PTE_SX)) ? expected_tag : -1;
    tlb_data[idx] = mem + pgbase - (addr & ~(PGSIZE-1));
  }

  return mem + paddr;
}

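// walk the page table rooted at sptbr.  returns the leaf PTE (with superpage
// index bits folded into its PPN), or 0 if no legal mapping exists.  the
// referenced bit (and, for writes, the dirty bit) is only set when the PTE
// actually grants the requested permission.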
pte_t mmu_t::walk(reg_t addr, reg_t perm)
{
  reg_t msb_mask = -(reg_t(1) << (VA_BITS-1));
  if ((addr & msb_mask) != 0 && (addr & msb_mask) != msb_mask)
    return 0; // address isn't properly sign-extended

  reg_t base = proc->get_state()->sptbr;

  int ptshift = (LEVELS-1)*PTIDXBITS;
  for (reg_t i = 0; i < LEVELS; i++, ptshift -= PTIDXBITS) {
    reg_t idx = (addr >> (PGSHIFT+ptshift)) & ((1<<PTIDXBITS)-1);

    // check that physical address of PTE is legal
    reg_t pte_addr = base + idx*sizeof(pte_t);
    if (pte_addr >= memsz)
      return 0;

    pte_t* ppte = (pte_t*)(mem+pte_addr);

    if (!(*ppte & PTE_V)) { // invalid mapping
      return 0;
    } else if (*ppte & PTE_T) { // next level of page table
      base = (*ppte >> PGSHIFT) << PGSHIFT;
    } else {
      // we've found the leaf PTE.  set the referenced bit, and the dirty bit
      // for writes, but only if the requested permission is actually granted.
      if (*ppte & perm) {
        *ppte |= PTE_R;
        if (perm & (PTE_SW | PTE_UW))
          *ppte |= PTE_D;
      }
      // for superpage mappings, make a fake leaf PTE for the TLB's benefit.
      reg_t vpn = addr >> PGSHIFT;
      reg_t pte = *ppte | ((vpn & ((1<<(ptshift))-1)) << PGSHIFT);

      // check that physical address is legal
      if (((pte >> PGSHIFT) << PGSHIFT) >= memsz)
        return 0;

      return pte;
    }
  }
  return 0;
}

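// attach a memory tracer.  flush the TLB first so translations cached before
// the tracer was installed can't bypass it.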
void mmu_t::register_memtracer(memtracer_t* t)
{
  flush_tlb();
  tracer.hook(t);
}