// add load-reserved/store-conditional instructions
// [riscv-isa-sim.git] / riscv / mmu.cc
1 // See LICENSE for license details.
2
3 #include "mmu.h"
4 #include "sim.h"
5 #include "processor.h"
6
7 mmu_t::mmu_t(char* _mem, size_t _memsz)
8 : mem(_mem), memsz(_memsz), badvaddr(0),
9 ptbr(0)
10 {
11 set_sr(SR_S);
12 }
13
14 mmu_t::~mmu_t()
15 {
16 }
17
18 void mmu_t::flush_icache()
19 {
20 memset(icache_tag, -1, sizeof(icache_tag));
21 }
22
23 void mmu_t::flush_tlb()
24 {
25 memset(tlb_insn_tag, -1, sizeof(tlb_insn_tag));
26 memset(tlb_load_tag, -1, sizeof(tlb_load_tag));
27 memset(tlb_store_tag, -1, sizeof(tlb_store_tag));
28
29 flush_icache();
30 }
31
32 void mmu_t::set_sr(uint32_t _sr)
33 {
34 sr = _sr;
35 flush_tlb();
36 yield_load_reservation();
37 }
38
39 reg_t mmu_t::refill_tlb(reg_t addr, reg_t bytes, bool store, bool fetch)
40 {
41 reg_t idx = (addr >> PGSHIFT) % TLB_ENTRIES;
42 reg_t expected_tag = addr & ~(PGSIZE-1);
43
44 reg_t pte = walk(addr);
45
46 reg_t pte_perm = pte & PTE_PERM;
47 if (sr & SR_S) // shift supervisor permission bits into user perm bits
48 pte_perm = (pte_perm/(PTE_SX/PTE_UX)) & PTE_PERM;
49 pte_perm |= pte & PTE_E;
50
51 reg_t perm = (fetch ? PTE_UX : store ? PTE_UW : PTE_UR) | PTE_E;
52 if(unlikely((pte_perm & perm) != perm))
53 {
54 if (fetch)
55 throw trap_instruction_access_fault;
56
57 badvaddr = addr;
58 throw store ? trap_store_access_fault : trap_load_access_fault;
59 }
60
61 reg_t pgoff = addr & (PGSIZE-1);
62 reg_t pgbase = pte >> PTE_PPN_SHIFT << PGSHIFT;
63 reg_t paddr = pgbase + pgoff;
64
65 if (unlikely(tracer.interested_in_range(pgbase, pgbase + PGSIZE, store, fetch)))
66 tracer.trace(paddr, bytes, store, fetch);
67 else
68 {
69 tlb_load_tag[idx] = (pte_perm & PTE_UR) ? expected_tag : -1;
70 tlb_store_tag[idx] = (pte_perm & PTE_UW) ? expected_tag : -1;
71 tlb_insn_tag[idx] = (pte_perm & PTE_UX) ? expected_tag : -1;
72 tlb_data[idx] = pgbase;
73 }
74
75 return paddr;
76 }
77
78 pte_t mmu_t::walk(reg_t addr)
79 {
80 pte_t pte = 0;
81
82 // the address must be a canonical sign-extended VA_BITS-bit number
83 int shift = 8*sizeof(reg_t) - VA_BITS;
84 if (((sreg_t)addr << shift >> shift) != (sreg_t)addr)
85 ;
86 else if (!(sr & SR_VM))
87 {
88 if(addr < memsz)
89 pte = PTE_E | PTE_PERM | ((addr >> PGSHIFT) << PTE_PPN_SHIFT);
90 }
91 else
92 {
93 reg_t base = ptbr;
94 reg_t ptd;
95
96 int ptshift = (LEVELS-1)*PTIDXBITS;
97 for(reg_t i = 0; i < LEVELS; i++, ptshift -= PTIDXBITS)
98 {
99 reg_t idx = (addr >> (PGSHIFT+ptshift)) & ((1<<PTIDXBITS)-1);
100
101 reg_t pte_addr = base + idx*sizeof(pte_t);
102 if(pte_addr >= memsz)
103 break;
104
105 ptd = *(pte_t*)(mem+pte_addr);
106 if(ptd & PTE_E)
107 {
108 // if this PTE is from a larger PT, fake a leaf
109 // PTE so the TLB will work right
110 reg_t vpn = addr >> PGSHIFT;
111 ptd |= (vpn & ((1<<(ptshift))-1)) << PTE_PPN_SHIFT;
112
113 // fault if physical addr is invalid
114 reg_t ppn = ptd >> PTE_PPN_SHIFT;
115 if((ppn << PGSHIFT) + (addr & (PGSIZE-1)) < memsz)
116 pte = ptd;
117 break;
118 }
119 else if(!(ptd & PTE_T))
120 break;
121
122 base = (ptd >> PTE_PPN_SHIFT) << PGSHIFT;
123 }
124 }
125
126 return pte;
127 }
128
129 void mmu_t::register_memtracer(memtracer_t* t)
130 {
131 flush_tlb();
132 tracer.hook(t);
133 }