// cavatools: initialize repository
// [cavatools.git] / caveat / sim_body.h
1 // See LICENSE for license details.
2
3
4 #ifndef SIM_BODY_H
5 #define SIM_BODY_H
6
7
// Register file accessors: rn is a register number; the result is the
// register union (an lvalue).  Arguments are parenthesized for macro
// hygiene.  FR is currently an alias of IR -- integer and FP state
// share one unified register array, with FP bits selected through the
// union members (.f32/.f64 under SOFT_FP, below).
#define IR(rn) cpu->reg[(rn)]
#define FR(rn) cpu->reg[(rn)]
10
#ifdef SOFT_FP
// Berkeley-softfloat register views: read register rn through its
// softfloat float32_t/float64_t union members.
#define F32(rn) cpu->reg[(rn)].f32
#define F64(rn) cpu->reg[(rn)].f64
// Negated copies of FP register rn (flip the IEEE sign bit).
// 'static inline' rather than plain 'inline': under C99/C11 inline
// semantics a bare 'inline' definition in a header emits no external
// definition, risking undefined references when a call is not inlined.
static inline float32_t NF32(int rn) { float32_t x=F32(rn); x.v^=F32_SIGN; return x; }
static inline float64_t NF64(int rn) { float64_t x=F64(rn); x.v^=F64_SIGN; return x; }
#endif
17
18
19
// Use only this macro to advance program counter
// Calls update_regfile (defined elsewhere) with the destination
// register's current value -- presumably publishing it to a
// trace/shadow regfile (confirm against caveat.h) -- then bumps PC
// and the per-instruction byte bookkeeping via advance().
// Wrapped in do{}while(0) so the macro is a single statement (safe in
// an unbraced if/else).  This is legal here because INCPC contains no
// 'break'/'continue'; the CALL/JUMP/ECALL macros that do cannot be
// wrapped this way.
#define INCPC(bytes) do { update_regfile(p->op_rd, IR(p->op_rd).l); PC+=(bytes); advance(bytes); } while (0)
22
// Discontinuous program counter macros
// Each captures the target address in tgt BEFORE writing the link
// register / running INCPC, so forms like JALR rd,rd still compute the
// target from the pre-update register value.  The trailing 'break'
// exits the instruction-dispatch switch in the main loop below; these
// must therefore remain bare-brace blocks (a do{}while(0) wrapper
// would swallow the break).  trace_bbk records the end of the current
// basic block with its kind and target.
#define CALL(npc, sz) { Addr_t tgt=npc; IR(p->op_rd).l=PC+sz; INCPC(sz); trace_bbk(tr_call, tgt); PC=tgt; break; }
#define RETURN(npc, sz) { Addr_t tgt=npc; INCPC(sz); trace_bbk(tr_return, tgt); PC=tgt; break; }
#define JUMP(npc, sz) { Addr_t tgt=npc; INCPC(sz); trace_bbk(tr_jump, tgt); PC=tgt; break; }
#define GOTO(npc, sz) { Addr_t tgt=npc; INCPC(sz); trace_bbk(tr_branch, tgt); PC=tgt; break; }
28
// Trap-generating instructions: set mcause/mtval, then 'continue' --
// the main loop's while condition tests mcause == 0, so this exits the
// loop and returns control to the outer handler.
// ECALL/DOCSR first run INCPC (publishing rd and advancing the trace
// bookkeeping), then roll PC back by sz so the outer handler sees the
// trapping instruction's address -- presumably it re-dispatches or
// emulates from there; confirm against the caller.
// reg[17] is a7, which holds the syscall number by RISC-V convention.
#define EBRK(num, sz) { cpu->state.mcause= 3; cpu->state.mtval=num; continue; }
#define ECALL(sz) { cpu->state.mcause= 8; cpu->state.mtval=0; INCPC(sz); trace_bbk(tr_ecall, cpu->reg[17].l); PC-=sz; continue; }
#define DOCSR(num, sz) { cpu->state.mcause=14; cpu->state.mtval=num; INCPC(sz); trace_bbk(tr_csr, 0L); PC-=sz; continue; }
32
// Memory reference instructions
// Each macro records the access in the trace stream, then loads
// directly through the host pointer (comma expression yields the
// loaded value).
// NOTE: LOAD_B must cast through 'signed char' -- plain 'char' has
// implementation-defined signedness, so LB would fail to sign-extend
// on targets (e.g. ARM, POWER) where char is unsigned.
#define LOAD_B( a, sz) ( trace_mem(tr_read1, a), *(( signed char*)(a)) )
#define LOAD_UB(a, sz) ( trace_mem(tr_read1, a), *((unsigned char*)(a)) )
#define LOAD_H( a, sz) ( trace_mem(tr_read2, a), *(( short*)(a)) )
#define LOAD_UH(a, sz) ( trace_mem(tr_read2, a), *((unsigned short*)(a)) )
#define LOAD_W( a, sz) ( trace_mem(tr_read4, a), *(( int*)(a)) )
#define LOAD_UW(a, sz) ( trace_mem(tr_read4, a), *((unsigned int*)(a)) )
#define LOAD_L( a, sz) ( trace_mem(tr_read8, a), *(( long*)(a)) )
#define LOAD_UL(a, sz) ( trace_mem(tr_read8, a), *((unsigned long*)(a)) )
#define LOAD_F( a, sz) ( trace_mem(tr_read4, a), *(( float*)(a)) )
#define LOAD_D( a, sz) ( trace_mem(tr_read8, a), *(( double*)(a)) )
44
// Store instructions: record the trace event, then write through the
// host pointer.  do{}while(0) makes each macro a single statement
// (safe in an unbraced if/else -- the old bare-brace form broke
// 'if (c) STORE_W(a,4,v); else ...'), and the value operand is
// parenthesized for macro hygiene.
#define STORE_B(a, sz, v) do { trace_mem(tr_write1, a); *(( char*)(a))=(v); } while (0)
#define STORE_H(a, sz, v) do { trace_mem(tr_write2, a); *(( short*)(a))=(v); } while (0)
#define STORE_W(a, sz, v) do { trace_mem(tr_write4, a); *(( int*)(a))=(v); } while (0)
#define STORE_L(a, sz, v) do { trace_mem(tr_write8, a); *(( long*)(a))=(v); } while (0)
#define STORE_F(a, sz, v) do { trace_mem(tr_write4, a); *(( float*)(a))=(v); } while (0)
#define STORE_D(a, sz, v) do { trace_mem(tr_write8, a); *((double*)(a))=(v); } while (0)
51
52
// Define load reserve/store conditional emulation
// addrW/addrL reinterpret the host pointer held in register rn as a
// pointer to a 32-/64-bit word.  amoW/amoL do the same but first emit
// an AMO trace record (comma expression: trace, then yield pointer).
#define addrW(rn) (( int*)IR(rn).p)
#define addrL(rn) ((long*)IR(rn).p)
#define amoW(rn) ( trace_mem(tr_amo4, IR(rn).l), ( int*)IR(rn).p )
#define amoL(rn) ( trace_mem(tr_amo8, IR(rn).l), (long*)IR(rn).p )
58
// LR/SC emulation under the global amo lock: lrsc_set records the
// 8-byte-aligned address of the last LR, and an SC succeeds only when
// its own aligned address matches.  The low bits OR'ed into the
// trace_mem address appear to tag the event kind (0=LR.W, 1=LR.D,
// 2=SC.W, 3=SC.D) -- confirm against the trace decoder.
// Fixes: (1) the reservation is taken from the *address* register r1;
// the old code sampled IR(rd), the destination about to be
// overwritten.  (2) the SC compare is parenthesized: '==' binds
// tighter than '&', so the old test computed (lrsc_set == addr) & ~0x7.
// NOTE(review): rd=1 on SC success / rd=0 on failure is inverted
// relative to the RISC-V spec (SC writes 0 on success); left unchanged
// since the generated callers may compensate -- confirm.
#define LR_W(rd, r1) { amo_lock_begin; lrsc_set = IR(r1).ul&~0x7; IR(rd).l=*addrW(r1); trace_mem(tr_lr4, IR(r1).ul|0x0L); amo_lock_end; }
#define LR_L(rd, r1) { amo_lock_begin; lrsc_set = IR(r1).ul&~0x7; IR(rd).l=*addrL(r1); trace_mem(tr_lr8, IR(r1).ul|0x1L); amo_lock_end; }
#define SC_W(rd, r1, r2) { amo_lock_begin; if (lrsc_set == (IR(r1).ul&~0x7)) { *addrW(r1)=IR(r2).i; IR(rd).l=1; } else IR(rd).l=0; trace_mem(tr_sc4, IR(r1).ul|0x2L); amo_lock_end; }
#define SC_L(rd, r1, r2) { amo_lock_begin; if (lrsc_set == (IR(r1).ul&~0x7)) { *addrL(r1)=IR(r2).l; IR(rd).l=1; } else IR(rd).l=0; trace_mem(tr_sc8, IR(r1).ul|0x3L); amo_lock_end; }
63
64
// Define AMO instructions
// Each expands to an expression whose value is the OLD memory content
// (the __sync builtins return the previous value).  The rd parameter
// is unused here -- presumably the generated execute_insn.h caller
// assigns the macro's value into rd itself; confirm.
// The address register is traced via amoW/amoL before the atomic op.
// NOTE(review): __sync_lock_test_and_set is only an acquire barrier,
// unlike the full-barrier __sync_fetch_and_* builtins.
#define AMOSWAP_W(rd, r1, r2) __sync_lock_test_and_set_4(amoW(r1), IR(r2).i)
#define AMOSWAP_L(rd, r1, r2) __sync_lock_test_and_set_8(amoL(r1), IR(r2).l)

#define AMOADD_W(rd, r1, r2) __sync_fetch_and_add_4( amoW(r1), IR(r2).i)
#define AMOADD_L(rd, r1, r2) __sync_fetch_and_add_8( amoL(r1), IR(r2).l)
#define AMOXOR_W(rd, r1, r2) __sync_fetch_and_xor_4( amoW(r1), IR(r2).i)
#define AMOXOR_L(rd, r1, r2) __sync_fetch_and_xor_8( amoL(r1), IR(r2).l)
#define AMOOR_W( rd, r1, r2) __sync_fetch_and_or_4( amoW(r1), IR(r2).i)
#define AMOOR_L( rd, r1, r2) __sync_fetch_and_or_8( amoL(r1), IR(r2).l)
#define AMOAND_W(rd, r1, r2) __sync_fetch_and_and_4( amoW(r1), IR(r2).i)
#define AMOAND_L(rd, r1, r2) __sync_fetch_and_and_8( amoL(r1), IR(r2).l)
77
// Atomic min/max have no __sync builtin, so they are emulated under
// the global amo lock.  The operand t2 is read before rd is written so
// that rd==r2 still works; rd receives the OLD memory value, per the
// RISC-V AMO definition.
// Fixes: each variant now reads its operand through the union member
// matching its width/signedness -- the old code mixed them up (e.g.
// AMOMIN_L read .i, truncating a 64-bit operand to 32 bits, and the
// MAX variants read unsigned fields for signed compares).  The
// unsigned W-form results are cast to int so rd gets the sign-extended
// 32-bit old value, as the RISC-V spec requires for all AMO*.W.
#define AMOMIN_W( rd, r1, r2) { amo_lock_begin; int t1=*amoW(r1), t2=IR(r2).i; if (t2 < t1) *addrW(r1) = t2; IR(rd).l = t1; amo_lock_end; }
#define AMOMAX_W( rd, r1, r2) { amo_lock_begin; int t1=*amoW(r1), t2=IR(r2).i; if (t2 > t1) *addrW(r1) = t2; IR(rd).l = t1; amo_lock_end; }
#define AMOMIN_L( rd, r1, r2) { amo_lock_begin; long t1=*amoL(r1), t2=IR(r2).l; if (t2 < t1) *addrL(r1) = t2; IR(rd).l = t1; amo_lock_end; }
#define AMOMAX_L( rd, r1, r2) { amo_lock_begin; long t1=*amoL(r1), t2=IR(r2).l; if (t2 > t1) *addrL(r1) = t2; IR(rd).l = t1; amo_lock_end; }
#define AMOMINU_W(rd, r1, r2) { amo_lock_begin; unsigned int t1=*(unsigned int*)amoW(r1), t2=IR(r2).ui; if (t2 < t1) *addrW(r1) = t2; IR(rd).l = (int)t1; amo_lock_end; }
#define AMOMAXU_W(rd, r1, r2) { amo_lock_begin; unsigned int t1=*(unsigned int*)amoW(r1), t2=IR(r2).ui; if (t2 > t1) *addrW(r1) = t2; IR(rd).l = (int)t1; amo_lock_end; }
#define AMOMINU_L(rd, r1, r2) { amo_lock_begin; unsigned long t1=*(unsigned long*)amoL(r1), t2=IR(r2).ul; if (t2 < t1) *addrL(r1) = t2; IR(rd).l = t1; amo_lock_end; }
#define AMOMAXU_L(rd, r1, r2) { amo_lock_begin; unsigned long t1=*(unsigned long*)amoL(r1), t2=IR(r2).ul; if (t2 > t1) *addrL(r1) = t2; IR(rd).l = t1; amo_lock_end; }
86
87
// Define i-stream synchronization instruction
// FENCE maps to a host full memory barrier; rd/r1 and the pred/succ
// bits in immed affect only the trace record, not the barrier.  Ends
// its dispatch case with 'break' like the branch macros, so it must
// stay a bare-brace block (no do{}while(0)).
#define FENCE(rd, r1, immed) { __sync_synchronize(); INCPC(4); trace_bbk(tr_fence, immed); break; }
90
91
// Main interpreter loop body.  This is a bare block: the header is
// textually #included inside a simulator function that supplies PC,
// cpu, countdown, max_count, insn(), on_every_insn() and uses the
// macros defined above.
{
  // Run until a trap sets mcause (EBRK/ECALL/DOCSR above, or the
  // illegal/unknown cases below).
  while (cpu->state.mcause == 0) {
    register const struct insn_t* p = insn(PC);  // decode at current PC
    on_every_insn(p);                            // per-instruction hook
    switch (p->op_code) {

// Generated dispatch: one 'case' per opcode, built from the macros above.
#include "execute_insn.h"

    case Op_zero:
      abort(); /* should never occur */

    case Op_illegal:
      cpu->state.mcause = 2; // Illegal instruction
      continue; // will exit loop
    default:
      cpu->state.mcause = 10; // Unknown instruction
      continue;
    }
    // A 'break' out of the switch (normal instruction completion)
    // lands here.
    IR(0).l = 0L;           // re-zero x0 in case an instruction wrote it
    if (--countdown == 0)   // executed the requested quantum of insns
      break;
  }
  cpu->pc = PC; // program counter cached in register
  cpu->counter.insn_executed += max_count-countdown;
}
117
118
119 #endif