// riscv-isa-sim.git / riscv / sv_insn_redirect.cc
#include "sv_insn_redirect.h"
#include "processor.h"
#include "mulhi.h"
#include "sv_reg.h"
#include "sv_mmu.h"

#define xstr(x) str(x)
#define str(x) #x

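// The WRITE_FRD overloads below route scalar FP results through the SV
// element-width machinery: the destination's elwidth (looked up via
// _insn->reg_elwidth) decides whether the value is stored as-is or first
// narrowed (e.g. f32 -> f16) before DO_WRITE_FREG packs it into the register
// file.  The elwidth encoding (1 = 8-bit, 2 = 16-bit, 3 = 32-bit, 0 = default)
// is inferred here from the switch cases, not from a separate table.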
void (sv_proc_t::WRITE_FRD)(sv_float32_t value)
{
  reg_t reg = _insn->rd().reg;
  uint8_t dest_elwidth = _insn->reg_elwidth(reg, false);
  fprintf(stderr, "WRITE_FRD rd %ld ew %d sv_float32_t %x\n",
          reg, dest_elwidth, ((float32_t)value).v);
  freg_t v;
  switch (dest_elwidth)
  {
    // 8-bit
    case 1: throw trap_illegal_instruction(0); // XXX for now
    // 16-bit element: down-convert the f32 result to f16 for storage
    case 2:
    {
      float16_t x16 = ::f32_to_f16((float32_t)value);
      fprintf(stderr, "f32-to-f16\n");
      v = freg(x16);
      break;
    }
    case 3:
    {
      v = freg(value);
      //float32_t x32 = ::f128_to_f32((float32_t)value);
      //v = ::f32_to_f128(x32);
      break;
    }
    default:
    {
      v = freg(value);
      break;
    }
  }
  sv_freg_t vf = sv_freg_t(v, xlen, value.get_elwidth());
  DO_WRITE_FREG( _insn->rd(), vf);
}

void (sv_proc_t::WRITE_FRD)(sv_float64_t value)
{
  reg_t reg = _insn->rd().reg;
  fprintf(stderr, "WRITE_FRD sv_float64_t %g\n",
          (double)((float64_t)value).v);
  uint8_t dest_elwidth = _insn->reg_elwidth(reg, false);
  freg_t v;
  switch (dest_elwidth)
  {
    // 8-bit
    case 1: throw trap_illegal_instruction(0); // XXX for now
    // 16-bit element: down-convert the f64 result to f16 for storage
    case 2:
    {
      float16_t x16 = ::f64_to_f16((float64_t)value);
      v = freg(x16);
      break;
    }
    // 32-bit element: down-convert the f64 result to f32 for storage
    case 3:
    {
      float32_t x32 = ::f64_to_f32((float64_t)value);
      v = freg(x32);
      break;
    }
    default:
    {
      v = freg(value);
      break;
    }
  }
  sv_freg_t vf = sv_freg_t(v, xlen, value.get_elwidth());
  DO_WRITE_FREG( _insn->rd(), vf );
}

void (sv_proc_t::WRITE_FRD)(sv_float128_t value)
{
  fprintf(stderr, "WRITE_FRD sv_float128_t %g\n",
          (double)((float128_t)value).v[0]);
  sv_freg_t v = sv_freg_t(freg(((float128_t)value)), xlen, value.get_elwidth());
  DO_WRITE_FREG( _insn->rd(), v );
}

void (sv_proc_t::WRITE_FRD)(sv_freg_t value)
{
  fprintf(stderr, "WRITE_FRD sv_freg_t %lx\n", ((freg_t)value).v[0]);
  DO_WRITE_FREG( _insn->rd(), freg(value) );
}

void (sv_proc_t::WRITE_RVC_FRS2S)(sv_float32_t value)
{
  WRITE_FREG(_insn->rvc_rs2s(), freg(value));
}

void (sv_proc_t::WRITE_RVC_FRS2S)(sv_float64_t const& value)
{
  WRITE_FREG(_insn->rvc_rs2s(), freg(value));
}

void (sv_proc_t::WRITE_RVC_RS1S)(sv_reg_t const& value)
{
  WRITE_REG(_insn->rvc_rs1s(), value );
}

void (sv_proc_t::WRITE_RVC_RS2S)(sv_reg_t const& value)
{
  WRITE_REG(_insn->rvc_rs2s(), value );
}

void (sv_proc_t::WRITE_RD)(sv_reg_t const& value)
{
  WRITE_REG( _insn->rd(), value ); // XXX TODO: replace properly
}

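// freg_shift gives byte-granular access to an freg_t so that sub-FLEN
// elements can be copied into (or out of) an arbitrary byte offset of an FP
// register.  Note that the array is declared sizeof(freg_t)*8 bytes, larger
// than the freg_t itself; from the loops below it appears only the first
// sizeof(freg_t) bytes are ever indexed.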
union freg_shift {
  freg_t f;
  uint8_t b[sizeof(freg_t)*8];
};

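// Predicate bits are remapped with the same machinery as register elements,
// but with a zero sub-offset and pred=true, so the SUBVL multiplier in
// remap() is skipped.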
unsigned int sv_proc_t::pred_remap(reg_t reg, int bit)
{
  int ignore_subvl = 0; // predication does not use subvl
  reg_spec_t rs = {reg, &bit, &ignore_subvl};
  return remap(rs, true);
}

unsigned int sv_proc_t::remap(reg_spec_t const& spec, bool pred)
{
  unsigned int offs = *spec.offset;
  unsigned int subo = *spec.suboff;
  // this is where (after all the fuss, passing info around) the actual
  // register offset is computed. if subvl is active, it's a multiplier
  //fprintf(stderr, "remap %ld subvl %ld offs %ld subo %ld\n",
  //        spec.reg, p->get_state()->sv().subvl, offs, subo);
  if (!pred) {
    offs = offs * p->get_state()->sv().subvl + subo;
  }
  sv_shape_t *shape = p->get_state()->get_shape(spec.reg, pred);
  if (shape == NULL) {
    return offs;
  }
  // this table is pre-calculated by sv_shape_t::setup_map().
  // changing the CSRs is the way to change the map.
  // the map is still calculated even for the 1D case
  // because it's a linear map
  unsigned int res = (unsigned int)shape->map[offs] + shape->offs;
  fprintf(stderr, "remap %ld %d -> %d\n",
          spec.reg, offs, res);
  return res;
}

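// DO_WRITE_FREG packs an elwidth-sized FP element into the register file.
// When the element is narrower than dest_flen, several elements share one
// register: as a worked example, with flen = 64 and a 16-bit element width,
// remapped offset 5 lands in register rd+1 at byte offset 2 (4 elements per
// register, offs = 5/4, shift = 5%4).  Scalar (non-vector) destinations get
// the unused upper bytes filled with 0xff, which appears to mirror spike's
// NaN-boxing of narrow FP values.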
void (sv_proc_t::DO_WRITE_FREG)(reg_spec_t const& spec, sv_freg_t const& value)
{
  int regflen = sizeof(freg_t) * 8; // FLEN (not specified in spike)
  int flen = _insn->dest_flen;
  reg_t reg = spec.reg;
  uint8_t dest_elwidth = _insn->reg_elwidth(reg, false);
  int bitwidth = 0;
  //if (_insn->sv_check_reg(reg, false)) {
  bitwidth = get_bitwidth(dest_elwidth, flen);
  //} else {
  //  bitwidth = regflen;
  //  flen = regflen;
  //}
  fprintf(stderr, "DO_WRITE_FRD rd %ld ew %d data %lx %lx\n",
          reg, dest_elwidth, ((freg_t)value).v[0], ((freg_t)value).v[1]);
  unsigned int shift = 0;
  unsigned int offs = 0;
  if (spec.offset != NULL) {
    unsigned int nbytes = flen / bitwidth;
    offs = remap(spec);
    shift = offs % nbytes;
    offs /= nbytes;
    if (spec.isvec) {
      reg += offs;
    }
    fprintf(stderr, "writefreg spec %ld bitwidth %d offs %d shift %d\n",
            reg, bitwidth, offs, shift);
  }
  if (((int)reg) >= SV_NFPR) {
    throw trap_illegal_instruction(0);
  }
  freg_shift fd;
  if (flen != bitwidth)
  {
    char report[2] = {};
    freg_shift fs;
    fs.f = value;
    fd.f = _insn->p->get_state()->FPR[reg];
    int sblen = shift*bitwidth/8;
    // copy the element's bytes into place within the register
    for (int i = 0; i < ((int)bitwidth/8); i++) {
      fd.b[i+sblen] = fs.b[i];
    }
    if (!spec.isvec) {
      for (int i = (bitwidth/8); i < ((int)flen/8); i++) {
        fd.b[i] = 0xff;
      }
      report[0] = 's';
    }
    fprintf(stderr, "writefreg %s %ld bitwidth %d offs %d shift %d " \
                    " %lx:%lx %lx:%lx\n",
            report, spec.reg, bitwidth, offs, shift,
            fs.f.v[0], fs.f.v[1],
            fd.f.v[0], fd.f.v[1]);
  }
  else
  {
    fd.f = value;
  }
  STATE.FPR.write(reg, fd.f);
  dirty_fp_state;
}

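// WRITE_REG is the integer-side equivalent: sub-xlen elements are packed
// shift*bitwidth bits into the target register.  As a sketch, with xlen = 64
// and an 8-bit element width, element offset 10 is written to bits [23:16]
// of register rd+1 (8 elements per register, offs = 10/8, shift = 10%8).
// Scalar destinations are instead sign- or zero-extended according to
// _insn->signextended.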
void (sv_proc_t::WRITE_REG)(reg_spec_t const& spec, sv_reg_t const& value)
{
  uint64_t wval = (uint64_t)value;
  reg_t reg = spec.reg;
  int bitwidth = get_bitwidth(_insn->reg_elwidth(reg, true), xlen);
  unsigned int shift = 0;
  unsigned int offs = 0;
  if (spec.offset != NULL) {
    unsigned int nbytes = xlen / bitwidth;
    offs = remap(spec);
    shift = offs % nbytes;
    offs /= nbytes;
    if (spec.isvec) {
      reg += offs;
    }
    fprintf(stderr, "writereg spec %ld %lx bitwidth %d offs %d shift %d\n",
            reg, wval, bitwidth, offs, shift);
  }
  if (((int)reg) >= SV_NFPR) {
    throw trap_illegal_instruction(0);
  }
  if (xlen != bitwidth)
  {
    char report[2] = {};
    uint64_t data = _insn->p->get_state()->XPR[reg];
    uint64_t mask = ((1UL<<bitwidth)-1UL) << (shift*bitwidth);
    wval = (uint64_t)(wval << (shift*bitwidth)); // element within reg-block
    wval &= mask;
    uint64_t ndata = data;
    if (spec.isvec) {
      ndata = data & (uint64_t)(~mask); // masks off right bits
      wval |= ndata;
    } else {
      if (_insn->signextended) {
        wval = sext_bwid(wval, bitwidth);
        report[0] = 's';
      } else {
        wval = zext_bwid(wval, bitwidth);
        report[0] = 'z';
      }
    }
    // XXX BAD HACK, keep an eye on this
    // when xlen = 32, spike appears to expect all 32-bit
    // results to be sign-extended in the 64-bit register.
    // this MAY not be properly spec-compliant when xlen
    // is changed at runtime.
    if (xlen == 32 && bitwidth != 32) {
      wval = sext_bwid(wval, 32);
    }
    fprintf(stderr, "writereg %s %ld bitwidth %d offs %d shift %d %lx " \
                    " %lx %lx %lx\n",
            report, spec.reg, bitwidth, offs, shift, data,
            ndata, mask, wval);
  }
  STATE.XPR.write(reg, wval);
}

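// READ_FREG mirrors DO_WRITE_FREG in the read direction: the selected
// element's bytes are shuffled down to offset zero and the remaining bytes
// are filled with 0xff, so the value looks NaN-boxed to callers.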
freg_t (sv_proc_t::READ_FREG)(reg_spec_t const& spec)
{
  int regflen = sizeof(freg_t) * 8; // FLEN (not specified in spike)
  int flen = _insn->src_flen;
  reg_t reg = spec.reg;
  uint8_t elwidth = _insn->reg_elwidth(reg, false);
  int bitwidth = get_bitwidth(elwidth, flen);
  int shift = 0;
  int offs = 0;
  if (spec.offset != NULL) {
    int nbytes = flen / bitwidth;
    if (nbytes == 0) {
      nbytes = 1;
    }
    offs = remap(spec);
    shift = offs % nbytes;
    offs /= nbytes;
    if (spec.isvec) {
      reg += offs;
    }
  }
  if (((int)reg) >= SV_NFPR) {
    throw trap_illegal_instruction(0);
  }
  freg_shift fs;
  fs.f = _insn->p->get_state()->FPR[reg];
  fprintf(stderr, "READ_FREG rd %ld offs %d ew %d bw %d fl %d data %lx %lx\n",
          reg, offs, elwidth, bitwidth, flen, fs.f.v[0], fs.f.v[1]);

  if (regflen != bitwidth)
  {
    // shuffle the data down by bytes (annoying but easier)
    int sblen = shift*bitwidth/8;
    for (int i = 0; i < ((int)regflen/8); i++) {
      if (i < (bitwidth/8)) {
        fs.b[i] = fs.b[i+sblen];
      } else {
        fs.b[i] = 0xff;
      }
    }
    fprintf(stderr, "readfreg %ld bitwidth %d offs %d " \
                    "shift %d %lx:%lx\n",
            spec.reg, bitwidth, offs, shift,
            fs.f.v[0], fs.f.v[1]);
  }
  return fs.f;
}

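// READ_REG has two modes.  In the normal mode the selected element is
// shifted down and masked to bitwidth bits.  In addr_mode (used by the MMU
// helpers below) the register is treated as a base address and the element
// offset is added as a byte offset instead, so loads and stores step through
// memory rather than through sub-register elements.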
reg_t sv_proc_t::READ_REG(reg_spec_t const& spec,
                          bool addr_mode, size_t width)
{
  reg_t reg = spec.reg;
  int bitwidth = get_bitwidth(_insn->reg_elwidth(reg, true), width);
  int shift = 0;
  int origoffs = 0;
  int offs = 0;
  if (spec.offset != NULL) {
    int nbytes = width / bitwidth;
    if (nbytes == 0) {
      nbytes = 1;
    }
    origoffs = remap(spec);
    shift = origoffs % nbytes;
    offs = origoffs / nbytes;
    if (spec.isvec) {
      reg += offs;
    }
  }
  if (((int)reg) >= SV_NFPR) {
    throw trap_illegal_instruction(0);
  }
  uint64_t data = _insn->p->get_state()->XPR[reg];
  uint64_t ndata = data;
  if (addr_mode)
  {
    // offset data to load by the number of BYTES not bits
    if (spec.isvec) {
      ndata = data + (bitwidth * shift / 8);
    } else {
      ndata = data + (bitwidth * origoffs / 8);
    }
    fprintf(stderr, "readreg ADDRmode %p %ld %ld bw %d offs (%d) %d " \
                    "shift %d %lx->%lx\n",
            spec.offset, spec.reg, reg, bitwidth,
            origoffs, offs, shift, data, ndata);
  }
  else
  {
    if (((uint64_t)xlen) != ((uint64_t)bitwidth))
    {
      // gets element within the reg-block
      ndata = data >> (shift*bitwidth);
      ndata &= ((1UL<<bitwidth)-1UL); // masks off the right bits
    }
    fprintf(stderr, "readreg %ld bitwidth %d offs %d " \
                    "shift %d %lx->%lx\n",
            spec.reg, bitwidth, offs, shift, data, ndata);
  }
  return ndata;
}

sv_reg_t sv_proc_t::get_intreg(reg_spec_t const& spec)
{
  uint64_t data = READ_REG(spec);
  uint8_t bitwidth = _insn->src_bitwidth;
  return sv_reg_t(data, xlen, bitwidth);
}

sv_freg_t sv_proc_t::get_fpreg(reg_spec_t const& spec)
{
  freg_t data = READ_FREG(spec);
  //uint8_t bitwidth = _insn->src_bitwidth;
  reg_t reg = spec.reg;
  uint8_t elwidth = _insn->reg_elwidth(reg, false);
  return sv_freg_t(data, xlen, elwidth);
}

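// The GET_REG/GET_FPREG macros below generate one thin accessor per operand
// field (rs1, rs2, rvc_rs2s, ...), wrapping the operand's reg_spec_t in
// get_intreg()/get_fpreg() so the elwidth tagging happens in one place.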
#define GET_REG(name) \
  sv_reg_t sv_proc_t::get_##name() \
  { \
    reg_spec_t reg = _insn->name (); \
    return get_intreg(reg); \
  }

GET_REG(rs1)
GET_REG(rs2)
GET_REG(rs3)
GET_REG(rvc_rs1s)
GET_REG(rvc_rs2s)
GET_REG(rvc_rs1)
GET_REG(rvc_rs2)

sv_reg_t sv_proc_t::get_rvc_sp()
{
  return get_intreg({X_SP, _insn->get_sp_offs(), _insn->get_sp_subo()});
}

#define GET_FPREG(name, getter) \
  sv_freg_t sv_proc_t::get_##name() \
  { \
    reg_spec_t reg = _insn->getter (); \
    return get_fpreg(reg); \
  }

GET_FPREG(frs1, rs1)
GET_FPREG(frs2, rs2)
GET_FPREG(frs3, rs3)
GET_FPREG(rvc_frs2, rvc_rs2)
GET_FPREG(rvc_frs2s, rvc_rs2s)

sv_reg_t sv_proc_t::get_shamt()
{
  return sv_reg_t(_insn->i_imm() & 0x3F); // XXX TODO: has to be elwidth'd
}

sv_reg_t sv_proc_t::uint64_max()
{
  return ((UINT64_C(18446744073709551615)));
}

sv_sreg_t (sv_proc_t::sext_xlen)(sv_sreg_t const& v)
{
  int64_t x = v;
  x = (((sreg_t)(x) << (64-xlen)) >> (64-xlen));
  return sv_sreg_t(x);
}

sv_sreg_t (sv_proc_t::sext_xlen)(sv_reg_t const& v)
{
  uint64_t x = v;
  x = (((sreg_t)(x) << (64-xlen)) >> (64-xlen));
  return sv_sreg_t((sreg_t)x);
}

sv_reg_t (sv_proc_t::zext_xlen)(sv_reg_t const& v)
{
  uint64_t x = v;
  x = (((reg_t)(x) << (64-xlen)) >> (64-xlen));
  return sv_reg_t(x);
}

sv_sreg_t (sv_proc_t::sext32)(sv_reg_t const& v)
{
  uint64_t x = v;
  x = ((sreg_t)(int32_t)(x));
  return sv_sreg_t((int64_t)x, v.get_xlen(), v.get_elwidth());
}

sv_reg_t (sv_proc_t::zext32)(sv_reg_t const& v)
{
  uint64_t x = v;
  x = ((reg_t)(uint32_t)(x));
  return sv_reg_t(x);
}

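// rv_int_op_prepare/rv_int_op_finish implement a prepare/compute/finish
// pattern for the elwidth-aware integer operators: prepare() returns true
// when the operation runs at full xlen width (fast path, operate on the
// sv_reg_t values directly); otherwise it sign- or zero-extends the narrowed
// operands, and finish() re-truncates the result and tags it with
// maxelwidth() of the two source element widths.  The macro is instantiated
// once per signedness combination below.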
#define OP_PREP_FINISH( SLHSTYPE, SRHSTYPE, SRESTYPE, \
                        LHSTYPE, RHSTYPE, RESTYPE ) \
bool sv_proc_t::rv_int_op_prepare(SLHSTYPE const & lhs, SRHSTYPE const & rhs, \
                                  LHSTYPE &vlhs, RHSTYPE &vrhs, \
                                  uint8_t &bitwidth) \
{ \
  bitwidth = _insn->src_bitwidth; \
  if (bitwidth == 0) { \
    bitwidth = xlen; \
  } \
  if (bitwidth == xlen) { \
    vlhs = lhs; \
    vrhs = rhs; \
    return true; \
  } \
  uint8_t lbitwidth = get_bitwidth(lhs.get_elwidth(), xlen); \
  uint8_t rbitwidth = get_bitwidth(rhs.get_elwidth(), xlen); \
  if (_insn->signextended) { \
    vlhs = sext_bwid(lhs, lbitwidth); \
    vrhs = sext_bwid(rhs, rbitwidth); \
  } else { \
    vlhs = zext_bwid(lhs, lbitwidth); \
    vrhs = zext_bwid(rhs, rbitwidth); \
  } \
  return false; \
} \
SRESTYPE sv_proc_t::rv_int_op_finish(SLHSTYPE const & lhs, \
                                     SRHSTYPE const & rhs, \
                                     RESTYPE &result, uint8_t &bitwidth) \
{ \
  if (_insn->signextended) { \
    result = sext_bwid(result, bitwidth); \
  } else { \
    result = zext_bwid(result, bitwidth); \
  } \
  uint8_t reswidth = maxelwidth(lhs.get_elwidth(), rhs.get_elwidth()); \
  fprintf(stderr, "result sext %d wid %d %lx\n", _insn->signextended, \
          reswidth, result); \
  return SRESTYPE(result, xlen, reswidth); \
}


OP_PREP_FINISH(sv_reg_t, sv_reg_t, sv_reg_t, uint64_t, uint64_t, uint64_t)
OP_PREP_FINISH(sv_sreg_t, sv_reg_t, sv_sreg_t, int64_t, uint64_t, int64_t)
OP_PREP_FINISH(sv_sreg_t, sv_sreg_t, sv_sreg_t, int64_t, int64_t, int64_t)

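// OP_RES_FN generates one rv##fname operator per instantiation below; the
// operator token itself is smuggled in through the #define table that
// follows (e.g. "#define _add +"), so OP_RES_FN(_add, ...) expands roughly
// to:
//
//   sv_reg_t sv_proc_t::rv_add(sv_reg_t const& lhs, sv_reg_t const& rhs)
//   {
//     ...prepare...; uint64_t result = vlhs + vrhs; ...finish...;
//   }
//
// (sketch only -- the real expansion also contains the debug fprintf and the
// full-width fast-path return shown in the macro body below).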
#define OP_RES_FN( fname, SLHSTYPE, SRHSTYPE, SRESTYPE, \
                   LHSTYPE, RHSTYPE, RESTYPE ) \
SRESTYPE sv_proc_t::rv##fname (SLHSTYPE const & lhs, SRHSTYPE const & rhs) \
{ \
  uint8_t bitwidth = _insn->src_bitwidth; \
  LHSTYPE vlhs = 0; \
  RHSTYPE vrhs = 0; \
  if (rv_int_op_prepare(lhs, rhs, vlhs, vrhs, bitwidth)) { \
    RESTYPE result = lhs fname rhs; \
    fprintf(stderr, "%s result %lx %lx %lx\n", \
            xstr(fname), (LHSTYPE)lhs, (RHSTYPE)rhs, (RESTYPE)result); \
    return SRESTYPE(result); \
  } \
  RESTYPE result = vlhs fname vrhs; \
  fprintf(stderr, "%s result %lx %lx %lx bw %d\n", \
          xstr(fname), (LHSTYPE)lhs, (RHSTYPE)rhs, (RESTYPE)result, bitwidth); \
  return rv_int_op_finish(lhs, rhs, result, bitwidth); \
}

#define _add +
#define _sub -
#define _and &
#define _or |
#define _xor ^
#define _div /
#define _rem %
#define _mul *

OP_RES_FN ( _add, sv_reg_t, sv_reg_t, sv_reg_t, uint64_t, uint64_t, uint64_t )
OP_RES_FN ( _sub, sv_reg_t, sv_reg_t, sv_reg_t, uint64_t, uint64_t, uint64_t )
OP_RES_FN ( _and, sv_reg_t, sv_reg_t, sv_reg_t, uint64_t, uint64_t, uint64_t )
OP_RES_FN ( _or , sv_reg_t, sv_reg_t, sv_reg_t, uint64_t, uint64_t, uint64_t )
OP_RES_FN ( _xor, sv_reg_t, sv_reg_t, sv_reg_t, uint64_t, uint64_t, uint64_t )
OP_RES_FN ( _div, sv_reg_t, sv_reg_t, sv_reg_t, uint64_t, uint64_t, uint64_t )
OP_RES_FN ( _div, sv_sreg_t, sv_sreg_t, sv_sreg_t, int64_t, int64_t, int64_t )
OP_RES_FN ( _rem, sv_reg_t, sv_reg_t, sv_reg_t, uint64_t, uint64_t, uint64_t )
OP_RES_FN ( _rem, sv_sreg_t, sv_sreg_t, sv_sreg_t, int64_t, int64_t, int64_t )
OP_RES_FN ( _mul, sv_reg_t, sv_reg_t, sv_reg_t, uint64_t, uint64_t, uint64_t )
OP_RES_FN ( _mul, sv_sreg_t, sv_reg_t, sv_sreg_t, int64_t, uint64_t, int64_t )
OP_RES_FN ( _mul, sv_sreg_t, sv_sreg_t, sv_sreg_t, int64_t, int64_t, int64_t )

/* 32-bit mulh/mulhu/mulhsu */

// normally the result is shuffled down by 32 bits (elwidth==default)
// however with variable bitwidth we want the top elwidth bits,
// using the SOURCE registers to determine that width.
// specifically: truncation of the result due to a shorter
// destination elwidth is NOT our problem.
#define OP_MULH_FN( fname, SLHSTYPE, SRHSTYPE, SRESTYPE, \
                    LHSTYPE, RHSTYPE, RESTYPE ) \
SRESTYPE sv_proc_t::rv##fname (SLHSTYPE const & lhs, SRHSTYPE const & rhs) \
{ \
  uint8_t bitwidth = _insn->src_bitwidth; \
  LHSTYPE vlhs = 0; \
  RHSTYPE vrhs = 0; \
  if (rv_int_op_prepare(lhs, rhs, vlhs, vrhs, bitwidth)) { \
    RESTYPE result = (lhs * rhs) >> 32; \
    fprintf(stderr, "%s result %lx %lx %lx\n", \
            xstr(fname), (LHSTYPE)lhs, (RHSTYPE)rhs, (RESTYPE)result); \
    return SRESTYPE(result); \
  } \
  uint8_t bw32 = std::min(bitwidth, (uint8_t)32); \
  RESTYPE result = (vlhs * vrhs) >> bw32; \
  result = zext_bwid(result, bw32); \
  fprintf(stderr, "%s result %lx %lx %lx bw %d\n", \
          xstr(fname), (LHSTYPE)lhs, (RHSTYPE)rhs, (RESTYPE)result, bitwidth); \
  return rv_int_op_finish(lhs, rhs, result, bitwidth); \
}

OP_MULH_FN(_mulhu , sv_reg_t, sv_reg_t, sv_reg_t, uint64_t, uint64_t, uint64_t )
OP_MULH_FN(_mulhsu, sv_sreg_t, sv_reg_t, sv_sreg_t, int64_t, uint64_t, int64_t )
OP_MULH_FN(_mulh , sv_sreg_t, sv_sreg_t, sv_sreg_t, int64_t, int64_t, int64_t )

/* 64-bit mulh/mulhu/mulhsu */
/* here is slightly different from other macros, because if either
   src register is 64-bit it's necessary to use the 64-bit mulhu
   function. then, if the result is <64bit it's truncated.
   only when both source registers are <= 32 bit can we use
   the 32-bit rv_mulh.
*/
#define OP_M64_FN( fname, SLHSTYPE, SRHSTYPE, SRESTYPE, \
                   LHSTYPE, RHSTYPE, RESTYPE ) \
SRESTYPE sv_proc_t::fname (SLHSTYPE const & lhs, SRHSTYPE const & rhs) \
{ \
  uint8_t bitwidth = _insn->src_bitwidth; \
  LHSTYPE vlhs = 0; \
  RHSTYPE vrhs = 0; \
  if (rv_int_op_prepare(lhs, rhs, vlhs, vrhs, bitwidth) || \
      lhs.get_elwidth() == 0 || rhs.get_elwidth() == 0 ) { \
    RESTYPE result = ::fname(vlhs, vrhs); \
    fprintf(stderr, "%s result %lx %lx %lx\n", \
            xstr(fname), (LHSTYPE)lhs, (RHSTYPE)rhs, (RESTYPE)result); \
    return rv_int_op_finish(lhs, rhs, result, bitwidth); \
  } \
  return rv_##fname(lhs, rhs); \
}

OP_M64_FN( mulhu , sv_reg_t, sv_reg_t, sv_reg_t, uint64_t, uint64_t, uint64_t )
OP_M64_FN( mulhsu, sv_sreg_t, sv_reg_t, sv_sreg_t, int64_t, uint64_t, int64_t )
OP_M64_FN( mulh , sv_sreg_t, sv_sreg_t, sv_sreg_t, int64_t, int64_t, int64_t )

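// OP_SHF_FN is the shift variant: the shift amount is masked to
// dflt_bitwidth-1 on the full-width fast path and to bitwidth-1 when a
// narrower element width is in effect, mirroring RISC-V's rule that only the
// low log2(XLEN) bits of the shift amount are used.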
#define OP_SHF_FN( fname, SLHSTYPE, SRHSTYPE, SRESTYPE, \
                   LHSTYPE, RHSTYPE, RESTYPE ) \
SRESTYPE sv_proc_t::rv##fname (SLHSTYPE const & lhs, SRHSTYPE const & rhs, \
                               unsigned int dflt_bitwidth) \
{ \
  uint8_t bitwidth = _insn->src_bitwidth; \
  LHSTYPE vlhs = 0; \
  RHSTYPE vrhs = 0; \
  if (rv_int_op_prepare(lhs, rhs, vlhs, vrhs, bitwidth)) { \
    RESTYPE result = lhs fname rv_and(rhs, sv_reg_t(dflt_bitwidth-1U)); \
    fprintf(stderr, "%s result %lx %lx %lx\n", \
            xstr(fname), (LHSTYPE)lhs, (RHSTYPE)rhs, (RESTYPE)result); \
    return SRESTYPE(result); \
  } \
  RESTYPE result = vlhs fname (vrhs & (bitwidth-1)); \
  fprintf(stderr, "%s result %lx %lx %lx bw %d\n", \
          xstr(fname), (LHSTYPE)lhs, (RHSTYPE)rhs, (RESTYPE)result, bitwidth); \
  return rv_int_op_finish(lhs, rhs, result, bitwidth); \
}

#define _sl <<
#define _sr >>

OP_SHF_FN ( _sl, sv_reg_t, sv_reg_t, sv_reg_t, uint64_t, uint64_t, uint64_t )
OP_SHF_FN ( _sr, sv_reg_t, sv_reg_t, sv_reg_t, uint64_t, uint64_t, uint64_t )

#define lt <
#define gt >
#define ge >=
#define le <=
#define eq ==
#define ne !=

#define OP_BOOL_FN( fname, SLHSTYPE, SRHSTYPE, \
                    LHSTYPE, RHSTYPE ) \
bool sv_proc_t::rv_##fname (SLHSTYPE const & lhs, SRHSTYPE const & rhs) \
{ \
  uint8_t bitwidth = _insn->src_bitwidth; \
  LHSTYPE vlhs = 0; \
  RHSTYPE vrhs = 0; \
  if (rv_int_op_prepare(lhs, rhs, vlhs, vrhs, bitwidth)) { \
    bool result = lhs fname rhs; \
    fprintf(stderr, "%s result %lx %lx %x\n", \
            xstr(fname), (LHSTYPE)lhs, (RHSTYPE)rhs, result); \
    return result; \
  } \
  bool result = vlhs fname vrhs; \
  fprintf(stderr, "%s result %lx %lx %d bw %d\n", \
          xstr(fname), (LHSTYPE)lhs, (RHSTYPE)rhs, result, bitwidth); \
  return result; \
}

OP_BOOL_FN( lt, sv_reg_t, sv_reg_t, uint64_t, uint64_t )
OP_BOOL_FN( lt, sv_sreg_t, sv_sreg_t, int64_t, int64_t )
OP_BOOL_FN( gt, sv_reg_t, sv_reg_t, uint64_t, uint64_t )
OP_BOOL_FN( gt, sv_sreg_t, sv_sreg_t, int64_t, int64_t )
OP_BOOL_FN( ge, sv_reg_t, sv_reg_t, uint64_t, uint64_t )
OP_BOOL_FN( ge, sv_sreg_t, sv_sreg_t, int64_t, int64_t )
OP_BOOL_FN( eq, sv_reg_t, sv_reg_t, uint64_t, uint64_t )
OP_BOOL_FN( ne, sv_reg_t, sv_reg_t, uint64_t, uint64_t )

// ----

sv_sreg_t sv_proc_t::sv_reg_to_sreg(sv_reg_t const& v)
{
  uint64_t x = v;
  return sv_sreg_t(sreg_t(x));
}

sv_reg_t sv_proc_t::sv_reg_int32(sv_reg_t const& v)
{
  int32_t x = (int32_t)v;
  return sv_reg_t((uint64_t)x);
}

sv_reg_t sv_proc_t::sv_reg_uint32(sv_reg_t const& v)
{
  uint32_t x = (uint32_t)v;
  return sv_reg_t((uint64_t)x);
}

// ----

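// The f32/f64/f128 helpers below unpack an SV-tagged register into a
// softfloat value, widening as needed: a 16-bit element is treated as an f16
// and up-converted, a 32-bit element as an f32, and so on.  8-bit FP
// elements are rejected for now (trap), matching the write side above.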
sv_float32_t (sv_proc_t::f32)(sv_freg_t x)
{
  freg_t v = (freg_t)x;
  switch (x.get_elwidth())
  {
    // 8-bit
    case 1: throw trap_illegal_instruction(0); // XXX for now
    // 16-bit data, up-convert to f32
    case 2:
    {
      sv_reg_t x32(x.to_uint32());
      float16_t f_16 = f16(x);
      fprintf(stderr, "f16-to-f32 %lx\n", (uint64_t)x32);
      return f16_to_f32(f_16);
    }
    case 3:
    {
      return sv_float32_t(::f32(x.to_uint32()));
    }
    default: break;
  }
  sv_float32_t value = ::f32(v);
  fprintf(stderr, "::f32 %lx %lx %x\n", v.v[0], v.v[1], ((float32_t)value).v);
  return value;
}

sv_float32_t (sv_proc_t::f32)(sv_reg_t const& v)
{
  uint64_t x = v;
  fprintf(stderr, "::f32 %lx\n", x);
  switch (v.get_elwidth())
  {
    // 8-bit
    case 1: throw trap_illegal_instruction(0); // XXX for now
    // 16-bit data, up-convert to f32
    case 2:
      fprintf(stderr, "f16-to-f32\n");
      return f16_to_f32(f16(x));
    // 0 and 3 are 32-bit
    default: break;
  }
  return ::f32(x);
}

sv_float64_t (sv_proc_t::f64)(sv_freg_t x)
{
  switch (x.get_elwidth())
  {
    // 8-bit
    case 1: throw trap_illegal_instruction(0); // XXX for now
    // 16-bit data, up-convert to f64
    case 2:
    {
      sv_reg_t x64(x.to_uint64());
      float16_t f_16 = f16(x);
      fprintf(stderr, "f16-to-f64 %lx\n", (uint64_t)x64);
      return f16_to_f64(f_16);
    }
    // 32-bit data, up-convert to f64
    case 3:
    {
      sv_reg_t x64(x.to_uint64());
      float32_t f_32 = f32(x);
      fprintf(stderr, "f32-to-f64 %lx\n", (uint64_t)x64);
      return f32_to_f64(f_32);
    }
    default: break;
  }
  return ::f64(x);
}

sv_float64_t (sv_proc_t::f64)(sv_reg_t const& v)
{
  uint64_t x = v;
  switch (v.get_elwidth())
  {
    // 8-bit
    case 1: throw trap_illegal_instruction(0); // XXX for now
    // 16-bit data, up-convert to f64
    case 2:
    {
      float16_t f_16 = f16(x);
      fprintf(stderr, "f16-to-f64 %lx\n", x);
      return f16_to_f64(f_16);
    }
    // 32-bit data, up-convert to f64
    case 3:
    {
      float32_t f_32 = f32(x);
      fprintf(stderr, "f32-to-f64 %lx\n", x);
      return f32_to_f64(f_32);
    }
    default: break;
  }
  return ::f64(x);
}

sv_float128_t sv_proc_t::f128( sv_freg_t v)
{
  uint64_t x = ((float128_t)v).v[0];
  switch (v.get_elwidth())
  {
    // 8-bit
    case 1: throw trap_illegal_instruction(0); // XXX for now
    // 16-bit data, up-convert to f128
    case 2:
    {
      float16_t f_16 = f16(v);
      fprintf(stderr, "f16-to-f128 %lx\n", x);
      return f16_to_f128(f_16);
    }
    // 32-bit data, up-convert to f128
    case 3:
    {
      float32_t f_32 = f32(v);
      fprintf(stderr, "f32-to-f128 %lx\n", x);
      return f32_to_f128(f_32);
    }
    default: break;
  }
  return ::f128(v);
}

// ----

sv_reg_t (sv_proc_t::f32_classify)(sv_float32_t a)
{
  return sv_reg_t(::f32_classify(a));
}

sv_reg_t (sv_proc_t::f64_classify)(sv_float64_t a)
{
  return sv_reg_t(::f64_classify(a));
}

sv_reg_t (sv_proc_t::f128_classify)(sv_float128_t a)
{
  return sv_reg_t(::f128_classify(a));
}

sv_float32_t (sv_proc_t::i32_to_f32)( sv_reg_t const& v )
{
  uint64_t x = v;
  return ::i32_to_f32(x);
}

sv_float64_t (sv_proc_t::ui32_to_f64)( sv_reg_t const& v )
{
  uint64_t x = v;
  return ::ui32_to_f64(x);
}

sv_float64_t (sv_proc_t::i32_to_f64)( sv_reg_t const& v )
{
  uint64_t x = v;
  return ::i32_to_f64(x);
}

sv_float32_t (sv_proc_t::ui32_to_f32)( sv_reg_t const& v )
{
  uint64_t x = v;
  return ::ui32_to_f32(x);
}

sv_float32_t (sv_proc_t::i64_to_f32)( sv_reg_t const& v )
{
  uint64_t x = v;
  return ::i64_to_f32(x);
}

sv_float32_t (sv_proc_t::ui64_to_f32)( sv_reg_t const& v )
{
  uint64_t x = v;
  return ::ui64_to_f32(x);
}

sv_float64_t (sv_proc_t::i64_to_f64)( sv_reg_t const& v )
{
  uint64_t x = v;
  return ::i64_to_f64(x);
}

sv_float64_t (sv_proc_t::ui64_to_f64)( sv_reg_t const& v )
{
  uint64_t x = v;
  return ::ui64_to_f64(x);
}

sv_float128_t (sv_proc_t::ui64_to_f128)( sv_reg_t const& v )
{
  uint64_t x = v;
  return ::ui64_to_f128(x);
}

sv_float128_t (sv_proc_t::i64_to_f128)( sv_reg_t const& v )
{
  uint64_t x = v;
  return ::i64_to_f128(x);
}

sv_float128_t (sv_proc_t::i32_to_f128)( sv_reg_t const& v )
{
  uint64_t x = v;
  return ::i32_to_f128(x);
}

sv_float128_t (sv_proc_t::ui32_to_f128)( sv_reg_t const& v )
{
  uint64_t x = v;
  return ::ui32_to_f128(x);
}

sv_reg_t (sv_proc_t::f32_to_ui32)( sv_float32_t a, uint_fast8_t roundingMode,
                                   bool exact )
{
  return sv_reg_t(::f32_to_ui32(a, roundingMode, exact));
}

sv_reg_t (sv_proc_t::f32_to_i32)( sv_float32_t a, uint_fast8_t roundingMode,
                                  bool exact )
{
  return sv_reg_t(::f32_to_i32(a, roundingMode, exact));
}

sv_reg_t (sv_proc_t::f32_to_i64)( sv_float32_t a, uint_fast8_t roundingMode,
                                  bool exact )
{
  return sv_reg_t(::f32_to_i64(a, roundingMode, exact));
}

sv_reg_t (sv_proc_t::f32_to_ui64)( sv_float32_t a, uint_fast8_t roundingMode,
                                   bool exact )
{
  return sv_reg_t(::f32_to_ui64(a, roundingMode, exact));
}

sv_reg_t (sv_proc_t::f64_to_ui32)( sv_float64_t a, uint_fast8_t roundingMode,
                                   bool exact )
{
  return sv_reg_t(::f64_to_ui32(a, roundingMode, exact));
}

sv_reg_t (sv_proc_t::f64_to_i32)( sv_float64_t a, uint_fast8_t roundingMode,
                                  bool exact )
{
  return sv_reg_t(::f64_to_i32(a, roundingMode, exact));
}

sv_reg_t (sv_proc_t::f64_to_i64)( sv_float64_t a, uint_fast8_t roundingMode,
                                  bool exact )
{
  return sv_reg_t(::f64_to_i64(a, roundingMode, exact));
}

sv_reg_t (sv_proc_t::f64_to_ui64)( sv_float64_t a, uint_fast8_t roundingMode,
                                   bool exact )
{
  return sv_reg_t(::f64_to_ui64(a, roundingMode, exact));
}

sv_reg_t (sv_proc_t::f128_to_ui64)( sv_float128_t a, uint_fast8_t roundingMode,
                                    bool exact )
{
  return sv_reg_t(::f128_to_ui64(a, roundingMode, exact));
}

sv_reg_t (sv_proc_t::f128_to_ui32)( sv_float128_t a, uint_fast8_t roundingMode,
                                    bool exact )
{
  return sv_reg_t(::f128_to_ui32(a, roundingMode, exact));
}

sv_reg_t (sv_proc_t::f128_to_i32)( sv_float128_t a, uint_fast8_t roundingMode,
                                   bool exact )
{
  return sv_reg_t(::f128_to_i32(a, roundingMode, exact));
}

sv_reg_t (sv_proc_t::f128_to_i64)( sv_float128_t a, uint_fast8_t roundingMode,
                                   bool exact )
{
  return sv_reg_t(::f128_to_i64(a, roundingMode, exact));
}

// --------

sv_float64_t (sv_proc_t::f64_add)( sv_float64_t a, sv_float64_t b )
{
  return ::f64_add(a, b);
}

sv_float64_t (sv_proc_t::f64_sub)( sv_float64_t a, sv_float64_t b )
{
  return ::f64_sub(a, b);
}

sv_float64_t (sv_proc_t::f64_mul)( sv_float64_t a, sv_float64_t b )
{
  return ::f64_mul(a, b);
}

sv_float64_t (sv_proc_t::f64_mulAdd)( sv_float64_t a, sv_float64_t b , sv_float64_t c)
{
  return ::f64_mulAdd(a, b, c);
}

sv_float64_t (sv_proc_t::f64_div)( sv_float64_t a, sv_float64_t b )
{
  return ::f64_div(a, b);
}

sv_float64_t (sv_proc_t::f64_rem)( sv_float64_t a, sv_float64_t b )
{
  return ::f64_rem(a, b);
}

sv_float64_t (sv_proc_t::f64_sqrt)( sv_float64_t a )
{
  return ::f64_sqrt(a);
}

bool (sv_proc_t::f64_eq)( sv_float64_t a, sv_float64_t b )
{
  return ::f64_eq(a, b);
}

bool (sv_proc_t::f64_le)( sv_float64_t a, sv_float64_t b )
{
  return ::f64_le(a, b);
}

bool (sv_proc_t::f64_lt)( sv_float64_t a, sv_float64_t b )
{
  return ::f64_lt(a, b);
}

bool (sv_proc_t::f64_eq_signaling)( sv_float64_t a, sv_float64_t b )
{
  return ::f64_eq_signaling(a, b);
}

bool (sv_proc_t::f64_le_quiet)( sv_float64_t a, sv_float64_t b )
{
  return ::f64_le_quiet(a, b);
}

bool (sv_proc_t::f64_lt_quiet)( sv_float64_t a, sv_float64_t b )
{
  return ::f64_lt_quiet(a, b);
}


// --------

sv_float32_t (sv_proc_t::f32_add)( sv_float32_t a, sv_float32_t b )
{
  reg_t reg = _insn->rd().reg;
  uint8_t dest_elwidth = _insn->reg_elwidth(reg, false);
  //uint8_t reswidth = maxelwidth(a.get_elwidth(), b.get_elwidth());
  //return sv_float32_t(::f32_add(a, b), xlen, reswidth);
  sv_float32_t value = ::f32_add(a, b);
  fprintf(stderr, "f32_add a %x b %x sv_float32_t %x\n",
          ((float32_t)a).v, ((float32_t)b).v, ((float32_t)value).v);
  return value;
}

sv_float32_t (sv_proc_t::f32_sub)( sv_float32_t a, sv_float32_t b )
{
  return ::f32_sub(a, b);
}

sv_float32_t (sv_proc_t::f32_mul)( sv_float32_t a, sv_float32_t b )
{
  return ::f32_mul(a, b);
}

sv_float32_t (sv_proc_t::f32_mulAdd)( sv_float32_t a, sv_float32_t b , sv_float32_t c)
{
  return ::f32_mulAdd(a, b, c);
}

sv_float32_t (sv_proc_t::f32_div)( sv_float32_t a, sv_float32_t b )
{
  return ::f32_div(a, b);
}

sv_float32_t (sv_proc_t::f32_rem)( sv_float32_t a, sv_float32_t b )
{
  return ::f32_rem(a, b);
}

sv_float32_t (sv_proc_t::f32_sqrt)( sv_float32_t a )
{
  return ::f32_sqrt(a);
}

bool (sv_proc_t::f32_eq)( sv_float32_t a, sv_float32_t b )
{
  return ::f32_eq(a, b);
}

bool (sv_proc_t::f32_le)( sv_float32_t a, sv_float32_t b )
{
  return ::f32_le(a, b);
}

bool (sv_proc_t::f32_lt)( sv_float32_t a, sv_float32_t b )
{
  return ::f32_lt(a, b);
}

bool (sv_proc_t::f32_eq_signaling)( sv_float32_t a, sv_float32_t b )
{
  return ::f32_eq_signaling(a, b);
}

bool (sv_proc_t::f32_le_quiet)( sv_float32_t a, sv_float32_t b )
{
  return ::f32_le_quiet(a, b);
}

bool (sv_proc_t::f32_lt_quiet)( sv_float32_t a, sv_float32_t b )
{
  return ::f32_lt_quiet(a, b);
}


// --------

sv_float128_t (sv_proc_t::f128_add)( sv_float128_t a, sv_float128_t b )
{
  return ::f128_add(a, b);
}

sv_float128_t (sv_proc_t::f128_sub)( sv_float128_t a, sv_float128_t b )
{
  return ::f128_sub(a, b);
}

sv_float128_t (sv_proc_t::f128_mul)( sv_float128_t a, sv_float128_t b )
{
  return ::f128_mul(a, b);
}

sv_float128_t (sv_proc_t::f128_mulAdd)( sv_float128_t a, sv_float128_t b , sv_float128_t c)
{
  return ::f128_mulAdd(a, b, c);
}

sv_float128_t (sv_proc_t::f128_div)( sv_float128_t a, sv_float128_t b )
{
  return ::f128_div(a, b);
}

sv_float128_t (sv_proc_t::f128_rem)( sv_float128_t a, sv_float128_t b )
{
  return ::f128_rem(a, b);
}

sv_float128_t (sv_proc_t::f128_sqrt)( sv_float128_t a )
{
  return ::f128_sqrt(a);
}

bool (sv_proc_t::f128_eq)( sv_float128_t a, sv_float128_t b )
{
  return ::f128_eq(a, b);
}

bool (sv_proc_t::f128_le)( sv_float128_t a, sv_float128_t b )
{
  return ::f128_le(a, b);
}

bool (sv_proc_t::f128_lt)( sv_float128_t a, sv_float128_t b )
{
  return ::f128_lt(a, b);
}

bool (sv_proc_t::f128_eq_signaling)( sv_float128_t a, sv_float128_t b )
{
  return ::f128_eq_signaling(a, b);
}

bool (sv_proc_t::f128_le_quiet)( sv_float128_t a, sv_float128_t b )
{
  return ::f128_le_quiet(a, b);
}

bool (sv_proc_t::f128_lt_quiet)( sv_float128_t a, sv_float128_t b )
{
  return ::f128_lt_quiet(a, b);
}

sv_freg_t sv_proc_t::fsgnj128(sv_freg_t a, sv_freg_t b, bool n, bool x)
{
  return sv_freg_t(::fsgnj128(a, b, n, x), a.get_xlen(), a.get_elwidth());
}

sv_float128_t sv_proc_t::f32_to_f128( sv_float32_t a)
{
  return ::f32_to_f128(a);
}

sv_float128_t sv_proc_t::f64_to_f128( sv_float64_t a)
{
  return ::f64_to_f128(a);
}

//-----

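// mmu_load/mmu_store read the base register in addr_mode (see READ_REG), so
// for vectorised accesses the element index becomes a byte offset added to
// the base address, and the access width itself is narrowed to the source
// register's element width.  As a sketch, a load with a 16-bit element width
// and element index 3 would read 16 bits from base + offs + 6 (ignoring
// SUBVL and any remap shape) rather than a full xlen-wide word.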
sv_reg_t sv_proc_t::mmu_load(reg_spec_t const& spec, sv_reg_t const& offs,
                             size_t width, bool ext)
{
  // okaay, so a different "mode" applies, here: addr_mode.
  // addr_mode doesn't truncate the register to elwidth-specified
  // bitsize, it adds a modulo-offset based on the current VL loop index
  reg_t reg = READ_REG(spec, true, width);
  sv_reg_t addr = sv_reg_t((uint64_t)reg + (int64_t)offs);
  sv_reg_t v(0);

  // now that the address has been moved on by the modulo-offset,
  // get only an elwidth-sized element (if not "default")
  uint8_t rwidth = _insn->reg_elwidth(spec.reg, true);
  width = get_bitwidth(rwidth, width);
  fprintf(stderr, "mmu_load wid %ld reg %lx offs %lx\n",
          width, (uint64_t)reg, (int64_t)offs);
  switch (width)
  {
    case 8:
      if (ext) v = p->get_mmu()->load_uint8(addr);
      else v = p->get_mmu()->load_int8(addr);
      break;
    case 16:
      if (ext) v = p->get_mmu()->load_uint16(addr);
      else v = p->get_mmu()->load_int16(addr);
      break;
    case 32:
      if (ext) v = p->get_mmu()->load_uint32(addr);
      else v = p->get_mmu()->load_int32(addr);
      break;
    case 64:
      if (ext) v = p->get_mmu()->load_uint64(addr);
      else v = p->get_mmu()->load_int64(addr);
      break;
  }
  fprintf(stderr, "mmu_load wid %ld reg %lx offs %lx loaded %lx\n",
          width, (uint64_t)reg, (int64_t)offs, (uint64_t)v);
  v.set_elwidth(rwidth);
  v.set_xlen(xlen);
  return v;
}

void sv_proc_t::mmu_store(reg_spec_t const& spec, sv_reg_t const& offs,
                          size_t width, reg_t val)
{
  // different "mode" applies, here: addr_mode.
  reg_t reg = READ_REG(spec, true, width);
  sv_reg_t addr = sv_reg_t((uint64_t)reg + (int64_t)offs);

  // now that the address has been moved on by the modulo-offset,
  // get only an elwidth-sized element (if not "default")
  width = get_bitwidth(_insn->reg_elwidth(spec.reg, true), width);
  fprintf(stderr, "mmu_store wid %ld reg %lx offs %lx stored %lx\n",
          width, (uint64_t)reg, (int64_t)offs, (uint64_t)val);
  switch (width)
  {
    case 8:
      p->get_mmu()->store_uint8(addr, val);
      break;
    case 16:
      p->get_mmu()->store_uint16(addr, val);
      break;
    case 32:
      p->get_mmu()->store_uint32(addr, val);
      break;
    case 64:
      p->get_mmu()->store_uint64(addr, val);
      break;
  }
}

// ------

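// to_uint32/to_uint64 extract the raw bit pattern of an FP value for
// integer-side consumers; for 16-bit elements the f32 payload is first
// re-packed as f16 bits, so what lands in the integer register is the
// element-width representation, not the widened one.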
sv_reg_t sv_proc_t::to_uint64(sv_freg_t const& reg)
{
  return sv_reg_t(reg.to_uint64());
}

sv_reg_t sv_proc_t::to_uint32(sv_freg_t const& reg)
{
  switch (reg.get_elwidth())
  {
    // 8-bit
    case 1: throw trap_illegal_instruction(0); // XXX for now
    // 16-bit data: repack the f32 bits as f16 before handing them on
    case 2:
    {
      float32_t x32 = ::f32(reg.to_uint32());
      return sv_reg_t(f32_to_f16(x32).v, xlen, reg.get_elwidth());
    }
    // 0 and 3 are 32-bit
    default: break;
  }
  return sv_reg_t(reg.to_uint32(), xlen, reg.get_elwidth());
}

sv_reg_t sv_proc_t::to_uint32(sv_float32_t const& reg)
{
  switch (reg.get_elwidth())
  {
    // 8-bit
    case 1: throw trap_illegal_instruction(0); // XXX for now
    // 16-bit data: repack the f32 bits as f16 before handing them on
    case 2:
    {
      float32_t x32 = ::f32(reg.to_uint32());
      return sv_reg_t(f32_to_f16(x32).v, xlen, reg.get_elwidth());
    }
    // 0 and 3 are 32-bit
    default: break;
  }
  return sv_reg_t(reg.to_uint32(), xlen, reg.get_elwidth());
}

// ------

uint64_t sv_freg_t::to_uint64() const&
{
  return reg.v[0];
}

uint32_t sv_freg_t::to_uint32() const&
{
  return reg.v[0];
}

uint32_t sv_float32_t::to_uint32() const&
{
  return reg.v;
}