1 # SPDX-License-Identifier: LGPLv3+
2 # Copyright (C) 2020, 2021 Luke Kenneth Casson Leighton <lkcl@lkcl.net>
3 # Copyright (C) 2020 Michael Nolan
4 # Funded by NLnet http://nlnet.nl
5 """core of the python-based POWER9 simulator
7 this is part of a cycle-accurate POWER9 simulator. its primary purpose is
8 not speed, it is for both learning and educational purposes, as well as
9 a method of verifying the HDL.
13 * https://bugs.libre-soc.org/show_bug.cgi?id=424
16 from collections
import namedtuple
17 from copy
import deepcopy
18 from functools
import wraps
22 from openpower
.syscalls
import ppc_flags
24 from elftools
.elf
.elffile
import ELFFile
# for isinstance
26 from nmigen
.sim
import Settle
27 import openpower
.syscalls
28 from openpower
.consts
import (MSRb
, PIb
, # big-endian (PowerISA versions)
29 SVP64CROffs
, SVP64MODEb
)
30 from openpower
.decoder
.helpers
import (ISACallerHelper
, ISAFPHelpers
, exts
,
31 gtu
, undefined
, copy_assign_rhs
)
32 from openpower
.decoder
.isa
.mem
import Mem
, MemMMap
, MemException
, LoadedELF
33 from openpower
.decoder
.isa
.radixmmu
import RADIX
34 from openpower
.decoder
.isa
.svshape
import SVSHAPE
35 from openpower
.decoder
.isa
.svstate
import SVP64State
36 from openpower
.decoder
.orderedset
import OrderedSet
37 from openpower
.decoder
.power_enums
import (FPTRANS_INSNS
, CRInSel
, CROutSel
,
38 In1Sel
, In2Sel
, In3Sel
, LDSTMode
,
39 MicrOp
, OutSel
, SVMode
,
40 SVP64LDSTmode
, SVP64PredCR
,
41 SVP64PredInt
, SVP64PredMode
,
42 SVP64RMMode
, SVPType
, XER_bits
,
43 insns
, spr_byname
, spr_dict
,
45 from openpower
.insndb
.core
import SVP64Instruction
46 from openpower
.decoder
.power_svp64
import SVP64RM
, decode_extra
47 from openpower
.decoder
.selectable_int
import (FieldSelectableInt
,
48 SelectableInt
, selectconcat
,
49 EFFECTIVELY_UNLIMITED
)
50 from openpower
.consts
import DEFAULT_MSR
51 from openpower
.fpscr
import FPSCRState
52 from openpower
.xer
import XERState
53 from openpower
.util
import LogType
, log
55 LDST_UPDATE_INSNS
= ['ldu', 'lwzu', 'lbzu', 'lhzu', 'lhau', 'lfsu', 'lfdu',
56 'stwu', 'stbu', 'sthu', 'stfsu', 'stfdu', 'stdu',
# named record bundling a decoded instruction's executable function with
# its register-usage and form metadata
instruction_info = namedtuple('instruction_info',
                              ['func', 'read_regs', 'uninit_regs',
                               'write_regs', 'special_regs', 'op_fields',
                               'form', 'asmregs'])
72 # rrright. this is here basically because the compiler pywriter returns
73 # results in a specific priority order. to make sure regs match up they
74 # need partial sorting. sigh.
76 # TODO (lkcl): adjust other registers that should be in a particular order
77 # probably CA, CA32, and CR
105 "overflow": 7, # should definitely be last
# floating-point register operand names recognised by the decoder
fregs = 'FRA FRB FRC FRS FRT'.split()
def get_masked_reg(regs, base, offs, ew_bits):
    """Read one `ew_bits`-wide element out of a 64-bit register file.

    `regs` is indexable by register number; `base` selects the first
    64-bit register of the group and `offs` is the element offset within
    the group, counted in `ew_bits`-sized elements.  Returns the element
    as a plain int.
    """
    # rrrright. start by breaking down into row/col, based on elwidth
    gpr_offs = offs // (64 // ew_bits)
    gpr_col = offs % (64 // ew_bits)
    # compute the mask based on ew_bits
    mask = (1 << ew_bits) - 1
    # now select the 64-bit register, but get its value (easier)
    val = regs[base + gpr_offs]
    # shift down so element we want is at LSB
    val >>= gpr_col * ew_bits
    # mask so we only return the LSB element
    # (fix: the function previously fell through here and returned None)
    return val & mask
def set_masked_reg(regs, base, offs, ew_bits, value):
    """Write one `ew_bits`-wide element into a 64-bit register file,
    leaving the other elements packed in the same 64-bit register
    untouched.  Mirror of get_masked_reg.
    """
    # rrrright. start by breaking down into row/col, based on elwidth
    gpr_offs = offs // (64 // ew_bits)
    gpr_col = offs % (64 // ew_bits)
    # compute the mask based on ew_bits
    mask = (1 << ew_bits) - 1
    # now select the 64-bit register, but get its value (easier)
    val = regs[base + gpr_offs]
    # now mask out the bits we don't want from the current value
    val = val & ~(mask << (gpr_col * ew_bits))
    # then wipe the unwanted bits from the new value (fix: without this
    # truncation an oversized `value` would corrupt neighbouring elements)
    value = value & mask
    # OR the new value in, shifted up
    val |= value << (gpr_col * ew_bits)
    regs[base + gpr_offs] = val
def create_args(reglist, extra=None):
    """Deduplicate `reglist` and order it by REG_SORT_ORDER priority
    (names missing from the table sort with priority 0), optionally
    prepending `extra`.  Returns a list.
    """
    retval = list(OrderedSet(reglist))
    retval.sort(key=lambda reg: REG_SORT_ORDER.get(reg, 0))
    if extra is not None:
        return [extra] + retval
    # fix: the no-`extra` path previously fell through and returned None
    return retval
151 def create_full_args(*, read_regs
, special_regs
, uninit_regs
, write_regs
,
154 *read_regs
, *uninit_regs
, *write_regs
, *special_regs
], extra
=extra
)
def is_ffirst_mode(dec2):
    """Generator helper: read the SVP64 RM mode field from the decoder
    and report whether it selects fail-first mode.
    """
    mode = yield dec2.rm_dec.mode
    return mode == SVP64RMMode.FFIRST.value
163 def __init__(self
, decoder
, isacaller
, svstate
, regfile
):
166 self
.isacaller
= isacaller
167 self
.svstate
= svstate
168 for i
in range(len(regfile
)):
169 self
[i
] = SelectableInt(regfile
[i
], 64)
171 def __call__(self
, ridx
, is_vec
=False, offs
=0, elwidth
=64):
172 if isinstance(ridx
, SelectableInt
):
175 return self
[ridx
+offs
]
176 # rrrright. start by breaking down into row/col, based on elwidth
177 gpr_offs
= offs
// (64//elwidth
)
178 gpr_col
= offs
% (64//elwidth
)
179 # now select the 64-bit register, but get its value (easier)
180 val
= self
[ridx
+gpr_offs
].value
181 # now shift down and mask out
182 val
= val
>> (gpr_col
*elwidth
) & ((1 << elwidth
)-1)
183 # finally, return a SelectableInt at the required elwidth
184 log("GPR call", ridx
, "isvec", is_vec
, "offs", offs
,
185 "elwid", elwidth
, "offs/col", gpr_offs
, gpr_col
, "val", hex(val
))
186 return SelectableInt(val
, elwidth
)
188 def set_form(self
, form
):
191 def write(self
, rnum
, value
, is_vec
=False, elwidth
=64):
193 if isinstance(rnum
, SelectableInt
):
195 if isinstance(value
, SelectableInt
):
198 if isinstance(rnum
, tuple):
199 rnum
, base
, offs
= rnum
202 # rrrright. start by breaking down into row/col, based on elwidth
203 gpr_offs
= offs
// (64//elwidth
)
204 gpr_col
= offs
% (64//elwidth
)
205 # compute the mask based on elwidth
206 mask
= (1 << elwidth
)-1
207 # now select the 64-bit register, but get its value (easier)
208 val
= self
[base
+gpr_offs
].value
209 # now mask out the bit we don't want
210 val
= val
& ~
(mask
<< (gpr_col
*elwidth
))
211 # then wipe the bit we don't want from the value
213 # OR the new value in, shifted up
214 val |
= value
<< (gpr_col
*elwidth
)
215 # finally put the damn value into the regfile
216 log("GPR write", base
, "isvec", is_vec
, "offs", offs
,
217 "elwid", elwidth
, "offs/col", gpr_offs
, gpr_col
, "val", hex(val
),
219 dict.__setitem
__(self
, base
+gpr_offs
, SelectableInt(val
, 64))
221 def __setitem__(self
, rnum
, value
):
222 # rnum = rnum.value # only SelectableInt allowed
223 log("GPR setitem", rnum
, value
)
224 if isinstance(rnum
, SelectableInt
):
226 dict.__setitem
__(self
, rnum
, value
)
228 def getz(self
, rnum
, rvalue
=None):
229 # rnum = rnum.value # only SelectableInt allowed
230 log("GPR getzero?", rnum
, rvalue
)
231 if rvalue
is not None:
233 return SelectableInt(0, rvalue
.bits
)
236 return SelectableInt(0, 64)
239 def _get_regnum(self
, attr
):
240 getform
= self
.sd
.sigforms
[self
.form
]
241 rnum
= getattr(getform
, attr
)
    def ___getitem__(self, attr):
        """ XXX currently not used

        NOTE: the name has *three* leading underscores, so this does NOT
        override dict's __getitem__ hook.  If enabled it would look up
        the register number for form-field `attr` and return the live
        register-file value.
        """
        rnum = self._get_regnum(attr)
        log("GPR getitem", attr, rnum)
        return self.regfile[rnum]
251 def dump(self
, printout
=True):
253 for i
in range(len(self
)):
254 res
.append(self
[i
].value
)
256 for i
in range(0, len(res
), 8):
259 s
.append("%08x" % res
[i
+j
])
261 log("reg", "%2d" % i
, s
, kind
=LogType
.InstrInOuts
)
266 def __init__(self
, dec2
, initial_sprs
={}, gpr
=None):
268 self
.gpr
= gpr
# for SVSHAPE[0-3]
270 for key
, v
in initial_sprs
.items():
271 if isinstance(key
, SelectableInt
):
273 key
= special_sprs
.get(key
, key
)
274 if isinstance(key
, int):
277 info
= spr_byname
[key
]
278 if not isinstance(v
, SelectableInt
):
279 v
= SelectableInt(v
, info
.length
)
282 def __getitem__(self
, key
):
284 #log("dict", self.items())
285 # if key in special_sprs get the special spr, otherwise return key
286 if isinstance(key
, SelectableInt
):
288 if isinstance(key
, int):
289 key
= spr_dict
[key
].SPR
290 key
= special_sprs
.get(key
, key
)
291 if key
== 'HSRR0': # HACK!
293 if key
== 'HSRR1': # HACK!
296 res
= dict.__getitem
__(self
, key
)
298 if isinstance(key
, int):
301 info
= spr_byname
[key
]
302 self
[key
] = SelectableInt(0, info
.length
)
303 res
= dict.__getitem
__(self
, key
)
304 #log("spr returning", key, res)
307 def __setitem__(self
, key
, value
):
308 if isinstance(key
, SelectableInt
):
310 if isinstance(key
, int):
311 key
= spr_dict
[key
].SPR
313 key
= special_sprs
.get(key
, key
)
314 if key
== 'HSRR0': # HACK!
315 self
.__setitem
__('SRR0', value
)
316 if key
== 'HSRR1': # HACK!
317 self
.__setitem
__('SRR1', value
)
319 value
= XERState(value
)
320 if key
in ('SVSHAPE0', 'SVSHAPE1', 'SVSHAPE2', 'SVSHAPE3'):
321 value
= SVSHAPE(value
, self
.gpr
)
322 log("setting spr", key
, value
)
323 dict.__setitem
__(self
, key
, value
)
325 def __call__(self
, ridx
):
328 def dump(self
, printout
=True):
330 keys
= list(self
.keys())
333 sprname
= spr_dict
.get(k
, None)
337 sprname
= sprname
.SPR
338 res
.append((sprname
, self
[k
].value
))
340 for sprname
, value
in res
:
341 print(" ", sprname
, hex(value
))
346 def __init__(self
, pc_init
=0):
347 self
.CIA
= SelectableInt(pc_init
, 64)
348 self
.NIA
= self
.CIA
+ SelectableInt(4, 64) # only true for v3.0B!
350 def update_nia(self
, is_svp64
):
351 increment
= 8 if is_svp64
else 4
352 self
.NIA
= self
.CIA
+ SelectableInt(increment
, 64)
    def update(self, namespace, is_svp64):
        """updates the program counter (PC) by 4 if v3.0B mode or 8 if SVP64

        CIA is taken from the namespace's NIA (narrowed to 64 bits), NIA
        is recomputed via update_nia, and both are mirrored back into
        the namespace for the next instruction.
        """
        self.CIA = namespace['NIA'].narrow(64)
        self.update_nia(is_svp64)
        namespace['CIA'] = self.CIA
        namespace['NIA'] = self.NIA
364 # See PowerISA Version 3.0 B Book 1
365 # Section 2.3.1 Condition Register pages 30 - 31
367 LT
= FL
= 0 # negative, less than, floating-point less than
368 GT
= FG
= 1 # positive, greater than, floating-point greater than
369 EQ
= FE
= 2 # equal, floating-point equal
370 SO
= FU
= 3 # summary overflow, floating-point unordered
372 def __init__(self
, init
=0):
373 # rev_cr = int('{:016b}'.format(initial_cr)[::-1], 2)
374 # self.cr = FieldSelectableInt(self._cr, list(range(32, 64)))
375 self
.cr
= SelectableInt(init
, 64) # underlying reg
376 # field-selectable versions of Condition Register TODO check bitranges?
379 bits
= tuple(range(i
*4+32, (i
+1)*4+32))
380 _cr
= FieldSelectableInt(self
.cr
, bits
)
384 # decode SVP64 predicate integer to reg number and invert
385 def get_predint(gpr
, mask
):
389 log("get_predint", mask
, SVP64PredInt
.ALWAYS
.value
)
390 if mask
== SVP64PredInt
.ALWAYS
.value
:
391 return 0xffff_ffff_ffff_ffff # 64 bits of 1
392 if mask
== SVP64PredInt
.R3_UNARY
.value
:
393 return 1 << (r3
.value
& 0b111111)
394 if mask
== SVP64PredInt
.R3
.value
:
396 if mask
== SVP64PredInt
.R3_N
.value
:
398 if mask
== SVP64PredInt
.R10
.value
:
400 if mask
== SVP64PredInt
.R10_N
.value
:
402 if mask
== SVP64PredInt
.R30
.value
:
404 if mask
== SVP64PredInt
.R30_N
.value
:
408 # decode SVP64 predicate CR to reg number and invert status
409 def _get_predcr(mask
):
410 if mask
== SVP64PredCR
.LT
.value
:
412 if mask
== SVP64PredCR
.GE
.value
:
414 if mask
== SVP64PredCR
.GT
.value
:
416 if mask
== SVP64PredCR
.LE
.value
:
418 if mask
== SVP64PredCR
.EQ
.value
:
420 if mask
== SVP64PredCR
.NE
.value
:
422 if mask
== SVP64PredCR
.SO
.value
:
424 if mask
== SVP64PredCR
.NS
.value
:
428 # read individual CR fields (0..VL-1), extract the required bit
429 # and construct the mask
430 def get_predcr(crl
, predselect
, vl
):
431 idx
, noninv
= _get_predcr(predselect
)
434 cr
= crl
[i
+SVP64CROffs
.CRPred
]
435 if cr
[idx
].value
== noninv
:
437 log("get_predcr", vl
, idx
, noninv
, i
+SVP64CROffs
.CRPred
,
438 bin(cr
.asint()), cr
[idx
].value
, bin(mask
))
442 # TODO, really should just be using PowerDecoder2
443 def get_idx_map(dec2
, name
):
445 in1_sel
= yield op
.in1_sel
446 in2_sel
= yield op
.in2_sel
447 in3_sel
= yield op
.in3_sel
448 in1
= yield dec2
.e
.read_reg1
.data
449 # identify which regnames map to in1/2/3
450 if name
== 'RA' or name
== 'RA_OR_ZERO':
451 if (in1_sel
== In1Sel
.RA
.value
or
452 (in1_sel
== In1Sel
.RA_OR_ZERO
.value
and in1
!= 0)):
454 if in1_sel
== In1Sel
.RA_OR_ZERO
.value
:
457 if in2_sel
== In2Sel
.RB
.value
:
459 if in3_sel
== In3Sel
.RB
.value
:
461 # XXX TODO, RC doesn't exist yet!
463 if in3_sel
== In3Sel
.RC
.value
:
465 elif name
in ['EA', 'RS']:
466 if in1_sel
== In1Sel
.RS
.value
:
468 if in2_sel
== In2Sel
.RS
.value
:
470 if in3_sel
== In3Sel
.RS
.value
:
473 if in1_sel
== In1Sel
.FRA
.value
:
475 if in3_sel
== In3Sel
.FRA
.value
:
478 if in2_sel
== In2Sel
.FRB
.value
:
481 if in3_sel
== In3Sel
.FRC
.value
:
484 if in1_sel
== In1Sel
.FRS
.value
:
486 if in3_sel
== In3Sel
.FRS
.value
:
489 if in1_sel
== In1Sel
.FRT
.value
:
492 if in1_sel
== In1Sel
.RT
.value
:
497 # TODO, really should just be using PowerDecoder2
498 def get_idx_in(dec2
, name
, ewmode
=False):
499 idx
= yield from get_idx_map(dec2
, name
)
503 in1_sel
= yield op
.in1_sel
504 in2_sel
= yield op
.in2_sel
505 in3_sel
= yield op
.in3_sel
506 # get the IN1/2/3 from the decoder (includes SVP64 remap and isvec)
507 in1
= yield dec2
.e
.read_reg1
.data
508 in2
= yield dec2
.e
.read_reg2
.data
509 in3
= yield dec2
.e
.read_reg3
.data
511 in1_base
= yield dec2
.e
.read_reg1
.base
512 in2_base
= yield dec2
.e
.read_reg2
.base
513 in3_base
= yield dec2
.e
.read_reg3
.base
514 in1_offs
= yield dec2
.e
.read_reg1
.offs
515 in2_offs
= yield dec2
.e
.read_reg2
.offs
516 in3_offs
= yield dec2
.e
.read_reg3
.offs
517 in1
= (in1
, in1_base
, in1_offs
)
518 in2
= (in2
, in2_base
, in2_offs
)
519 in3
= (in3
, in3_base
, in3_offs
)
521 in1_isvec
= yield dec2
.in1_isvec
522 in2_isvec
= yield dec2
.in2_isvec
523 in3_isvec
= yield dec2
.in3_isvec
524 log("get_idx_in in1", name
, in1_sel
, In1Sel
.RA
.value
,
526 log("get_idx_in in2", name
, in2_sel
, In2Sel
.RB
.value
,
528 log("get_idx_in in3", name
, in3_sel
, In3Sel
.RS
.value
,
530 log("get_idx_in FRS in3", name
, in3_sel
, In3Sel
.FRS
.value
,
532 log("get_idx_in FRB in2", name
, in2_sel
, In2Sel
.FRB
.value
,
534 log("get_idx_in FRC in3", name
, in3_sel
, In3Sel
.FRC
.value
,
537 return in1
, in1_isvec
539 return in2
, in2_isvec
541 return in3
, in3_isvec
545 # TODO, really should just be using PowerDecoder2
546 def get_cr_in(dec2
, name
):
548 in_sel
= yield op
.cr_in
549 in_bitfield
= yield dec2
.dec_cr_in
.cr_bitfield
.data
550 sv_cr_in
= yield op
.sv_cr_in
551 spec
= yield dec2
.crin_svdec
.spec
552 sv_override
= yield dec2
.dec_cr_in
.sv_override
553 # get the IN1/2/3 from the decoder (includes SVP64 remap and isvec)
554 in1
= yield dec2
.e
.read_cr1
.data
555 cr_isvec
= yield dec2
.cr_in_isvec
556 log("get_cr_in", in_sel
, CROutSel
.CR0
.value
, in1
, cr_isvec
)
557 log(" sv_cr_in", sv_cr_in
)
558 log(" cr_bf", in_bitfield
)
560 log(" override", sv_override
)
561 # identify which regnames map to in / o2
563 if in_sel
== CRInSel
.BI
.value
:
566 if in_sel
== CRInSel
.BFA
.value
:
567 if name
in ['BA', 'BB']:
568 if in_sel
== CRInSel
.BA_BB
.value
:
570 if in_sel
== CRInSel
.BFA
.value
:
572 log("get_cr_in not found", name
)
576 # TODO, really should just be using PowerDecoder2
577 def get_cr_out(dec2
, name
):
579 out_sel
= yield op
.cr_out
580 out_bitfield
= yield dec2
.dec_cr_out
.cr_bitfield
.data
581 sv_cr_out
= yield op
.sv_cr_out
582 spec
= yield dec2
.crout_svdec
.spec
583 sv_override
= yield dec2
.dec_cr_out
.sv_override
584 # get the IN1/2/3 from the decoder (includes SVP64 remap and isvec)
585 out
= yield dec2
.e
.write_cr
.data
586 o_isvec
= yield dec2
.cr_out_isvec
587 log("get_cr_out", out_sel
, CROutSel
.CR0
.value
, out
, o_isvec
)
588 log(" sv_cr_out", sv_cr_out
)
589 log(" cr_bf", out_bitfield
)
591 log(" override", sv_override
)
592 # identify which regnames map to out / o2
594 if out_sel
== CROutSel
.BF
.value
:
597 if out_sel
== CROutSel
.BT
.value
:
600 if out_sel
== CROutSel
.CR0
.value
:
602 if name
== 'CR1': # these are not actually calculated correctly
603 if out_sel
== CROutSel
.CR1
.value
:
605 # check RC1 set? if so return implicit vector, this is a REAL bad hack
606 RC1
= yield dec2
.rm_dec
.RC1
608 log("get_cr_out RC1 mode")
610 return 0, True # XXX TODO: offset CR0 from SVSTATE SPR
612 return 1, True # XXX TODO: offset CR1 from SVSTATE SPR
614 log("get_cr_out not found", name
)
618 # TODO, really should just be using PowerDecoder2
619 def get_out_map(dec2
, name
):
621 out_sel
= yield op
.out_sel
622 # get the IN1/2/3 from the decoder (includes SVP64 remap and isvec)
623 out
= yield dec2
.e
.write_reg
.data
624 # identify which regnames map to out / o2
626 if out_sel
== OutSel
.RA
.value
:
629 if out_sel
== OutSel
.RT
.value
:
631 if out_sel
== OutSel
.RT_OR_ZERO
.value
and out
!= 0:
633 elif name
== 'RT_OR_ZERO':
634 if out_sel
== OutSel
.RT_OR_ZERO
.value
:
637 if out_sel
== OutSel
.FRA
.value
:
640 if out_sel
== OutSel
.FRS
.value
:
643 if out_sel
== OutSel
.FRT
.value
:
648 # TODO, really should just be using PowerDecoder2
649 def get_idx_out(dec2
, name
, ewmode
=False):
651 out_sel
= yield op
.out_sel
652 # get the IN1/2/3 from the decoder (includes SVP64 remap and isvec)
653 out
= yield dec2
.e
.write_reg
.data
654 o_isvec
= yield dec2
.o_isvec
656 offs
= yield dec2
.e
.write_reg
.offs
657 base
= yield dec2
.e
.write_reg
.base
658 out
= (out
, base
, offs
)
659 # identify which regnames map to out / o2
660 ismap
= yield from get_out_map(dec2
, name
)
662 log("get_idx_out", name
, out_sel
, out
, o_isvec
)
664 log("get_idx_out not found", name
, out_sel
, out
, o_isvec
)
668 # TODO, really should just be using PowerDecoder2
669 def get_out2_map(dec2
, name
):
670 # check first if register is activated for write
672 out_sel
= yield op
.out_sel
673 out
= yield dec2
.e
.write_ea
.data
674 out_ok
= yield dec2
.e
.write_ea
.ok
678 if name
in ['EA', 'RA']:
679 if hasattr(op
, "upd"):
680 # update mode LD/ST uses read-reg A also as an output
682 log("get_idx_out2", upd
, LDSTMode
.update
.value
,
683 out_sel
, OutSel
.RA
.value
,
685 if upd
== LDSTMode
.update
.value
:
688 fft_en
= yield dec2
.implicit_rs
690 log("get_idx_out2", out_sel
, OutSel
.RS
.value
,
694 fft_en
= yield dec2
.implicit_rs
696 log("get_idx_out2", out_sel
, OutSel
.FRS
.value
,
702 # TODO, really should just be using PowerDecoder2
703 def get_idx_out2(dec2
, name
, ewmode
=False):
704 # check first if register is activated for write
706 out_sel
= yield op
.out_sel
707 out
= yield dec2
.e
.write_ea
.data
709 offs
= yield dec2
.e
.write_ea
.offs
710 base
= yield dec2
.e
.write_ea
.base
711 out
= (out
, base
, offs
)
712 o_isvec
= yield dec2
.o2_isvec
713 ismap
= yield from get_out2_map(dec2
, name
)
715 log("get_idx_out2", name
, out_sel
, out
, o_isvec
)
721 """deals with svstate looping.
724 def __init__(self
, svstate
):
725 self
.svstate
= svstate
728 def new_iterators(self
):
729 self
.src_it
= self
.src_iterator()
730 self
.dst_it
= self
.dst_iterator()
734 self
.new_ssubstep
= 0
735 self
.new_dsubstep
= 0
736 self
.pred_dst_zero
= 0
737 self
.pred_src_zero
= 0
739 def src_iterator(self
):
740 """source-stepping iterator
742 pack
= self
.svstate
.pack
746 # pack advances subvl in *outer* loop
747 while True: # outer subvl loop
748 while True: # inner vl loop
751 srcmask
= self
.srcmask
752 srcstep
= self
.svstate
.srcstep
753 pred_src_zero
= ((1 << srcstep
) & srcmask
) != 0
754 if self
.pred_sz
or pred_src_zero
:
755 self
.pred_src_zero
= not pred_src_zero
756 log(" advance src", srcstep
, vl
,
757 self
.svstate
.ssubstep
, subvl
)
758 # yield actual substep/srcstep
759 yield (self
.svstate
.ssubstep
, srcstep
)
760 # the way yield works these could have been modified.
763 srcstep
= self
.svstate
.srcstep
764 log(" advance src check", srcstep
, vl
,
765 self
.svstate
.ssubstep
, subvl
, srcstep
== vl
-1,
766 self
.svstate
.ssubstep
== subvl
)
767 if srcstep
== vl
-1: # end-point
768 self
.svstate
.srcstep
= SelectableInt(0, 7) # reset
769 if self
.svstate
.ssubstep
== subvl
: # end-point
770 log(" advance pack stop")
772 break # exit inner loop
773 self
.svstate
.srcstep
+= SelectableInt(1, 7) # advance ss
775 if self
.svstate
.ssubstep
== subvl
: # end-point
776 self
.svstate
.ssubstep
= SelectableInt(0, 2) # reset
777 log(" advance pack stop")
779 self
.svstate
.ssubstep
+= SelectableInt(1, 2)
782 # these cannot be done as for-loops because SVSTATE may change
783 # (srcstep/substep may be modified, interrupted, subvl/vl change)
784 # but they *can* be done as while-loops as long as every SVSTATE
785 # "thing" is re-read every single time a yield gives indices
786 while True: # outer vl loop
787 while True: # inner subvl loop
790 srcmask
= self
.srcmask
791 srcstep
= self
.svstate
.srcstep
792 pred_src_zero
= ((1 << srcstep
) & srcmask
) != 0
793 if self
.pred_sz
or pred_src_zero
:
794 self
.pred_src_zero
= not pred_src_zero
795 log(" advance src", srcstep
, vl
,
796 self
.svstate
.ssubstep
, subvl
)
797 # yield actual substep/srcstep
798 yield (self
.svstate
.ssubstep
, srcstep
)
799 if self
.svstate
.ssubstep
== subvl
: # end-point
800 self
.svstate
.ssubstep
= SelectableInt(0, 2) # reset
801 break # exit inner loop
802 self
.svstate
.ssubstep
+= SelectableInt(1, 2)
804 if srcstep
== vl
-1: # end-point
805 self
.svstate
.srcstep
= SelectableInt(0, 7) # reset
808 self
.svstate
.srcstep
+= SelectableInt(1, 7) # advance srcstep
810 def dst_iterator(self
):
811 """dest-stepping iterator
813 unpack
= self
.svstate
.unpack
817 # pack advances subvl in *outer* loop
818 while True: # outer subvl loop
819 while True: # inner vl loop
822 dstmask
= self
.dstmask
823 dststep
= self
.svstate
.dststep
824 pred_dst_zero
= ((1 << dststep
) & dstmask
) != 0
825 if self
.pred_dz
or pred_dst_zero
:
826 self
.pred_dst_zero
= not pred_dst_zero
827 log(" advance dst", dststep
, vl
,
828 self
.svstate
.dsubstep
, subvl
)
829 # yield actual substep/dststep
830 yield (self
.svstate
.dsubstep
, dststep
)
831 # the way yield works these could have been modified.
833 dststep
= self
.svstate
.dststep
834 log(" advance dst check", dststep
, vl
,
835 self
.svstate
.ssubstep
, subvl
)
836 if dststep
== vl
-1: # end-point
837 self
.svstate
.dststep
= SelectableInt(0, 7) # reset
838 if self
.svstate
.dsubstep
== subvl
: # end-point
839 log(" advance unpack stop")
842 self
.svstate
.dststep
+= SelectableInt(1, 7) # advance ds
844 if self
.svstate
.dsubstep
== subvl
: # end-point
845 self
.svstate
.dsubstep
= SelectableInt(0, 2) # reset
846 log(" advance unpack stop")
848 self
.svstate
.dsubstep
+= SelectableInt(1, 2)
850 # these cannot be done as for-loops because SVSTATE may change
851 # (dststep/substep may be modified, interrupted, subvl/vl change)
852 # but they *can* be done as while-loops as long as every SVSTATE
853 # "thing" is re-read every single time a yield gives indices
854 while True: # outer vl loop
855 while True: # inner subvl loop
857 dstmask
= self
.dstmask
858 dststep
= self
.svstate
.dststep
859 pred_dst_zero
= ((1 << dststep
) & dstmask
) != 0
860 if self
.pred_dz
or pred_dst_zero
:
861 self
.pred_dst_zero
= not pred_dst_zero
862 log(" advance dst", dststep
, self
.svstate
.vl
,
863 self
.svstate
.dsubstep
, subvl
)
864 # yield actual substep/dststep
865 yield (self
.svstate
.dsubstep
, dststep
)
866 if self
.svstate
.dsubstep
== subvl
: # end-point
867 self
.svstate
.dsubstep
= SelectableInt(0, 2) # reset
869 self
.svstate
.dsubstep
+= SelectableInt(1, 2)
872 if dststep
== vl
-1: # end-point
873 self
.svstate
.dststep
= SelectableInt(0, 7) # reset
875 self
.svstate
.dststep
+= SelectableInt(1, 7) # advance dststep
877 def src_iterate(self
):
878 """source-stepping iterator
882 pack
= self
.svstate
.pack
883 unpack
= self
.svstate
.unpack
884 ssubstep
= self
.svstate
.ssubstep
885 end_ssub
= ssubstep
== subvl
886 end_src
= self
.svstate
.srcstep
== vl
-1
887 log(" pack/unpack/subvl", pack
, unpack
, subvl
,
891 srcstep
= self
.svstate
.srcstep
892 srcmask
= self
.srcmask
894 # pack advances subvl in *outer* loop
896 assert srcstep
<= vl
-1
897 end_src
= srcstep
== vl
-1
902 self
.svstate
.ssubstep
+= SelectableInt(1, 2)
906 srcstep
+= 1 # advance srcstep
907 if not self
.srcstep_skip
:
909 if ((1 << srcstep
) & srcmask
) != 0:
912 log(" sskip", bin(srcmask
), bin(1 << srcstep
))
914 # advance subvl in *inner* loop
917 assert srcstep
<= vl
-1
918 end_src
= srcstep
== vl
-1
919 if end_src
: # end-point
925 if not self
.srcstep_skip
:
927 if ((1 << srcstep
) & srcmask
) != 0:
930 log(" sskip", bin(srcmask
), bin(1 << srcstep
))
931 self
.svstate
.ssubstep
= SelectableInt(0, 2) # reset
934 self
.svstate
.ssubstep
+= SelectableInt(1, 2)
936 self
.svstate
.srcstep
= SelectableInt(srcstep
, 7)
937 log(" advance src", self
.svstate
.srcstep
, self
.svstate
.ssubstep
,
940 def dst_iterate(self
):
941 """dest step iterator
945 pack
= self
.svstate
.pack
946 unpack
= self
.svstate
.unpack
947 dsubstep
= self
.svstate
.dsubstep
948 end_dsub
= dsubstep
== subvl
949 dststep
= self
.svstate
.dststep
950 end_dst
= dststep
== vl
-1
951 dstmask
= self
.dstmask
952 log(" pack/unpack/subvl", pack
, unpack
, subvl
,
957 # unpack advances subvl in *outer* loop
959 assert dststep
<= vl
-1
960 end_dst
= dststep
== vl
-1
965 self
.svstate
.dsubstep
+= SelectableInt(1, 2)
969 dststep
+= 1 # advance dststep
970 if not self
.dststep_skip
:
972 if ((1 << dststep
) & dstmask
) != 0:
975 log(" dskip", bin(dstmask
), bin(1 << dststep
))
977 # advance subvl in *inner* loop
980 assert dststep
<= vl
-1
981 end_dst
= dststep
== vl
-1
982 if end_dst
: # end-point
988 if not self
.dststep_skip
:
990 if ((1 << dststep
) & dstmask
) != 0:
993 log(" dskip", bin(dstmask
), bin(1 << dststep
))
994 self
.svstate
.dsubstep
= SelectableInt(0, 2) # reset
997 self
.svstate
.dsubstep
+= SelectableInt(1, 2)
999 self
.svstate
.dststep
= SelectableInt(dststep
, 7)
1000 log(" advance dst", self
.svstate
.dststep
, self
.svstate
.dsubstep
,
1003 def at_loopend(self
):
1004 """tells if this is the last possible element. uses the cached values
1005 for src/dst-step and sub-steps
1008 vl
= self
.svstate
.vl
1009 srcstep
, dststep
= self
.new_srcstep
, self
.new_dststep
1010 ssubstep
, dsubstep
= self
.new_ssubstep
, self
.new_dsubstep
1011 end_ssub
= ssubstep
== subvl
1012 end_dsub
= dsubstep
== subvl
1013 if srcstep
== vl
-1 and end_ssub
:
1015 if dststep
== vl
-1 and end_dsub
:
1019 def advance_svstate_steps(self
):
1020 """ advance sub/steps. note that Pack/Unpack *INVERTS* the order.
1021 TODO when Pack/Unpack is set, substep becomes the *outer* loop
1023 self
.subvl
= yield self
.dec2
.rm_dec
.rm_in
.subvl
1024 if self
.loopend
: # huhn??
1029 def read_src_mask(self
):
1030 """read/update pred_sz and src mask
1032 # get SVSTATE VL (oh and print out some debug stuff)
1033 vl
= self
.svstate
.vl
1034 srcstep
= self
.svstate
.srcstep
1035 ssubstep
= self
.svstate
.ssubstep
1037 # get predicate mask (all 64 bits)
1038 srcmask
= 0xffff_ffff_ffff_ffff
1040 pmode
= yield self
.dec2
.rm_dec
.predmode
1041 sv_ptype
= yield self
.dec2
.dec
.op
.SV_Ptype
1042 srcpred
= yield self
.dec2
.rm_dec
.srcpred
1043 dstpred
= yield self
.dec2
.rm_dec
.dstpred
1044 pred_sz
= yield self
.dec2
.rm_dec
.pred_sz
1045 if pmode
== SVP64PredMode
.INT
.value
:
1046 srcmask
= dstmask
= get_predint(self
.gpr
, dstpred
)
1047 if sv_ptype
== SVPType
.P2
.value
:
1048 srcmask
= get_predint(self
.gpr
, srcpred
)
1049 elif pmode
== SVP64PredMode
.CR
.value
:
1050 srcmask
= dstmask
= get_predcr(self
.crl
, dstpred
, vl
)
1051 if sv_ptype
== SVPType
.P2
.value
:
1052 srcmask
= get_predcr(self
.crl
, srcpred
, vl
)
1053 # work out if the ssubsteps are completed
1054 ssubstart
= ssubstep
== 0
1055 log(" pmode", pmode
)
1056 log(" ptype", sv_ptype
)
1057 log(" srcpred", bin(srcpred
))
1058 log(" srcmask", bin(srcmask
))
1059 log(" pred_sz", bin(pred_sz
))
1060 log(" ssubstart", ssubstart
)
1062 # store all that above
1063 self
.srcstep_skip
= False
1064 self
.srcmask
= srcmask
1065 self
.pred_sz
= pred_sz
1066 self
.new_ssubstep
= ssubstep
1067 log(" new ssubstep", ssubstep
)
1068 # until the predicate mask has a "1" bit... or we run out of VL
1069 # let srcstep==VL be the indicator to move to next instruction
1071 self
.srcstep_skip
= True
1073 def read_dst_mask(self
):
1074 """same as read_src_mask - check and record everything needed
1076 # get SVSTATE VL (oh and print out some debug stuff)
1077 # yield Delay(1e-10) # make changes visible
1078 vl
= self
.svstate
.vl
1079 dststep
= self
.svstate
.dststep
1080 dsubstep
= self
.svstate
.dsubstep
1082 # get predicate mask (all 64 bits)
1083 dstmask
= 0xffff_ffff_ffff_ffff
1085 pmode
= yield self
.dec2
.rm_dec
.predmode
1086 reverse_gear
= yield self
.dec2
.rm_dec
.reverse_gear
1087 sv_ptype
= yield self
.dec2
.dec
.op
.SV_Ptype
1088 dstpred
= yield self
.dec2
.rm_dec
.dstpred
1089 pred_dz
= yield self
.dec2
.rm_dec
.pred_dz
1090 if pmode
== SVP64PredMode
.INT
.value
:
1091 dstmask
= get_predint(self
.gpr
, dstpred
)
1092 elif pmode
== SVP64PredMode
.CR
.value
:
1093 dstmask
= get_predcr(self
.crl
, dstpred
, vl
)
1094 # work out if the ssubsteps are completed
1095 dsubstart
= dsubstep
== 0
1096 log(" pmode", pmode
)
1097 log(" ptype", sv_ptype
)
1098 log(" dstpred", bin(dstpred
))
1099 log(" dstmask", bin(dstmask
))
1100 log(" pred_dz", bin(pred_dz
))
1101 log(" dsubstart", dsubstart
)
1103 self
.dststep_skip
= False
1104 self
.dstmask
= dstmask
1105 self
.pred_dz
= pred_dz
1106 self
.new_dsubstep
= dsubstep
1107 log(" new dsubstep", dsubstep
)
1109 self
.dststep_skip
= True
1111 def svstate_pre_inc(self
):
1112 """check if srcstep/dststep need to skip over masked-out predicate bits
1113 note that this is not supposed to do anything to substep,
1114 it is purely for skipping masked-out bits
1117 self
.subvl
= yield self
.dec2
.rm_dec
.rm_in
.subvl
1118 yield from self
.read_src_mask()
1119 yield from self
.read_dst_mask()
1126 srcstep
= self
.svstate
.srcstep
1127 srcmask
= self
.srcmask
1128 pred_src_zero
= self
.pred_sz
1129 vl
= self
.svstate
.vl
1130 # srcstep-skipping opportunity identified
1131 if self
.srcstep_skip
:
1132 # cannot do this with sv.bc - XXX TODO
1135 while (((1 << srcstep
) & srcmask
) == 0) and (srcstep
!= vl
):
1136 log(" sskip", bin(1 << srcstep
))
1139 # now work out if the relevant mask bits require zeroing
1141 pred_src_zero
= ((1 << srcstep
) & srcmask
) == 0
1143 # store new srcstep / dststep
1144 self
.new_srcstep
= srcstep
1145 self
.pred_src_zero
= pred_src_zero
1146 log(" new srcstep", srcstep
)
1149 # dststep-skipping opportunity identified
1150 dststep
= self
.svstate
.dststep
1151 dstmask
= self
.dstmask
1152 pred_dst_zero
= self
.pred_dz
1153 vl
= self
.svstate
.vl
1154 if self
.dststep_skip
:
1155 # cannot do this with sv.bc - XXX TODO
1158 while (((1 << dststep
) & dstmask
) == 0) and (dststep
!= vl
):
1159 log(" dskip", bin(1 << dststep
))
1162 # now work out if the relevant mask bits require zeroing
1164 pred_dst_zero
= ((1 << dststep
) & dstmask
) == 0
1166 # store new srcstep / dststep
1167 self
.new_dststep
= dststep
1168 self
.pred_dst_zero
= pred_dst_zero
1169 log(" new dststep", dststep
)
class ExitSyscallCalled(Exception):
    """Raised when the guest requests termination (exit_group syscall)
    so the simulator main loop can unwind cleanly."""
1176 class SyscallEmulator(openpower
.syscalls
.Dispatcher
):
1177 def __init__(self
, isacaller
):
1178 self
.__isacaller
= isacaller
1180 host
= os
.uname().machine
1181 bits
= (64 if (sys
.maxsize
> (2**32)) else 32)
1182 host
= openpower
.syscalls
.architecture(arch
=host
, bits
=bits
)
1184 return super().__init
__(guest
="ppc64", host
=host
)
1186 def __call__(self
, identifier
, *arguments
):
1187 (identifier
, *arguments
) = map(int, (identifier
, *arguments
))
1188 return super().__call
__(identifier
, *arguments
)
    def sys_exit_group(self, status, *rest):
        # guest performed exit_group(): flag the simulator as halted
        # *first*, then unwind via ExitSyscallCalled so the main loop
        # can observe the exit status
        self.__isacaller.halted = True
        raise ExitSyscallCalled(status)
1194 def sys_write(self
, fd
, buf
, count
, *rest
):
1196 buf
= self
.__isacaller
.mem
.get_ctypes(buf
, count
, is_write
=False)
1200 return os
.write(fd
, buf
)
1201 except OSError as e
:
    # NOTE(review): lines are missing from this extract (the try:/
    # "return -e.errno" scaffolding is incomplete) -- consult the full
    # original source before relying on this block.
    def sys_writev(self, fd, iov, iovcnt, *rest):
        """Guest writev(2): gather-write from a guest iovec array."""
        if iovcnt < 0 or iovcnt > IOV_MAX:
            return -errno.EINVAL
        # guest "struct iovec": two little-endian u64s (iov_base, iov_len)
        struct_iovec = struct.Struct("<QQ")
        iov = self.__isacaller.mem.get_ctypes(
            iov, struct_iovec.size * iovcnt, is_write=False)
        iov = list(struct_iovec.iter_unpack(iov))
        # replace each (base, len) pair with a host view of that span
        for i, iovec in enumerate(iov):
            iov_base, iov_len = iovec
            iov[i] = self.__isacaller.mem.get_ctypes(
                iov_base, iov_len, is_write=False)
        except (ValueError, MemException):
            return -errno.EFAULT
        return os.writev(fd, iov)
        except OSError as e:
    # NOTE(review): the try/except scaffolding is missing from this
    # extract -- consult the full original source.
    def sys_read(self, fd, buf, count, *rest):
        """Guest read(2): host readv() fills the guest buffer in place."""
        # writable host view of the guest span (buf, count)
        buf = self.__isacaller.mem.get_ctypes(buf, count, is_write=True)
        return os.readv(fd, [buf])
        except OSError as e:
1237 def sys_mmap(self
, addr
, length
, prot
, flags
, fd
, offset
, *rest
):
1238 return self
.__isacaller
.mem
.mmap_syscall(
1239 addr
, length
, prot
, flags
, fd
, offset
, is_mmap2
=False)
1241 def sys_mmap2(self
, addr
, length
, prot
, flags
, fd
, offset
, *rest
):
1242 return self
.__isacaller
.mem
.mmap_syscall(
1243 addr
, length
, prot
, flags
, fd
, offset
, is_mmap2
=True)
1245 def sys_brk(self
, addr
, *rest
):
1246 return self
.__isacaller
.mem
.brk_syscall(addr
)
1248 def sys_munmap(self
, addr
, length
, *rest
):
1249 return -errno
.ENOSYS
# TODO: implement
1251 def sys_mprotect(self
, addr
, length
, prot
, *rest
):
1252 return -errno
.ENOSYS
# TODO: implement
1254 def sys_pkey_mprotect(self
, addr
, length
, prot
, pkey
, *rest
):
1255 return -errno
.ENOSYS
# TODO: implement
    # NOTE(review): the try:/except scaffolding and the "-e.errno" return
    # are missing from this extract -- consult the full original source.
    def sys_openat(self, dirfd, pathname, flags, mode, *rest):
        """Guest openat(2): path is a NUL-terminated guest string."""
        path = self.__isacaller.mem.read_cstr(pathname)
        except (ValueError, MemException):
            return -errno.EFAULT
        # AT_FDCWD means "relative to current directory": plain open
        if dirfd == ppc_flags.AT_FDCWD:
            return os.open(path, flags, mode)
        return os.open(path, flags, mode, dir_fd=dirfd)
        except OSError as e:
        # NOTE(review): the enclosing `def` (a uname-tuple helper) and the
        # sysname/domainname/uname assignments are missing from this
        # extract -- only the tail of the helper is visible here.
        nodename = uname.nodename.encode()
        # pinned release/version/machine strings presented to the guest
        release = b'5.6.0-1-powerpc64le'
        version = b'#1 SMP Debian 5.6.7-1 (2020-04-29)'
        machine = b'ppc64le'

        return sysname, nodename, release, version, machine, domainname
    # NOTE(review): lines are missing from this extract (try:, the
    # right-hand side of the tuple unpack, and the final return).
    def sys_uname(self, buf, *rest):
        """Guest (old-style) uname(2): five fixed 65-byte fields."""
        s = struct.Struct("<65s65s65s65s65s")
        # writable host view of the guest utsname buffer
        buf = self.__isacaller.mem.get_ctypes(buf, s.size, is_write=True)
        except (ValueError, MemException):
            return -errno.EFAULT
        sysname, nodename, release, version, machine, domainname = \
        # old uname has no domainname field: only five values packed
        s.pack_into(buf, 0, sysname, nodename, release, version, machine)
    # NOTE(review): lines are missing from this extract (try:, the
    # tuple-unpack right-hand side, and the pack_into call head).
    def sys_newuname(self, buf, *rest):
        """Guest newuname(2): six fields of __NEW_UTS_LEN+1 bytes each."""
        # NOTE(review): a double-underscore attribute access inside a
        # class body is subject to Python private-name mangling -- verify
        # this resolves correctly against ppc_flags.
        name_len = ppc_flags.__NEW_UTS_LEN + 1
        s = struct.Struct("<%ds%ds%ds%ds%ds%ds" % ((name_len,) * 6))
        # writable host view of the guest new_utsname buffer
        buf = self.__isacaller.mem.get_ctypes(buf, s.size, is_write=True)
        except (ValueError, MemException):
            return -errno.EFAULT
        sysname, nodename, release, version, machine, domainname = \
            sysname, nodename, release, version, machine, domainname)
1304 def sys_readlink(self
, pathname
, buf
, bufsiz
, *rest
):
1305 dirfd
= ppc_flags
.AT_FDCWD
1306 return self
.sys_readlinkat(dirfd
, pathname
, buf
, bufsiz
)
    # NOTE(review): the try:/else:/return lines are missing from this
    # extract -- consult the full original source.
    def sys_readlinkat(self, dirfd, pathname, buf, bufsiz, *rest):
        """Guest readlinkat(2): link target copied into guest buffer."""
        path = self.__isacaller.mem.read_cstr(pathname)
        # writable host view of the guest output buffer
        buf = self.__isacaller.mem.get_ctypes(
            buf, bufsiz, is_write=True)
        except (ValueError, MemException):
            return -errno.EFAULT
        if dirfd == ppc_flags.AT_FDCWD:
            result = os.readlink(path)
        result = os.readlink(path, dir_fd=dirfd)
        # silently truncate to the buffer size, as readlinkat(2) specifies
        retval = min(len(result), len(buf))
        buf[:retval] = result[:retval]
        except OSError as e:
1330 class ISACaller(ISACallerHelper
, ISAFPHelpers
, StepLoop
):
1331 # decoder2 - an instance of power_decoder2
1332 # regfile - a list of initial values for the registers
1333 # initial_{etc} - initial values for SPRs, Condition Register, Mem, MSR
1334 # respect_pc - tracks the program counter. requires initial_insns
1335 def __init__(self
, decoder2
, regfile
, initial_sprs
=None, initial_cr
=0,
1336 initial_mem
=None, initial_msr
=0,
1349 use_syscall_emu
=False,
1350 emulating_mmap
=False,
1351 real_page_size
=None):
1353 self
.syscall
= SyscallEmulator(isacaller
=self
)
1354 if not use_mmap_mem
:
1355 log("forcing use_mmap_mem due to use_syscall_emu active")
1360 # we will eventually be able to load ELF files without use_syscall_emu
1361 # (e.g. the linux kernel), so do it in a separate if block
1362 if isinstance(initial_insns
, ELFFile
):
1363 if not use_mmap_mem
:
1364 log("forcing use_mmap_mem due to loading an ELF file")
1366 if not emulating_mmap
:
1367 log("forcing emulating_mmap due to loading an ELF file")
1368 emulating_mmap
= True
1370 # trace log file for model output. if None do nothing
1371 self
.insnlog
= insnlog
1372 self
.insnlog_is_file
= hasattr(insnlog
, "write")
1373 if not self
.insnlog_is_file
and self
.insnlog
:
1374 self
.insnlog
= open(self
.insnlog
, "w")
1376 self
.bigendian
= bigendian
1378 self
.is_svp64_mode
= False
1379 self
.respect_pc
= respect_pc
1380 if initial_sprs
is None:
1382 if initial_mem
is None:
1384 if fpregfile
is None:
1385 fpregfile
= [0] * 32
1386 if initial_insns
is None:
1388 assert self
.respect_pc
== False, "instructions required to honor pc"
1389 if initial_msr
is None:
1390 initial_msr
= DEFAULT_MSR
1392 log("ISACaller insns", respect_pc
, initial_insns
, disassembly
)
1393 log("ISACaller initial_msr", initial_msr
)
1395 # "fake program counter" mode (for unit testing)
1399 if isinstance(initial_mem
, tuple):
1400 self
.fake_pc
= initial_mem
[0]
1401 disasm_start
= self
.fake_pc
1403 disasm_start
= initial_pc
1405 # disassembly: we need this for now (not given from the decoder)
1406 self
.disassembly
= {}
1408 for i
, code
in enumerate(disassembly
):
1409 self
.disassembly
[i
*4 + disasm_start
] = code
1411 # set up registers, instruction memory, data memory, PC, SPRs, MSR, CR
1412 self
.svp64rm
= SVP64RM()
1413 if initial_svstate
is None:
1415 if isinstance(initial_svstate
, int):
1416 initial_svstate
= SVP64State(initial_svstate
)
1417 # SVSTATE, MSR and PC
1418 StepLoop
.__init
__(self
, initial_svstate
)
1419 self
.msr
= SelectableInt(initial_msr
, 64) # underlying reg
1421 # GPR FPR SPR registers
1422 initial_sprs
= deepcopy(initial_sprs
) # so as not to get modified
1423 self
.gpr
= GPR(decoder2
, self
, self
.svstate
, regfile
)
1424 self
.fpr
= GPR(decoder2
, self
, self
.svstate
, fpregfile
)
1425 # initialise SPRs before MMU
1426 self
.spr
= SPR(decoder2
, initial_sprs
, gpr
=self
.gpr
)
1428 # set up 4 dummy SVSHAPEs if they aren't already set up
1430 sname
= 'SVSHAPE%d' % i
1431 val
= self
.spr
.get(sname
, 0)
1432 # make sure it's an SVSHAPE -- conversion done by SPR.__setitem__
1433 self
.spr
[sname
] = val
1434 self
.last_op_svshape
= False
1438 self
.mem
= MemMMap(row_bytes
=8,
1439 initial_mem
=initial_mem
,
1441 emulating_mmap
=emulating_mmap
)
1442 self
.imem
= self
.mem
1443 lelf
= self
.mem
.initialize(row_bytes
=4, initial_mem
=initial_insns
)
1444 if isinstance(lelf
, LoadedELF
): # stuff parsed from ELF
1445 initial_pc
= lelf
.pc
1446 for k
, v
in lelf
.gprs
.items():
1447 self
.gpr
[k
] = SelectableInt(v
, 64)
1448 initial_fpscr
= lelf
.fpscr
1449 self
.mem
.log_fancy(kind
=LogType
.InstrInOuts
)
1451 self
.mem
= Mem(row_bytes
=8, initial_mem
=initial_mem
,
1453 self
.mem
.log_fancy(kind
=LogType
.InstrInOuts
)
1454 self
.imem
= Mem(row_bytes
=4, initial_mem
=initial_insns
)
1455 # MMU mode, redirect underlying Mem through RADIX
1457 self
.mem
= RADIX(self
.mem
, self
)
1459 self
.imem
= RADIX(self
.imem
, self
)
1461 # TODO, needed here:
1462 # FPR (same as GPR except for FP nums)
1463 # 4.2.2 p124 FPSCR (definitely "separate" - not in SPR)
1464 # note that mffs, mcrfs, mtfsf "manage" this FPSCR
1465 self
.fpscr
= FPSCRState(initial_fpscr
)
1467 # 2.3.1 CR (and sub-fields CR0..CR6 - CR0 SO comes from XER.SO)
1468 # note that mfocrf, mfcr, mtcr, mtocrf, mcrxrx "manage" CRs
1470 # 2.3.2 LR (actually SPR #8) -- Done
1471 # 2.3.3 CTR (actually SPR #9) -- Done
1472 # 2.3.4 TAR (actually SPR #815)
1473 # 3.2.2 p45 XER (actually SPR #1) -- Done
1474 # 3.2.3 p46 p232 VRSAVE (actually SPR #256)
1476 # create CR then allow portions of it to be "selectable" (below)
1477 self
.cr_fields
= CRFields(initial_cr
)
1478 self
.cr
= self
.cr_fields
.cr
1479 self
.cr_backup
= 0 # sigh, dreadful hack: for fail-first (VLi)
1481 # "undefined", just set to variable-bit-width int (use exts "max")
1482 # self.undefined = SelectableInt(0, EFFECTIVELY_UNLIMITED)
1485 self
.namespace
.update(self
.spr
)
1486 self
.namespace
.update({'GPR': self
.gpr
,
1490 'memassign': self
.memassign
,
1493 'SVSTATE': self
.svstate
,
1494 'SVSHAPE0': self
.spr
['SVSHAPE0'],
1495 'SVSHAPE1': self
.spr
['SVSHAPE1'],
1496 'SVSHAPE2': self
.spr
['SVSHAPE2'],
1497 'SVSHAPE3': self
.spr
['SVSHAPE3'],
1500 'FPSCR': self
.fpscr
,
1501 'undefined': undefined
,
1502 'mode_is_64bit': True,
1503 'SO': XER_bits
['SO'],
1504 'XLEN': 64, # elwidth overrides
1508 if real_page_size
is None:
1509 # PowerISA v3.1B Book III Section 6.7 page 1191 (1217)
1510 # defines real page size as 2 ** 12 bytes (4KiB)
1511 real_page_size
= 2 ** 12
1512 self
.real_page_size
= real_page_size
1513 self
.reserve_addr
= SelectableInt(0, self
.XLEN
)
1514 self
.reserve
= SelectableInt(0, 1)
1515 self
.reserve_length
= SelectableInt(0, 4)
1517 self
.namespace
.update({'RESERVE': self
.RESERVE
,
1518 'RESERVE_ADDR': self
.RESERVE_ADDR
,
1519 'RESERVE_LENGTH': self
.RESERVE_LENGTH
,
1520 'REAL_PAGE_SIZE': self
.REAL_PAGE_SIZE
,
1523 for name
in BFP_FLAG_NAMES
:
1524 setattr(self
, name
, 0)
1526 # update pc to requested start point
1527 self
.set_pc(initial_pc
)
1529 # field-selectable versions of Condition Register
1530 self
.crl
= self
.cr_fields
.crl
1532 self
.namespace
["CR%d" % i
] = self
.crl
[i
]
1534 self
.decoder
= decoder2
.dec
1535 self
.dec2
= decoder2
1537 super().__init
__(XLEN
=self
.namespace
["XLEN"], FPSCR
=self
.fpscr
)
    def trace(self, out):
        """Append *out* to the instruction trace log, if one is open."""
        if self.insnlog is None:
        # NOTE(review): the body of the None check (an early return) is
        # missing from this extract.
        self.insnlog.write(out)

        # NOTE(review): the enclosing `def XLEN` (and its decorator) is
        # missing from this extract; only its return line is visible.
        return self.namespace["XLEN"]
    def RESERVE_LENGTH(self):
        """Current reservation length (SelectableInt, 4 bits)."""
        return self.reserve_length
    def RESERVE_ADDR(self):
        """Address of the current reservation (SelectableInt, XLEN bits)."""
        return self.reserve_addr
    def REAL_PAGE_SIZE(self):
        """Real page size in bytes (default 2**12, PowerISA v3.1B 6.7)."""
        return self.real_page_size
    def real_addr(self, EA):
        """ get the "real address to which `EA` maps"

        Specified in PowerISA v3.1B Book II Section 1.7.2.1 page 1049 (1075)
        """
        # FIXME: translate EA to a physical address
        # NOTE(review): the body (presumably an identity mapping that
        # returns EA unchanged) is missing from this extract -- confirm.
    def call_trap(self, trap_addr, trap_bit):
        """calls TRAP and sets up NIA to the new execution location.
        next instruction will begin at trap_addr.
        """
        self.TRAP(trap_addr, trap_bit)
        # TRAP computed self.trap_nia (the KAIVB-adjusted vector address);
        # publish it as NIA and commit so the next fetch uses it
        self.namespace['NIA'] = self.trap_nia
        self.pc.update(self.namespace, self.is_svp64_mode)
    def TRAP(self, trap_addr=0x700, trap_bit=PIb.TRAP):
        """TRAP> saves PC, MSR (and TODO SVSTATE), and updates MSR

        TRAP function is callable from inside the pseudocode itself,
        hence the default arguments. when calling from inside ISACaller
        it is best to use call_trap()

        trap_addr: int | SelectableInt
            the address to go to (before any modifications from `KAIVB`)
        trap_bit: int | None
            the bit in `SRR1` to set, `None` means don't set any bits.
        """
        # accept either a plain int or a SelectableInt vector address
        if isinstance(trap_addr, SelectableInt):
            trap_addr = trap_addr.value
        # https://bugs.libre-soc.org/show_bug.cgi?id=859
        kaivb = self.spr['KAIVB'].value
        msr = self.namespace['MSR'].value
        log("TRAP:", hex(trap_addr), hex(msr), "kaivb", hex(kaivb))
        # store CIA(+4?) in SRR0, set NIA to 0x700
        # store MSR in SRR1, set MSR to um errr something, have to check spec
        # store SVSTATE (if enabled) in SVSRR0
        self.spr['SRR0'].value = self.pc.CIA.value
        self.spr['SRR1'].value = msr
        if self.is_svp64_mode:
            self.spr['SVSRR0'] = self.namespace['SVSTATE'].value
        # vector address: trap_addr ORed into KAIVB's upper bits
        # (KAIVB's low 13 bits are masked off)
        self.trap_nia = SelectableInt(trap_addr | (kaivb & ~0x1fff), 64)
        if trap_bit is not None:
            self.spr['SRR1'][trap_bit] = 1  # change *copy* of MSR in SRR1

        # set exception bits. TODO: this should, based on the address
        # in figure 66 p1065 V3.0B and the table figure 65 p1063 set these
        # bits appropriately. however it turns out that *for now* in all
        # cases (all trap_addrs) the exact same thing is needed.
        self.msr[MSRb.IR] = 0
        self.msr[MSRb.DR] = 0
        self.msr[MSRb.FE0] = 0
        self.msr[MSRb.FE1] = 0
        self.msr[MSRb.EE] = 0
        self.msr[MSRb.RI] = 0
        self.msr[MSRb.SF] = 1
        self.msr[MSRb.TM] = 0
        self.msr[MSRb.VEC] = 0
        self.msr[MSRb.VSX] = 0
        self.msr[MSRb.PR] = 0
        self.msr[MSRb.FP] = 0
        self.msr[MSRb.PMM] = 0
        self.msr[MSRb.TEs] = 0
        self.msr[MSRb.TEe] = 0
        self.msr[MSRb.UND] = 0
        self.msr[MSRb.LE] = 1
1635 def memassign(self
, ea
, sz
, val
):
1636 self
.mem
.memassign(ea
, sz
, val
)
1638 def prep_namespace(self
, insn_name
, info
, xlen
):
1639 # TODO: get field names from form in decoder*1* (not decoder2)
1640 # decoder2 is hand-created, and decoder1.sigform is auto-generated
1642 # then "yield" fields only from op_fields rather than hard-coded
1644 formname
, op_fields
= info
.form
, info
.op_fields
1645 fields
= self
.decoder
.sigforms
[formname
]
1646 log("prep_namespace", formname
, op_fields
, insn_name
)
1647 for name
in op_fields
:
1648 # CR immediates. deal with separately. needs modifying
1650 crlen5
= ['BC', 'BA', 'BB', 'BT', 'BI'] # 5-bit
1651 crlen3
= ['BF', 'BFA'] # 3-bit (BF: bit-field)
1652 if self
.is_svp64_mode
and name
in crlen5
:
1653 # 5-bit, must reconstruct the value
1655 regnum
, is_vec
= yield from get_cr_out(self
.dec2
, name
)
1657 regnum
, is_vec
= yield from get_cr_in(self
.dec2
, name
)
1658 sig
= getattr(fields
, name
)
1660 # low 2 LSBs (CR field selector) remain same, CR num extended
1661 assert regnum
<= 7, "sigh, TODO, 128 CR fields"
1662 val
= (val
& 0b11) |
(regnum
<< 2)
1663 elif self
.is_svp64_mode
and name
in crlen3
:
1665 regnum
, is_vec
= yield from get_cr_out(self
.dec2
, name
)
1667 regnum
, is_vec
= yield from get_cr_in(self
.dec2
, name
)
1668 log('hack %s' % name
, regnum
, is_vec
)
1671 sig
= getattr(fields
, name
)
1673 # these are all opcode fields involved in index-selection of CR,
1674 # and need to do "standard" arithmetic. CR[BA+32] for example
1675 # would, if using SelectableInt, only be 5-bit.
1676 if name
not in crlen3
and name
not in crlen5
:
1677 val
= SelectableInt(val
, sig
.width
)
1679 # finally put the field into the namespace
1680 self
.namespace
[name
] = val
1682 self
.namespace
['XER'] = self
.spr
['XER']
1683 self
.namespace
['CA'] = self
.spr
['XER'][XER_bits
['CA']].value
1684 self
.namespace
['CA32'] = self
.spr
['XER'][XER_bits
['CA32']].value
1685 self
.namespace
['OV'] = self
.spr
['XER'][XER_bits
['OV']].value
1686 self
.namespace
['OV32'] = self
.spr
['XER'][XER_bits
['OV32']].value
1687 self
.namespace
['XLEN'] = xlen
1688 self
.namespace
['RESERVE'] = self
.reserve
1689 self
.namespace
['RESERVE_ADDR'] = self
.reserve_addr
1690 self
.namespace
['RESERVE_LENGTH'] = self
.reserve_length
1692 # add some SVSTATE convenience variables
1693 vl
= self
.svstate
.vl
1694 srcstep
= self
.svstate
.srcstep
1695 self
.namespace
['VL'] = vl
1696 self
.namespace
['srcstep'] = srcstep
1698 # take a copy of the CR field value: if non-VLi fail-first fails
1699 # this is because the pseudocode writes *directly* to CR. sigh
1700 self
.cr_backup
= self
.cr
.value
1702 # sv.bc* need some extra fields
1703 if not self
.is_svp64_mode
or not insn_name
.startswith("sv.bc"):
1706 # blegh grab bits manually
1707 mode
= yield self
.dec2
.rm_dec
.rm_in
.mode
1708 # convert to SelectableInt before test
1709 mode
= SelectableInt(mode
, 5)
1710 bc_vlset
= mode
[SVP64MODEb
.BC_VLSET
] != 0
1711 bc_vli
= mode
[SVP64MODEb
.BC_VLI
] != 0
1712 bc_snz
= mode
[SVP64MODEb
.BC_SNZ
] != 0
1713 bc_vsb
= yield self
.dec2
.rm_dec
.bc_vsb
1714 bc_ctrtest
= yield self
.dec2
.rm_dec
.bc_ctrtest
1715 bc_lru
= yield self
.dec2
.rm_dec
.bc_lru
1716 bc_gate
= yield self
.dec2
.rm_dec
.bc_gate
1717 sz
= yield self
.dec2
.rm_dec
.pred_sz
1718 self
.namespace
['mode'] = SelectableInt(mode
, 5)
1719 self
.namespace
['ALL'] = SelectableInt(bc_gate
, 1)
1720 self
.namespace
['VSb'] = SelectableInt(bc_vsb
, 1)
1721 self
.namespace
['LRu'] = SelectableInt(bc_lru
, 1)
1722 self
.namespace
['CTRtest'] = SelectableInt(bc_ctrtest
, 1)
1723 self
.namespace
['VLSET'] = SelectableInt(bc_vlset
, 1)
1724 self
.namespace
['VLI'] = SelectableInt(bc_vli
, 1)
1725 self
.namespace
['sz'] = SelectableInt(sz
, 1)
1726 self
.namespace
['SNZ'] = SelectableInt(bc_snz
, 1)
1728 def get_kludged_op_add_ca_ov(self
, inputs
, inp_ca_ov
):
1729 """ this was not at all necessary to do. this function massively
1730 duplicates - in a laborious and complex fashion - the contents of
1731 the CSV files that were extracted two years ago from microwatt's
1732 source code. A-inversion is the "inv A" column, output inversion
1733 is the "inv out" column, carry-in equal to 0 or 1 or CA is the
1736 all of that information is available in
1737 self.instrs[ins_name].op_fields
1738 where info is usually assigned to self.instrs[ins_name]
1740 https://git.libre-soc.org/?p=openpower-isa.git;a=blob;f=openpower/isatables/minor_31.csv;hb=HEAD
1742 the immediate constants are *also* decoded correctly and placed
1743 usually by DecodeIn2Imm into operand2, as part of power_decoder2.py
1745 def ca(a
, b
, ca_in
, width
):
1746 mask
= (1 << width
) - 1
1747 y
= (a
& mask
) + (b
& mask
) + ca_in
1750 asmcode
= yield self
.dec2
.dec
.op
.asmcode
1751 insn
= insns
.get(asmcode
)
1752 SI
= yield self
.dec2
.dec
.SI
1755 inputs
= [i
.value
for i
in inputs
]
1758 if insn
in ("add", "addo", "addc", "addco"):
1762 elif insn
== "addic" or insn
== "addic.":
1766 elif insn
in ("subf", "subfo", "subfc", "subfco"):
1770 elif insn
== "subfic":
1774 elif insn
== "adde" or insn
== "addeo":
1778 elif insn
== "subfe" or insn
== "subfeo":
1782 elif insn
== "addme" or insn
== "addmeo":
1786 elif insn
== "addze" or insn
== "addzeo":
1790 elif insn
== "subfme" or insn
== "subfmeo":
1794 elif insn
== "subfze" or insn
== "subfzeo":
1798 elif insn
== "addex":
1799 # CA[32] aren't actually written, just generate so we have
1800 # something to return
1801 ca64
= ov64
= ca(inputs
[0], inputs
[1], OV
, 64)
1802 ca32
= ov32
= ca(inputs
[0], inputs
[1], OV
, 32)
1803 return ca64
, ca32
, ov64
, ov32
1804 elif insn
== "neg" or insn
== "nego":
1809 raise NotImplementedError(
1810 "op_add kludge unimplemented instruction: ", asmcode
, insn
)
1812 ca64
= ca(a
, b
, ca_in
, 64)
1813 ca32
= ca(a
, b
, ca_in
, 32)
1814 ov64
= ca64
!= ca(a
, b
, ca_in
, 63)
1815 ov32
= ca32
!= ca(a
, b
, ca_in
, 31)
1816 return ca64
, ca32
, ov64
, ov32
1818 def handle_carry_(self
, inputs
, output
, ca
, ca32
, inp_ca_ov
):
1819 if ca
is not None and ca32
is not None:
1821 op
= yield self
.dec2
.e
.do
.insn_type
1822 if op
== MicrOp
.OP_ADD
.value
and ca
is None and ca32
is None:
1823 retval
= yield from self
.get_kludged_op_add_ca_ov(
1825 ca
, ca32
, ov
, ov32
= retval
1826 asmcode
= yield self
.dec2
.dec
.op
.asmcode
1827 if insns
.get(asmcode
) == 'addex':
1828 # TODO: if 32-bit mode, set ov to ov32
1829 self
.spr
['XER'][XER_bits
['OV']] = ov
1830 self
.spr
['XER'][XER_bits
['OV32']] = ov32
1831 log(f
"write OV/OV32 OV={ov} OV32={ov32}",
1832 kind
=LogType
.InstrInOuts
)
1834 # TODO: if 32-bit mode, set ca to ca32
1835 self
.spr
['XER'][XER_bits
['CA']] = ca
1836 self
.spr
['XER'][XER_bits
['CA32']] = ca32
1837 log(f
"write CA/CA32 CA={ca} CA32={ca32}",
1838 kind
=LogType
.InstrInOuts
)
1840 inv_a
= yield self
.dec2
.e
.do
.invert_in
1842 inputs
[0] = ~inputs
[0]
1844 imm_ok
= yield self
.dec2
.e
.do
.imm_data
.ok
1846 imm
= yield self
.dec2
.e
.do
.imm_data
.data
1847 inputs
.append(SelectableInt(imm
, 64))
1850 log("gt input", x
, output
)
1851 gt
= (gtu(x
, output
))
1854 cy
= 1 if any(gts
) else 0
1856 if ca
is None: # already written
1857 self
.spr
['XER'][XER_bits
['CA']] = cy
1860 # ARGH... different for OP_ADD... *sigh*...
1861 op
= yield self
.dec2
.e
.do
.insn_type
1862 if op
== MicrOp
.OP_ADD
.value
:
1863 res32
= (output
.value
& (1 << 32)) != 0
1864 a32
= (inputs
[0].value
& (1 << 32)) != 0
1865 if len(inputs
) >= 2:
1866 b32
= (inputs
[1].value
& (1 << 32)) != 0
1869 cy32
= res32 ^ a32 ^ b32
1870 log("CA32 ADD", cy32
)
1874 log("input", x
, output
)
1875 log(" x[32:64]", x
, x
[32:64])
1876 log(" o[32:64]", output
, output
[32:64])
1877 gt
= (gtu(x
[32:64], output
[32:64])) == SelectableInt(1, 1)
1879 cy32
= 1 if any(gts
) else 0
1880 log("CA32", cy32
, gts
)
1881 if ca32
is None: # already written
1882 self
.spr
['XER'][XER_bits
['CA32']] = cy32
1884 def handle_overflow(self
, inputs
, output
, div_overflow
, inp_ca_ov
):
1885 op
= yield self
.dec2
.e
.do
.insn_type
1886 if op
== MicrOp
.OP_ADD
.value
:
1887 retval
= yield from self
.get_kludged_op_add_ca_ov(
1889 ca
, ca32
, ov
, ov32
= retval
1890 # TODO: if 32-bit mode, set ov to ov32
1891 self
.spr
['XER'][XER_bits
['OV']] = ov
1892 self
.spr
['XER'][XER_bits
['OV32']] = ov32
1893 self
.spr
['XER'][XER_bits
['SO']] |
= ov
1895 if hasattr(self
.dec2
.e
.do
, "invert_in"):
1896 inv_a
= yield self
.dec2
.e
.do
.invert_in
1898 inputs
[0] = ~inputs
[0]
1900 imm_ok
= yield self
.dec2
.e
.do
.imm_data
.ok
1902 imm
= yield self
.dec2
.e
.do
.imm_data
.data
1903 inputs
.append(SelectableInt(imm
, 64))
1904 log("handle_overflow", inputs
, output
, div_overflow
)
1905 if len(inputs
) < 2 and div_overflow
is None:
1908 # div overflow is different: it's returned by the pseudo-code
1909 # because it's more complex than can be done by analysing the output
1910 if div_overflow
is not None:
1911 ov
, ov32
= div_overflow
, div_overflow
1912 # arithmetic overflow can be done by analysing the input and output
1913 elif len(inputs
) >= 2:
1915 input_sgn
= [exts(x
.value
, x
.bits
) < 0 for x
in inputs
]
1916 output_sgn
= exts(output
.value
, output
.bits
) < 0
1917 ov
= 1 if input_sgn
[0] == input_sgn
[1] and \
1918 output_sgn
!= input_sgn
[0] else 0
1921 input32_sgn
= [exts(x
.value
, 32) < 0 for x
in inputs
]
1922 output32_sgn
= exts(output
.value
, 32) < 0
1923 ov32
= 1 if input32_sgn
[0] == input32_sgn
[1] and \
1924 output32_sgn
!= input32_sgn
[0] else 0
1926 # now update XER OV/OV32/SO
1927 so
= self
.spr
['XER'][XER_bits
['SO']]
1928 new_so
= so | ov
# sticky overflow ORs in old with new
1929 self
.spr
['XER'][XER_bits
['OV']] = ov
1930 self
.spr
['XER'][XER_bits
['OV32']] = ov32
1931 self
.spr
['XER'][XER_bits
['SO']] = new_so
1932 log(" set overflow", ov
, ov32
, so
, new_so
)
1934 def handle_comparison(self
, out
, cr_idx
=0, overflow
=None, no_so
=False):
1935 assert isinstance(out
, SelectableInt
), \
1936 "out zero not a SelectableInt %s" % repr(outputs
)
1937 log("handle_comparison", out
.bits
, hex(out
.value
))
1938 # TODO - XXX *processor* in 32-bit mode
1939 # https://bugs.libre-soc.org/show_bug.cgi?id=424
1941 # o32 = exts(out.value, 32)
1942 # print ("handle_comparison exts 32 bit", hex(o32))
1943 out
= exts(out
.value
, out
.bits
)
1944 log("handle_comparison exts", hex(out
))
1945 # create the three main CR flags, EQ GT LT
1946 zero
= SelectableInt(out
== 0, 1)
1947 positive
= SelectableInt(out
> 0, 1)
1948 negative
= SelectableInt(out
< 0, 1)
1949 # get (or not) XER.SO. for setvl this is important *not* to read SO
1951 SO
= SelectableInt(1, 0)
1953 SO
= self
.spr
['XER'][XER_bits
['SO']]
1954 log("handle_comparison SO", SO
.value
,
1955 "overflow", overflow
,
1957 "+ve", positive
.value
,
1958 "-ve", negative
.value
)
1959 # alternative overflow checking (setvl mainly at the moment)
1960 if overflow
is not None and overflow
== 1:
1961 SO
= SelectableInt(1, 1)
1962 # create the four CR field values and set the required CR field
1963 cr_field
= selectconcat(negative
, positive
, zero
, SO
)
1964 log("handle_comparison cr_field", self
.cr
, cr_idx
, cr_field
)
1965 self
.crl
[cr_idx
].eq(cr_field
)
1968 def set_pc(self
, pc_val
):
1969 self
.namespace
['NIA'] = SelectableInt(pc_val
, 64)
1970 self
.pc
.update(self
.namespace
, self
.is_svp64_mode
)
1972 def get_next_insn(self
):
1973 """check instruction
1976 pc
= self
.pc
.CIA
.value
1979 ins
= self
.imem
.ld(pc
, 4, False, True, instr_fetch
=True)
1981 raise KeyError("no instruction at 0x%x" % pc
)
1984 def setup_one(self
):
1985 """set up one instruction
1987 pc
, insn
= self
.get_next_insn()
1988 yield from self
.setup_next_insn(pc
, insn
)
    # cache since it's really slow to construct
    __PREFIX_CACHE = SVP64Instruction.Prefix(SelectableInt(value=0, bits=32))

    def __decode_prefix(self, opcode):
        # reuse the single cached Prefix object: overwrite its storage
        # with the new opcode rather than constructing a fresh instance
        pfx = self.__PREFIX_CACHE
        pfx.storage.eq(opcode)
        # NOTE(review): the `return pfx` line appears to be missing from
        # this extract -- callers clearly use the returned Prefix.
1998 def setup_next_insn(self
, pc
, ins
):
1999 """set up next instruction
2002 log("setup: 0x%x 0x%x %s" % (pc
, ins
& 0xffffffff, bin(ins
)))
2003 log("CIA NIA", self
.respect_pc
, self
.pc
.CIA
.value
, self
.pc
.NIA
.value
)
2005 yield self
.dec2
.sv_rm
.eq(0)
2006 yield self
.dec2
.dec
.raw_opcode_in
.eq(ins
& 0xffffffff)
2007 yield self
.dec2
.dec
.bigendian
.eq(self
.bigendian
)
2008 yield self
.dec2
.state
.msr
.eq(self
.msr
.value
)
2009 yield self
.dec2
.state
.pc
.eq(pc
)
2010 if self
.svstate
is not None:
2011 yield self
.dec2
.state
.svstate
.eq(self
.svstate
.value
)
2013 # SVP64. first, check if the opcode is EXT001, and SVP64 id bits set
2015 opcode
= yield self
.dec2
.dec
.opcode_in
2016 opcode
= SelectableInt(value
=opcode
, bits
=32)
2017 pfx
= self
.__decode
_prefix
(opcode
)
2018 log("prefix test: opcode:", pfx
.PO
, bin(pfx
.PO
), pfx
.id)
2019 self
.is_svp64_mode
= bool((pfx
.PO
== 0b000001) and (pfx
.id == 0b11))
2020 self
.pc
.update_nia(self
.is_svp64_mode
)
2022 yield self
.dec2
.is_svp64_mode
.eq(self
.is_svp64_mode
)
2023 self
.namespace
['NIA'] = self
.pc
.NIA
2024 self
.namespace
['SVSTATE'] = self
.svstate
2025 if not self
.is_svp64_mode
:
2028 # in SVP64 mode. decode/print out svp64 prefix, get v3.0B instruction
2029 log("svp64.rm", bin(pfx
.rm
))
2030 log(" svstate.vl", self
.svstate
.vl
)
2031 log(" svstate.mvl", self
.svstate
.maxvl
)
2032 ins
= self
.imem
.ld(pc
+4, 4, False, True, instr_fetch
=True)
2033 log(" svsetup: 0x%x 0x%x %s" % (pc
+4, ins
& 0xffffffff, bin(ins
)))
2034 yield self
.dec2
.dec
.raw_opcode_in
.eq(ins
& 0xffffffff) # v3.0B suffix
2035 yield self
.dec2
.sv_rm
.eq(int(pfx
.rm
)) # svp64 prefix
2038 def execute_one(self
):
2039 """execute one instruction
2041 # get the disassembly code for this instruction
2042 if not self
.disassembly
:
2043 code
= yield from self
.get_assembly_name()
2046 if self
.is_svp64_mode
:
2047 offs
, dbg
= 4, "svp64 "
2048 code
= self
.disassembly
[self
._pc
+offs
]
2049 log(" %s sim-execute" % dbg
, hex(self
._pc
), code
)
2050 opname
= code
.split(' ')[0]
2052 yield from self
.call(opname
) # execute the instruction
2053 except MemException
as e
: # check for memory errors
2054 if e
.args
[0] == 'unaligned': # alignment error
2055 # run a Trap but set DAR first
2056 print("memory unaligned exception, DAR", e
.dar
, repr(e
))
2057 self
.spr
['DAR'] = SelectableInt(e
.dar
, 64)
2058 self
.call_trap(0x600, PIb
.PRIV
) # 0x600, privileged
2060 elif e
.args
[0] == 'invalid': # invalid
2061 # run a Trap but set DAR first
2062 log("RADIX MMU memory invalid error, mode %s" % e
.mode
)
2063 if e
.mode
== 'EXECUTE':
2064 # XXX TODO: must set a few bits in SRR1,
2065 # see microwatt loadstore1.vhdl
2066 # if m_in.segerr = '0' then
2067 # v.srr1(47 - 33) := m_in.invalid;
2068 # v.srr1(47 - 35) := m_in.perm_error; -- noexec fault
2069 # v.srr1(47 - 44) := m_in.badtree;
2070 # v.srr1(47 - 45) := m_in.rc_error;
2071 # v.intr_vec := 16#400#;
2073 # v.intr_vec := 16#480#;
2074 self
.call_trap(0x400, PIb
.PRIV
) # 0x400, privileged
2076 self
.call_trap(0x300, PIb
.PRIV
) # 0x300, privileged
2078 # not supported yet:
2079 raise e
# ... re-raise
2081 # append to the trace log file
2082 self
.trace(" # %s\n" % code
)
2084 log("gprs after code", code
)
2087 for i
in range(len(self
.crl
)):
2088 crs
.append(bin(self
.crl
[i
].asint()))
2089 log("crs", " ".join(crs
))
2090 log("vl,maxvl", self
.svstate
.vl
, self
.svstate
.maxvl
)
2092 # don't use this except in special circumstances
2093 if not self
.respect_pc
:
2096 log("execute one, CIA NIA", hex(self
.pc
.CIA
.value
),
2097 hex(self
.pc
.NIA
.value
))
2099 def get_assembly_name(self
):
2100 # TODO, asmregs is from the spec, e.g. add RT,RA,RB
2101 # see http://bugs.libre-riscv.org/show_bug.cgi?id=282
2102 dec_insn
= yield self
.dec2
.e
.do
.insn
2103 insn_1_11
= yield self
.dec2
.e
.do
.insn
[1:11]
2104 asmcode
= yield self
.dec2
.dec
.op
.asmcode
2105 int_op
= yield self
.dec2
.dec
.op
.internal_op
2106 log("get assembly name asmcode", asmcode
, int_op
,
2107 hex(dec_insn
), bin(insn_1_11
))
2108 asmop
= insns
.get(asmcode
, None)
2110 # sigh reconstruct the assembly instruction name
2111 if hasattr(self
.dec2
.e
.do
, "oe"):
2112 ov_en
= yield self
.dec2
.e
.do
.oe
.oe
2113 ov_ok
= yield self
.dec2
.e
.do
.oe
.ok
2117 if hasattr(self
.dec2
.e
.do
, "rc"):
2118 rc_en
= yield self
.dec2
.e
.do
.rc
.rc
2119 rc_ok
= yield self
.dec2
.e
.do
.rc
.ok
2123 # annoying: ignore rc_ok if RC1 is set (for creating *assembly name*)
2124 RC1
= yield self
.dec2
.rm_dec
.RC1
2128 # grrrr have to special-case MUL op (see DecodeOE)
2129 log("ov %d en %d rc %d en %d op %d" %
2130 (ov_ok
, ov_en
, rc_ok
, rc_en
, int_op
))
2131 if int_op
in [MicrOp
.OP_MUL_H64
.value
, MicrOp
.OP_MUL_H32
.value
]:
2136 if not asmop
.endswith("."): # don't add "." to "andis."
2139 if hasattr(self
.dec2
.e
.do
, "lk"):
2140 lk
= yield self
.dec2
.e
.do
.lk
2143 log("int_op", int_op
)
2144 if int_op
in [MicrOp
.OP_B
.value
, MicrOp
.OP_BC
.value
]:
2145 AA
= yield self
.dec2
.dec
.fields
.FormI
.AA
[0:-1]
2149 spr_msb
= yield from self
.get_spr_msb()
2150 if int_op
== MicrOp
.OP_MFCR
.value
:
2155 # XXX TODO: for whatever weird reason this doesn't work
2156 # https://bugs.libre-soc.org/show_bug.cgi?id=390
2157 if int_op
== MicrOp
.OP_MTCRF
.value
:
2164 def reset_remaps(self
):
2165 self
.remap_loopends
= [0] * 4
2166 self
.remap_idxs
= [0, 1, 2, 3]
2168 def get_remap_indices(self
):
2169 """WARNING, this function stores remap_idxs and remap_loopends
2170 in the class for later use. this to avoid problems with yield
2172 # go through all iterators in lock-step, advance to next remap_idx
2173 srcstep
, dststep
, ssubstep
, dsubstep
= self
.get_src_dststeps()
2174 # get four SVSHAPEs. here we are hard-coding
2176 SVSHAPE0
= self
.spr
['SVSHAPE0']
2177 SVSHAPE1
= self
.spr
['SVSHAPE1']
2178 SVSHAPE2
= self
.spr
['SVSHAPE2']
2179 SVSHAPE3
= self
.spr
['SVSHAPE3']
2180 # set up the iterators
2181 remaps
= [(SVSHAPE0
, SVSHAPE0
.get_iterator()),
2182 (SVSHAPE1
, SVSHAPE1
.get_iterator()),
2183 (SVSHAPE2
, SVSHAPE2
.get_iterator()),
2184 (SVSHAPE3
, SVSHAPE3
.get_iterator()),
2188 for i
, (shape
, remap
) in enumerate(remaps
):
2189 # zero is "disabled"
2190 if shape
.value
== 0x0:
2191 self
.remap_idxs
[i
] = 0
2192 # pick src or dststep depending on reg num (0-2=in, 3-4=out)
2193 step
= dststep
if (i
in [3, 4]) else srcstep
2194 # this is terrible. O(N^2) looking for the match. but hey.
2195 for idx
, (remap_idx
, loopends
) in enumerate(remap
):
2198 self
.remap_idxs
[i
] = remap_idx
2199 self
.remap_loopends
[i
] = loopends
2200 dbg
.append((i
, step
, remap_idx
, loopends
))
2201 for (i
, step
, remap_idx
, loopends
) in dbg
:
2202 log("SVSHAPE %d idx, end" % i
, step
, remap_idx
, bin(loopends
))
2205 def get_spr_msb(self
):
2206 dec_insn
= yield self
.dec2
.e
.do
.insn
2207 return dec_insn
& (1 << 20) != 0 # sigh - XFF.spr[-1]?
2209 def call(self
, name
, syscall_emu_active
=False):
2210 """call(opcode) - the primary execution point for instructions
2212 self
.last_st_addr
= None # reset the last known store address
2213 self
.last_ld_addr
= None # etc.
2215 ins_name
= name
.strip() # remove spaces if not already done so
2217 log("halted - not executing", ins_name
)
2220 # TODO, asmregs is from the spec, e.g. add RT,RA,RB
2221 # see http://bugs.libre-riscv.org/show_bug.cgi?id=282
2222 asmop
= yield from self
.get_assembly_name()
2223 log("call", ins_name
, asmop
,
2224 kind
=LogType
.InstrInOuts
)
2226 # sv.setvl is *not* a loop-function. sigh
2227 log("is_svp64_mode", self
.is_svp64_mode
, asmop
)
2230 int_op
= yield self
.dec2
.dec
.op
.internal_op
2231 spr_msb
= yield from self
.get_spr_msb()
2233 instr_is_privileged
= False
2234 if int_op
in [MicrOp
.OP_ATTN
.value
,
2235 MicrOp
.OP_MFMSR
.value
,
2236 MicrOp
.OP_MTMSR
.value
,
2237 MicrOp
.OP_MTMSRD
.value
,
2239 MicrOp
.OP_RFID
.value
]:
2240 instr_is_privileged
= True
2241 if int_op
in [MicrOp
.OP_MFSPR
.value
,
2242 MicrOp
.OP_MTSPR
.value
] and spr_msb
:
2243 instr_is_privileged
= True
2245 # check MSR priv bit and whether op is privileged: if so, throw trap
2246 PR
= self
.msr
[MSRb
.PR
]
2247 log("is priv", instr_is_privileged
, hex(self
.msr
.value
), PR
)
2248 if instr_is_privileged
and PR
== 1:
2249 self
.call_trap(0x700, PIb
.PRIV
)
2252 # check halted condition
2253 if ins_name
== 'attn':
2257 # User mode system call emulation consists of several steps:
2258 # 1. Detect whether instruction is sc or scv.
2259 # 2. Call the HDL implementation which invokes trap.
2260 # 3. Reroute the guest system call to host system call.
2261 # 4. Force return from the interrupt as if we had guest OS.
2262 # FIXME: enable PPC_FEATURE2_SCV in mem.py DEFAULT_AT_HWCAP2 when
2263 # scv emulation works.
2264 if ((asmop
in ("sc", "scv")) and
2265 (self
.syscall
is not None) and
2266 not syscall_emu_active
):
2267 # Memoize PC and trigger an interrupt
2269 pc
= self
.pc
.CIA
.value
2272 yield from self
.call(asmop
, syscall_emu_active
=True)
2274 # Reroute the syscall to host OS
2275 identifier
= self
.gpr(0)
2276 arguments
= map(self
.gpr
, range(3, 9))
2277 result
= self
.syscall(identifier
, *arguments
)
2278 self
.gpr
.write(3, result
, False, self
.namespace
["XLEN"])
2280 # Return from interrupt
2281 yield from self
.call("rfid", syscall_emu_active
=True)
2283 elif ((name
in ("rfid", "hrfid")) and syscall_emu_active
):
2286 # check illegal instruction
2288 if ins_name
not in ['mtcrf', 'mtocrf']:
2289 illegal
= ins_name
!= asmop
2291 # list of instructions not being supported by binutils (.long)
2292 dotstrp
= asmop
[:-1] if asmop
[-1] == '.' else asmop
2293 if dotstrp
in [*FPTRANS_INSNS
,
2295 'ffmadds', 'fdmadds', 'ffadds',
2297 "brh", "brw", "brd",
2298 'setvl', 'svindex', 'svremap', 'svstep',
2299 'svshape', 'svshape2',
2300 'binlog', 'crbinlog', 'crfbinlog',
2301 'crternlogi', 'crfternlogi', 'ternlogi',
2302 'bmask', 'cprop', 'gbbd',
2303 'absdu', 'absds', 'absdacs', 'absdacu', 'avgadd',
2304 'fmvis', 'fishmv', 'pcdec', "maddedu", "divmod2du",
2305 "dsld", "dsrd", "maddedus",
2306 "sadd", "saddw", "sadduw",
2311 "maddsubrs", "maddrs", "msubrs",
2312 "cfuged", "cntlzdm", "cnttzdm", "pdepd", "pextd",
2313 "setbc", "setbcr", "setnbc", "setnbcr",
2318 # match against instructions treated as nop, see nop below
2319 if asmop
.startswith("dcbt"):
2323 # branch-conditional redirects to sv.bc
2324 if asmop
.startswith('bc') and self
.is_svp64_mode
:
2325 ins_name
= 'sv.%s' % ins_name
2327 # ld-immediate-with-pi mode redirects to ld-with-postinc
2328 ldst_imm_postinc
= False
2329 if 'u' in ins_name
and self
.is_svp64_mode
:
2330 ldst_pi
= yield self
.dec2
.rm_dec
.ldst_postinc
2332 ins_name
= ins_name
.replace("u", "up")
2333 ldst_imm_postinc
= True
2334 log(" enable ld/st postinc", ins_name
)
2336 log(" post-processed name", dotstrp
, ins_name
, asmop
)
2338 # illegal instructions call TRAP at 0x700
2340 print("illegal", ins_name
, asmop
)
2341 self
.call_trap(0x700, PIb
.ILLEG
)
2342 print("name %s != %s - calling ILLEGAL trap, PC: %x" %
2343 (ins_name
, asmop
, self
.pc
.CIA
.value
))
2346 # this is for setvl "Vertical" mode: if set true,
2347 # srcstep/dststep is explicitly advanced. mode says which SVSTATE to
2348 # test for Rc=1 end condition. 3 bits of all 3 loops are put into CR0
2349 self
.allow_next_step_inc
= False
2350 self
.svstate_next_mode
= 0
2352 # nop has to be supported, we could let the actual op calculate
2353 # but PowerDecoder has a pattern for nop
2354 if ins_name
== 'nop':
2355 self
.update_pc_next()
2358 # get elwidths, defaults to 64
2362 if self
.is_svp64_mode
:
2363 ew_src
= yield self
.dec2
.rm_dec
.ew_src
2364 ew_dst
= yield self
.dec2
.rm_dec
.ew_dst
2365 ew_src
= 8 << (3-int(ew_src
)) # convert to bitlength
2366 ew_dst
= 8 << (3-int(ew_dst
)) # convert to bitlength
2367 xlen
= max(ew_src
, ew_dst
)
2368 log("elwidth", ew_src
, ew_dst
)
2369 log("XLEN:", self
.is_svp64_mode
, xlen
)
2371 # look up instruction in ISA.instrs, prepare namespace
2372 if ins_name
== 'pcdec': # grrrr yes there are others ("stbcx." etc.)
2373 info
= self
.instrs
[ins_name
+"."]
2374 elif asmop
[-1] == '.' and asmop
in self
.instrs
:
2375 info
= self
.instrs
[asmop
]
2377 info
= self
.instrs
[ins_name
]
2378 yield from self
.prep_namespace(ins_name
, info
, xlen
)
2380 # dict retains order
2381 inputs
= dict.fromkeys(create_full_args(
2382 read_regs
=info
.read_regs
, special_regs
=info
.special_regs
,
2383 uninit_regs
=info
.uninit_regs
, write_regs
=info
.write_regs
))
2385 # preserve order of register names
2386 write_without_special_regs
= OrderedSet(info
.write_regs
)
2387 write_without_special_regs
-= OrderedSet(info
.special_regs
)
2388 input_names
= create_args([
2389 *info
.read_regs
, *info
.uninit_regs
, *write_without_special_regs
])
2390 log("input names", input_names
)
2392 # get SVP64 entry for the current instruction
2393 sv_rm
= self
.svp64rm
.instrs
.get(ins_name
)
2394 if sv_rm
is not None:
2395 dest_cr
, src_cr
, src_byname
, dest_byname
= decode_extra(sv_rm
)
2397 dest_cr
, src_cr
, src_byname
, dest_byname
= False, False, {}, {}
2398 log("sv rm", sv_rm
, dest_cr
, src_cr
, src_byname
, dest_byname
)
2400 # see if srcstep/dststep need skipping over masked-out predicate bits
2401 # svstep also needs advancement because it calls SVSTATE_NEXT.
2402 # bit the remaps get computed just after pre_inc moves them on
2403 # with remap_set_steps substituting for PowerDecider2 not doing it,
2404 # and SVSTATE_NEXT not being able to.use yield, the preinc on
2405 # svstep is necessary for now.
2407 if (self
.is_svp64_mode
or ins_name
in ['svstep']):
2408 yield from self
.svstate_pre_inc()
2409 if self
.is_svp64_mode
:
2410 pre
= yield from self
.update_new_svstate_steps()
2412 self
.svp64_reset_loop()
2414 self
.update_pc_next()
2416 srcstep
, dststep
, ssubstep
, dsubstep
= self
.get_src_dststeps()
2417 pred_dst_zero
= self
.pred_dst_zero
2418 pred_src_zero
= self
.pred_src_zero
2419 vl
= self
.svstate
.vl
2420 subvl
= yield self
.dec2
.rm_dec
.rm_in
.subvl
2422 # VL=0 in SVP64 mode means "do nothing: skip instruction"
2423 if self
.is_svp64_mode
and vl
== 0:
2424 self
.pc
.update(self
.namespace
, self
.is_svp64_mode
)
2425 log("SVP64: VL=0, end of call", self
.namespace
['CIA'],
2426 self
.namespace
['NIA'], kind
=LogType
.InstrInOuts
)
2429 # for when SVREMAP is active, using pre-arranged schedule.
2430 # note: modifying PowerDecoder2 needs to "settle"
2431 remap_en
= self
.svstate
.SVme
2432 persist
= self
.svstate
.RMpst
2433 active
= (persist
or self
.last_op_svshape
) and remap_en
!= 0
2434 if self
.is_svp64_mode
:
2435 yield self
.dec2
.remap_active
.eq(remap_en
if active
else 0)
2437 if persist
or self
.last_op_svshape
:
2438 remaps
= self
.get_remap_indices()
2439 if self
.is_svp64_mode
and (persist
or self
.last_op_svshape
):
2440 yield from self
.remap_set_steps(remaps
)
2441 # after that, settle down (combinatorial) to let Vector reg numbers
2442 # work themselves out
2444 if self
.is_svp64_mode
:
2445 remap_active
= yield self
.dec2
.remap_active
2447 remap_active
= False
2448 log("remap active", bin(remap_active
), self
.is_svp64_mode
)
2450 # LDST does *not* allow elwidth overrides on RA (Effective Address).
2451 # this has to be detected. XXX TODO: RB for ldst-idx *may* need
2452 # conversion (to 64-bit) also.
2453 # see write reg this *HAS* to also override XLEN to 64 on LDST/Update
2454 sv_mode
= yield self
.dec2
.rm_dec
.sv_mode
2455 is_ldst
= (sv_mode
in [SVMode
.LDST_IDX
.value
, SVMode
.LDST_IMM
.value
] \
2456 and self
.is_svp64_mode
)
2457 log("is_ldst", sv_mode
, is_ldst
)
2459 # main input registers (RT, RA ...)
2460 for name
in input_names
:
2461 if name
== "overflow":
2462 inputs
[name
] = SelectableInt(0, 1)
2463 elif name
.startswith("RESERVE"):
2464 inputs
[name
] = getattr(self
, name
)
2465 elif name
== "FPSCR":
2466 inputs
[name
] = self
.FPSCR
2467 elif name
in ("CA", "CA32", "OV", "OV32"):
2468 inputs
[name
] = self
.spr
['XER'][XER_bits
[name
]]
2470 inputs
[name
] = self
.crl
[0]
2471 elif name
in spr_byname
:
2472 inputs
[name
] = self
.spr
[name
]
2473 elif is_ldst
and name
== 'RA':
2474 regval
= (yield from self
.get_input(name
, ew_src
, 64))
2475 log("EA (RA) regval name", name
, regval
)
2476 inputs
[name
] = regval
2478 regval
= (yield from self
.get_input(name
, ew_src
, xlen
))
2479 log("regval name", name
, regval
)
2480 inputs
[name
] = regval
2482 # arrrrgh, awful hack, to get _RT into namespace
2483 if ins_name
in ['setvl', 'svstep']:
2485 RT
= yield self
.dec2
.dec
.RT
2486 self
.namespace
[regname
] = SelectableInt(RT
, 5)
2488 self
.namespace
["RT"] = SelectableInt(0, 5)
2489 regnum
, is_vec
= yield from get_idx_out(self
.dec2
, "RT")
2490 log('hack input reg %s %s' % (name
, str(regnum
)), is_vec
)
2492 # in SVP64 mode for LD/ST work out immediate
2493 # XXX TODO: replace_ds for DS-Form rather than D-Form.
2494 # use info.form to detect
2495 if self
.is_svp64_mode
and not ldst_imm_postinc
:
2496 yield from self
.check_replace_d(info
, remap_active
)
2498 # "special" registers
2499 for special
in info
.special_regs
:
2500 if special
in special_sprs
:
2501 inputs
[special
] = self
.spr
[special
]
2503 inputs
[special
] = self
.namespace
[special
]
2505 # clear trap (trap) NIA
2506 self
.trap_nia
= None
2508 # check if this was an sv.bc* and create an indicator that
2509 # this is the last check to be made as a loop. combined with
2510 # the ALL/ANY mode we can early-exit. note that BI (to test)
2511 # is an input so there is no termination if BI is scalar
2512 # (because early-termination is for *output* scalars)
2513 if self
.is_svp64_mode
and ins_name
.startswith("sv.bc"):
2514 end_loop
= srcstep
== vl
-1 or dststep
== vl
-1
2515 self
.namespace
['end_loop'] = SelectableInt(end_loop
, 1)
2517 inp_ca_ov
= (self
.spr
['XER'][XER_bits
['CA']].value
,
2518 self
.spr
['XER'][XER_bits
['OV']].value
)
2520 for k
, v
in inputs
.items():
2522 v
= SelectableInt(0, self
.XLEN
)
2523 # prevent pseudo-code from modifying input registers
2524 v
= copy_assign_rhs(v
)
2525 if isinstance(v
, SelectableInt
):
2529 # execute actual instruction here (finally)
2530 log("inputs", inputs
)
2531 inputs
= list(inputs
.values())
2532 results
= info
.func(self
, *inputs
)
2533 output_names
= create_args(info
.write_regs
)
2535 # record .ok before anything after the pseudo-code can modify it
2537 for out
, n
in zip(results
or [], output_names
):
2540 if isinstance(out
, SelectableInt
):
2542 log("results", outs
)
2543 log("results ok", outs_ok
)
2545 # "inject" decorator takes namespace from function locals: we need to
2546 # overwrite NIA being overwritten (sigh)
2547 if self
.trap_nia
is not None:
2548 self
.namespace
['NIA'] = self
.trap_nia
2550 log("after func", self
.namespace
['CIA'], self
.namespace
['NIA'])
2552 # check if op was a LD/ST so that debugging can check the
2554 if int_op
in [MicrOp
.OP_STORE
.value
,
2556 self
.last_st_addr
= self
.mem
.last_st_addr
2557 if int_op
in [MicrOp
.OP_LOAD
.value
,
2559 self
.last_ld_addr
= self
.mem
.last_ld_addr
2560 log("op", int_op
, MicrOp
.OP_STORE
.value
, MicrOp
.OP_LOAD
.value
,
2561 self
.last_st_addr
, self
.last_ld_addr
)
2563 # detect if CA/CA32 already in outputs (sra*, basically)
2565 ca32
= outs
.get("CA32")
2567 log("carry already done?", ca
, ca32
, output_names
)
2568 # soc test_pipe_caller tests don't have output_carry
2569 has_output_carry
= hasattr(self
.dec2
.e
.do
, "output_carry")
2570 carry_en
= has_output_carry
and (yield self
.dec2
.e
.do
.output_carry
)
2572 yield from self
.handle_carry_(
2573 inputs
, results
[0], ca
, ca32
, inp_ca_ov
=inp_ca_ov
)
2575 # get output named "overflow" and "CR0"
2576 overflow
= outs
.get('overflow')
2577 cr0
= outs
.get('CR0')
2578 cr1
= outs
.get('CR1')
2580 # soc test_pipe_caller tests don't have oe
2581 has_oe
= hasattr(self
.dec2
.e
.do
, "oe")
2582 # yeah just no. not in parallel processing
2583 if has_oe
and not self
.is_svp64_mode
:
2584 # detect if overflow was in return result
2585 ov_en
= yield self
.dec2
.e
.do
.oe
.oe
2586 ov_ok
= yield self
.dec2
.e
.do
.oe
.ok
2587 log("internal overflow", ins_name
, overflow
, "en?", ov_en
, ov_ok
)
2589 yield from self
.handle_overflow(
2590 inputs
, results
[0], overflow
, inp_ca_ov
=inp_ca_ov
)
2592 # only do SVP64 dest predicated Rc=1 if dest-pred is not enabled
2594 if not self
.is_svp64_mode
or not pred_dst_zero
:
2595 if hasattr(self
.dec2
.e
.do
, "rc"):
2596 rc_en
= yield self
.dec2
.e
.do
.rc
.rc
2597 # don't do Rc=1 for svstep it is handled explicitly.
2598 # XXX TODO: now that CR0 is supported, sort out svstep's pseudocode
2599 # to write directly to CR0 instead of in ISACaller. hooyahh.
2600 if rc_en
and ins_name
not in ['svstep']:
2601 if outs_ok
.get('FPSCR', False):
2602 FPSCR
= outs
['FPSCR']
2605 yield from self
.do_rc_ov(
2606 ins_name
, results
[0], overflow
, cr0
, cr1
, FPSCR
)
2609 ffirst_hit
= False, False
2610 if self
.is_svp64_mode
:
2611 sv_mode
= yield self
.dec2
.rm_dec
.sv_mode
2612 is_cr
= sv_mode
== SVMode
.CROP
.value
2613 chk
= rc_en
or is_cr
2614 if outs_ok
.get('CR', False):
2615 # early write so check_ffirst can see value
2616 self
.namespace
['CR'].eq(outs
['CR'])
2617 ffirst_hit
= (yield from self
.check_ffirst(info
, chk
, srcstep
))
2619 # any modified return results?
2620 yield from self
.do_outregs(
2621 info
, outs
, carry_en
, ffirst_hit
, ew_dst
, outs_ok
)
2623 # check if a FP Exception occurred. TODO for DD-FFirst, check VLi
2624 # and raise the exception *after* if VLi=1 but if VLi=0 then
2625 # truncate and make the exception "disappear".
2626 if self
.FPSCR
.FEX
and (self
.msr
[MSRb
.FE0
] or self
.msr
[MSRb
.FE1
]):
2627 self
.call_trap(0x700, PIb
.FP
)
2630 yield from self
.do_nia(asmop
, ins_name
, rc_en
, ffirst_hit
)
2632 def check_ffirst(self
, info
, rc_en
, srcstep
):
2633 """fail-first mode: checks a bit of Rc Vector, truncates VL
2635 rm_mode
= yield self
.dec2
.rm_dec
.mode
2636 ff_inv
= yield self
.dec2
.rm_dec
.inv
2637 cr_bit
= yield self
.dec2
.rm_dec
.cr_sel
2638 RC1
= yield self
.dec2
.rm_dec
.RC1
2639 vli_
= yield self
.dec2
.rm_dec
.vli
# VL inclusive if truncated
2640 log(" ff rm_mode", rc_en
, rm_mode
, SVP64RMMode
.FFIRST
.value
)
2644 log(" cr_bit", cr_bit
)
2645 log(" rc_en", rc_en
)
2646 ffirst
= yield from is_ffirst_mode(self
.dec2
)
2647 if not rc_en
or not ffirst
:
2649 # get the CR vevtor, do BO-test
2651 log("asmregs", info
.asmregs
[0], info
.write_regs
)
2652 if 'CR' in info
.write_regs
and 'BF' in info
.asmregs
[0]:
2654 regnum
, is_vec
= yield from get_cr_out(self
.dec2
, crf
)
2655 crtest
= self
.crl
[regnum
]
2656 ffirst_hit
= crtest
[cr_bit
] != ff_inv
2657 log("cr test", crf
, regnum
, int(crtest
), crtest
, cr_bit
, ff_inv
)
2658 log("cr test?", ffirst_hit
)
2661 # Fail-first activated, truncate VL
2662 vli
= SelectableInt(int(vli_
), 7)
2663 self
.svstate
.vl
= srcstep
+ vli
2664 yield self
.dec2
.state
.svstate
.eq(self
.svstate
.value
)
2665 yield Settle() # let decoder update
2668 def do_rc_ov(self
, ins_name
, result
, overflow
, cr0
, cr1
, FPSCR
):
2669 cr_out
= yield self
.dec2
.op
.cr_out
2670 if cr_out
== CROutSel
.CR1
.value
:
2674 regnum
, is_vec
= yield from get_cr_out(self
.dec2
, rc_reg
)
2675 # hang on... for `setvl` actually you want to test SVSTATE.VL
2676 is_setvl
= ins_name
in ('svstep', 'setvl')
2678 result
= SelectableInt(result
.vl
, 64)
2680 # overflow = None # do not override overflow except in setvl
2684 cr1
= int(FPSCR
.FX
) << 3
2685 cr1 |
= int(FPSCR
.FEX
) << 2
2686 cr1 |
= int(FPSCR
.VX
) << 1
2687 cr1 |
= int(FPSCR
.OX
)
2688 log("default fp cr1", cr1
)
2690 log("explicit cr1", cr1
)
2691 self
.crl
[regnum
].eq(cr1
)
2693 # if there was not an explicit CR0 in the pseudocode,
2695 c
= self
.handle_comparison(result
, regnum
, overflow
, no_so
=is_setvl
)
2696 log("implicit cr0 %d" % regnum
, c
)
2698 # otherwise we just blat CR0 into the required regnum
2699 log("explicit cr0 %d" % regnum
, cr0
)
2700 self
.crl
[regnum
].eq(cr0
)
2702 def do_outregs(self
, info
, outs
, ca_en
, ffirst_hit
, ew_dst
, outs_ok
):
2703 ffirst_hit
, vli
= ffirst_hit
2704 # write out any regs for this instruction, but only if fail-first is ok
2705 # XXX TODO: allow CR-vector to be written out even if ffirst fails
2706 if not ffirst_hit
or vli
:
2707 for name
, output
in outs
.items():
2708 if not outs_ok
[name
]:
2709 log("skipping writing output with .ok=False", name
, output
)
2711 yield from self
.check_write(info
, name
, output
, ca_en
, ew_dst
)
2712 # restore the CR value on non-VLI failfirst (from sv.cmp and others
2713 # which write directly to CR in the pseudocode (gah, what a mess)
2714 # if ffirst_hit and not vli:
2715 # self.cr.value = self.cr_backup
2717 def do_nia(self
, asmop
, ins_name
, rc_en
, ffirst_hit
):
2718 ffirst_hit
, vli
= ffirst_hit
2720 self
.svp64_reset_loop()
2723 # check advancement of src/dst/sub-steps and if PC needs updating
2724 nia_update
= (yield from self
.check_step_increment(
2725 rc_en
, asmop
, ins_name
))
2727 self
.update_pc_next()
2729 def check_replace_d(self
, info
, remap_active
):
2730 replace_d
= False # update / replace constant in pseudocode
2731 ldstmode
= yield self
.dec2
.rm_dec
.ldstmode
2732 vl
= self
.svstate
.vl
2733 subvl
= yield self
.dec2
.rm_dec
.rm_in
.subvl
2734 srcstep
, dststep
= self
.new_srcstep
, self
.new_dststep
2735 ssubstep
, dsubstep
= self
.new_ssubstep
, self
.new_dsubstep
2736 if info
.form
== 'DS':
2737 # DS-Form, multiply by 4 then knock 2 bits off after
2738 imm
= yield self
.dec2
.dec
.fields
.FormDS
.DS
[0:14] * 4
2740 imm
= yield self
.dec2
.dec
.fields
.FormD
.D
[0:16]
2741 imm
= exts(imm
, 16) # sign-extend to integer
2742 # get the right step. LD is from srcstep, ST is dststep
2743 op
= yield self
.dec2
.e
.do
.insn_type
2745 if op
== MicrOp
.OP_LOAD
.value
:
2747 offsmul
= yield self
.dec2
.in1_step
2748 log("D-field REMAP src", imm
, offsmul
, ldstmode
)
2750 offsmul
= (srcstep
* (subvl
+1)) + ssubstep
2751 log("D-field src", imm
, offsmul
, ldstmode
)
2752 elif op
== MicrOp
.OP_STORE
.value
:
2753 # XXX NOTE! no bit-reversed STORE! this should not ever be used
2754 offsmul
= (dststep
* (subvl
+1)) + dsubstep
2755 log("D-field dst", imm
, offsmul
, ldstmode
)
2756 # Unit-Strided LD/ST adds offset*width to immediate
2757 if ldstmode
== SVP64LDSTmode
.UNITSTRIDE
.value
:
2758 ldst_len
= yield self
.dec2
.e
.do
.data_len
2759 imm
= SelectableInt(imm
+ offsmul
* ldst_len
, 32)
2761 # Element-strided multiplies the immediate by element step
2762 elif ldstmode
== SVP64LDSTmode
.ELSTRIDE
.value
:
2763 imm
= SelectableInt(imm
* offsmul
, 32)
2766 ldst_ra_vec
= yield self
.dec2
.rm_dec
.ldst_ra_vec
2767 ldst_imz_in
= yield self
.dec2
.rm_dec
.ldst_imz_in
2768 log("LDSTmode", SVP64LDSTmode(ldstmode
),
2769 offsmul
, imm
, ldst_ra_vec
, ldst_imz_in
)
2770 # new replacement D... errr.. DS
2772 if info
.form
== 'DS':
2773 # TODO: assert 2 LSBs are zero?
2774 log("DS-Form, TODO, assert 2 LSBs zero?", bin(imm
.value
))
2775 imm
.value
= imm
.value
>> 2
2776 self
.namespace
['DS'] = imm
2778 self
.namespace
['D'] = imm
2780 def get_input(self
, name
, ew_src
, xlen
):
2781 # using PowerDecoder2, first, find the decoder index.
2782 # (mapping name RA RB RC RS to in1, in2, in3)
2783 regnum
, is_vec
= yield from get_idx_in(self
.dec2
, name
, True)
2785 # doing this is not part of svp64, it's because output
2786 # registers, to be modified, need to be in the namespace.
2787 regnum
, is_vec
= yield from get_idx_out(self
.dec2
, name
, True)
2789 regnum
, is_vec
= yield from get_idx_out2(self
.dec2
, name
, True)
2791 if isinstance(regnum
, tuple):
2792 (regnum
, base
, offs
) = regnum
2794 base
, offs
= regnum
, 0 # temporary HACK
2796 # in case getting the register number is needed, _RA, _RB
2797 # (HACK: only in straight non-svp64-mode for now, or elwidth == 64)
2798 regname
= "_" + name
2799 if not self
.is_svp64_mode
or ew_src
== 64:
2800 self
.namespace
[regname
] = regnum
2802 # FIXME: we're trying to access a sub-register, plain register
2803 # numbers don't work for that. for now, just pass something that
2804 # can be compared to 0 and probably will cause an error if misused.
2805 # see https://bugs.libre-soc.org/show_bug.cgi?id=1221
2806 self
.namespace
[regname
] = regnum
* 10000
2808 if not self
.is_svp64_mode
or not self
.pred_src_zero
:
2809 log('reading reg %s %s' % (name
, str(regnum
)), is_vec
)
2811 fval
= self
.fpr(base
, is_vec
, offs
, ew_src
)
2812 reg_val
= SelectableInt(fval
)
2813 assert ew_src
== self
.XLEN
, "TODO fix elwidth conversion"
2814 self
.trace("r:FPR:%d:%d:%d " % (base
, offs
, ew_src
))
2815 log("read fp reg %d/%d: 0x%x" % (base
, offs
, reg_val
.value
),
2816 kind
=LogType
.InstrInOuts
)
2817 elif name
is not None:
2818 gval
= self
.gpr(base
, is_vec
, offs
, ew_src
)
2819 reg_val
= SelectableInt(gval
.value
, bits
=xlen
)
2820 self
.trace("r:GPR:%d:%d:%d " % (base
, offs
, ew_src
))
2821 log("read int reg %d/%d: 0x%x" % (base
, offs
, reg_val
.value
),
2822 kind
=LogType
.InstrInOuts
)
2824 log('zero input reg %s %s' % (name
, str(regnum
)), is_vec
)
2825 reg_val
= SelectableInt(0, ew_src
)
2828 def remap_set_steps(self
, remaps
):
2829 """remap_set_steps sets up the in1/2/3 and out1/2 steps.
2830 they work in concert with PowerDecoder2 at the moment,
2831 there is no HDL implementation of REMAP. therefore this
2832 function, because ISACaller still uses PowerDecoder2,
2833 will *explicitly* write the dec2.XX_step values. this has
2836 # just some convenient debug info
2838 sname
= 'SVSHAPE%d' % i
2839 shape
= self
.spr
[sname
]
2840 log(sname
, bin(shape
.value
))
2841 log(" lims", shape
.lims
)
2842 log(" mode", shape
.mode
)
2843 log(" skip", shape
.skip
)
2845 # set up the list of steps to remap
2846 mi0
= self
.svstate
.mi0
2847 mi1
= self
.svstate
.mi1
2848 mi2
= self
.svstate
.mi2
2849 mo0
= self
.svstate
.mo0
2850 mo1
= self
.svstate
.mo1
2851 steps
= [[self
.dec2
.in1_step
, mi0
], # RA
2852 [self
.dec2
.in2_step
, mi1
], # RB
2853 [self
.dec2
.in3_step
, mi2
], # RC
2854 [self
.dec2
.o_step
, mo0
], # RT
2855 [self
.dec2
.o2_step
, mo1
], # EA
2858 rnames
= ['RA', 'RB', 'RC', 'RT', 'RS']
2859 for i
, reg
in enumerate(rnames
):
2860 idx
= yield from get_idx_map(self
.dec2
, reg
)
2862 idx
= yield from get_idx_map(self
.dec2
, "F"+reg
)
2864 steps
[i
][0] = self
.dec2
.in1_step
2866 steps
[i
][0] = self
.dec2
.in2_step
2868 steps
[i
][0] = self
.dec2
.in3_step
2869 log("remap step", i
, reg
, idx
, steps
[i
][1])
2870 remap_idxs
= self
.remap_idxs
2872 # now cross-index the required SHAPE for each of 3-in 2-out regs
2873 rnames
= ['RA', 'RB', 'RC', 'RT', 'EA']
2874 for i
, (dstep
, shape_idx
) in enumerate(steps
):
2875 (shape
, remap
) = remaps
[shape_idx
]
2876 remap_idx
= remap_idxs
[shape_idx
]
2877 # zero is "disabled"
2878 if shape
.value
== 0x0:
2880 # now set the actual requested step to the current index
2881 if dstep
is not None:
2882 yield dstep
.eq(remap_idx
)
2884 # debug printout info
2885 rremaps
.append((shape
.mode
, hex(shape
.value
), dstep
,
2886 i
, rnames
[i
], shape_idx
, remap_idx
))
2888 log("shape remap", x
)
2890 def check_write(self
, info
, name
, output
, carry_en
, ew_dst
):
2891 if name
== 'overflow': # ignore, done already (above)
2893 if name
== 'CR0': # ignore, done already (above)
2895 if isinstance(output
, int):
2896 output
= SelectableInt(output
, EFFECTIVELY_UNLIMITED
)
2898 if name
.startswith("RESERVE"):
2899 log("write %s 0x%x" % (name
, output
.value
))
2900 getattr(self
, name
).eq(output
)
2902 if name
in ['FPSCR', ]:
2903 log("write FPSCR 0x%x" % (output
.value
))
2904 self
.FPSCR
.eq(output
)
2907 if name
in ['CA', 'CA32']:
2909 log("writing %s to XER" % name
, output
)
2910 log("write XER %s 0x%x" % (name
, output
.value
))
2911 self
.spr
['XER'][XER_bits
[name
]] = output
.value
2913 log("NOT writing %s to XER" % name
, output
)
2915 # write special SPRs
2916 if name
in info
.special_regs
:
2917 log('writing special %s' % name
, output
, special_sprs
)
2918 log("write reg %s 0x%x" % (name
, output
.value
),
2919 kind
=LogType
.InstrInOuts
)
2920 if name
in special_sprs
:
2921 self
.spr
[name
] = output
2923 self
.namespace
[name
].eq(output
)
2925 log('msr written', hex(self
.msr
.value
))
2927 # find out1/out2 PR/FPR
2928 regnum
, is_vec
= yield from get_idx_out(self
.dec2
, name
, True)
2930 regnum
, is_vec
= yield from get_idx_out2(self
.dec2
, name
, True)
2932 # temporary hack for not having 2nd output
2933 regnum
= yield getattr(self
.decoder
, name
)
2935 # convenient debug prefix
2940 # check zeroing due to predicate bit being zero
2941 if self
.is_svp64_mode
and self
.pred_dst_zero
:
2942 log('zeroing reg %s %s' % (str(regnum
), str(output
)), is_vec
)
2943 output
= SelectableInt(0, EFFECTIVELY_UNLIMITED
)
2944 log("write reg %s%s 0x%x ew %d" % (reg_prefix
, str(regnum
),
2945 output
.value
, ew_dst
),
2946 kind
=LogType
.InstrInOuts
)
2947 # zero-extend tov64 bit begore storing (should use EXT oh well)
2948 if output
.bits
> 64:
2949 output
= SelectableInt(output
.value
, 64)
2950 rnum
, base
, offset
= regnum
2952 self
.fpr
.write(regnum
, output
, is_vec
, ew_dst
)
2953 self
.trace("w:FPR:%d:%d:%d " % (rnum
, offset
, ew_dst
))
2956 # LDST/Update does *not* allow elwidths on RA (Effective Address).
2957 # this has to be detected, and overridden. see get_input (related)
2958 sv_mode
= yield self
.dec2
.rm_dec
.sv_mode
2959 is_ldst
= (sv_mode
in [SVMode
.LDST_IDX
.value
, SVMode
.LDST_IMM
.value
] \
2960 and self
.is_svp64_mode
)
2961 if is_ldst
and name
in ['EA', 'RA']:
2962 op
= self
.dec2
.dec
.op
2963 if hasattr(op
, "upd"):
2964 # update mode LD/ST uses read-reg A also as an output
2966 log("write is_ldst is_update", sv_mode
, is_ldst
, upd
)
2967 if upd
== LDSTMode
.update
.value
:
2968 ew_dst
= 64 # override for RA (EA) to 64-bit
2970 self
.gpr
.write(regnum
, output
, is_vec
, ew_dst
)
2971 self
.trace("w:GPR:%d:%d:%d " % (rnum
, offset
, ew_dst
))
2973 def check_step_increment(self
, rc_en
, asmop
, ins_name
):
2974 # check if it is the SVSTATE.src/dest step that needs incrementing
2975 # this is our Sub-Program-Counter loop from 0 to VL-1
2976 if not self
.allow_next_step_inc
:
2977 if self
.is_svp64_mode
:
2978 return (yield from self
.svstate_post_inc(ins_name
))
2980 # XXX only in non-SVP64 mode!
2981 # record state of whether the current operation was an svshape,
2983 # to be able to know if it should apply in the next instruction.
2984 # also (if going to use this instruction) should disable ability
2985 # to interrupt in between. sigh.
2986 self
.last_op_svshape
= asmop
in ['svremap', 'svindex',
2993 log("SVSTATE_NEXT: inc requested, mode",
2994 self
.svstate_next_mode
, self
.allow_next_step_inc
)
2995 yield from self
.svstate_pre_inc()
2996 pre
= yield from self
.update_new_svstate_steps()
2998 # reset at end of loop including exit Vertical Mode
2999 log("SVSTATE_NEXT: end of loop, reset")
3000 self
.svp64_reset_loop()
3001 self
.svstate
.vfirst
= 0
3005 self
.handle_comparison(SelectableInt(0, 64)) # CR0
3007 if self
.allow_next_step_inc
== 2:
3008 log("SVSTATE_NEXT: read")
3009 nia_update
= (yield from self
.svstate_post_inc(ins_name
))
3011 log("SVSTATE_NEXT: post-inc")
3012 # use actual (cached) src/dst-step here to check end
3013 remaps
= self
.get_remap_indices()
3014 remap_idxs
= self
.remap_idxs
3015 vl
= self
.svstate
.vl
3016 subvl
= yield self
.dec2
.rm_dec
.rm_in
.subvl
3017 if self
.allow_next_step_inc
!= 2:
3018 yield from self
.advance_svstate_steps()
3019 #self.namespace['SVSTATE'] = self.svstate.spr
3020 # set CR0 (if Rc=1) based on end
3021 endtest
= 1 if self
.at_loopend() else 0
3023 #results = [SelectableInt(endtest, 64)]
3024 # self.handle_comparison(results) # CR0
3026 # see if svstep was requested, if so, which SVSTATE
3028 if self
.svstate_next_mode
> 0:
3029 shape_idx
= self
.svstate_next_mode
.value
-1
3030 endings
= self
.remap_loopends
[shape_idx
]
3031 cr_field
= SelectableInt((~endings
) << 1 | endtest
, 4)
3032 log("svstep Rc=1, CR0", cr_field
, endtest
)
3033 self
.crl
[0].eq(cr_field
) # CR0
3035 # reset at end of loop including exit Vertical Mode
3036 log("SVSTATE_NEXT: after increments, reset")
3037 self
.svp64_reset_loop()
3038 self
.svstate
.vfirst
= 0
def SVSTATE_NEXT(self, mode, submode, RA=None):
    """explicitly moves srcstep/dststep on to next element, for
    "Vertical-First" mode. this function is called from
    setvl pseudo-code, as a pseudo-op "svstep"

    WARNING: this function uses information that was created EARLIER
    due to it being in the middle of a yield, but this function is
    *NOT* called from yield (it's called from compiled pseudocode).
    """
    # record the requested step-advance kind (+1 so zero means "none")
    self.allow_next_step_inc = submode.value + 1
    log("SVSTATE_NEXT mode", mode, submode, self.allow_next_step_inc)
    self.svstate_next_mode = mode
    # modes 1-4: report the current REMAP index of SVSHAPE0-3
    # (note: the mode is deliberately left set in this case)
    if self.svstate_next_mode > 0 and self.svstate_next_mode < 5:
        shape_idx = self.svstate_next_mode.value - 1
        return SelectableInt(self.remap_idxs[shape_idx], 7)
    # modes 5-8: report one of the SVSTATE step counters, then clear
    # the mode so the request is one-shot
    for modenum, stepname in ((5, 'srcstep'), (6, 'dststep'),
                              (7, 'ssubstep'), (8, 'dsubstep')):
        if self.svstate_next_mode == modenum:
            self.svstate_next_mode = 0
            return SelectableInt(getattr(self.svstate, stepname), 7)
    # any other mode: nothing to report
    return SelectableInt(0, 7)
def get_src_dststeps(self):
    """gets srcstep, dststep, and ssubstep, dsubstep

    Returns the values stashed earlier on the instance as the
    new_* attributes, as a single 4-tuple.
    """
    steps = (self.new_srcstep, self.new_dststep)
    substeps = (self.new_ssubstep, self.new_dsubstep)
    return steps + substeps
def update_svstate_namespace(self, overwrite_svstate=True):
    """Sync SVSTATE into the namespace and the (combinatorial) decoder.

    When overwrite_svstate is set, the previously-computed new_* step
    values are first copied into the SVSTATE SPR.  In all cases the
    namespace entry is refreshed and the decoder's svstate input is
    updated, followed by a Settle so the decoder can propagate it.
    """
    if overwrite_svstate:
        # note, do not get the bit-reversed srcstep here!
        steps = (self.new_srcstep, self.new_dststep,
                 self.new_ssubstep, self.new_dsubstep)
        # update SVSTATE with new srcstep
        (self.svstate.srcstep, self.svstate.dststep,
         self.svstate.ssubstep, self.svstate.dsubstep) = steps
    self.namespace['SVSTATE'] = self.svstate
    yield self.dec2.state.svstate.eq(self.svstate.value)
    yield Settle()  # let decoder update
    def update_new_svstate_steps(self, overwrite_svstate=True):
        """Commit the working step counters, then test for loop end.

        Generator (nmigen simulator process).  First commits SVSTATE via
        update_svstate_namespace(), then re-reads the committed counters
        and several decoder rm_dec fields, logs them, and returns True
        when either (ssubstep == subvl and srcstep == vl) or
        (dsubstep == subvl and dststep == vl) -- i.e. the source or the
        destination side of the loop has overrun VL.
        """
        yield from self.update_svstate_namespace(overwrite_svstate)
        # read back the now-committed SVSTATE fields
        srcstep = self.svstate.srcstep
        dststep = self.svstate.dststep
        ssubstep = self.svstate.ssubstep
        dsubstep = self.svstate.dsubstep
        pack = self.svstate.pack
        unpack = self.svstate.unpack
        vl = self.svstate.vl
        # decoder-side SVP64 RM fields (simulator signal reads)
        sv_mode = yield self.dec2.rm_dec.sv_mode
        subvl = yield self.dec2.rm_dec.rm_in.subvl
        rm_mode = yield self.dec2.rm_dec.mode
        ff_inv = yield self.dec2.rm_dec.inv
        cr_bit = yield self.dec2.rm_dec.cr_sel
        # NOTE(review): pack, vl and ff_inv are read above but their log
        # lines appear lost in this extraction -- keep the reads; confirm
        # the missing log calls against upstream.
        log(" srcstep", srcstep)
        log(" dststep", dststep)
        log(" unpack", unpack)
        log(" ssubstep", ssubstep)
        log(" dsubstep", dsubstep)
        log(" subvl", subvl)
        log(" rm_mode", rm_mode)
        log(" sv_mode", sv_mode)
        log(" cr_bit", cr_bit)

        # check if end reached (we let srcstep overrun, above)
        # nothing needs doing (TODO zeroing): just do next instruction
        return ((ssubstep == subvl and srcstep == vl) or
                (dsubstep == subvl and dststep == vl))
    def svstate_post_inc(self, insn_name, vf=0):
        """Advance the SVP64 sub-PC loop state after one element executes.

        Generator (nmigen simulator process).  Returns True when the PC
        may advance to the next instruction, False while the sub-PC
        (srcstep/dststep) loop must keep re-executing this instruction.

        insn_name: assembler name of the just-executed instruction
        vf: non-zero when invoked from Vertical-First sequencing
        """
        # check if SV "Vertical First" mode is enabled
        vfirst = self.svstate.vfirst
        log(" SV Vertical First", vf, vfirst)
        if not vf and vfirst == 1:
            # SV Branch-Conditional required to be as-if-vector
            # because there *is* no destination register
            # (SV normally only terminates on 1st scalar reg written
            # except in [slightly-misnamed] mapreduce mode)
            ffirst = yield from is_ffirst_mode(self.dec2)
            if insn_name.startswith("sv.bc") or ffirst:
                self.update_pc_next()
                return False  # NOTE(review): restored -- line lost in extraction
            self.update_nia()  # NOTE(review): restored -- confirm vs upstream
            return True  # NOTE(review): restored -- confirm vs upstream

        # check if it is the SVSTATE.src/dest step that needs incrementing
        # this is our Sub-Program-Counter loop from 0 to VL-1
        # XXX twin predication TODO
        vl = self.svstate.vl
        subvl = yield self.dec2.rm_dec.rm_in.subvl
        mvl = self.svstate.maxvl
        srcstep = self.svstate.srcstep
        dststep = self.svstate.dststep
        ssubstep = self.svstate.ssubstep
        dsubstep = self.svstate.dsubstep
        pack = self.svstate.pack
        unpack = self.svstate.unpack
        rm_mode = yield self.dec2.rm_dec.mode
        reverse_gear = yield self.dec2.rm_dec.reverse_gear
        sv_ptype = yield self.dec2.dec.op.SV_Ptype
        out_vec = not (yield self.dec2.no_out_vec)
        in_vec = not (yield self.dec2.no_in_vec)
        # NOTE(review): duplicate read of rm_dec.mode (also above) --
        # harmless to the simulator but redundant
        rm_mode = yield self.dec2.rm_dec.mode
        log(" svstate.vl", vl)
        log(" svstate.mvl", mvl)
        log(" rm.subvl", subvl)
        log(" svstate.srcstep", srcstep)
        log(" svstate.dststep", dststep)
        log(" svstate.ssubstep", ssubstep)
        log(" svstate.dsubstep", dsubstep)
        log(" svstate.pack", pack)
        log(" svstate.unpack", unpack)
        log(" mode", rm_mode)
        log(" reverse", reverse_gear)
        log(" out_vec", out_vec)
        log(" in_vec", in_vec)
        log(" sv_ptype", sv_ptype, sv_ptype == SVPType.P2.value)
        log(" rm_mode", rm_mode)
        # check if this was an sv.bc* and if so did it succeed
        if self.is_svp64_mode and insn_name.startswith("sv.bc"):
            end_loop = self.namespace['end_loop']
            log("branch %s end_loop" % insn_name, end_loop)
            if end_loop:  # NOTE(review): guard restored -- lost in extraction
                self.svp64_reset_loop()
                self.update_pc_next()
                return True  # NOTE(review): restored -- confirm vs upstream

        # check if srcstep needs incrementing by one, stop PC advancing
        # but for 2-pred both src/dest have to be checked.
        # XXX this might not be true! it may just be LD/ST
        if sv_ptype == SVPType.P2.value:
            svp64_is_vector = (out_vec or in_vec)
        else:  # NOTE(review): restored -- lost in extraction
            svp64_is_vector = out_vec
        # also if data-dependent fail-first is used, only in_vec is tested,
        # allowing *scalar destinations* to be used as an accumulator.
        # effectively this implies /mr (mapreduce mode) is 100% on with ddffirst
        # see https://bugs.libre-soc.org/show_bug.cgi?id=1183#c16
        ffirst = yield from is_ffirst_mode(self.dec2)
        if ffirst:  # NOTE(review): guard restored -- lost in extraction
            svp64_is_vector = in_vec

        # loops end at the first "hit" (source or dest)
        yield from self.advance_svstate_steps()
        loopend = self.loopend
        log("loopend", svp64_is_vector, loopend)
        if not svp64_is_vector or loopend:
            # reset loop to zero and update NIA
            self.svp64_reset_loop()
            self.update_nia()  # NOTE(review): restored -- confirm vs upstream
            return True  # NOTE(review): restored -- confirm vs upstream

        # still looping, advance and update NIA
        self.namespace['SVSTATE'] = self.svstate

        # not an SVP64 branch, so fix PC (NIA==CIA) for next loop
        # (by default, NIA is CIA+4 if v3.0B or CIA+8 if SVP64)
        # this way we keep repeating the same instruction (with new steps)
        self.pc.NIA.eq(self.pc.CIA)
        self.namespace['NIA'] = self.pc.NIA
        log("end of sub-pc call", self.namespace['CIA'], self.namespace['NIA'])
        return False  # DO NOT allow PC update whilst Sub-PC loop running
3219 def update_pc_next(self
):
3220 # UPDATE program counter
3221 self
.pc
.update(self
.namespace
, self
.is_svp64_mode
)
3222 #self.svstate.spr = self.namespace['SVSTATE']
3223 log("end of call", self
.namespace
['CIA'],
3224 self
.namespace
['NIA'],
3225 self
.namespace
['SVSTATE'])
3227 def svp64_reset_loop(self
):
3228 self
.svstate
.srcstep
= 0
3229 self
.svstate
.dststep
= 0
3230 self
.svstate
.ssubstep
= 0
3231 self
.svstate
.dsubstep
= 0
3232 self
.loopend
= False
3233 log(" svstate.srcstep loop end (PC to update)")
3234 self
.namespace
['SVSTATE'] = self
.svstate
3236 def update_nia(self
):
3237 self
.pc
.update_nia(self
.is_svp64_mode
)
3238 self
.namespace
['NIA'] = self
.pc
.NIA
def inject():  # NOTE(review): header line restored -- lost in extraction
    """Decorator factory.

    this decorator will "inject" variables into the function's namespace,
    from the *dictionary* in self.namespace. it therefore becomes possible
    to make it look like a whole stack of variables which would otherwise
    need "self." inserted in front of them (*and* for those variables to be
    added to the instance) "appear" in the function.

    "self.namespace['SI']" for example becomes accessible as just "SI" but
    *only* inside the function, when decorated.
    """
    def variable_injector(func):
        # the actual decorator: wraps func so its module globals are
        # temporarily overlaid with args[0].namespace (args[0] is "self")
        @wraps(func)  # NOTE(review): restored -- lost in extraction; confirm
        def decorator(*args, **kwargs):
            try:  # NOTE(review): try restored -- required by the except below
                func_globals = func.__globals__  # Python 2.6+
            except AttributeError:
                func_globals = func.func_globals  # Earlier versions.

            context = args[0].namespace  # variables to be injected
            saved_values = func_globals.copy()  # Shallow copy of dict.
            log("globals before", context.keys())
            # overlay the namespace onto the function's globals, call it,
            # then capture the (possibly modified) globals back into the
            # instance namespace
            func_globals.update(context)
            result = func(*args, **kwargs)
            log("globals after", func_globals['CIA'], func_globals['NIA'])
            log("args[0]", args[0].namespace['CIA'],
                args[0].namespace['NIA'],
                args[0].namespace['SVSTATE'])
            if 'end_loop' in func_globals:
                log("args[0] end_loop", func_globals['end_loop'])
            args[0].namespace = func_globals
            #exec (func.__code__, func_globals)

            # globals are deliberately NOT restored: the modified values
            # are kept (saved_values is currently unused)
            # func_globals = saved_values # Undo changes.

            return result  # NOTE(review): restored -- lost in extraction

        return decorator  # NOTE(review): restored -- lost in extraction

    return variable_injector