1 # SPDX-License-Identifier: LGPLv3+
2 # Copyright (C) 2020, 2021 Luke Kenneth Casson Leighton <lkcl@lkcl.net>
3 # Copyright (C) 2020 Michael Nolan
4 # Funded by NLnet http://nlnet.nl
"""core of the python-based POWER9 simulator

this is part of a cycle-accurate POWER9 simulator. its primary purpose is
not speed, it is for both learning and educational purposes, as well as
a method of verifying the HDL.

related bugs:

* https://bugs.libre-soc.org/show_bug.cgi?id=424
"""
16 from collections
import namedtuple
17 from copy
import deepcopy
18 from functools
import wraps
22 from openpower
.syscalls
import ppc_flags
24 from elftools
.elf
.elffile
import ELFFile
# for isinstance
26 from nmigen
.sim
import Settle
27 import openpower
.syscalls
28 from openpower
.consts
import (MSRb
, PIb
, # big-endian (PowerISA versions)
29 SVP64CROffs
, SVP64MODEb
)
30 from openpower
.decoder
.helpers
import (ISACallerHelper
, ISAFPHelpers
, exts
,
31 gtu
, undefined
, copy_assign_rhs
)
32 from openpower
.decoder
.isa
.mem
import Mem
, MemMMap
, MemException
, LoadedELF
33 from openpower
.decoder
.isa
.radixmmu
import RADIX
34 from openpower
.decoder
.isa
.svshape
import SVSHAPE
35 from openpower
.decoder
.isa
.svstate
import SVP64State
36 from openpower
.decoder
.orderedset
import OrderedSet
37 from openpower
.decoder
.power_enums
import (FPTRANS_INSNS
, CRInSel
, CROutSel
,
38 In1Sel
, In2Sel
, In3Sel
, LDSTMode
,
39 MicrOp
, OutSel
, SVMode
,
40 SVP64LDSTmode
, SVP64PredCR
,
41 SVP64PredInt
, SVP64PredMode
,
42 SVP64RMMode
, SVPType
, XER_bits
,
43 insns
, spr_byname
, spr_dict
,
45 from openpower
.insndb
.core
import SVP64Instruction
46 from openpower
.decoder
.power_svp64
import SVP64RM
, decode_extra
47 from openpower
.decoder
.selectable_int
import (FieldSelectableInt
,
48 SelectableInt
, selectconcat
,
49 EFFECTIVELY_UNLIMITED
)
50 from openpower
.consts
import DEFAULT_MSR
51 from openpower
.fpscr
import FPSCRState
52 from openpower
.xer
import XERState
53 from openpower
.util
import LogType
, log
55 LDST_UPDATE_INSNS
= ['ldu', 'lwzu', 'lbzu', 'lhzu', 'lhau', 'lfsu', 'lfdu',
56 'stwu', 'stbu', 'sthu', 'stfsu', 'stfdu', 'stdu',
60 instruction_info
= namedtuple('instruction_info',
61 'func read_regs uninit_regs write_regs ' +
62 'special_regs op_fields form asmregs')
72 # rrright. this is here basically because the compiler pywriter returns
73 # results in a specific priority order. to make sure regs match up they
74 # need partial sorting. sigh.
76 # TODO (lkcl): adjust other registers that should be in a particular order
77 # probably CA, CA32, and CR
105 "overflow": 7, # should definitely be last
109 fregs
= ['FRA', 'FRB', 'FRC', 'FRS', 'FRT']
112 def get_masked_reg(regs
, base
, offs
, ew_bits
):
113 # rrrright. start by breaking down into row/col, based on elwidth
114 gpr_offs
= offs
// (64 // ew_bits
)
115 gpr_col
= offs
% (64 // ew_bits
)
116 # compute the mask based on ew_bits
117 mask
= (1 << ew_bits
) - 1
118 # now select the 64-bit register, but get its value (easier)
119 val
= regs
[base
+ gpr_offs
]
120 # shift down so element we want is at LSB
121 val
>>= gpr_col
* ew_bits
122 # mask so we only return the LSB element
126 def set_masked_reg(regs
, base
, offs
, ew_bits
, value
):
127 # rrrright. start by breaking down into row/col, based on elwidth
128 gpr_offs
= offs
// (64//ew_bits
)
129 gpr_col
= offs
% (64//ew_bits
)
130 # compute the mask based on ew_bits
131 mask
= (1 << ew_bits
)-1
132 # now select the 64-bit register, but get its value (easier)
133 val
= regs
[base
+gpr_offs
]
134 # now mask out the bit we don't want
135 val
= val
& ~
(mask
<< (gpr_col
*ew_bits
))
136 # then wipe the bit we don't want from the value
138 # OR the new value in, shifted up
139 val |
= value
<< (gpr_col
*ew_bits
)
140 regs
[base
+gpr_offs
] = val
143 def create_args(reglist
, extra
=None):
144 retval
= list(OrderedSet(reglist
))
145 retval
.sort(key
=lambda reg
: REG_SORT_ORDER
.get(reg
, 0))
146 if extra
is not None:
147 return [extra
] + retval
151 def create_full_args(*, read_regs
, special_regs
, uninit_regs
, write_regs
,
154 *read_regs
, *uninit_regs
, *write_regs
, *special_regs
], extra
=extra
)
157 def is_ffirst_mode(dec2
):
158 rm_mode
= yield dec2
.rm_dec
.mode
159 return rm_mode
== SVP64RMMode
.FFIRST
.value
163 def __init__(self
, decoder
, isacaller
, svstate
, regfile
):
166 self
.isacaller
= isacaller
167 self
.svstate
= svstate
168 for i
in range(len(regfile
)):
169 self
[i
] = SelectableInt(regfile
[i
], 64)
171 def __call__(self
, ridx
, is_vec
=False, offs
=0, elwidth
=64):
172 if isinstance(ridx
, SelectableInt
):
175 return self
[ridx
+offs
]
176 # rrrright. start by breaking down into row/col, based on elwidth
177 gpr_offs
= offs
// (64//elwidth
)
178 gpr_col
= offs
% (64//elwidth
)
179 # now select the 64-bit register, but get its value (easier)
180 val
= self
[ridx
+gpr_offs
].value
181 # now shift down and mask out
182 val
= val
>> (gpr_col
*elwidth
) & ((1 << elwidth
)-1)
183 # finally, return a SelectableInt at the required elwidth
184 log("GPR call", ridx
, "isvec", is_vec
, "offs", offs
,
185 "elwid", elwidth
, "offs/col", gpr_offs
, gpr_col
, "val", hex(val
))
186 return SelectableInt(val
, elwidth
)
188 def set_form(self
, form
):
191 def write(self
, rnum
, value
, is_vec
=False, elwidth
=64):
193 if isinstance(rnum
, SelectableInt
):
195 if isinstance(value
, SelectableInt
):
198 if isinstance(rnum
, tuple):
199 rnum
, base
, offs
= rnum
202 # rrrright. start by breaking down into row/col, based on elwidth
203 gpr_offs
= offs
// (64//elwidth
)
204 gpr_col
= offs
% (64//elwidth
)
205 # compute the mask based on elwidth
206 mask
= (1 << elwidth
)-1
207 # now select the 64-bit register, but get its value (easier)
208 val
= self
[base
+gpr_offs
].value
209 # now mask out the bit we don't want
210 val
= val
& ~
(mask
<< (gpr_col
*elwidth
))
211 # then wipe the bit we don't want from the value
213 # OR the new value in, shifted up
214 val |
= value
<< (gpr_col
*elwidth
)
215 # finally put the damn value into the regfile
216 log("GPR write", base
, "isvec", is_vec
, "offs", offs
,
217 "elwid", elwidth
, "offs/col", gpr_offs
, gpr_col
, "val", hex(val
),
219 dict.__setitem
__(self
, base
+gpr_offs
, SelectableInt(val
, 64))
221 def __setitem__(self
, rnum
, value
):
222 # rnum = rnum.value # only SelectableInt allowed
223 log("GPR setitem", rnum
, value
)
224 if isinstance(rnum
, SelectableInt
):
226 dict.__setitem
__(self
, rnum
, value
)
228 def getz(self
, rnum
, rvalue
=None):
229 # rnum = rnum.value # only SelectableInt allowed
230 log("GPR getzero?", rnum
, rvalue
)
231 if rvalue
is not None:
233 return SelectableInt(0, rvalue
.bits
)
236 return SelectableInt(0, 64)
239 def _get_regnum(self
, attr
):
240 getform
= self
.sd
.sigforms
[self
.form
]
241 rnum
= getattr(getform
, attr
)
244 def ___getitem__(self
, attr
):
245 """ XXX currently not used
247 rnum
= self
._get
_regnum
(attr
)
248 log("GPR getitem", attr
, rnum
)
249 return self
.regfile
[rnum
]
251 def dump(self
, printout
=True):
253 for i
in range(len(self
)):
254 res
.append(self
[i
].value
)
256 for i
in range(0, len(res
), 8):
259 s
.append("%08x" % res
[i
+j
])
261 log("reg", "%2d" % i
, s
, kind
=LogType
.InstrInOuts
)
266 def __init__(self
, dec2
, initial_sprs
={}, gpr
=None):
268 self
.gpr
= gpr
# for SVSHAPE[0-3]
270 for key
, v
in initial_sprs
.items():
271 if isinstance(key
, SelectableInt
):
273 key
= special_sprs
.get(key
, key
)
274 if isinstance(key
, int):
277 info
= spr_byname
[key
]
278 if not isinstance(v
, SelectableInt
):
279 v
= SelectableInt(v
, info
.length
)
282 def __getitem__(self
, key
):
284 #log("dict", self.items())
285 # if key in special_sprs get the special spr, otherwise return key
286 if isinstance(key
, SelectableInt
):
288 if isinstance(key
, int):
289 key
= spr_dict
[key
].SPR
290 key
= special_sprs
.get(key
, key
)
291 if key
== 'HSRR0': # HACK!
293 if key
== 'HSRR1': # HACK!
296 res
= dict.__getitem
__(self
, key
)
298 if isinstance(key
, int):
301 info
= spr_byname
[key
]
302 self
[key
] = SelectableInt(0, info
.length
)
303 res
= dict.__getitem
__(self
, key
)
304 #log("spr returning", key, res)
307 def __setitem__(self
, key
, value
):
308 if isinstance(key
, SelectableInt
):
310 if isinstance(key
, int):
311 key
= spr_dict
[key
].SPR
313 key
= special_sprs
.get(key
, key
)
314 if key
== 'HSRR0': # HACK!
315 self
.__setitem
__('SRR0', value
)
316 if key
== 'HSRR1': # HACK!
317 self
.__setitem
__('SRR1', value
)
319 value
= XERState(value
)
320 if key
in ('SVSHAPE0', 'SVSHAPE1', 'SVSHAPE2', 'SVSHAPE3'):
321 value
= SVSHAPE(value
, self
.gpr
)
322 log("setting spr", key
, value
)
323 dict.__setitem
__(self
, key
, value
)
325 def __call__(self
, ridx
):
328 def dump(self
, printout
=True):
330 keys
= list(self
.keys())
333 sprname
= spr_dict
.get(k
, None)
337 sprname
= sprname
.SPR
338 res
.append((sprname
, self
[k
].value
))
340 for sprname
, value
in res
:
341 print(" ", sprname
, hex(value
))
346 def __init__(self
, pc_init
=0):
347 self
.CIA
= SelectableInt(pc_init
, 64)
348 self
.NIA
= self
.CIA
+ SelectableInt(4, 64) # only true for v3.0B!
350 def update_nia(self
, is_svp64
):
351 increment
= 8 if is_svp64
else 4
352 self
.NIA
= self
.CIA
+ SelectableInt(increment
, 64)
354 def update(self
, namespace
, is_svp64
):
355 """updates the program counter (PC) by 4 if v3.0B mode or 8 if SVP64
357 self
.CIA
= namespace
['NIA'].narrow(64)
358 self
.update_nia(is_svp64
)
359 namespace
['CIA'] = self
.CIA
360 namespace
['NIA'] = self
.NIA
364 # See PowerISA Version 3.0 B Book 1
365 # Section 2.3.1 Condition Register pages 30 - 31
367 LT
= FL
= 0 # negative, less than, floating-point less than
368 GT
= FG
= 1 # positive, greater than, floating-point greater than
369 EQ
= FE
= 2 # equal, floating-point equal
370 SO
= FU
= 3 # summary overflow, floating-point unordered
372 def __init__(self
, init
=0):
373 # rev_cr = int('{:016b}'.format(initial_cr)[::-1], 2)
374 # self.cr = FieldSelectableInt(self._cr, list(range(32, 64)))
375 self
.cr
= SelectableInt(init
, 64) # underlying reg
376 # field-selectable versions of Condition Register TODO check bitranges?
379 bits
= tuple(range(i
*4+32, (i
+1)*4+32))
380 _cr
= FieldSelectableInt(self
.cr
, bits
)
384 # decode SVP64 predicate integer to reg number and invert
385 def get_predint(gpr
, mask
):
389 log("get_predint", mask
, SVP64PredInt
.ALWAYS
.value
)
390 if mask
== SVP64PredInt
.ALWAYS
.value
:
391 return 0xffff_ffff_ffff_ffff # 64 bits of 1
392 if mask
== SVP64PredInt
.R3_UNARY
.value
:
393 return 1 << (r3
.value
& 0b111111)
394 if mask
== SVP64PredInt
.R3
.value
:
396 if mask
== SVP64PredInt
.R3_N
.value
:
398 if mask
== SVP64PredInt
.R10
.value
:
400 if mask
== SVP64PredInt
.R10_N
.value
:
402 if mask
== SVP64PredInt
.R30
.value
:
404 if mask
== SVP64PredInt
.R30_N
.value
:
408 # decode SVP64 predicate CR to reg number and invert status
409 def _get_predcr(mask
):
410 if mask
== SVP64PredCR
.LT
.value
:
412 if mask
== SVP64PredCR
.GE
.value
:
414 if mask
== SVP64PredCR
.GT
.value
:
416 if mask
== SVP64PredCR
.LE
.value
:
418 if mask
== SVP64PredCR
.EQ
.value
:
420 if mask
== SVP64PredCR
.NE
.value
:
422 if mask
== SVP64PredCR
.SO
.value
:
424 if mask
== SVP64PredCR
.NS
.value
:
428 # read individual CR fields (0..VL-1), extract the required bit
429 # and construct the mask
430 def get_predcr(crl
, predselect
, vl
):
431 idx
, noninv
= _get_predcr(predselect
)
434 cr
= crl
[i
+SVP64CROffs
.CRPred
]
435 if cr
[idx
].value
== noninv
:
437 log("get_predcr", vl
, idx
, noninv
, i
+SVP64CROffs
.CRPred
,
438 bin(cr
.asint()), cr
[idx
].value
, bin(mask
))
442 # TODO, really should just be using PowerDecoder2
443 def get_idx_map(dec2
, name
):
445 in1_sel
= yield op
.in1_sel
446 in2_sel
= yield op
.in2_sel
447 in3_sel
= yield op
.in3_sel
448 in1
= yield dec2
.e
.read_reg1
.data
449 # identify which regnames map to in1/2/3
450 if name
== 'RA' or name
== 'RA_OR_ZERO':
451 if (in1_sel
== In1Sel
.RA
.value
or
452 (in1_sel
== In1Sel
.RA_OR_ZERO
.value
and in1
!= 0)):
454 if in1_sel
== In1Sel
.RA_OR_ZERO
.value
:
457 if in2_sel
== In2Sel
.RB
.value
:
459 if in3_sel
== In3Sel
.RB
.value
:
461 # XXX TODO, RC doesn't exist yet!
463 if in3_sel
== In3Sel
.RC
.value
:
465 elif name
in ['EA', 'RS']:
466 if in1_sel
== In1Sel
.RS
.value
:
468 if in2_sel
== In2Sel
.RS
.value
:
470 if in3_sel
== In3Sel
.RS
.value
:
473 if in1_sel
== In1Sel
.FRA
.value
:
475 if in3_sel
== In3Sel
.FRA
.value
:
478 if in2_sel
== In2Sel
.FRB
.value
:
481 if in3_sel
== In3Sel
.FRC
.value
:
484 if in1_sel
== In1Sel
.FRS
.value
:
486 if in3_sel
== In3Sel
.FRS
.value
:
489 if in1_sel
== In1Sel
.FRT
.value
:
492 if in1_sel
== In1Sel
.RT
.value
:
497 # TODO, really should just be using PowerDecoder2
498 def get_idx_in(dec2
, name
, ewmode
=False):
499 idx
= yield from get_idx_map(dec2
, name
)
503 in1_sel
= yield op
.in1_sel
504 in2_sel
= yield op
.in2_sel
505 in3_sel
= yield op
.in3_sel
506 # get the IN1/2/3 from the decoder (includes SVP64 remap and isvec)
507 in1
= yield dec2
.e
.read_reg1
.data
508 in2
= yield dec2
.e
.read_reg2
.data
509 in3
= yield dec2
.e
.read_reg3
.data
511 in1_base
= yield dec2
.e
.read_reg1
.base
512 in2_base
= yield dec2
.e
.read_reg2
.base
513 in3_base
= yield dec2
.e
.read_reg3
.base
514 in1_offs
= yield dec2
.e
.read_reg1
.offs
515 in2_offs
= yield dec2
.e
.read_reg2
.offs
516 in3_offs
= yield dec2
.e
.read_reg3
.offs
517 in1
= (in1
, in1_base
, in1_offs
)
518 in2
= (in2
, in2_base
, in2_offs
)
519 in3
= (in3
, in3_base
, in3_offs
)
521 in1_isvec
= yield dec2
.in1_isvec
522 in2_isvec
= yield dec2
.in2_isvec
523 in3_isvec
= yield dec2
.in3_isvec
524 log("get_idx_in in1", name
, in1_sel
, In1Sel
.RA
.value
,
526 log("get_idx_in in2", name
, in2_sel
, In2Sel
.RB
.value
,
528 log("get_idx_in in3", name
, in3_sel
, In3Sel
.RS
.value
,
530 log("get_idx_in FRS in3", name
, in3_sel
, In3Sel
.FRS
.value
,
532 log("get_idx_in FRB in2", name
, in2_sel
, In2Sel
.FRB
.value
,
534 log("get_idx_in FRC in3", name
, in3_sel
, In3Sel
.FRC
.value
,
537 return in1
, in1_isvec
539 return in2
, in2_isvec
541 return in3
, in3_isvec
545 # TODO, really should just be using PowerDecoder2
546 def get_cr_in(dec2
, name
):
548 in_sel
= yield op
.cr_in
549 in_bitfield
= yield dec2
.dec_cr_in
.cr_bitfield
.data
550 sv_cr_in
= yield op
.sv_cr_in
551 spec
= yield dec2
.crin_svdec
.spec
552 sv_override
= yield dec2
.dec_cr_in
.sv_override
553 # get the IN1/2/3 from the decoder (includes SVP64 remap and isvec)
554 in1
= yield dec2
.e
.read_cr1
.data
555 cr_isvec
= yield dec2
.cr_in_isvec
556 log("get_cr_in", in_sel
, CROutSel
.CR0
.value
, in1
, cr_isvec
)
557 log(" sv_cr_in", sv_cr_in
)
558 log(" cr_bf", in_bitfield
)
560 log(" override", sv_override
)
561 # identify which regnames map to in / o2
563 if in_sel
== CRInSel
.BI
.value
:
566 if in_sel
== CRInSel
.BFA
.value
:
567 if name
in ['BA', 'BB']:
568 if in_sel
== CRInSel
.BA_BB
.value
:
570 log("get_cr_in not found", name
)
574 # TODO, really should just be using PowerDecoder2
575 def get_cr_out(dec2
, name
):
577 out_sel
= yield op
.cr_out
578 out_bitfield
= yield dec2
.dec_cr_out
.cr_bitfield
.data
579 sv_cr_out
= yield op
.sv_cr_out
580 spec
= yield dec2
.crout_svdec
.spec
581 sv_override
= yield dec2
.dec_cr_out
.sv_override
582 # get the IN1/2/3 from the decoder (includes SVP64 remap and isvec)
583 out
= yield dec2
.e
.write_cr
.data
584 o_isvec
= yield dec2
.cr_out_isvec
585 log("get_cr_out", out_sel
, CROutSel
.CR0
.value
, out
, o_isvec
)
586 log(" sv_cr_out", sv_cr_out
)
587 log(" cr_bf", out_bitfield
)
589 log(" override", sv_override
)
590 # identify which regnames map to out / o2
592 if out_sel
== CROutSel
.BF
.value
:
595 if out_sel
== CROutSel
.BT
.value
:
598 if out_sel
== CROutSel
.CR0
.value
:
600 if name
== 'CR1': # these are not actually calculated correctly
601 if out_sel
== CROutSel
.CR1
.value
:
603 # check RC1 set? if so return implicit vector, this is a REAL bad hack
604 RC1
= yield dec2
.rm_dec
.RC1
606 log("get_cr_out RC1 mode")
608 return 0, True # XXX TODO: offset CR0 from SVSTATE SPR
610 return 1, True # XXX TODO: offset CR1 from SVSTATE SPR
612 log("get_cr_out not found", name
)
616 # TODO, really should just be using PowerDecoder2
617 def get_out_map(dec2
, name
):
619 out_sel
= yield op
.out_sel
620 # get the IN1/2/3 from the decoder (includes SVP64 remap and isvec)
621 out
= yield dec2
.e
.write_reg
.data
622 # identify which regnames map to out / o2
624 if out_sel
== OutSel
.RA
.value
:
627 if out_sel
== OutSel
.RT
.value
:
629 if out_sel
== OutSel
.RT_OR_ZERO
.value
and out
!= 0:
631 elif name
== 'RT_OR_ZERO':
632 if out_sel
== OutSel
.RT_OR_ZERO
.value
:
635 if out_sel
== OutSel
.FRA
.value
:
638 if out_sel
== OutSel
.FRS
.value
:
641 if out_sel
== OutSel
.FRT
.value
:
646 # TODO, really should just be using PowerDecoder2
647 def get_idx_out(dec2
, name
, ewmode
=False):
649 out_sel
= yield op
.out_sel
650 # get the IN1/2/3 from the decoder (includes SVP64 remap and isvec)
651 out
= yield dec2
.e
.write_reg
.data
652 o_isvec
= yield dec2
.o_isvec
654 offs
= yield dec2
.e
.write_reg
.offs
655 base
= yield dec2
.e
.write_reg
.base
656 out
= (out
, base
, offs
)
657 # identify which regnames map to out / o2
658 ismap
= yield from get_out_map(dec2
, name
)
660 log("get_idx_out", name
, out_sel
, out
, o_isvec
)
662 log("get_idx_out not found", name
, out_sel
, out
, o_isvec
)
666 # TODO, really should just be using PowerDecoder2
667 def get_out2_map(dec2
, name
):
668 # check first if register is activated for write
670 out_sel
= yield op
.out_sel
671 out
= yield dec2
.e
.write_ea
.data
672 out_ok
= yield dec2
.e
.write_ea
.ok
676 if name
in ['EA', 'RA']:
677 if hasattr(op
, "upd"):
678 # update mode LD/ST uses read-reg A also as an output
680 log("get_idx_out2", upd
, LDSTMode
.update
.value
,
681 out_sel
, OutSel
.RA
.value
,
683 if upd
== LDSTMode
.update
.value
:
686 fft_en
= yield dec2
.implicit_rs
688 log("get_idx_out2", out_sel
, OutSel
.RS
.value
,
692 fft_en
= yield dec2
.implicit_rs
694 log("get_idx_out2", out_sel
, OutSel
.FRS
.value
,
700 # TODO, really should just be using PowerDecoder2
701 def get_idx_out2(dec2
, name
, ewmode
=False):
702 # check first if register is activated for write
704 out_sel
= yield op
.out_sel
705 out
= yield dec2
.e
.write_ea
.data
707 offs
= yield dec2
.e
.write_ea
.offs
708 base
= yield dec2
.e
.write_ea
.base
709 out
= (out
, base
, offs
)
710 o_isvec
= yield dec2
.o2_isvec
711 ismap
= yield from get_out2_map(dec2
, name
)
713 log("get_idx_out2", name
, out_sel
, out
, o_isvec
)
719 """deals with svstate looping.
722 def __init__(self
, svstate
):
723 self
.svstate
= svstate
726 def new_iterators(self
):
727 self
.src_it
= self
.src_iterator()
728 self
.dst_it
= self
.dst_iterator()
732 self
.new_ssubstep
= 0
733 self
.new_dsubstep
= 0
734 self
.pred_dst_zero
= 0
735 self
.pred_src_zero
= 0
737 def src_iterator(self
):
738 """source-stepping iterator
740 pack
= self
.svstate
.pack
744 # pack advances subvl in *outer* loop
745 while True: # outer subvl loop
746 while True: # inner vl loop
749 srcmask
= self
.srcmask
750 srcstep
= self
.svstate
.srcstep
751 pred_src_zero
= ((1 << srcstep
) & srcmask
) != 0
752 if self
.pred_sz
or pred_src_zero
:
753 self
.pred_src_zero
= not pred_src_zero
754 log(" advance src", srcstep
, vl
,
755 self
.svstate
.ssubstep
, subvl
)
756 # yield actual substep/srcstep
757 yield (self
.svstate
.ssubstep
, srcstep
)
758 # the way yield works these could have been modified.
761 srcstep
= self
.svstate
.srcstep
762 log(" advance src check", srcstep
, vl
,
763 self
.svstate
.ssubstep
, subvl
, srcstep
== vl
-1,
764 self
.svstate
.ssubstep
== subvl
)
765 if srcstep
== vl
-1: # end-point
766 self
.svstate
.srcstep
= SelectableInt(0, 7) # reset
767 if self
.svstate
.ssubstep
== subvl
: # end-point
768 log(" advance pack stop")
770 break # exit inner loop
771 self
.svstate
.srcstep
+= SelectableInt(1, 7) # advance ss
773 if self
.svstate
.ssubstep
== subvl
: # end-point
774 self
.svstate
.ssubstep
= SelectableInt(0, 2) # reset
775 log(" advance pack stop")
777 self
.svstate
.ssubstep
+= SelectableInt(1, 2)
780 # these cannot be done as for-loops because SVSTATE may change
781 # (srcstep/substep may be modified, interrupted, subvl/vl change)
782 # but they *can* be done as while-loops as long as every SVSTATE
783 # "thing" is re-read every single time a yield gives indices
784 while True: # outer vl loop
785 while True: # inner subvl loop
788 srcmask
= self
.srcmask
789 srcstep
= self
.svstate
.srcstep
790 pred_src_zero
= ((1 << srcstep
) & srcmask
) != 0
791 if self
.pred_sz
or pred_src_zero
:
792 self
.pred_src_zero
= not pred_src_zero
793 log(" advance src", srcstep
, vl
,
794 self
.svstate
.ssubstep
, subvl
)
795 # yield actual substep/srcstep
796 yield (self
.svstate
.ssubstep
, srcstep
)
797 if self
.svstate
.ssubstep
== subvl
: # end-point
798 self
.svstate
.ssubstep
= SelectableInt(0, 2) # reset
799 break # exit inner loop
800 self
.svstate
.ssubstep
+= SelectableInt(1, 2)
802 if srcstep
== vl
-1: # end-point
803 self
.svstate
.srcstep
= SelectableInt(0, 7) # reset
806 self
.svstate
.srcstep
+= SelectableInt(1, 7) # advance srcstep
808 def dst_iterator(self
):
809 """dest-stepping iterator
811 unpack
= self
.svstate
.unpack
815 # pack advances subvl in *outer* loop
816 while True: # outer subvl loop
817 while True: # inner vl loop
820 dstmask
= self
.dstmask
821 dststep
= self
.svstate
.dststep
822 pred_dst_zero
= ((1 << dststep
) & dstmask
) != 0
823 if self
.pred_dz
or pred_dst_zero
:
824 self
.pred_dst_zero
= not pred_dst_zero
825 log(" advance dst", dststep
, vl
,
826 self
.svstate
.dsubstep
, subvl
)
827 # yield actual substep/dststep
828 yield (self
.svstate
.dsubstep
, dststep
)
829 # the way yield works these could have been modified.
831 dststep
= self
.svstate
.dststep
832 log(" advance dst check", dststep
, vl
,
833 self
.svstate
.ssubstep
, subvl
)
834 if dststep
== vl
-1: # end-point
835 self
.svstate
.dststep
= SelectableInt(0, 7) # reset
836 if self
.svstate
.dsubstep
== subvl
: # end-point
837 log(" advance unpack stop")
840 self
.svstate
.dststep
+= SelectableInt(1, 7) # advance ds
842 if self
.svstate
.dsubstep
== subvl
: # end-point
843 self
.svstate
.dsubstep
= SelectableInt(0, 2) # reset
844 log(" advance unpack stop")
846 self
.svstate
.dsubstep
+= SelectableInt(1, 2)
848 # these cannot be done as for-loops because SVSTATE may change
849 # (dststep/substep may be modified, interrupted, subvl/vl change)
850 # but they *can* be done as while-loops as long as every SVSTATE
851 # "thing" is re-read every single time a yield gives indices
852 while True: # outer vl loop
853 while True: # inner subvl loop
855 dstmask
= self
.dstmask
856 dststep
= self
.svstate
.dststep
857 pred_dst_zero
= ((1 << dststep
) & dstmask
) != 0
858 if self
.pred_dz
or pred_dst_zero
:
859 self
.pred_dst_zero
= not pred_dst_zero
860 log(" advance dst", dststep
, self
.svstate
.vl
,
861 self
.svstate
.dsubstep
, subvl
)
862 # yield actual substep/dststep
863 yield (self
.svstate
.dsubstep
, dststep
)
864 if self
.svstate
.dsubstep
== subvl
: # end-point
865 self
.svstate
.dsubstep
= SelectableInt(0, 2) # reset
867 self
.svstate
.dsubstep
+= SelectableInt(1, 2)
870 if dststep
== vl
-1: # end-point
871 self
.svstate
.dststep
= SelectableInt(0, 7) # reset
873 self
.svstate
.dststep
+= SelectableInt(1, 7) # advance dststep
875 def src_iterate(self
):
876 """source-stepping iterator
880 pack
= self
.svstate
.pack
881 unpack
= self
.svstate
.unpack
882 ssubstep
= self
.svstate
.ssubstep
883 end_ssub
= ssubstep
== subvl
884 end_src
= self
.svstate
.srcstep
== vl
-1
885 log(" pack/unpack/subvl", pack
, unpack
, subvl
,
889 srcstep
= self
.svstate
.srcstep
890 srcmask
= self
.srcmask
892 # pack advances subvl in *outer* loop
894 assert srcstep
<= vl
-1
895 end_src
= srcstep
== vl
-1
900 self
.svstate
.ssubstep
+= SelectableInt(1, 2)
904 srcstep
+= 1 # advance srcstep
905 if not self
.srcstep_skip
:
907 if ((1 << srcstep
) & srcmask
) != 0:
910 log(" sskip", bin(srcmask
), bin(1 << srcstep
))
912 # advance subvl in *inner* loop
915 assert srcstep
<= vl
-1
916 end_src
= srcstep
== vl
-1
917 if end_src
: # end-point
923 if not self
.srcstep_skip
:
925 if ((1 << srcstep
) & srcmask
) != 0:
928 log(" sskip", bin(srcmask
), bin(1 << srcstep
))
929 self
.svstate
.ssubstep
= SelectableInt(0, 2) # reset
932 self
.svstate
.ssubstep
+= SelectableInt(1, 2)
934 self
.svstate
.srcstep
= SelectableInt(srcstep
, 7)
935 log(" advance src", self
.svstate
.srcstep
, self
.svstate
.ssubstep
,
938 def dst_iterate(self
):
939 """dest step iterator
943 pack
= self
.svstate
.pack
944 unpack
= self
.svstate
.unpack
945 dsubstep
= self
.svstate
.dsubstep
946 end_dsub
= dsubstep
== subvl
947 dststep
= self
.svstate
.dststep
948 end_dst
= dststep
== vl
-1
949 dstmask
= self
.dstmask
950 log(" pack/unpack/subvl", pack
, unpack
, subvl
,
955 # unpack advances subvl in *outer* loop
957 assert dststep
<= vl
-1
958 end_dst
= dststep
== vl
-1
963 self
.svstate
.dsubstep
+= SelectableInt(1, 2)
967 dststep
+= 1 # advance dststep
968 if not self
.dststep_skip
:
970 if ((1 << dststep
) & dstmask
) != 0:
973 log(" dskip", bin(dstmask
), bin(1 << dststep
))
975 # advance subvl in *inner* loop
978 assert dststep
<= vl
-1
979 end_dst
= dststep
== vl
-1
980 if end_dst
: # end-point
986 if not self
.dststep_skip
:
988 if ((1 << dststep
) & dstmask
) != 0:
991 log(" dskip", bin(dstmask
), bin(1 << dststep
))
992 self
.svstate
.dsubstep
= SelectableInt(0, 2) # reset
995 self
.svstate
.dsubstep
+= SelectableInt(1, 2)
997 self
.svstate
.dststep
= SelectableInt(dststep
, 7)
998 log(" advance dst", self
.svstate
.dststep
, self
.svstate
.dsubstep
,
1001 def at_loopend(self
):
1002 """tells if this is the last possible element. uses the cached values
1003 for src/dst-step and sub-steps
1006 vl
= self
.svstate
.vl
1007 srcstep
, dststep
= self
.new_srcstep
, self
.new_dststep
1008 ssubstep
, dsubstep
= self
.new_ssubstep
, self
.new_dsubstep
1009 end_ssub
= ssubstep
== subvl
1010 end_dsub
= dsubstep
== subvl
1011 if srcstep
== vl
-1 and end_ssub
:
1013 if dststep
== vl
-1 and end_dsub
:
1017 def advance_svstate_steps(self
):
1018 """ advance sub/steps. note that Pack/Unpack *INVERTS* the order.
1019 TODO when Pack/Unpack is set, substep becomes the *outer* loop
1021 self
.subvl
= yield self
.dec2
.rm_dec
.rm_in
.subvl
1022 if self
.loopend
: # huhn??
1027 def read_src_mask(self
):
1028 """read/update pred_sz and src mask
1030 # get SVSTATE VL (oh and print out some debug stuff)
1031 vl
= self
.svstate
.vl
1032 srcstep
= self
.svstate
.srcstep
1033 ssubstep
= self
.svstate
.ssubstep
1035 # get predicate mask (all 64 bits)
1036 srcmask
= 0xffff_ffff_ffff_ffff
1038 pmode
= yield self
.dec2
.rm_dec
.predmode
1039 sv_ptype
= yield self
.dec2
.dec
.op
.SV_Ptype
1040 srcpred
= yield self
.dec2
.rm_dec
.srcpred
1041 dstpred
= yield self
.dec2
.rm_dec
.dstpred
1042 pred_sz
= yield self
.dec2
.rm_dec
.pred_sz
1043 if pmode
== SVP64PredMode
.INT
.value
:
1044 srcmask
= dstmask
= get_predint(self
.gpr
, dstpred
)
1045 if sv_ptype
== SVPType
.P2
.value
:
1046 srcmask
= get_predint(self
.gpr
, srcpred
)
1047 elif pmode
== SVP64PredMode
.CR
.value
:
1048 srcmask
= dstmask
= get_predcr(self
.crl
, dstpred
, vl
)
1049 if sv_ptype
== SVPType
.P2
.value
:
1050 srcmask
= get_predcr(self
.crl
, srcpred
, vl
)
1051 # work out if the ssubsteps are completed
1052 ssubstart
= ssubstep
== 0
1053 log(" pmode", pmode
)
1054 log(" ptype", sv_ptype
)
1055 log(" srcpred", bin(srcpred
))
1056 log(" srcmask", bin(srcmask
))
1057 log(" pred_sz", bin(pred_sz
))
1058 log(" ssubstart", ssubstart
)
1060 # store all that above
1061 self
.srcstep_skip
= False
1062 self
.srcmask
= srcmask
1063 self
.pred_sz
= pred_sz
1064 self
.new_ssubstep
= ssubstep
1065 log(" new ssubstep", ssubstep
)
1066 # until the predicate mask has a "1" bit... or we run out of VL
1067 # let srcstep==VL be the indicator to move to next instruction
1069 self
.srcstep_skip
= True
1071 def read_dst_mask(self
):
1072 """same as read_src_mask - check and record everything needed
1074 # get SVSTATE VL (oh and print out some debug stuff)
1075 # yield Delay(1e-10) # make changes visible
1076 vl
= self
.svstate
.vl
1077 dststep
= self
.svstate
.dststep
1078 dsubstep
= self
.svstate
.dsubstep
1080 # get predicate mask (all 64 bits)
1081 dstmask
= 0xffff_ffff_ffff_ffff
1083 pmode
= yield self
.dec2
.rm_dec
.predmode
1084 reverse_gear
= yield self
.dec2
.rm_dec
.reverse_gear
1085 sv_ptype
= yield self
.dec2
.dec
.op
.SV_Ptype
1086 dstpred
= yield self
.dec2
.rm_dec
.dstpred
1087 pred_dz
= yield self
.dec2
.rm_dec
.pred_dz
1088 if pmode
== SVP64PredMode
.INT
.value
:
1089 dstmask
= get_predint(self
.gpr
, dstpred
)
1090 elif pmode
== SVP64PredMode
.CR
.value
:
1091 dstmask
= get_predcr(self
.crl
, dstpred
, vl
)
1092 # work out if the ssubsteps are completed
1093 dsubstart
= dsubstep
== 0
1094 log(" pmode", pmode
)
1095 log(" ptype", sv_ptype
)
1096 log(" dstpred", bin(dstpred
))
1097 log(" dstmask", bin(dstmask
))
1098 log(" pred_dz", bin(pred_dz
))
1099 log(" dsubstart", dsubstart
)
1101 self
.dststep_skip
= False
1102 self
.dstmask
= dstmask
1103 self
.pred_dz
= pred_dz
1104 self
.new_dsubstep
= dsubstep
1105 log(" new dsubstep", dsubstep
)
1107 self
.dststep_skip
= True
1109 def svstate_pre_inc(self
):
1110 """check if srcstep/dststep need to skip over masked-out predicate bits
1111 note that this is not supposed to do anything to substep,
1112 it is purely for skipping masked-out bits
1115 self
.subvl
= yield self
.dec2
.rm_dec
.rm_in
.subvl
1116 yield from self
.read_src_mask()
1117 yield from self
.read_dst_mask()
1124 srcstep
= self
.svstate
.srcstep
1125 srcmask
= self
.srcmask
1126 pred_src_zero
= self
.pred_sz
1127 vl
= self
.svstate
.vl
1128 # srcstep-skipping opportunity identified
1129 if self
.srcstep_skip
:
1130 # cannot do this with sv.bc - XXX TODO
1133 while (((1 << srcstep
) & srcmask
) == 0) and (srcstep
!= vl
):
1134 log(" sskip", bin(1 << srcstep
))
1137 # now work out if the relevant mask bits require zeroing
1139 pred_src_zero
= ((1 << srcstep
) & srcmask
) == 0
1141 # store new srcstep / dststep
1142 self
.new_srcstep
= srcstep
1143 self
.pred_src_zero
= pred_src_zero
1144 log(" new srcstep", srcstep
)
1147 # dststep-skipping opportunity identified
1148 dststep
= self
.svstate
.dststep
1149 dstmask
= self
.dstmask
1150 pred_dst_zero
= self
.pred_dz
1151 vl
= self
.svstate
.vl
1152 if self
.dststep_skip
:
1153 # cannot do this with sv.bc - XXX TODO
1156 while (((1 << dststep
) & dstmask
) == 0) and (dststep
!= vl
):
1157 log(" dskip", bin(1 << dststep
))
1160 # now work out if the relevant mask bits require zeroing
1162 pred_dst_zero
= ((1 << dststep
) & dstmask
) == 0
1164 # store new srcstep / dststep
1165 self
.new_dststep
= dststep
1166 self
.pred_dst_zero
= pred_dst_zero
1167 log(" new dststep", dststep
)
1170 class ExitSyscallCalled(Exception):
1174 class SyscallEmulator(openpower
.syscalls
.Dispatcher
):
def __init__(self, isacaller):
    """Syscall dispatcher bridging the simulated guest to the host OS.

    isacaller: the owning ISACaller, kept (name-mangled) so individual
    sys_* handlers can reach simulator memory and halt state.
    """
    self.__isacaller = isacaller

    # determine the host architecture and word size so guest (ppc64)
    # syscall numbers can be mapped onto the host's
    host = os.uname().machine
    # sys.maxsize > 2**32 implies a 64-bit python/host
    bits = (64 if (sys.maxsize > (2**32)) else 32)
    host = openpower.syscalls.architecture(arch=host, bits=bits)

    return super().__init__(guest="ppc64", host=host)
def __call__(self, identifier, *arguments):
    """Coerce the syscall number and every argument to plain int
    before dispatching to the base-class handler."""
    number = int(identifier)
    args = [int(argument) for argument in arguments]
    return super().__call__(number, *args)
def sys_exit_group(self, status, *rest):
    """Emulate exit_group(2): halt the simulator, then unwind out of
    the instruction loop via ExitSyscallCalled carrying the status."""
    self.__isacaller.halted = True
    raise ExitSyscallCalled(status)
1192 def sys_write(self
, fd
, buf
, count
, *rest
):
1194 buf
= self
.__isacaller
.mem
.get_ctypes(buf
, count
, is_write
=False)
1198 return os
.write(fd
, buf
)
1199 except OSError as e
:
1202 def sys_writev(self
, fd
, iov
, iovcnt
, *rest
):
1204 if iovcnt
< 0 or iovcnt
> IOV_MAX
:
1205 return -errno
.EINVAL
1206 struct_iovec
= struct
.Struct("<QQ")
1209 iov
= self
.__isacaller
.mem
.get_ctypes(
1210 iov
, struct_iovec
.size
* iovcnt
, is_write
=False)
1211 iov
= list(struct_iovec
.iter_unpack(iov
))
1214 for i
, iovec
in enumerate(iov
):
1215 iov_base
, iov_len
= iovec
1216 iov
[i
] = self
.__isacaller
.mem
.get_ctypes(
1217 iov_base
, iov_len
, is_write
=False)
1218 except (ValueError, MemException
):
1219 return -errno
.EFAULT
1221 return os
.writev(fd
, iov
)
1222 except OSError as e
:
1225 def sys_read(self
, fd
, buf
, count
, *rest
):
1227 buf
= self
.__isacaller
.mem
.get_ctypes(buf
, count
, is_write
=True)
1231 return os
.readv(fd
, [buf
])
1232 except OSError as e
:
1235 def sys_mmap(self
, addr
, length
, prot
, flags
, fd
, offset
, *rest
):
1236 return self
.__isacaller
.mem
.mmap_syscall(
1237 addr
, length
, prot
, flags
, fd
, offset
, is_mmap2
=False)
1239 def sys_mmap2(self
, addr
, length
, prot
, flags
, fd
, offset
, *rest
):
1240 return self
.__isacaller
.mem
.mmap_syscall(
1241 addr
, length
, prot
, flags
, fd
, offset
, is_mmap2
=True)
1243 def sys_brk(self
, addr
, *rest
):
1244 return self
.__isacaller
.mem
.brk_syscall(addr
)
    def sys_munmap(self, addr, length, *rest):
        """munmap: not yet emulated; report ENOSYS to the guest."""
        return -errno.ENOSYS  # TODO: implement
    def sys_mprotect(self, addr, length, prot, *rest):
        """mprotect: not yet emulated; report ENOSYS to the guest."""
        return -errno.ENOSYS  # TODO: implement
    def sys_pkey_mprotect(self, addr, length, prot, pkey, *rest):
        """pkey_mprotect: not yet emulated; report ENOSYS to the guest."""
        return -errno.ENOSYS  # TODO: implement
1255 def sys_openat(self
, dirfd
, pathname
, flags
, mode
, *rest
):
1257 path
= self
.__isacaller
.mem
.read_cstr(pathname
)
1258 except (ValueError, MemException
):
1259 return -errno
.EFAULT
1261 if dirfd
== ppc_flags
.AT_FDCWD
:
1262 return os
.open(path
, flags
, mode
)
1264 return os
.open(path
, flags
, mode
, dir_fd
=dirfd
)
1265 except OSError as e
:
1271 nodename
= uname
.nodename
.encode()
1272 release
= b
'5.6.0-1-powerpc64le'
1273 version
= b
'#1 SMP Debian 5.6.7-1 (2020-04-29)'
1274 machine
= b
'ppc64le'
1276 return sysname
, nodename
, release
, version
, machine
, domainname
1278 def sys_uname(self
, buf
, *rest
):
1279 s
= struct
.Struct("<65s65s65s65s65s")
1281 buf
= self
.__isacaller
.mem
.get_ctypes(buf
, s
.size
, is_write
=True)
1282 except (ValueError, MemException
):
1283 return -errno
.EFAULT
1284 sysname
, nodename
, release
, version
, machine
, domainname
= \
1286 s
.pack_into(buf
, 0, sysname
, nodename
, release
, version
, machine
)
1289 def sys_newuname(self
, buf
, *rest
):
1290 name_len
= ppc_flags
.__NEW
_UTS
_LEN
+ 1
1291 s
= struct
.Struct("<%ds%ds%ds%ds%ds%ds" % ((name_len
,) * 6))
1293 buf
= self
.__isacaller
.mem
.get_ctypes(buf
, s
.size
, is_write
=True)
1294 except (ValueError, MemException
):
1295 return -errno
.EFAULT
1296 sysname
, nodename
, release
, version
, machine
, domainname
= \
1299 sysname
, nodename
, release
, version
, machine
, domainname
)
1302 def sys_readlink(self
, pathname
, buf
, bufsiz
, *rest
):
1303 dirfd
= ppc_flags
.AT_FDCWD
1304 return self
.sys_readlinkat(dirfd
, pathname
, buf
, bufsiz
)
1306 def sys_readlinkat(self
, dirfd
, pathname
, buf
, bufsiz
, *rest
):
1308 path
= self
.__isacaller
.mem
.read_cstr(pathname
)
1310 buf
= self
.__isacaller
.mem
.get_ctypes(
1311 buf
, bufsiz
, is_write
=True)
1314 except (ValueError, MemException
):
1315 return -errno
.EFAULT
1317 if dirfd
== ppc_flags
.AT_FDCWD
:
1318 result
= os
.readlink(path
)
1320 result
= os
.readlink(path
, dir_fd
=dirfd
)
1321 retval
= min(len(result
), len(buf
))
1322 buf
[:retval
] = result
[:retval
]
1324 except OSError as e
:
1328 class ISACaller(ISACallerHelper
, ISAFPHelpers
, StepLoop
):
1329 # decoder2 - an instance of power_decoder2
1330 # regfile - a list of initial values for the registers
1331 # initial_{etc} - initial values for SPRs, Condition Register, Mem, MSR
1332 # respect_pc - tracks the program counter. requires initial_insns
1333 def __init__(self
, decoder2
, regfile
, initial_sprs
=None, initial_cr
=0,
1334 initial_mem
=None, initial_msr
=0,
1347 use_syscall_emu
=False,
1348 emulating_mmap
=False,
1349 real_page_size
=None):
1351 self
.syscall
= SyscallEmulator(isacaller
=self
)
1352 if not use_mmap_mem
:
1353 log("forcing use_mmap_mem due to use_syscall_emu active")
1358 # we will eventually be able to load ELF files without use_syscall_emu
1359 # (e.g. the linux kernel), so do it in a separate if block
1360 if isinstance(initial_insns
, ELFFile
):
1361 if not use_mmap_mem
:
1362 log("forcing use_mmap_mem due to loading an ELF file")
1364 if not emulating_mmap
:
1365 log("forcing emulating_mmap due to loading an ELF file")
1366 emulating_mmap
= True
1368 # trace log file for model output. if None do nothing
1369 self
.insnlog
= insnlog
1370 self
.insnlog_is_file
= hasattr(insnlog
, "write")
1371 if not self
.insnlog_is_file
and self
.insnlog
:
1372 self
.insnlog
= open(self
.insnlog
, "w")
1374 self
.bigendian
= bigendian
1376 self
.is_svp64_mode
= False
1377 self
.respect_pc
= respect_pc
1378 if initial_sprs
is None:
1380 if initial_mem
is None:
1382 if fpregfile
is None:
1383 fpregfile
= [0] * 32
1384 if initial_insns
is None:
1386 assert self
.respect_pc
== False, "instructions required to honor pc"
1387 if initial_msr
is None:
1388 initial_msr
= DEFAULT_MSR
1390 log("ISACaller insns", respect_pc
, initial_insns
, disassembly
)
1391 log("ISACaller initial_msr", initial_msr
)
1393 # "fake program counter" mode (for unit testing)
1397 if isinstance(initial_mem
, tuple):
1398 self
.fake_pc
= initial_mem
[0]
1399 disasm_start
= self
.fake_pc
1401 disasm_start
= initial_pc
1403 # disassembly: we need this for now (not given from the decoder)
1404 self
.disassembly
= {}
1406 for i
, code
in enumerate(disassembly
):
1407 self
.disassembly
[i
*4 + disasm_start
] = code
1409 # set up registers, instruction memory, data memory, PC, SPRs, MSR, CR
1410 self
.svp64rm
= SVP64RM()
1411 if initial_svstate
is None:
1413 if isinstance(initial_svstate
, int):
1414 initial_svstate
= SVP64State(initial_svstate
)
1415 # SVSTATE, MSR and PC
1416 StepLoop
.__init
__(self
, initial_svstate
)
1417 self
.msr
= SelectableInt(initial_msr
, 64) # underlying reg
1419 # GPR FPR SPR registers
1420 initial_sprs
= deepcopy(initial_sprs
) # so as not to get modified
1421 self
.gpr
= GPR(decoder2
, self
, self
.svstate
, regfile
)
1422 self
.fpr
= GPR(decoder2
, self
, self
.svstate
, fpregfile
)
1423 # initialise SPRs before MMU
1424 self
.spr
= SPR(decoder2
, initial_sprs
, gpr
=self
.gpr
)
1426 # set up 4 dummy SVSHAPEs if they aren't already set up
1428 sname
= 'SVSHAPE%d' % i
1429 val
= self
.spr
.get(sname
, 0)
1430 # make sure it's an SVSHAPE -- conversion done by SPR.__setitem__
1431 self
.spr
[sname
] = val
1432 self
.last_op_svshape
= False
1436 self
.mem
= MemMMap(row_bytes
=8,
1437 initial_mem
=initial_mem
,
1439 emulating_mmap
=emulating_mmap
)
1440 self
.imem
= self
.mem
1441 lelf
= self
.mem
.initialize(row_bytes
=4, initial_mem
=initial_insns
)
1442 if isinstance(lelf
, LoadedELF
): # stuff parsed from ELF
1443 initial_pc
= lelf
.pc
1444 for k
, v
in lelf
.gprs
.items():
1445 self
.gpr
[k
] = SelectableInt(v
, 64)
1446 initial_fpscr
= lelf
.fpscr
1447 self
.mem
.log_fancy(kind
=LogType
.InstrInOuts
)
1449 self
.mem
= Mem(row_bytes
=8, initial_mem
=initial_mem
,
1451 self
.mem
.log_fancy(kind
=LogType
.InstrInOuts
)
1452 self
.imem
= Mem(row_bytes
=4, initial_mem
=initial_insns
)
1453 # MMU mode, redirect underlying Mem through RADIX
1455 self
.mem
= RADIX(self
.mem
, self
)
1457 self
.imem
= RADIX(self
.imem
, self
)
1459 # TODO, needed here:
1460 # FPR (same as GPR except for FP nums)
1461 # 4.2.2 p124 FPSCR (definitely "separate" - not in SPR)
1462 # note that mffs, mcrfs, mtfsf "manage" this FPSCR
1463 self
.fpscr
= FPSCRState(initial_fpscr
)
1465 # 2.3.1 CR (and sub-fields CR0..CR6 - CR0 SO comes from XER.SO)
1466 # note that mfocrf, mfcr, mtcr, mtocrf, mcrxrx "manage" CRs
1468 # 2.3.2 LR (actually SPR #8) -- Done
1469 # 2.3.3 CTR (actually SPR #9) -- Done
1470 # 2.3.4 TAR (actually SPR #815)
1471 # 3.2.2 p45 XER (actually SPR #1) -- Done
1472 # 3.2.3 p46 p232 VRSAVE (actually SPR #256)
1474 # create CR then allow portions of it to be "selectable" (below)
1475 self
.cr_fields
= CRFields(initial_cr
)
1476 self
.cr
= self
.cr_fields
.cr
1477 self
.cr_backup
= 0 # sigh, dreadful hack: for fail-first (VLi)
1479 # "undefined", just set to variable-bit-width int (use exts "max")
1480 # self.undefined = SelectableInt(0, EFFECTIVELY_UNLIMITED)
1483 self
.namespace
.update(self
.spr
)
1484 self
.namespace
.update({'GPR': self
.gpr
,
1488 'memassign': self
.memassign
,
1491 'SVSTATE': self
.svstate
,
1492 'SVSHAPE0': self
.spr
['SVSHAPE0'],
1493 'SVSHAPE1': self
.spr
['SVSHAPE1'],
1494 'SVSHAPE2': self
.spr
['SVSHAPE2'],
1495 'SVSHAPE3': self
.spr
['SVSHAPE3'],
1498 'FPSCR': self
.fpscr
,
1499 'undefined': undefined
,
1500 'mode_is_64bit': True,
1501 'SO': XER_bits
['SO'],
1502 'XLEN': 64, # elwidth overrides
1506 if real_page_size
is None:
1507 # PowerISA v3.1B Book III Section 6.7 page 1191 (1217)
1508 # defines real page size as 2 ** 12 bytes (4KiB)
1509 real_page_size
= 2 ** 12
1510 self
.real_page_size
= real_page_size
1511 self
.reserve_addr
= SelectableInt(0, self
.XLEN
)
1512 self
.reserve
= SelectableInt(0, 1)
1513 self
.reserve_length
= SelectableInt(0, 4)
1515 self
.namespace
.update({'RESERVE': self
.RESERVE
,
1516 'RESERVE_ADDR': self
.RESERVE_ADDR
,
1517 'RESERVE_LENGTH': self
.RESERVE_LENGTH
,
1518 'REAL_PAGE_SIZE': self
.REAL_PAGE_SIZE
,
1521 for name
in BFP_FLAG_NAMES
:
1522 setattr(self
, name
, 0)
1524 # update pc to requested start point
1525 self
.set_pc(initial_pc
)
1527 # field-selectable versions of Condition Register
1528 self
.crl
= self
.cr_fields
.crl
1530 self
.namespace
["CR%d" % i
] = self
.crl
[i
]
1532 self
.decoder
= decoder2
.dec
1533 self
.dec2
= decoder2
1535 super().__init
__(XLEN
=self
.namespace
["XLEN"], FPSCR
=self
.fpscr
)
1537 def trace(self
, out
):
1538 if self
.insnlog
is None:
1540 self
.insnlog
.write(out
)
1544 return self
.namespace
["XLEN"]
1551 def RESERVE_LENGTH(self
):
1552 return self
.reserve_length
1555 def RESERVE_ADDR(self
):
1556 return self
.reserve_addr
1559 def REAL_PAGE_SIZE(self
):
1560 return self
.real_page_size
1562 def real_addr(self
, EA
):
1563 """ get the "real address to which `EA` maps"
1565 Specified in PowerISA v3.1B Book II Section 1.7.2.1 page 1049 (1075)
1567 # FIXME: translate EA to a physical address
    def call_trap(self, trap_addr, trap_bit):
        """calls TRAP and sets up NIA to the new execution location.
        next instruction will begin at trap_addr.
        """
        self.TRAP(trap_addr, trap_bit)
        # TRAP computed the (KAIVB-adjusted) vector address into
        # self.trap_nia; publish it as NIA and commit it to the PC.
        self.namespace['NIA'] = self.trap_nia
        self.pc.update(self.namespace, self.is_svp64_mode)
    def TRAP(self, trap_addr=0x700, trap_bit=PIb.TRAP):
        """TRAP> saves PC, MSR (and TODO SVSTATE), and updates MSR

        TRAP function is callable from inside the pseudocode itself,
        hence the default arguments.  when calling from inside ISACaller
        it is best to use call_trap()

        trap_addr: int | SelectableInt
            the address to go to (before any modifications from `KAIVB`)
        trap_bit: int | None
            the bit in `SRR1` to set, `None` means don't set any bits.
        """
        # accept either plain int or SelectableInt for the vector address
        if isinstance(trap_addr, SelectableInt):
            trap_addr = trap_addr.value
        # https://bugs.libre-soc.org/show_bug.cgi?id=859
        kaivb = self.spr['KAIVB'].value
        msr = self.namespace['MSR'].value
        log("TRAP:", hex(trap_addr), hex(msr), "kaivb", hex(kaivb))
        # store CIA(+4?) in SRR0, set NIA to 0x700
        # store MSR in SRR1, set MSR to um errr something, have to check spec
        # store SVSTATE (if enabled) in SVSRR0
        self.spr['SRR0'].value = self.pc.CIA.value
        self.spr['SRR1'].value = msr
        if self.is_svp64_mode:
            self.spr['SVSRR0'] = self.namespace['SVSTATE'].value
        # NIA = trap vector, relocated by KAIVB (low 13 bits of KAIVB masked)
        self.trap_nia = SelectableInt(trap_addr | (kaivb & ~0x1fff), 64)
        if trap_bit is not None:
            self.spr['SRR1'][trap_bit] = 1  # change *copy* of MSR in SRR1

        # set exception bits.  TODO: this should, based on the address
        # in figure 66 p1065 V3.0B and the table figure 65 p1063 set these
        # bits appropriately.  however it turns out that *for now* in all
        # cases (all trap_addrs) the exact same thing is needed.
        self.msr[MSRb.IR] = 0
        self.msr[MSRb.DR] = 0
        self.msr[MSRb.FE0] = 0
        self.msr[MSRb.FE1] = 0
        self.msr[MSRb.EE] = 0
        self.msr[MSRb.RI] = 0
        self.msr[MSRb.SF] = 1
        self.msr[MSRb.TM] = 0
        self.msr[MSRb.VEC] = 0
        self.msr[MSRb.VSX] = 0
        self.msr[MSRb.PR] = 0
        self.msr[MSRb.FP] = 0
        self.msr[MSRb.PMM] = 0
        self.msr[MSRb.TEs] = 0
        self.msr[MSRb.TEe] = 0
        self.msr[MSRb.UND] = 0
        self.msr[MSRb.LE] = 1
    def memassign(self, ea, sz, val):
        """Write `val` (`sz` bytes) at effective address `ea`, delegating
        to the underlying memory model."""
        self.mem.memassign(ea, sz, val)
1636 def prep_namespace(self
, insn_name
, info
, xlen
):
1637 # TODO: get field names from form in decoder*1* (not decoder2)
1638 # decoder2 is hand-created, and decoder1.sigform is auto-generated
1640 # then "yield" fields only from op_fields rather than hard-coded
1642 formname
, op_fields
= info
.form
, info
.op_fields
1643 fields
= self
.decoder
.sigforms
[formname
]
1644 log("prep_namespace", formname
, op_fields
, insn_name
)
1645 for name
in op_fields
:
1646 # CR immediates. deal with separately. needs modifying
1648 crlen5
= ['BC', 'BA', 'BB', 'BT', 'BI'] # 5-bit
1649 crlen3
= ['BF', 'BFA'] # 3-bit (BF: bit-field)
1650 if self
.is_svp64_mode
and name
in crlen5
:
1651 # 5-bit, must reconstruct the value
1653 regnum
, is_vec
= yield from get_cr_out(self
.dec2
, name
)
1655 regnum
, is_vec
= yield from get_cr_in(self
.dec2
, name
)
1656 sig
= getattr(fields
, name
)
1658 # low 2 LSBs (CR field selector) remain same, CR num extended
1659 assert regnum
<= 7, "sigh, TODO, 128 CR fields"
1660 val
= (val
& 0b11) |
(regnum
<< 2)
1661 elif self
.is_svp64_mode
and name
in crlen3
:
1663 regnum
, is_vec
= yield from get_cr_out(self
.dec2
, name
)
1665 regnum
, is_vec
= yield from get_cr_in(self
.dec2
, name
)
1666 log('hack %s' % name
, regnum
, is_vec
)
1669 sig
= getattr(fields
, name
)
1671 # these are all opcode fields involved in index-selection of CR,
1672 # and need to do "standard" arithmetic. CR[BA+32] for example
1673 # would, if using SelectableInt, only be 5-bit.
1674 if name
not in crlen3
and name
not in crlen5
:
1675 val
= SelectableInt(val
, sig
.width
)
1677 # finally put the field into the namespace
1678 self
.namespace
[name
] = val
1680 self
.namespace
['XER'] = self
.spr
['XER']
1681 self
.namespace
['CA'] = self
.spr
['XER'][XER_bits
['CA']].value
1682 self
.namespace
['CA32'] = self
.spr
['XER'][XER_bits
['CA32']].value
1683 self
.namespace
['OV'] = self
.spr
['XER'][XER_bits
['OV']].value
1684 self
.namespace
['OV32'] = self
.spr
['XER'][XER_bits
['OV32']].value
1685 self
.namespace
['XLEN'] = xlen
1686 self
.namespace
['RESERVE'] = self
.reserve
1687 self
.namespace
['RESERVE_ADDR'] = self
.reserve_addr
1688 self
.namespace
['RESERVE_LENGTH'] = self
.reserve_length
1690 # add some SVSTATE convenience variables
1691 vl
= self
.svstate
.vl
1692 srcstep
= self
.svstate
.srcstep
1693 self
.namespace
['VL'] = vl
1694 self
.namespace
['srcstep'] = srcstep
1696 # take a copy of the CR field value: if non-VLi fail-first fails
1697 # this is because the pseudocode writes *directly* to CR. sigh
1698 self
.cr_backup
= self
.cr
.value
1700 # sv.bc* need some extra fields
1701 if not self
.is_svp64_mode
or not insn_name
.startswith("sv.bc"):
1704 # blegh grab bits manually
1705 mode
= yield self
.dec2
.rm_dec
.rm_in
.mode
1706 # convert to SelectableInt before test
1707 mode
= SelectableInt(mode
, 5)
1708 bc_vlset
= mode
[SVP64MODEb
.BC_VLSET
] != 0
1709 bc_vli
= mode
[SVP64MODEb
.BC_VLI
] != 0
1710 bc_snz
= mode
[SVP64MODEb
.BC_SNZ
] != 0
1711 bc_vsb
= yield self
.dec2
.rm_dec
.bc_vsb
1712 bc_ctrtest
= yield self
.dec2
.rm_dec
.bc_ctrtest
1713 bc_lru
= yield self
.dec2
.rm_dec
.bc_lru
1714 bc_gate
= yield self
.dec2
.rm_dec
.bc_gate
1715 sz
= yield self
.dec2
.rm_dec
.pred_sz
1716 self
.namespace
['mode'] = SelectableInt(mode
, 5)
1717 self
.namespace
['ALL'] = SelectableInt(bc_gate
, 1)
1718 self
.namespace
['VSb'] = SelectableInt(bc_vsb
, 1)
1719 self
.namespace
['LRu'] = SelectableInt(bc_lru
, 1)
1720 self
.namespace
['CTRtest'] = SelectableInt(bc_ctrtest
, 1)
1721 self
.namespace
['VLSET'] = SelectableInt(bc_vlset
, 1)
1722 self
.namespace
['VLI'] = SelectableInt(bc_vli
, 1)
1723 self
.namespace
['sz'] = SelectableInt(sz
, 1)
1724 self
.namespace
['SNZ'] = SelectableInt(bc_snz
, 1)
1726 def get_kludged_op_add_ca_ov(self
, inputs
, inp_ca_ov
):
1727 """ this was not at all necessary to do. this function massively
1728 duplicates - in a laborious and complex fashion - the contents of
1729 the CSV files that were extracted two years ago from microwatt's
1730 source code. A-inversion is the "inv A" column, output inversion
1731 is the "inv out" column, carry-in equal to 0 or 1 or CA is the
1734 all of that information is available in
1735 self.instrs[ins_name].op_fields
1736 where info is usually assigned to self.instrs[ins_name]
1738 https://git.libre-soc.org/?p=openpower-isa.git;a=blob;f=openpower/isatables/minor_31.csv;hb=HEAD
1740 the immediate constants are *also* decoded correctly and placed
1741 usually by DecodeIn2Imm into operand2, as part of power_decoder2.py
1743 def ca(a
, b
, ca_in
, width
):
1744 mask
= (1 << width
) - 1
1745 y
= (a
& mask
) + (b
& mask
) + ca_in
1748 asmcode
= yield self
.dec2
.dec
.op
.asmcode
1749 insn
= insns
.get(asmcode
)
1750 SI
= yield self
.dec2
.dec
.SI
1753 inputs
= [i
.value
for i
in inputs
]
1756 if insn
in ("add", "addo", "addc", "addco"):
1760 elif insn
== "addic" or insn
== "addic.":
1764 elif insn
in ("subf", "subfo", "subfc", "subfco"):
1768 elif insn
== "subfic":
1772 elif insn
== "adde" or insn
== "addeo":
1776 elif insn
== "subfe" or insn
== "subfeo":
1780 elif insn
== "addme" or insn
== "addmeo":
1784 elif insn
== "addze" or insn
== "addzeo":
1788 elif insn
== "subfme" or insn
== "subfmeo":
1792 elif insn
== "subfze" or insn
== "subfzeo":
1796 elif insn
== "addex":
1797 # CA[32] aren't actually written, just generate so we have
1798 # something to return
1799 ca64
= ov64
= ca(inputs
[0], inputs
[1], OV
, 64)
1800 ca32
= ov32
= ca(inputs
[0], inputs
[1], OV
, 32)
1801 return ca64
, ca32
, ov64
, ov32
1802 elif insn
== "neg" or insn
== "nego":
1807 raise NotImplementedError(
1808 "op_add kludge unimplemented instruction: ", asmcode
, insn
)
1810 ca64
= ca(a
, b
, ca_in
, 64)
1811 ca32
= ca(a
, b
, ca_in
, 32)
1812 ov64
= ca64
!= ca(a
, b
, ca_in
, 63)
1813 ov32
= ca32
!= ca(a
, b
, ca_in
, 31)
1814 return ca64
, ca32
, ov64
, ov32
1816 def handle_carry_(self
, inputs
, output
, ca
, ca32
, inp_ca_ov
):
1817 if ca
is not None and ca32
is not None:
1819 op
= yield self
.dec2
.e
.do
.insn_type
1820 if op
== MicrOp
.OP_ADD
.value
and ca
is None and ca32
is None:
1821 retval
= yield from self
.get_kludged_op_add_ca_ov(
1823 ca
, ca32
, ov
, ov32
= retval
1824 asmcode
= yield self
.dec2
.dec
.op
.asmcode
1825 if insns
.get(asmcode
) == 'addex':
1826 # TODO: if 32-bit mode, set ov to ov32
1827 self
.spr
['XER'][XER_bits
['OV']] = ov
1828 self
.spr
['XER'][XER_bits
['OV32']] = ov32
1829 log(f
"write OV/OV32 OV={ov} OV32={ov32}",
1830 kind
=LogType
.InstrInOuts
)
1832 # TODO: if 32-bit mode, set ca to ca32
1833 self
.spr
['XER'][XER_bits
['CA']] = ca
1834 self
.spr
['XER'][XER_bits
['CA32']] = ca32
1835 log(f
"write CA/CA32 CA={ca} CA32={ca32}",
1836 kind
=LogType
.InstrInOuts
)
1838 inv_a
= yield self
.dec2
.e
.do
.invert_in
1840 inputs
[0] = ~inputs
[0]
1842 imm_ok
= yield self
.dec2
.e
.do
.imm_data
.ok
1844 imm
= yield self
.dec2
.e
.do
.imm_data
.data
1845 inputs
.append(SelectableInt(imm
, 64))
1848 log("gt input", x
, output
)
1849 gt
= (gtu(x
, output
))
1852 cy
= 1 if any(gts
) else 0
1854 if ca
is None: # already written
1855 self
.spr
['XER'][XER_bits
['CA']] = cy
1858 # ARGH... different for OP_ADD... *sigh*...
1859 op
= yield self
.dec2
.e
.do
.insn_type
1860 if op
== MicrOp
.OP_ADD
.value
:
1861 res32
= (output
.value
& (1 << 32)) != 0
1862 a32
= (inputs
[0].value
& (1 << 32)) != 0
1863 if len(inputs
) >= 2:
1864 b32
= (inputs
[1].value
& (1 << 32)) != 0
1867 cy32
= res32 ^ a32 ^ b32
1868 log("CA32 ADD", cy32
)
1872 log("input", x
, output
)
1873 log(" x[32:64]", x
, x
[32:64])
1874 log(" o[32:64]", output
, output
[32:64])
1875 gt
= (gtu(x
[32:64], output
[32:64])) == SelectableInt(1, 1)
1877 cy32
= 1 if any(gts
) else 0
1878 log("CA32", cy32
, gts
)
1879 if ca32
is None: # already written
1880 self
.spr
['XER'][XER_bits
['CA32']] = cy32
1882 def handle_overflow(self
, inputs
, output
, div_overflow
, inp_ca_ov
):
1883 op
= yield self
.dec2
.e
.do
.insn_type
1884 if op
== MicrOp
.OP_ADD
.value
:
1885 retval
= yield from self
.get_kludged_op_add_ca_ov(
1887 ca
, ca32
, ov
, ov32
= retval
1888 # TODO: if 32-bit mode, set ov to ov32
1889 self
.spr
['XER'][XER_bits
['OV']] = ov
1890 self
.spr
['XER'][XER_bits
['OV32']] = ov32
1891 self
.spr
['XER'][XER_bits
['SO']] |
= ov
1893 if hasattr(self
.dec2
.e
.do
, "invert_in"):
1894 inv_a
= yield self
.dec2
.e
.do
.invert_in
1896 inputs
[0] = ~inputs
[0]
1898 imm_ok
= yield self
.dec2
.e
.do
.imm_data
.ok
1900 imm
= yield self
.dec2
.e
.do
.imm_data
.data
1901 inputs
.append(SelectableInt(imm
, 64))
1902 log("handle_overflow", inputs
, output
, div_overflow
)
1903 if len(inputs
) < 2 and div_overflow
is None:
1906 # div overflow is different: it's returned by the pseudo-code
1907 # because it's more complex than can be done by analysing the output
1908 if div_overflow
is not None:
1909 ov
, ov32
= div_overflow
, div_overflow
1910 # arithmetic overflow can be done by analysing the input and output
1911 elif len(inputs
) >= 2:
1913 input_sgn
= [exts(x
.value
, x
.bits
) < 0 for x
in inputs
]
1914 output_sgn
= exts(output
.value
, output
.bits
) < 0
1915 ov
= 1 if input_sgn
[0] == input_sgn
[1] and \
1916 output_sgn
!= input_sgn
[0] else 0
1919 input32_sgn
= [exts(x
.value
, 32) < 0 for x
in inputs
]
1920 output32_sgn
= exts(output
.value
, 32) < 0
1921 ov32
= 1 if input32_sgn
[0] == input32_sgn
[1] and \
1922 output32_sgn
!= input32_sgn
[0] else 0
1924 # now update XER OV/OV32/SO
1925 so
= self
.spr
['XER'][XER_bits
['SO']]
1926 new_so
= so | ov
# sticky overflow ORs in old with new
1927 self
.spr
['XER'][XER_bits
['OV']] = ov
1928 self
.spr
['XER'][XER_bits
['OV32']] = ov32
1929 self
.spr
['XER'][XER_bits
['SO']] = new_so
1930 log(" set overflow", ov
, ov32
, so
, new_so
)
1932 def handle_comparison(self
, out
, cr_idx
=0, overflow
=None, no_so
=False):
1933 assert isinstance(out
, SelectableInt
), \
1934 "out zero not a SelectableInt %s" % repr(outputs
)
1935 log("handle_comparison", out
.bits
, hex(out
.value
))
1936 # TODO - XXX *processor* in 32-bit mode
1937 # https://bugs.libre-soc.org/show_bug.cgi?id=424
1939 # o32 = exts(out.value, 32)
1940 # print ("handle_comparison exts 32 bit", hex(o32))
1941 out
= exts(out
.value
, out
.bits
)
1942 log("handle_comparison exts", hex(out
))
1943 # create the three main CR flags, EQ GT LT
1944 zero
= SelectableInt(out
== 0, 1)
1945 positive
= SelectableInt(out
> 0, 1)
1946 negative
= SelectableInt(out
< 0, 1)
1947 # get (or not) XER.SO. for setvl this is important *not* to read SO
1949 SO
= SelectableInt(1, 0)
1951 SO
= self
.spr
['XER'][XER_bits
['SO']]
1952 log("handle_comparison SO", SO
.value
,
1953 "overflow", overflow
,
1955 "+ve", positive
.value
,
1956 "-ve", negative
.value
)
1957 # alternative overflow checking (setvl mainly at the moment)
1958 if overflow
is not None and overflow
== 1:
1959 SO
= SelectableInt(1, 1)
1960 # create the four CR field values and set the required CR field
1961 cr_field
= selectconcat(negative
, positive
, zero
, SO
)
1962 log("handle_comparison cr_field", self
.cr
, cr_idx
, cr_field
)
1963 self
.crl
[cr_idx
].eq(cr_field
)
1966 def set_pc(self
, pc_val
):
1967 self
.namespace
['NIA'] = SelectableInt(pc_val
, 64)
1968 self
.pc
.update(self
.namespace
, self
.is_svp64_mode
)
1970 def get_next_insn(self
):
1971 """check instruction
1974 pc
= self
.pc
.CIA
.value
1977 ins
= self
.imem
.ld(pc
, 4, False, True, instr_fetch
=True)
1979 raise KeyError("no instruction at 0x%x" % pc
)
1982 def setup_one(self
):
1983 """set up one instruction
1985 pc
, insn
= self
.get_next_insn()
1986 yield from self
.setup_next_insn(pc
, insn
)
1988 # cache since it's really slow to construct
1989 __PREFIX_CACHE
= SVP64Instruction
.Prefix(SelectableInt(value
=0, bits
=32))
1991 def __decode_prefix(self
, opcode
):
1992 pfx
= self
.__PREFIX
_CACHE
1993 pfx
.storage
.eq(opcode
)
1996 def setup_next_insn(self
, pc
, ins
):
1997 """set up next instruction
2000 log("setup: 0x%x 0x%x %s" % (pc
, ins
& 0xffffffff, bin(ins
)))
2001 log("CIA NIA", self
.respect_pc
, self
.pc
.CIA
.value
, self
.pc
.NIA
.value
)
2003 yield self
.dec2
.sv_rm
.eq(0)
2004 yield self
.dec2
.dec
.raw_opcode_in
.eq(ins
& 0xffffffff)
2005 yield self
.dec2
.dec
.bigendian
.eq(self
.bigendian
)
2006 yield self
.dec2
.state
.msr
.eq(self
.msr
.value
)
2007 yield self
.dec2
.state
.pc
.eq(pc
)
2008 if self
.svstate
is not None:
2009 yield self
.dec2
.state
.svstate
.eq(self
.svstate
.value
)
2011 # SVP64. first, check if the opcode is EXT001, and SVP64 id bits set
2013 opcode
= yield self
.dec2
.dec
.opcode_in
2014 opcode
= SelectableInt(value
=opcode
, bits
=32)
2015 pfx
= self
.__decode
_prefix
(opcode
)
2016 log("prefix test: opcode:", pfx
.PO
, bin(pfx
.PO
), pfx
.id)
2017 self
.is_svp64_mode
= bool((pfx
.PO
== 0b000001) and (pfx
.id == 0b11))
2018 self
.pc
.update_nia(self
.is_svp64_mode
)
2020 yield self
.dec2
.is_svp64_mode
.eq(self
.is_svp64_mode
)
2021 self
.namespace
['NIA'] = self
.pc
.NIA
2022 self
.namespace
['SVSTATE'] = self
.svstate
2023 if not self
.is_svp64_mode
:
2026 # in SVP64 mode. decode/print out svp64 prefix, get v3.0B instruction
2027 log("svp64.rm", bin(pfx
.rm
))
2028 log(" svstate.vl", self
.svstate
.vl
)
2029 log(" svstate.mvl", self
.svstate
.maxvl
)
2030 ins
= self
.imem
.ld(pc
+4, 4, False, True, instr_fetch
=True)
2031 log(" svsetup: 0x%x 0x%x %s" % (pc
+4, ins
& 0xffffffff, bin(ins
)))
2032 yield self
.dec2
.dec
.raw_opcode_in
.eq(ins
& 0xffffffff) # v3.0B suffix
2033 yield self
.dec2
.sv_rm
.eq(int(pfx
.rm
)) # svp64 prefix
2036 def execute_one(self
):
2037 """execute one instruction
2039 # get the disassembly code for this instruction
2040 if not self
.disassembly
:
2041 code
= yield from self
.get_assembly_name()
2044 if self
.is_svp64_mode
:
2045 offs
, dbg
= 4, "svp64 "
2046 code
= self
.disassembly
[self
._pc
+offs
]
2047 log(" %s sim-execute" % dbg
, hex(self
._pc
), code
)
2048 opname
= code
.split(' ')[0]
2050 yield from self
.call(opname
) # execute the instruction
2051 except MemException
as e
: # check for memory errors
2052 if e
.args
[0] == 'unaligned': # alignment error
2053 # run a Trap but set DAR first
2054 print("memory unaligned exception, DAR", e
.dar
, repr(e
))
2055 self
.spr
['DAR'] = SelectableInt(e
.dar
, 64)
2056 self
.call_trap(0x600, PIb
.PRIV
) # 0x600, privileged
2058 elif e
.args
[0] == 'invalid': # invalid
2059 # run a Trap but set DAR first
2060 log("RADIX MMU memory invalid error, mode %s" % e
.mode
)
2061 if e
.mode
== 'EXECUTE':
2062 # XXX TODO: must set a few bits in SRR1,
2063 # see microwatt loadstore1.vhdl
2064 # if m_in.segerr = '0' then
2065 # v.srr1(47 - 33) := m_in.invalid;
2066 # v.srr1(47 - 35) := m_in.perm_error; -- noexec fault
2067 # v.srr1(47 - 44) := m_in.badtree;
2068 # v.srr1(47 - 45) := m_in.rc_error;
2069 # v.intr_vec := 16#400#;
2071 # v.intr_vec := 16#480#;
2072 self
.call_trap(0x400, PIb
.PRIV
) # 0x400, privileged
2074 self
.call_trap(0x300, PIb
.PRIV
) # 0x300, privileged
2076 # not supported yet:
2077 raise e
# ... re-raise
2079 # append to the trace log file
2080 self
.trace(" # %s\n" % code
)
2082 log("gprs after code", code
)
2085 for i
in range(len(self
.crl
)):
2086 crs
.append(bin(self
.crl
[i
].asint()))
2087 log("crs", " ".join(crs
))
2088 log("vl,maxvl", self
.svstate
.vl
, self
.svstate
.maxvl
)
2090 # don't use this except in special circumstances
2091 if not self
.respect_pc
:
2094 log("execute one, CIA NIA", hex(self
.pc
.CIA
.value
),
2095 hex(self
.pc
.NIA
.value
))
2097 def get_assembly_name(self
):
2098 # TODO, asmregs is from the spec, e.g. add RT,RA,RB
2099 # see http://bugs.libre-riscv.org/show_bug.cgi?id=282
2100 dec_insn
= yield self
.dec2
.e
.do
.insn
2101 insn_1_11
= yield self
.dec2
.e
.do
.insn
[1:11]
2102 asmcode
= yield self
.dec2
.dec
.op
.asmcode
2103 int_op
= yield self
.dec2
.dec
.op
.internal_op
2104 log("get assembly name asmcode", asmcode
, int_op
,
2105 hex(dec_insn
), bin(insn_1_11
))
2106 asmop
= insns
.get(asmcode
, None)
2108 # sigh reconstruct the assembly instruction name
2109 if hasattr(self
.dec2
.e
.do
, "oe"):
2110 ov_en
= yield self
.dec2
.e
.do
.oe
.oe
2111 ov_ok
= yield self
.dec2
.e
.do
.oe
.ok
2115 if hasattr(self
.dec2
.e
.do
, "rc"):
2116 rc_en
= yield self
.dec2
.e
.do
.rc
.rc
2117 rc_ok
= yield self
.dec2
.e
.do
.rc
.ok
2121 # annoying: ignore rc_ok if RC1 is set (for creating *assembly name*)
2122 RC1
= yield self
.dec2
.rm_dec
.RC1
2126 # grrrr have to special-case MUL op (see DecodeOE)
2127 log("ov %d en %d rc %d en %d op %d" %
2128 (ov_ok
, ov_en
, rc_ok
, rc_en
, int_op
))
2129 if int_op
in [MicrOp
.OP_MUL_H64
.value
, MicrOp
.OP_MUL_H32
.value
]:
2134 if not asmop
.endswith("."): # don't add "." to "andis."
2137 if hasattr(self
.dec2
.e
.do
, "lk"):
2138 lk
= yield self
.dec2
.e
.do
.lk
2141 log("int_op", int_op
)
2142 if int_op
in [MicrOp
.OP_B
.value
, MicrOp
.OP_BC
.value
]:
2143 AA
= yield self
.dec2
.dec
.fields
.FormI
.AA
[0:-1]
2147 spr_msb
= yield from self
.get_spr_msb()
2148 if int_op
== MicrOp
.OP_MFCR
.value
:
2153 # XXX TODO: for whatever weird reason this doesn't work
2154 # https://bugs.libre-soc.org/show_bug.cgi?id=390
2155 if int_op
== MicrOp
.OP_MTCRF
.value
:
2162 def reset_remaps(self
):
2163 self
.remap_loopends
= [0] * 4
2164 self
.remap_idxs
= [0, 1, 2, 3]
2166 def get_remap_indices(self
):
2167 """WARNING, this function stores remap_idxs and remap_loopends
2168 in the class for later use. this to avoid problems with yield
2170 # go through all iterators in lock-step, advance to next remap_idx
2171 srcstep
, dststep
, ssubstep
, dsubstep
= self
.get_src_dststeps()
2172 # get four SVSHAPEs. here we are hard-coding
2174 SVSHAPE0
= self
.spr
['SVSHAPE0']
2175 SVSHAPE1
= self
.spr
['SVSHAPE1']
2176 SVSHAPE2
= self
.spr
['SVSHAPE2']
2177 SVSHAPE3
= self
.spr
['SVSHAPE3']
2178 # set up the iterators
2179 remaps
= [(SVSHAPE0
, SVSHAPE0
.get_iterator()),
2180 (SVSHAPE1
, SVSHAPE1
.get_iterator()),
2181 (SVSHAPE2
, SVSHAPE2
.get_iterator()),
2182 (SVSHAPE3
, SVSHAPE3
.get_iterator()),
2186 for i
, (shape
, remap
) in enumerate(remaps
):
2187 # zero is "disabled"
2188 if shape
.value
== 0x0:
2189 self
.remap_idxs
[i
] = 0
2190 # pick src or dststep depending on reg num (0-2=in, 3-4=out)
2191 step
= dststep
if (i
in [3, 4]) else srcstep
2192 # this is terrible. O(N^2) looking for the match. but hey.
2193 for idx
, (remap_idx
, loopends
) in enumerate(remap
):
2196 self
.remap_idxs
[i
] = remap_idx
2197 self
.remap_loopends
[i
] = loopends
2198 dbg
.append((i
, step
, remap_idx
, loopends
))
2199 for (i
, step
, remap_idx
, loopends
) in dbg
:
2200 log("SVSHAPE %d idx, end" % i
, step
, remap_idx
, bin(loopends
))
2203 def get_spr_msb(self
):
2204 dec_insn
= yield self
.dec2
.e
.do
.insn
2205 return dec_insn
& (1 << 20) != 0 # sigh - XFF.spr[-1]?
2207 def call(self
, name
, syscall_emu_active
=False):
2208 """call(opcode) - the primary execution point for instructions
2210 self
.last_st_addr
= None # reset the last known store address
2211 self
.last_ld_addr
= None # etc.
2213 ins_name
= name
.strip() # remove spaces if not already done so
2215 log("halted - not executing", ins_name
)
2218 # TODO, asmregs is from the spec, e.g. add RT,RA,RB
2219 # see http://bugs.libre-riscv.org/show_bug.cgi?id=282
2220 asmop
= yield from self
.get_assembly_name()
2221 log("call", ins_name
, asmop
,
2222 kind
=LogType
.InstrInOuts
)
2224 # sv.setvl is *not* a loop-function. sigh
2225 log("is_svp64_mode", self
.is_svp64_mode
, asmop
)
2228 int_op
= yield self
.dec2
.dec
.op
.internal_op
2229 spr_msb
= yield from self
.get_spr_msb()
2231 instr_is_privileged
= False
2232 if int_op
in [MicrOp
.OP_ATTN
.value
,
2233 MicrOp
.OP_MFMSR
.value
,
2234 MicrOp
.OP_MTMSR
.value
,
2235 MicrOp
.OP_MTMSRD
.value
,
2237 MicrOp
.OP_RFID
.value
]:
2238 instr_is_privileged
= True
2239 if int_op
in [MicrOp
.OP_MFSPR
.value
,
2240 MicrOp
.OP_MTSPR
.value
] and spr_msb
:
2241 instr_is_privileged
= True
2243 # check MSR priv bit and whether op is privileged: if so, throw trap
2244 PR
= self
.msr
[MSRb
.PR
]
2245 log("is priv", instr_is_privileged
, hex(self
.msr
.value
), PR
)
2246 if instr_is_privileged
and PR
== 1:
2247 self
.call_trap(0x700, PIb
.PRIV
)
2250 # check halted condition
2251 if ins_name
== 'attn':
2255 # User mode system call emulation consists of several steps:
2256 # 1. Detect whether instruction is sc or scv.
2257 # 2. Call the HDL implementation which invokes trap.
2258 # 3. Reroute the guest system call to host system call.
2259 # 4. Force return from the interrupt as if we had guest OS.
2260 # FIXME: enable PPC_FEATURE2_SCV in mem.py DEFAULT_AT_HWCAP2 when
2261 # scv emulation works.
2262 if ((asmop
in ("sc", "scv")) and
2263 (self
.syscall
is not None) and
2264 not syscall_emu_active
):
2265 # Memoize PC and trigger an interrupt
2267 pc
= self
.pc
.CIA
.value
2270 yield from self
.call(asmop
, syscall_emu_active
=True)
2272 # Reroute the syscall to host OS
2273 identifier
= self
.gpr(0)
2274 arguments
= map(self
.gpr
, range(3, 9))
2275 result
= self
.syscall(identifier
, *arguments
)
2276 self
.gpr
.write(3, result
, False, self
.namespace
["XLEN"])
2278 # Return from interrupt
2279 yield from self
.call("rfid", syscall_emu_active
=True)
2281 elif ((name
in ("rfid", "hrfid")) and syscall_emu_active
):
2284 # check illegal instruction
2286 if ins_name
not in ['mtcrf', 'mtocrf']:
2287 illegal
= ins_name
!= asmop
2289 # list of instructions not being supported by binutils (.long)
2290 dotstrp
= asmop
[:-1] if asmop
[-1] == '.' else asmop
2291 if dotstrp
in [*FPTRANS_INSNS
,
2293 'ffmadds', 'fdmadds', 'ffadds',
2295 "brh", "brw", "brd",
2296 'setvl', 'svindex', 'svremap', 'svstep',
2297 'svshape', 'svshape2',
2298 'binlog', 'crbinlog', 'crfbinlog',
2299 'crternlogi', 'crfternlogi', 'ternlogi',
2300 'bmask', 'cprop', 'gbbd',
2301 'absdu', 'absds', 'absdacs', 'absdacu', 'avgadd',
2302 'fmvis', 'fishmv', 'pcdec', "maddedu", "divmod2du",
2303 "dsld", "dsrd", "maddedus",
2304 "sadd", "saddw", "sadduw",
2309 "maddsubrs", "maddrs", "msubrs",
2310 "cfuged", "cntlzdm", "cnttzdm", "pdepd", "pextd",
2311 "setbc", "setbcr", "setnbc", "setnbcr",
2316 # match against instructions treated as nop, see nop below
2317 if asmop
.startswith("dcbt"):
2321 # branch-conditional redirects to sv.bc
2322 if asmop
.startswith('bc') and self
.is_svp64_mode
:
2323 ins_name
= 'sv.%s' % ins_name
2325 # ld-immediate-with-pi mode redirects to ld-with-postinc
2326 ldst_imm_postinc
= False
2327 if 'u' in ins_name
and self
.is_svp64_mode
:
2328 ldst_pi
= yield self
.dec2
.rm_dec
.ldst_postinc
2330 ins_name
= ins_name
.replace("u", "up")
2331 ldst_imm_postinc
= True
2332 log(" enable ld/st postinc", ins_name
)
2334 log(" post-processed name", dotstrp
, ins_name
, asmop
)
2336 # illegal instructions call TRAP at 0x700
2338 print("illegal", ins_name
, asmop
)
2339 self
.call_trap(0x700, PIb
.ILLEG
)
2340 print("name %s != %s - calling ILLEGAL trap, PC: %x" %
2341 (ins_name
, asmop
, self
.pc
.CIA
.value
))
2344 # this is for setvl "Vertical" mode: if set true,
2345 # srcstep/dststep is explicitly advanced. mode says which SVSTATE to
2346 # test for Rc=1 end condition. 3 bits of all 3 loops are put into CR0
2347 self
.allow_next_step_inc
= False
2348 self
.svstate_next_mode
= 0
2350 # nop has to be supported, we could let the actual op calculate
2351 # but PowerDecoder has a pattern for nop
2352 if ins_name
== 'nop':
2353 self
.update_pc_next()
2356 # get elwidths, defaults to 64
2360 if self
.is_svp64_mode
:
2361 ew_src
= yield self
.dec2
.rm_dec
.ew_src
2362 ew_dst
= yield self
.dec2
.rm_dec
.ew_dst
2363 ew_src
= 8 << (3-int(ew_src
)) # convert to bitlength
2364 ew_dst
= 8 << (3-int(ew_dst
)) # convert to bitlength
2365 xlen
= max(ew_src
, ew_dst
)
2366 log("elwidth", ew_src
, ew_dst
)
2367 log("XLEN:", self
.is_svp64_mode
, xlen
)
2369 # look up instruction in ISA.instrs, prepare namespace
2370 if ins_name
== 'pcdec': # grrrr yes there are others ("stbcx." etc.)
2371 info
= self
.instrs
[ins_name
+"."]
2372 elif asmop
[-1] == '.' and asmop
in self
.instrs
:
2373 info
= self
.instrs
[asmop
]
2375 info
= self
.instrs
[ins_name
]
2376 yield from self
.prep_namespace(ins_name
, info
, xlen
)
2378 # dict retains order
2379 inputs
= dict.fromkeys(create_full_args(
2380 read_regs
=info
.read_regs
, special_regs
=info
.special_regs
,
2381 uninit_regs
=info
.uninit_regs
, write_regs
=info
.write_regs
))
2383 # preserve order of register names
2384 write_without_special_regs
= OrderedSet(info
.write_regs
)
2385 write_without_special_regs
-= OrderedSet(info
.special_regs
)
2386 input_names
= create_args([
2387 *info
.read_regs
, *info
.uninit_regs
, *write_without_special_regs
])
2388 log("input names", input_names
)
2390 # get SVP64 entry for the current instruction
2391 sv_rm
= self
.svp64rm
.instrs
.get(ins_name
)
2392 if sv_rm
is not None:
2393 dest_cr
, src_cr
, src_byname
, dest_byname
= decode_extra(sv_rm
)
2395 dest_cr
, src_cr
, src_byname
, dest_byname
= False, False, {}, {}
2396 log("sv rm", sv_rm
, dest_cr
, src_cr
, src_byname
, dest_byname
)
2398 # see if srcstep/dststep need skipping over masked-out predicate bits
2399 # svstep also needs advancement because it calls SVSTATE_NEXT.
2400 # bit the remaps get computed just after pre_inc moves them on
2401 # with remap_set_steps substituting for PowerDecider2 not doing it,
2402 # and SVSTATE_NEXT not being able to.use yield, the preinc on
2403 # svstep is necessary for now.
2405 if (self
.is_svp64_mode
or ins_name
in ['svstep']):
2406 yield from self
.svstate_pre_inc()
2407 if self
.is_svp64_mode
:
2408 pre
= yield from self
.update_new_svstate_steps()
2410 self
.svp64_reset_loop()
2412 self
.update_pc_next()
2414 srcstep
, dststep
, ssubstep
, dsubstep
= self
.get_src_dststeps()
2415 pred_dst_zero
= self
.pred_dst_zero
2416 pred_src_zero
= self
.pred_src_zero
2417 vl
= self
.svstate
.vl
2418 subvl
= yield self
.dec2
.rm_dec
.rm_in
.subvl
2420 # VL=0 in SVP64 mode means "do nothing: skip instruction"
2421 if self
.is_svp64_mode
and vl
== 0:
2422 self
.pc
.update(self
.namespace
, self
.is_svp64_mode
)
2423 log("SVP64: VL=0, end of call", self
.namespace
['CIA'],
2424 self
.namespace
['NIA'], kind
=LogType
.InstrInOuts
)
2427 # for when SVREMAP is active, using pre-arranged schedule.
2428 # note: modifying PowerDecoder2 needs to "settle"
2429 remap_en
= self
.svstate
.SVme
2430 persist
= self
.svstate
.RMpst
2431 active
= (persist
or self
.last_op_svshape
) and remap_en
!= 0
2432 if self
.is_svp64_mode
:
2433 yield self
.dec2
.remap_active
.eq(remap_en
if active
else 0)
2435 if persist
or self
.last_op_svshape
:
2436 remaps
= self
.get_remap_indices()
2437 if self
.is_svp64_mode
and (persist
or self
.last_op_svshape
):
2438 yield from self
.remap_set_steps(remaps
)
2439 # after that, settle down (combinatorial) to let Vector reg numbers
2440 # work themselves out
2442 if self
.is_svp64_mode
:
2443 remap_active
= yield self
.dec2
.remap_active
2445 remap_active
= False
2446 log("remap active", bin(remap_active
), self
.is_svp64_mode
)
2448 # LDST does *not* allow elwidth overrides on RA (Effective Address).
2449 # this has to be detected. XXX TODO: RB for ldst-idx *may* need
2450 # conversion (to 64-bit) also.
2451 # see write reg this *HAS* to also override XLEN to 64 on LDST/Update
2452 sv_mode
= yield self
.dec2
.rm_dec
.sv_mode
2453 is_ldst
= (sv_mode
in [SVMode
.LDST_IDX
.value
, SVMode
.LDST_IMM
.value
] \
2454 and self
.is_svp64_mode
)
2455 log("is_ldst", sv_mode
, is_ldst
)
2457 # main input registers (RT, RA ...)
2458 for name
in input_names
:
2459 if name
== "overflow":
2460 inputs
[name
] = SelectableInt(0, 1)
2461 elif name
.startswith("RESERVE"):
2462 inputs
[name
] = getattr(self
, name
)
2463 elif name
== "FPSCR":
2464 inputs
[name
] = self
.FPSCR
2465 elif name
in ("CA", "CA32", "OV", "OV32"):
2466 inputs
[name
] = self
.spr
['XER'][XER_bits
[name
]]
2468 inputs
[name
] = self
.crl
[0]
2469 elif name
in spr_byname
:
2470 inputs
[name
] = self
.spr
[name
]
2471 elif is_ldst
and name
== 'RA':
2472 regval
= (yield from self
.get_input(name
, ew_src
, 64))
2473 log("EA (RA) regval name", name
, regval
)
2474 inputs
[name
] = regval
2476 regval
= (yield from self
.get_input(name
, ew_src
, xlen
))
2477 log("regval name", name
, regval
)
2478 inputs
[name
] = regval
2480 # arrrrgh, awful hack, to get _RT into namespace
2481 if ins_name
in ['setvl', 'svstep']:
2483 RT
= yield self
.dec2
.dec
.RT
2484 self
.namespace
[regname
] = SelectableInt(RT
, 5)
2486 self
.namespace
["RT"] = SelectableInt(0, 5)
2487 regnum
, is_vec
= yield from get_idx_out(self
.dec2
, "RT")
2488 log('hack input reg %s %s' % (name
, str(regnum
)), is_vec
)
2490 # in SVP64 mode for LD/ST work out immediate
2491 # XXX TODO: replace_ds for DS-Form rather than D-Form.
2492 # use info.form to detect
2493 if self
.is_svp64_mode
and not ldst_imm_postinc
:
2494 yield from self
.check_replace_d(info
, remap_active
)
2496 # "special" registers
2497 for special
in info
.special_regs
:
2498 if special
in special_sprs
:
2499 inputs
[special
] = self
.spr
[special
]
2501 inputs
[special
] = self
.namespace
[special
]
2503 # clear trap (trap) NIA
2504 self
.trap_nia
= None
2506 # check if this was an sv.bc* and create an indicator that
2507 # this is the last check to be made as a loop. combined with
2508 # the ALL/ANY mode we can early-exit. note that BI (to test)
2509 # is an input so there is no termination if BI is scalar
2510 # (because early-termination is for *output* scalars)
2511 if self
.is_svp64_mode
and ins_name
.startswith("sv.bc"):
2512 end_loop
= srcstep
== vl
-1 or dststep
== vl
-1
2513 self
.namespace
['end_loop'] = SelectableInt(end_loop
, 1)
2515 inp_ca_ov
= (self
.spr
['XER'][XER_bits
['CA']].value
,
2516 self
.spr
['XER'][XER_bits
['OV']].value
)
2518 for k
, v
in inputs
.items():
2520 v
= SelectableInt(0, self
.XLEN
)
2521 # prevent pseudo-code from modifying input registers
2522 v
= copy_assign_rhs(v
)
2523 if isinstance(v
, SelectableInt
):
2527 # execute actual instruction here (finally)
2528 log("inputs", inputs
)
2529 inputs
= list(inputs
.values())
2530 results
= info
.func(self
, *inputs
)
2531 output_names
= create_args(info
.write_regs
)
2533 # record .ok before anything after the pseudo-code can modify it
2535 for out
, n
in zip(results
or [], output_names
):
2538 if isinstance(out
, SelectableInt
):
2540 log("results", outs
)
2541 log("results ok", outs_ok
)
2543 # "inject" decorator takes namespace from function locals: we need to
2544 # overwrite NIA being overwritten (sigh)
2545 if self
.trap_nia
is not None:
2546 self
.namespace
['NIA'] = self
.trap_nia
2548 log("after func", self
.namespace
['CIA'], self
.namespace
['NIA'])
2550 # check if op was a LD/ST so that debugging can check the
2552 if int_op
in [MicrOp
.OP_STORE
.value
,
2554 self
.last_st_addr
= self
.mem
.last_st_addr
2555 if int_op
in [MicrOp
.OP_LOAD
.value
,
2557 self
.last_ld_addr
= self
.mem
.last_ld_addr
2558 log("op", int_op
, MicrOp
.OP_STORE
.value
, MicrOp
.OP_LOAD
.value
,
2559 self
.last_st_addr
, self
.last_ld_addr
)
2561 # detect if CA/CA32 already in outputs (sra*, basically)
2563 ca32
= outs
.get("CA32")
2565 log("carry already done?", ca
, ca32
, output_names
)
2566 # soc test_pipe_caller tests don't have output_carry
2567 has_output_carry
= hasattr(self
.dec2
.e
.do
, "output_carry")
2568 carry_en
= has_output_carry
and (yield self
.dec2
.e
.do
.output_carry
)
2570 yield from self
.handle_carry_(
2571 inputs
, results
[0], ca
, ca32
, inp_ca_ov
=inp_ca_ov
)
2573 # get output named "overflow" and "CR0"
2574 overflow
= outs
.get('overflow')
2575 cr0
= outs
.get('CR0')
2576 cr1
= outs
.get('CR1')
2578 # soc test_pipe_caller tests don't have oe
2579 has_oe
= hasattr(self
.dec2
.e
.do
, "oe")
2580 # yeah just no. not in parallel processing
2581 if has_oe
and not self
.is_svp64_mode
:
2582 # detect if overflow was in return result
2583 ov_en
= yield self
.dec2
.e
.do
.oe
.oe
2584 ov_ok
= yield self
.dec2
.e
.do
.oe
.ok
2585 log("internal overflow", ins_name
, overflow
, "en?", ov_en
, ov_ok
)
2587 yield from self
.handle_overflow(
2588 inputs
, results
[0], overflow
, inp_ca_ov
=inp_ca_ov
)
2590 # only do SVP64 dest predicated Rc=1 if dest-pred is not enabled
2592 if not self
.is_svp64_mode
or not pred_dst_zero
:
2593 if hasattr(self
.dec2
.e
.do
, "rc"):
2594 rc_en
= yield self
.dec2
.e
.do
.rc
.rc
2595 # don't do Rc=1 for svstep it is handled explicitly.
2596 # XXX TODO: now that CR0 is supported, sort out svstep's pseudocode
2597 # to write directly to CR0 instead of in ISACaller. hooyahh.
2598 if rc_en
and ins_name
not in ['svstep']:
2599 if outs_ok
.get('FPSCR', False):
2600 FPSCR
= outs
['FPSCR']
2603 yield from self
.do_rc_ov(
2604 ins_name
, results
[0], overflow
, cr0
, cr1
, FPSCR
)
2607 ffirst_hit
= False, False
2608 if self
.is_svp64_mode
:
2609 sv_mode
= yield self
.dec2
.rm_dec
.sv_mode
2610 is_cr
= sv_mode
== SVMode
.CROP
.value
2611 chk
= rc_en
or is_cr
2612 if outs_ok
.get('CR', False):
2613 # early write so check_ffirst can see value
2614 self
.namespace
['CR'].eq(outs
['CR'])
2615 ffirst_hit
= (yield from self
.check_ffirst(info
, chk
, srcstep
))
2617 # any modified return results?
2618 yield from self
.do_outregs(
2619 info
, outs
, carry_en
, ffirst_hit
, ew_dst
, outs_ok
)
2621 # check if a FP Exception occurred. TODO for DD-FFirst, check VLi
2622 # and raise the exception *after* if VLi=1 but if VLi=0 then
2623 # truncate and make the exception "disappear".
2624 if self
.FPSCR
.FEX
and (self
.msr
[MSRb
.FE0
] or self
.msr
[MSRb
.FE1
]):
2625 self
.call_trap(0x700, PIb
.FP
)
2628 yield from self
.do_nia(asmop
, ins_name
, rc_en
, ffirst_hit
)
2630 def check_ffirst(self
, info
, rc_en
, srcstep
):
2631 """fail-first mode: checks a bit of Rc Vector, truncates VL
2633 rm_mode
= yield self
.dec2
.rm_dec
.mode
2634 ff_inv
= yield self
.dec2
.rm_dec
.inv
2635 cr_bit
= yield self
.dec2
.rm_dec
.cr_sel
2636 RC1
= yield self
.dec2
.rm_dec
.RC1
2637 vli_
= yield self
.dec2
.rm_dec
.vli
# VL inclusive if truncated
2638 log(" ff rm_mode", rc_en
, rm_mode
, SVP64RMMode
.FFIRST
.value
)
2642 log(" cr_bit", cr_bit
)
2643 log(" rc_en", rc_en
)
2644 ffirst
= yield from is_ffirst_mode(self
.dec2
)
2645 if not rc_en
or not ffirst
:
2647 # get the CR vevtor, do BO-test
2649 log("asmregs", info
.asmregs
[0], info
.write_regs
)
2650 if 'CR' in info
.write_regs
and 'BF' in info
.asmregs
[0]:
2652 regnum
, is_vec
= yield from get_cr_out(self
.dec2
, crf
)
2653 crtest
= self
.crl
[regnum
]
2654 ffirst_hit
= crtest
[cr_bit
] != ff_inv
2655 log("cr test", crf
, regnum
, int(crtest
), crtest
, cr_bit
, ff_inv
)
2656 log("cr test?", ffirst_hit
)
2659 # Fail-first activated, truncate VL
2660 vli
= SelectableInt(int(vli_
), 7)
2661 self
.svstate
.vl
= srcstep
+ vli
2662 yield self
.dec2
.state
.svstate
.eq(self
.svstate
.value
)
2663 yield Settle() # let decoder update
2666 def do_rc_ov(self
, ins_name
, result
, overflow
, cr0
, cr1
, FPSCR
):
2667 cr_out
= yield self
.dec2
.op
.cr_out
2668 if cr_out
== CROutSel
.CR1
.value
:
2672 regnum
, is_vec
= yield from get_cr_out(self
.dec2
, rc_reg
)
2673 # hang on... for `setvl` actually you want to test SVSTATE.VL
2674 is_setvl
= ins_name
in ('svstep', 'setvl')
2676 result
= SelectableInt(result
.vl
, 64)
2678 # overflow = None # do not override overflow except in setvl
2682 cr1
= int(FPSCR
.FX
) << 3
2683 cr1 |
= int(FPSCR
.FEX
) << 2
2684 cr1 |
= int(FPSCR
.VX
) << 1
2685 cr1 |
= int(FPSCR
.OX
)
2686 log("default fp cr1", cr1
)
2688 log("explicit cr1", cr1
)
2689 self
.crl
[regnum
].eq(cr1
)
2691 # if there was not an explicit CR0 in the pseudocode,
2693 c
= self
.handle_comparison(result
, regnum
, overflow
, no_so
=is_setvl
)
2694 log("implicit cr0 %d" % regnum
, c
)
2696 # otherwise we just blat CR0 into the required regnum
2697 log("explicit cr0 %d" % regnum
, cr0
)
2698 self
.crl
[regnum
].eq(cr0
)
def do_outregs(self, info, outs, ca_en, ffirst_hit, ew_dst, outs_ok):
    """Write out this instruction's result registers via check_write,
    but only when fail-first has not truncated the loop (or VLi is set).
    Outputs whose .ok flag is False are skipped, not written.
    """
    hit, vli = ffirst_hit
    # write out any regs for this instruction, but only if fail-first is ok
    # XXX TODO: allow CR-vector to be written out even if ffirst fails
    if hit and not vli:
        # restore the CR value on non-VLI failfirst (from sv.cmp and others
        # which write directly to CR in the pseudocode (gah, what a mess)
        # if ffirst_hit and not vli:
        #     self.cr.value = self.cr_backup
        return
    for regname, value in outs.items():
        if outs_ok[regname]:
            yield from self.check_write(info, regname, value, ca_en, ew_dst)
        else:
            log("skipping writing output with .ok=False", regname, value)
2715 def do_nia(self
, asmop
, ins_name
, rc_en
, ffirst_hit
):
2716 ffirst_hit
, vli
= ffirst_hit
2718 self
.svp64_reset_loop()
2721 # check advancement of src/dst/sub-steps and if PC needs updating
2722 nia_update
= (yield from self
.check_step_increment(
2723 rc_en
, asmop
, ins_name
))
2725 self
.update_pc_next()
2727 def check_replace_d(self
, info
, remap_active
):
2728 replace_d
= False # update / replace constant in pseudocode
2729 ldstmode
= yield self
.dec2
.rm_dec
.ldstmode
2730 vl
= self
.svstate
.vl
2731 subvl
= yield self
.dec2
.rm_dec
.rm_in
.subvl
2732 srcstep
, dststep
= self
.new_srcstep
, self
.new_dststep
2733 ssubstep
, dsubstep
= self
.new_ssubstep
, self
.new_dsubstep
2734 if info
.form
== 'DS':
2735 # DS-Form, multiply by 4 then knock 2 bits off after
2736 imm
= yield self
.dec2
.dec
.fields
.FormDS
.DS
[0:14] * 4
2738 imm
= yield self
.dec2
.dec
.fields
.FormD
.D
[0:16]
2739 imm
= exts(imm
, 16) # sign-extend to integer
2740 # get the right step. LD is from srcstep, ST is dststep
2741 op
= yield self
.dec2
.e
.do
.insn_type
2743 if op
== MicrOp
.OP_LOAD
.value
:
2745 offsmul
= yield self
.dec2
.in1_step
2746 log("D-field REMAP src", imm
, offsmul
, ldstmode
)
2748 offsmul
= (srcstep
* (subvl
+1)) + ssubstep
2749 log("D-field src", imm
, offsmul
, ldstmode
)
2750 elif op
== MicrOp
.OP_STORE
.value
:
2751 # XXX NOTE! no bit-reversed STORE! this should not ever be used
2752 offsmul
= (dststep
* (subvl
+1)) + dsubstep
2753 log("D-field dst", imm
, offsmul
, ldstmode
)
2754 # Unit-Strided LD/ST adds offset*width to immediate
2755 if ldstmode
== SVP64LDSTmode
.UNITSTRIDE
.value
:
2756 ldst_len
= yield self
.dec2
.e
.do
.data_len
2757 imm
= SelectableInt(imm
+ offsmul
* ldst_len
, 32)
2759 # Element-strided multiplies the immediate by element step
2760 elif ldstmode
== SVP64LDSTmode
.ELSTRIDE
.value
:
2761 imm
= SelectableInt(imm
* offsmul
, 32)
2764 ldst_ra_vec
= yield self
.dec2
.rm_dec
.ldst_ra_vec
2765 ldst_imz_in
= yield self
.dec2
.rm_dec
.ldst_imz_in
2766 log("LDSTmode", SVP64LDSTmode(ldstmode
),
2767 offsmul
, imm
, ldst_ra_vec
, ldst_imz_in
)
2768 # new replacement D... errr.. DS
2770 if info
.form
== 'DS':
2771 # TODO: assert 2 LSBs are zero?
2772 log("DS-Form, TODO, assert 2 LSBs zero?", bin(imm
.value
))
2773 imm
.value
= imm
.value
>> 2
2774 self
.namespace
['DS'] = imm
2776 self
.namespace
['D'] = imm
2778 def get_input(self
, name
, ew_src
, xlen
):
2779 # using PowerDecoder2, first, find the decoder index.
2780 # (mapping name RA RB RC RS to in1, in2, in3)
2781 regnum
, is_vec
= yield from get_idx_in(self
.dec2
, name
, True)
2783 # doing this is not part of svp64, it's because output
2784 # registers, to be modified, need to be in the namespace.
2785 regnum
, is_vec
= yield from get_idx_out(self
.dec2
, name
, True)
2787 regnum
, is_vec
= yield from get_idx_out2(self
.dec2
, name
, True)
2789 if isinstance(regnum
, tuple):
2790 (regnum
, base
, offs
) = regnum
2792 base
, offs
= regnum
, 0 # temporary HACK
2794 # in case getting the register number is needed, _RA, _RB
2795 # (HACK: only in straight non-svp64-mode for now, or elwidth == 64)
2796 regname
= "_" + name
2797 if not self
.is_svp64_mode
or ew_src
== 64:
2798 self
.namespace
[regname
] = regnum
2800 # FIXME: we're trying to access a sub-register, plain register
2801 # numbers don't work for that. for now, just pass something that
2802 # can be compared to 0 and probably will cause an error if misused.
2803 # see https://bugs.libre-soc.org/show_bug.cgi?id=1221
2804 self
.namespace
[regname
] = regnum
* 10000
2806 if not self
.is_svp64_mode
or not self
.pred_src_zero
:
2807 log('reading reg %s %s' % (name
, str(regnum
)), is_vec
)
2809 fval
= self
.fpr(base
, is_vec
, offs
, ew_src
)
2810 reg_val
= SelectableInt(fval
)
2811 assert ew_src
== self
.XLEN
, "TODO fix elwidth conversion"
2812 self
.trace("r:FPR:%d:%d:%d " % (base
, offs
, ew_src
))
2813 log("read fp reg %d/%d: 0x%x" % (base
, offs
, reg_val
.value
),
2814 kind
=LogType
.InstrInOuts
)
2815 elif name
is not None:
2816 gval
= self
.gpr(base
, is_vec
, offs
, ew_src
)
2817 reg_val
= SelectableInt(gval
.value
, bits
=xlen
)
2818 self
.trace("r:GPR:%d:%d:%d " % (base
, offs
, ew_src
))
2819 log("read int reg %d/%d: 0x%x" % (base
, offs
, reg_val
.value
),
2820 kind
=LogType
.InstrInOuts
)
2822 log('zero input reg %s %s' % (name
, str(regnum
)), is_vec
)
2823 reg_val
= SelectableInt(0, ew_src
)
2826 def remap_set_steps(self
, remaps
):
2827 """remap_set_steps sets up the in1/2/3 and out1/2 steps.
2828 they work in concert with PowerDecoder2 at the moment,
2829 there is no HDL implementation of REMAP. therefore this
2830 function, because ISACaller still uses PowerDecoder2,
2831 will *explicitly* write the dec2.XX_step values. this has
2834 # just some convenient debug info
2836 sname
= 'SVSHAPE%d' % i
2837 shape
= self
.spr
[sname
]
2838 log(sname
, bin(shape
.value
))
2839 log(" lims", shape
.lims
)
2840 log(" mode", shape
.mode
)
2841 log(" skip", shape
.skip
)
2843 # set up the list of steps to remap
2844 mi0
= self
.svstate
.mi0
2845 mi1
= self
.svstate
.mi1
2846 mi2
= self
.svstate
.mi2
2847 mo0
= self
.svstate
.mo0
2848 mo1
= self
.svstate
.mo1
2849 steps
= [[self
.dec2
.in1_step
, mi0
], # RA
2850 [self
.dec2
.in2_step
, mi1
], # RB
2851 [self
.dec2
.in3_step
, mi2
], # RC
2852 [self
.dec2
.o_step
, mo0
], # RT
2853 [self
.dec2
.o2_step
, mo1
], # EA
2856 rnames
= ['RA', 'RB', 'RC', 'RT', 'RS']
2857 for i
, reg
in enumerate(rnames
):
2858 idx
= yield from get_idx_map(self
.dec2
, reg
)
2860 idx
= yield from get_idx_map(self
.dec2
, "F"+reg
)
2862 steps
[i
][0] = self
.dec2
.in1_step
2864 steps
[i
][0] = self
.dec2
.in2_step
2866 steps
[i
][0] = self
.dec2
.in3_step
2867 log("remap step", i
, reg
, idx
, steps
[i
][1])
2868 remap_idxs
= self
.remap_idxs
2870 # now cross-index the required SHAPE for each of 3-in 2-out regs
2871 rnames
= ['RA', 'RB', 'RC', 'RT', 'EA']
2872 for i
, (dstep
, shape_idx
) in enumerate(steps
):
2873 (shape
, remap
) = remaps
[shape_idx
]
2874 remap_idx
= remap_idxs
[shape_idx
]
2875 # zero is "disabled"
2876 if shape
.value
== 0x0:
2878 # now set the actual requested step to the current index
2879 if dstep
is not None:
2880 yield dstep
.eq(remap_idx
)
2882 # debug printout info
2883 rremaps
.append((shape
.mode
, hex(shape
.value
), dstep
,
2884 i
, rnames
[i
], shape_idx
, remap_idx
))
2886 log("shape remap", x
)
2888 def check_write(self
, info
, name
, output
, carry_en
, ew_dst
):
2889 if name
== 'overflow': # ignore, done already (above)
2891 if name
== 'CR0': # ignore, done already (above)
2893 if isinstance(output
, int):
2894 output
= SelectableInt(output
, EFFECTIVELY_UNLIMITED
)
2896 if name
.startswith("RESERVE"):
2897 log("write %s 0x%x" % (name
, output
.value
))
2898 getattr(self
, name
).eq(output
)
2900 if name
in ['FPSCR', ]:
2901 log("write FPSCR 0x%x" % (output
.value
))
2902 self
.FPSCR
.eq(output
)
2905 if name
in ['CA', 'CA32']:
2907 log("writing %s to XER" % name
, output
)
2908 log("write XER %s 0x%x" % (name
, output
.value
))
2909 self
.spr
['XER'][XER_bits
[name
]] = output
.value
2911 log("NOT writing %s to XER" % name
, output
)
2913 # write special SPRs
2914 if name
in info
.special_regs
:
2915 log('writing special %s' % name
, output
, special_sprs
)
2916 log("write reg %s 0x%x" % (name
, output
.value
),
2917 kind
=LogType
.InstrInOuts
)
2918 if name
in special_sprs
:
2919 self
.spr
[name
] = output
2921 self
.namespace
[name
].eq(output
)
2923 log('msr written', hex(self
.msr
.value
))
2925 # find out1/out2 PR/FPR
2926 regnum
, is_vec
= yield from get_idx_out(self
.dec2
, name
, True)
2928 regnum
, is_vec
= yield from get_idx_out2(self
.dec2
, name
, True)
2930 # temporary hack for not having 2nd output
2931 regnum
= yield getattr(self
.decoder
, name
)
2933 # convenient debug prefix
2938 # check zeroing due to predicate bit being zero
2939 if self
.is_svp64_mode
and self
.pred_dst_zero
:
2940 log('zeroing reg %s %s' % (str(regnum
), str(output
)), is_vec
)
2941 output
= SelectableInt(0, EFFECTIVELY_UNLIMITED
)
2942 log("write reg %s%s 0x%x ew %d" % (reg_prefix
, str(regnum
),
2943 output
.value
, ew_dst
),
2944 kind
=LogType
.InstrInOuts
)
2945 # zero-extend tov64 bit begore storing (should use EXT oh well)
2946 if output
.bits
> 64:
2947 output
= SelectableInt(output
.value
, 64)
2948 rnum
, base
, offset
= regnum
2950 self
.fpr
.write(regnum
, output
, is_vec
, ew_dst
)
2951 self
.trace("w:FPR:%d:%d:%d " % (rnum
, offset
, ew_dst
))
2954 # LDST/Update does *not* allow elwidths on RA (Effective Address).
2955 # this has to be detected, and overridden. see get_input (related)
2956 sv_mode
= yield self
.dec2
.rm_dec
.sv_mode
2957 is_ldst
= (sv_mode
in [SVMode
.LDST_IDX
.value
, SVMode
.LDST_IMM
.value
] \
2958 and self
.is_svp64_mode
)
2959 if is_ldst
and name
in ['EA', 'RA']:
2960 op
= self
.dec2
.dec
.op
2961 if hasattr(op
, "upd"):
2962 # update mode LD/ST uses read-reg A also as an output
2964 log("write is_ldst is_update", sv_mode
, is_ldst
, upd
)
2965 if upd
== LDSTMode
.update
.value
:
2966 ew_dst
= 64 # override for RA (EA) to 64-bit
2968 self
.gpr
.write(regnum
, output
, is_vec
, ew_dst
)
2969 self
.trace("w:GPR:%d:%d:%d " % (rnum
, offset
, ew_dst
))
2971 def check_step_increment(self
, rc_en
, asmop
, ins_name
):
2972 # check if it is the SVSTATE.src/dest step that needs incrementing
2973 # this is our Sub-Program-Counter loop from 0 to VL-1
2974 if not self
.allow_next_step_inc
:
2975 if self
.is_svp64_mode
:
2976 return (yield from self
.svstate_post_inc(ins_name
))
2978 # XXX only in non-SVP64 mode!
2979 # record state of whether the current operation was an svshape,
2981 # to be able to know if it should apply in the next instruction.
2982 # also (if going to use this instruction) should disable ability
2983 # to interrupt in between. sigh.
2984 self
.last_op_svshape
= asmop
in ['svremap', 'svindex',
2991 log("SVSTATE_NEXT: inc requested, mode",
2992 self
.svstate_next_mode
, self
.allow_next_step_inc
)
2993 yield from self
.svstate_pre_inc()
2994 pre
= yield from self
.update_new_svstate_steps()
2996 # reset at end of loop including exit Vertical Mode
2997 log("SVSTATE_NEXT: end of loop, reset")
2998 self
.svp64_reset_loop()
2999 self
.svstate
.vfirst
= 0
3003 self
.handle_comparison(SelectableInt(0, 64)) # CR0
3005 if self
.allow_next_step_inc
== 2:
3006 log("SVSTATE_NEXT: read")
3007 nia_update
= (yield from self
.svstate_post_inc(ins_name
))
3009 log("SVSTATE_NEXT: post-inc")
3010 # use actual (cached) src/dst-step here to check end
3011 remaps
= self
.get_remap_indices()
3012 remap_idxs
= self
.remap_idxs
3013 vl
= self
.svstate
.vl
3014 subvl
= yield self
.dec2
.rm_dec
.rm_in
.subvl
3015 if self
.allow_next_step_inc
!= 2:
3016 yield from self
.advance_svstate_steps()
3017 #self.namespace['SVSTATE'] = self.svstate.spr
3018 # set CR0 (if Rc=1) based on end
3019 endtest
= 1 if self
.at_loopend() else 0
3021 #results = [SelectableInt(endtest, 64)]
3022 # self.handle_comparison(results) # CR0
3024 # see if svstep was requested, if so, which SVSTATE
3026 if self
.svstate_next_mode
> 0:
3027 shape_idx
= self
.svstate_next_mode
.value
-1
3028 endings
= self
.remap_loopends
[shape_idx
]
3029 cr_field
= SelectableInt((~endings
) << 1 | endtest
, 4)
3030 log("svstep Rc=1, CR0", cr_field
, endtest
)
3031 self
.crl
[0].eq(cr_field
) # CR0
3033 # reset at end of loop including exit Vertical Mode
3034 log("SVSTATE_NEXT: after increments, reset")
3035 self
.svp64_reset_loop()
3036 self
.svstate
.vfirst
= 0
def SVSTATE_NEXT(self, mode, submode, RA=None):
    """explicitly moves srcstep/dststep on to next element, for
    "Vertical-First" mode. this function is called from
    setvl pseudo-code, as a pseudo-op "svstep"

    WARNING: this function uses information that was created EARLIER
    due to it being in the middle of a yield, but this function is
    *NOT* called from yield (it's called from compiled pseudocode).
    """
    self.allow_next_step_inc = submode.value + 1
    log("SVSTATE_NEXT mode", mode, submode, self.allow_next_step_inc)
    self.svstate_next_mode = mode
    # modes 1-4 report the REMAP index for SVSHAPE0-3
    if self.svstate_next_mode > 0 and self.svstate_next_mode < 5:
        idx = self.svstate_next_mode.value - 1
        return SelectableInt(self.remap_idxs[idx], 7)
    # modes 5-8 report (and reset) one of the four SVSTATE step fields
    step_attrs = ((5, 'srcstep'), (6, 'dststep'),
                  (7, 'ssubstep'), (8, 'dsubstep'))
    for m, attr in step_attrs:
        if self.svstate_next_mode == m:
            self.svstate_next_mode = 0
            return SelectableInt(getattr(self.svstate, attr), 7)
    return SelectableInt(0, 7)
def get_src_dststeps(self):
    """Return the cached (srcstep, dststep, ssubstep, dsubstep) tuple
    recorded earlier by update_new_svstate_steps.
    """
    steps = (self.new_srcstep, self.new_dststep)
    substeps = (self.new_ssubstep, self.new_dsubstep)
    return steps + substeps
def update_svstate_namespace(self, overwrite_svstate=True):
    """Publish SVSTATE into the pseudocode namespace and the decoder.

    When overwrite_svstate is True the cached new_* step values are
    copied into the SVSTATE SPR first.  Always writes SVSTATE into
    the namespace, pushes it into dec2, and Settles the decoder.
    """
    if overwrite_svstate:
        # note, do not get the bit-reversed srcstep here!
        # update SVSTATE with the cached step values
        self.svstate.srcstep = self.new_srcstep
        self.svstate.dststep = self.new_dststep
        self.svstate.ssubstep = self.new_ssubstep
        self.svstate.dsubstep = self.new_dsubstep
    self.namespace['SVSTATE'] = self.svstate
    yield self.dec2.state.svstate.eq(self.svstate.value)
    yield Settle()  # let decoder update
def update_new_svstate_steps(self, overwrite_svstate=True):
    """Commit pending steps, then test whether the SVP64 loop has ended.

    Generator coroutine (drive with ``yield from``).  First commits the
    pending step values via update_svstate_namespace(), then re-reads
    the (now-settled) SVSTATE fields and decoder rm_dec outputs, and
    returns True when either the source side (ssubstep == subvl and
    srcstep == vl) or the destination side (dsubstep == subvl and
    dststep == vl) has reached the end of the loop.

    NOTE(review): the extraction this was recovered from is missing a
    few original log lines (orig. lines 3105, 3109, 3113, 3115 —
    presumably logging pack, vl, ff_inv); ff_inv and pack are read but
    not visibly used here — confirm against upstream caller.py.
    """
    yield from self.update_svstate_namespace(overwrite_svstate)
    srcstep = self.svstate.srcstep
    dststep = self.svstate.dststep
    ssubstep = self.svstate.ssubstep
    dsubstep = self.svstate.dsubstep
    pack = self.svstate.pack
    unpack = self.svstate.unpack
    vl = self.svstate.vl
    # decoder-derived SVP64 RM fields (must be read after Settle above)
    sv_mode = yield self.dec2.rm_dec.sv_mode
    subvl = yield self.dec2.rm_dec.rm_in.subvl
    rm_mode = yield self.dec2.rm_dec.mode
    ff_inv = yield self.dec2.rm_dec.inv
    cr_bit = yield self.dec2.rm_dec.cr_sel
    log(" srcstep", srcstep)
    log(" dststep", dststep)
    log(" unpack", unpack)
    log(" ssubstep", ssubstep)
    log(" dsubstep", dsubstep)
    log(" subvl", subvl)
    log(" rm_mode", rm_mode)
    log(" sv_mode", sv_mode)
    log(" cr_bit", cr_bit)

    # check if end reached (we let srcstep overrun, above)
    # nothing needs doing (TODO zeroing): just do next instruction
    return ((ssubstep == subvl and srcstep == vl) or
            (dsubstep == subvl and dststep == vl))
def svstate_post_inc(self, insn_name, vf=0):
    """Advance SVP64 srcstep/dststep after executing one element.

    Generator coroutine (drive with ``yield from``).  Decides whether
    the Sub-Program-Counter loop continues (same instruction repeats
    with new steps) or terminates (PC may advance).  Returns False
    while the Sub-PC loop is still running.

    NOTE(review): this body was recovered from a mangled extraction
    with several original lines missing; the gap positions are marked
    inline below.  Confirm against upstream caller.py before relying
    on the control flow here.
    """
    # check if SV "Vertical First" mode is enabled
    vfirst = self.svstate.vfirst
    log(" SV Vertical First", vf, vfirst)
    if not vf and vfirst == 1:
        # SV Branch-Conditional required to be as-if-vector
        # because there *is* no destination register
        # (SV normally only terminates on 1st scalar reg written
        # except in [slightly-misnamed] mapreduce mode)
        ffirst = yield from is_ffirst_mode(self.dec2)
        if insn_name.startswith("sv.bc") or ffirst:
            self.update_pc_next()
        # NOTE(review): extraction gap (orig. lines 3135-3138) — the
        # remainder of the vertical-first branch (its return paths)
        # is missing here.

    # check if it is the SVSTATE.src/dest step that needs incrementing
    # this is our Sub-Program-Counter loop from 0 to VL-1
    # XXX twin predication TODO
    vl = self.svstate.vl
    subvl = yield self.dec2.rm_dec.rm_in.subvl
    mvl = self.svstate.maxvl
    srcstep = self.svstate.srcstep
    dststep = self.svstate.dststep
    ssubstep = self.svstate.ssubstep
    dsubstep = self.svstate.dsubstep
    pack = self.svstate.pack
    unpack = self.svstate.unpack
    rm_mode = yield self.dec2.rm_dec.mode
    reverse_gear = yield self.dec2.rm_dec.reverse_gear
    sv_ptype = yield self.dec2.dec.op.SV_Ptype
    out_vec = not (yield self.dec2.no_out_vec)
    in_vec = not (yield self.dec2.no_in_vec)
    rm_mode = yield self.dec2.rm_dec.mode  # duplicate read of rm_dec.mode
    log(" svstate.vl", vl)
    log(" svstate.mvl", mvl)
    log(" rm.subvl", subvl)
    log(" svstate.srcstep", srcstep)
    log(" svstate.dststep", dststep)
    log(" svstate.ssubstep", ssubstep)
    log(" svstate.dsubstep", dsubstep)
    log(" svstate.pack", pack)
    log(" svstate.unpack", unpack)
    log(" mode", rm_mode)
    log(" reverse", reverse_gear)
    log(" out_vec", out_vec)
    log(" in_vec", in_vec)
    log(" sv_ptype", sv_ptype, sv_ptype == SVPType.P2.value)
    log(" rm_mode", rm_mode)
    # check if this was an sv.bc* and if so did it succeed
    if self.is_svp64_mode and insn_name.startswith("sv.bc"):
        end_loop = self.namespace['end_loop']
        log("branch %s end_loop" % insn_name, end_loop)
        # NOTE(review): extraction gap (orig. line 3176) — a condition
        # (presumably testing end_loop) guarding the reset is missing.
        self.svp64_reset_loop()
        self.update_pc_next()
        # NOTE(review): extraction gap (orig. line 3179) — a return
        # statement appears to be missing here.

    # check if srcstep needs incrementing by one, stop PC advancing
    # but for 2-pred both src/dest have to be checked.
    # XXX this might not be true! it may just be LD/ST
    if sv_ptype == SVPType.P2.value:
        # twin predication: either side reaching its end terminates
        svp64_is_vector = (out_vec or in_vec)
    # NOTE(review): extraction gap (orig. line 3185) — an else: is
    # presumably missing before the next assignment.
    svp64_is_vector = out_vec
    # also if data-dependent fail-first is used, only in_vec is tested,
    # allowing *scalar destinations* to be used as an accumulator.
    # effectively this implies /mr (mapreduce mode) is 100% on with ddffirst
    # see https://bugs.libre-soc.org/show_bug.cgi?id=1183#c16
    ffirst = yield from is_ffirst_mode(self.dec2)
    # NOTE(review): extraction gap (orig. line 3192) — a condition
    # (presumably testing ffirst) guarding the next assignment is missing.
    svp64_is_vector = in_vec

    # loops end at the first "hit" (source or dest)
    yield from self.advance_svstate_steps()
    loopend = self.loopend
    log("loopend", svp64_is_vector, loopend)
    if not svp64_is_vector or loopend:
        # reset loop to zero and update NIA
        self.svp64_reset_loop()
        # NOTE(review): extraction gap (orig. lines 3202-3205) — the
        # remainder of this branch (NIA update / return) is missing.

    # still looping, advance and update NIA
    self.namespace['SVSTATE'] = self.svstate
    # not an SVP64 branch, so fix PC (NIA==CIA) for next loop
    # (by default, NIA is CIA+4 if v3.0B or CIA+8 if SVP64)
    # this way we keep repeating the same instruction (with new steps)
    self.pc.NIA.eq(self.pc.CIA)
    self.namespace['NIA'] = self.pc.NIA
    log("end of sub-pc call", self.namespace['CIA'], self.namespace['NIA'])
    return False  # DO NOT allow PC update whilst Sub-PC loop running
def update_pc_next(self):
    """Commit the program-counter update via self.pc.update().

    Hands the current execution namespace to the PC object, which
    performs the actual CIA/NIA bookkeeping (SVP64-aware).
    """
    ns = self.namespace
    # UPDATE program counter
    self.pc.update(ns, self.is_svp64_mode)
    #self.svstate.spr = self.namespace['SVSTATE']
    log("end of call", ns['CIA'], ns['NIA'], ns['SVSTATE'])
def svp64_reset_loop(self):
    """Zero all four SVSTATE step counters and end the Sub-PC loop.

    Clears srcstep/dststep/ssubstep/dsubstep, resets the loopend flag,
    and mirrors the updated SVSTATE into the execution namespace.
    """
    for step in ('srcstep', 'dststep', 'ssubstep', 'dsubstep'):
        setattr(self.svstate, step, 0)
    self.loopend = False
    log(" svstate.srcstep loop end (PC to update)")
    self.namespace['SVSTATE'] = self.svstate
def update_nia(self):
    """Recompute NIA in the PC object and mirror it into the namespace."""
    pc = self.pc
    pc.update_nia(self.is_svp64_mode)
    self.namespace['NIA'] = pc.NIA
3240 """Decorator factory.
3242 this decorator will "inject" variables into the function's namespace,
3243 from the *dictionary* in self.namespace. it therefore becomes possible
3244 to make it look like a whole stack of variables which would otherwise
3245 need "self." inserted in front of them (*and* for those variables to be
3246 added to the instance) "appear" in the function.
3248 "self.namespace['SI']" for example becomes accessible as just "SI" but
3249 *only* inside the function, when decorated.
def variable_injector(func):
    """Wrap *func* so that self.namespace entries appear as globals.

    The wrapper copies args[0].namespace (the ISACaller namespace dict)
    into func's module globals before the call, so names like SI, CIA,
    NIA become directly accessible inside the decorated function, then
    stores the (possibly modified) globals back into args[0].namespace.

    NOTE(review): recovered from a mangled extraction — the original
    ``try:`` line was missing and has been restored (the visible
    ``except AttributeError:`` requires it), and the trailing
    ``return result`` / ``return decorator`` lines (orig. ~3272-3276)
    are missing from the visible source; confirm against upstream.
    """
    def decorator(*args, **kwargs):
        try:
            func_globals = func.__globals__  # Python 2.6+
        except AttributeError:
            func_globals = func.func_globals  # Earlier versions.

        context = args[0].namespace  # variables to be injected
        saved_values = func_globals.copy()  # Shallow copy of dict.
        log("globals before", context.keys())
        # inject: namespace entries now shadow/extend module globals
        func_globals.update(context)
        result = func(*args, **kwargs)
        log("globals after", func_globals['CIA'], func_globals['NIA'])
        log("args[0]", args[0].namespace['CIA'],
            args[0].namespace['NIA'],
            args[0].namespace['SVSTATE'])
        if 'end_loop' in func_globals:
            log("args[0] end_loop", func_globals['end_loop'])
        # harvest the (mutated) globals back as the new namespace
        args[0].namespace = func_globals
        #exec (func.__code__, func_globals)
        # func_globals = saved_values # Undo changes.
3280 return variable_injector