+ def _check_shl(self, m, dut):
+ m.d.comb += Assume(dut.i.ra == 0)
+ expected = Signal(64)
+ with m.If(dut.i.ctx.op.is_32bit):
+ m.d.comb += expected.eq((dut.i.rs << dut.i.rb[:6])[:32])
+ with m.Else():
+ m.d.comb += expected.eq((dut.i.rs << dut.i.rb[:7])[:64])
+ m.d.comb += Assert(dut.o.o.data == expected)
+ m.d.comb += Assert(dut.o.xer_ca.data == 0)
+
    def _check_shr(self, m, dut):
        """Formal check for the shift-right operations.

        Builds a reference result using 128-bit-wide intermediates (so
        sign extension and shifted-out bits are fully visible) and
        checks both the data output and the carry-out against it.
        """
        # this proof only covers the ra == 0 case
        m.d.comb += Assume(dut.i.ra == 0)
        expected = Signal(64)
        carry = Signal()
        # sign-extended copy of the input, widened to 128 bits so the
        # arithmetic shift below behaves like the ISA-level operation
        shift_in_s = Signal(signed(128))
        # (input >> amt) << amt — differs from the input exactly when
        # one or more 1 bits were shifted out
        shift_roundtrip = Signal(signed(128))
        # zero-extended copy of the input, for the unsigned (logical) case
        shift_in_u = Signal(128)
        # shift amount: 6 significant bits for 32-bit ops, 7 for 64-bit
        shift_amt = Signal(7)
        with m.If(dut.i.ctx.op.is_32bit):
            m.d.comb += [
                shift_amt.eq(dut.i.rb[:6]),
                shift_in_s.eq(dut.i.rs[:32].as_signed()),
                shift_in_u.eq(dut.i.rs[:32]),
            ]
        with m.Else():
            m.d.comb += [
                shift_amt.eq(dut.i.rb[:7]),
                shift_in_s.eq(dut.i.rs.as_signed()),
                shift_in_u.eq(dut.i.rs),
            ]

        with m.If(dut.i.ctx.op.is_signed):
            # arithmetic shift right; carry is set when the input is
            # negative AND 1 bits were lost (round-trip mismatch), i.e.
            # the result was rounded toward negative infinity
            m.d.comb += [
                expected.eq(shift_in_s >> shift_amt),
                shift_roundtrip.eq((shift_in_s >> shift_amt) << shift_amt),
                carry.eq((shift_in_s < 0) & (shift_roundtrip != shift_in_s)),
            ]
        with m.Else():
            # logical shift right never carries
            m.d.comb += [
                expected.eq(shift_in_u >> shift_amt),
                carry.eq(0),
            ]
        m.d.comb += Assert(dut.o.o.data == expected)
        # both bits of xer_ca must equal carry — presumably CA and CA32;
        # TODO(review): confirm against the xer_ca record definition
        m.d.comb += Assert(dut.o.xer_ca.data == Repl(carry, 2))
+
+ def _check_rlc(self, m, dut):
+ raise NotImplementedError
+ m.submodules.mask = mask = Mask()
+ with m.If():
+ pass
+ m.d.comb += Assert(dut.o.xer_ca.data == 0)
+
    def _check_rlcl(self, m, dut):
        # TODO: proof for the rlcl operation is not yet written
        raise NotImplementedError
+
    def _check_rlcr(self, m, dut):
        # TODO: proof for the rlcr operation is not yet written
        raise NotImplementedError
+
+ def _check_extswsli(self, m, dut):
+ m.d.comb += Assume(dut.i.ra == 0)
+ m.d.comb += Assume(dut.i.rb[6:] == 0)
+ m.d.comb += Assume(~dut.i.ctx.op.is_32bit) # all instrs. are 64-bit
+ expected = Signal(64)
+ m.d.comb += expected.eq((dut.i.rs[0:32].as_signed() << dut.i.rb[:6]))
+ m.d.comb += Assert(dut.o.o.data == expected)
+ m.d.comb += Assert(dut.o.xer_ca.data == 0)
+
+ def _check_ternlog(self, m, dut):
+ lut = dut.fields.FormTLI.TLI[:]
+ for i in range(64):
+ idx = Cat(dut.i.rb[i], dut.i.ra[i], dut.i.rc[i])
+ for j in range(8):
+ with m.If(j == idx):
+ m.d.comb += Assert(dut.o.o.data[i] == lut[j])
+ m.d.comb += Assert(dut.o.xer_ca.data == 0)
+
+ def _check_grev32(self, m, dut):
+ m.d.comb += Assume(dut.i.ctx.op.is_32bit)
+ # assert zero-extended
+ m.d.comb += Assert(dut.o.o.data[32:] == 0)
+ i = Signal(5)
+ m.d.comb += eq_any_const(i)
+ idx = dut.i.rb[0: 5] ^ i
+ m.d.comb += Assert((dut.o.o.data >> i)[0] == (dut.i.ra >> idx)[0])
+ m.d.comb += Assert(dut.o.xer_ca.data == 0)
+
+ def _check_grev64(self, m, dut):
+ m.d.comb += Assume(~dut.i.ctx.op.is_32bit)
+ i = Signal(6)
+ m.d.comb += eq_any_const(i)
+ idx = dut.i.rb[0: 6] ^ i
+ m.d.comb += Assert((dut.o.o.data >> i)[0] == (dut.i.ra >> idx)[0])
+ m.d.comb += Assert(dut.o.xer_ca.data == 0)
+