X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fieee754%2Fpart_mul_add%2Fmultiply.py;h=a67f4b4ccf7b8aed40f204a5d64159d17888e7e9;hb=7c1854690603e0970d6db29cb18ac1ac8b838816;hp=e86f655b53b6ffe5d06086a36ca62f2523a7850f;hpb=3feb6cc6b8fa0ac8ea0ac0d8a91a91722a07a6c5;p=ieee754fpu.git diff --git a/src/ieee754/part_mul_add/multiply.py b/src/ieee754/part_mul_add/multiply.py index e86f655b..a67f4b4c 100644 --- a/src/ieee754/part_mul_add/multiply.py +++ b/src/ieee754/part_mul_add/multiply.py @@ -50,15 +50,17 @@ class PartitionPoints(dict): raise ValueError("point must be a non-negative integer") self[point] = Value.wrap(enabled) - def like(self, name=None, src_loc_at=0): + def like(self, name=None, src_loc_at=0, mul=1): """Create a new ``PartitionPoints`` with ``Signal``s for all values. :param name: the base name for the new ``Signal``s. + :param mul: a multiplication factor on the indices """ if name is None: name = Signal(src_loc_at=1+src_loc_at).name # get variable name retval = PartitionPoints() for point, enabled in self.items(): + point *= mul retval[point] = Signal(enabled.shape(), name=f"{name}_{point}") return retval @@ -103,6 +105,12 @@ class PartitionPoints(dict): return False return True + def part_byte(self, index, mfactor=1): # mfactor used for "expanding" + if index == -1 or index == 7: + return C(True, 1) + assert index >= 0 and index < 8 + return self[(index * 8 + 8)*mfactor] + class FullAdder(Elaboratable): """Full Adder. @@ -140,7 +148,7 @@ class FullAdder(Elaboratable): return m -class MaskedFullAdder(FullAdder): +class MaskedFullAdder(Elaboratable): """Masked Full Adder. :attribute mask: the carry partition mask @@ -153,6 +161,13 @@ class MaskedFullAdder(FullAdder): FullAdders are always used with a "mask" on the output. To keep the graphviz "clean", this class performs the masking here rather than inside a large for-loop. + + See the following discussion as to why this is no longer derived + from FullAdder. 
Each carry is shifted here *before* being ANDed + with the mask, so that an AOI cell may be used (which is more + gate-efficient) + https://en.wikipedia.org/wiki/AND-OR-Invert + https://groups.google.com/d/msg/comp.arch/fcq-GLQqvas/vTxmcA0QAgAJ """ def __init__(self, width): @@ -160,14 +175,31 @@ class MaskedFullAdder(FullAdder): :param width: the bit width of the input and output """ - FullAdder.__init__(self, width) - self.mask = Signal(width) - self.mcarry = Signal(width) + self.width = width + self.mask = Signal(width, reset_less=True) + self.mcarry = Signal(width, reset_less=True) + self.in0 = Signal(width, reset_less=True) + self.in1 = Signal(width, reset_less=True) + self.in2 = Signal(width, reset_less=True) + self.sum = Signal(width, reset_less=True) def elaborate(self, platform): """Elaborate this module.""" - m = FullAdder.elaborate(self, platform) - m.d.comb += self.mcarry.eq((self.carry << 1) & self.mask) + m = Module() + s1 = Signal(self.width, reset_less=True) + s2 = Signal(self.width, reset_less=True) + s3 = Signal(self.width, reset_less=True) + c1 = Signal(self.width, reset_less=True) + c2 = Signal(self.width, reset_less=True) + c3 = Signal(self.width, reset_less=True) + m.d.comb += self.sum.eq(self.in0 ^ self.in1 ^ self.in2) + m.d.comb += s1.eq(Cat(0, self.in0)) + m.d.comb += s2.eq(Cat(0, self.in1)) + m.d.comb += s3.eq(Cat(0, self.in2)) + m.d.comb += c1.eq(s1 & s2 & self.mask) + m.d.comb += c2.eq(s2 & s3 & self.mask) + m.d.comb += c3.eq(s3 & s1 & self.mask) + m.d.comb += self.mcarry.eq(c1 | c2 | c3) return m @@ -182,10 +214,10 @@ class PartitionedAdder(Elaboratable): partition: .... P... P... P... P... (32 bits) a : .... .... .... .... .... (32 bits) b : .... .... .... .... .... (32 bits) - exp-a : ....P....P....P....P.... (32+4 bits) + exp-a : ....P....P....P....P.... (32+4 bits, P=1 if no partition) exp-b : ....0....0....0....0.... (32 bits plus 4 zeros) - exp-o : ....xN...xN...xN...xN... (32+4 bits) - o : .... N... N... N... N... (32 bits) + exp-o : ....xN...xN...xN...xN... (32+4 bits - x to be discarded) + o : .... N... N... N... N... (32 bits - x ignored, N is carry-over) :attribute width: the bit width of the input and output. Read-only. 
:attribute a: the first input to the adder @@ -268,6 +300,88 @@ class PartitionedAdder(Elaboratable): FULL_ADDER_INPUT_COUNT = 3 +class AddReduceData: + + def __init__(self, ppoints, n_inputs, output_width, n_parts): + self.part_ops = [Signal(2, name=f"part_ops_{i}") + for i in range(n_parts)] + self.inputs = [Signal(output_width, name=f"inputs[{i}]") + for i in range(n_inputs)] + self.reg_partition_points = ppoints.like() + + def eq_from(self, reg_partition_points, inputs, part_ops): + return [self.reg_partition_points.eq(reg_partition_points)] + \ + [self.inputs[i].eq(inputs[i]) + for i in range(len(self.inputs))] + \ + [self.part_ops[i].eq(part_ops[i]) + for i in range(len(self.part_ops))] + + def eq(self, rhs): + return self.eq_from(rhs.reg_partition_points, rhs.inputs, rhs.part_ops) + + +class FinalReduceData: + + def __init__(self, ppoints, output_width, n_parts): + self.part_ops = [Signal(2, name=f"part_ops_{i}") + for i in range(n_parts)] + self.output = Signal(output_width) + self.reg_partition_points = ppoints.like() + + def eq_from(self, reg_partition_points, output, part_ops): + return [self.reg_partition_points.eq(reg_partition_points)] + \ + [self.output.eq(output)] + \ + [self.part_ops[i].eq(part_ops[i]) + for i in range(len(self.part_ops))] + + def eq(self, rhs): + return self.eq_from(rhs.reg_partition_points, rhs.output, rhs.part_ops) + + +class FinalAdd(Elaboratable): + """ Final stage of add reduce + """ + + def __init__(self, n_inputs, output_width, n_parts, register_levels, + partition_points): + self.i = AddReduceData(partition_points, n_inputs, + output_width, n_parts) + self.o = FinalReduceData(partition_points, output_width, n_parts) + self.output_width = output_width + self.n_inputs = n_inputs + self.n_parts = n_parts + self.register_levels = list(register_levels) + self.partition_points = PartitionPoints(partition_points) + if not self.partition_points.fits_in_width(output_width): + raise ValueError("partition_points doesn't fit in output_width") + + def elaborate(self, platform): + """Elaborate this module.""" + m = Module() + + output_width = self.output_width + output = Signal(output_width) + if self.n_inputs == 0: + # use 0 as the default output value + m.d.comb += output.eq(0) + elif self.n_inputs == 1: + # handle single input + m.d.comb += output.eq(self.i.inputs[0]) + else: + # base case for adding 2 inputs + assert self.n_inputs == 2 + adder = PartitionedAdder(output_width, self.i.reg_partition_points) + m.submodules.final_adder = adder + m.d.comb += adder.a.eq(self.i.inputs[0]) + m.d.comb += adder.b.eq(self.i.inputs[1]) + m.d.comb += output.eq(adder.output) + + # create output + m.d.comb += self.o.eq_from(self.i.reg_partition_points, output, + self.i.part_ops) + + return m + class AddReduceSingle(Elaboratable): """Add list of numbers together. @@ -281,7 +395,8 @@ class AddReduceSingle(Elaboratable): supported, except for by ``Signal.eq``. """ - def __init__(self, inputs, output_width, register_levels, partition_points): + def __init__(self, n_inputs, output_width, n_parts, register_levels, + partition_points): """Create an ``AddReduce``. :param inputs: input ``Signal``s to be summed. @@ -290,23 +405,35 @@ class AddReduceSingle(Elaboratable): pipeline registers. :param partition_points: the input partition points. 
""" - self.inputs = list(inputs) - self._resized_inputs = [ - Signal(output_width, name=f"resized_inputs[{i}]") - for i in range(len(self.inputs))] + self.n_inputs = n_inputs + self.n_parts = n_parts + self.output_width = output_width + self.i = AddReduceData(partition_points, n_inputs, + output_width, n_parts) self.register_levels = list(register_levels) - self.output = Signal(output_width) self.partition_points = PartitionPoints(partition_points) if not self.partition_points.fits_in_width(output_width): raise ValueError("partition_points doesn't fit in output_width") - self._reg_partition_points = self.partition_points.like() - max_level = AddReduce.get_max_level(len(self.inputs)) + max_level = AddReduceSingle.get_max_level(n_inputs) for level in self.register_levels: if level > max_level: raise ValueError( "not enough adder levels for specified register levels") + # this is annoying. we have to create the modules (and terms) + # because we need to know what they are (in order to set up the + # interconnects back in AddReduce), but cannot do the m.d.comb += + # etc because this is not in elaboratable. + self.groups = AddReduceSingle.full_adder_groups(n_inputs) + self._intermediate_terms = [] + self.adders = [] + if len(self.groups) != 0: + self.create_next_terms() + + self.o = AddReduceData(partition_points, len(self._intermediate_terms), + output_width, n_parts) + @staticmethod def get_max_level(input_count): """Get the maximum level. @@ -316,12 +443,13 @@ class AddReduceSingle(Elaboratable): """ retval = 0 while True: - groups = AddReduce.full_adder_groups(input_count) + groups = AddReduceSingle.full_adder_groups(input_count) if len(groups) == 0: return retval input_count %= FULL_ADDER_INPUT_COUNT input_count += 2 * len(groups) retval += 1 + @staticmethod def full_adder_groups(input_count): """Get ``inputs`` indices for which a full adder should be built.""" @@ -329,87 +457,68 @@ class AddReduceSingle(Elaboratable): input_count - FULL_ADDER_INPUT_COUNT + 1, FULL_ADDER_INPUT_COUNT) - def _elaborate(self, platform): + def elaborate(self, platform): """Elaborate this module.""" m = Module() - # resize inputs to correct bit-width and optionally add in - # pipeline registers - resized_input_assignments = [self._resized_inputs[i].eq(self.inputs[i]) - for i in range(len(self.inputs))] - if 0 in self.register_levels: - m.d.sync += resized_input_assignments - m.d.sync += self._reg_partition_points.eq(self.partition_points) - else: - m.d.comb += resized_input_assignments - m.d.comb += self._reg_partition_points.eq(self.partition_points) - - groups = AddReduceSingle.full_adder_groups(len(self.inputs)) - # if there are no full adders to create, then we handle the base cases - # and return, otherwise we go on to the recursive case - if len(groups) == 0: - if len(self.inputs) == 0: - # use 0 as the default output value - m.d.comb += self.output.eq(0) - elif len(self.inputs) == 1: - # handle single input - m.d.comb += self.output.eq(self._resized_inputs[0]) - else: - # base case for adding 2 or more inputs, which get recursively - # reduced to 2 inputs - assert len(self.inputs) == 2 - adder = PartitionedAdder(len(self.output), - self._reg_partition_points) - m.submodules.final_adder = adder - m.d.comb += adder.a.eq(self._resized_inputs[0]) - m.d.comb += adder.b.eq(self._resized_inputs[1]) - m.d.comb += self.output.eq(adder.output) - return None, m - - # go on to prepare recursive case - intermediate_terms = [] + # copy the intermediate terms to the output + for i, value in 
enumerate(self._intermediate_terms): + m.d.comb += self.o.inputs[i].eq(value) - def add_intermediate_term(value): - intermediate_term = Signal( - len(self.output), - name=f"intermediate_terms[{len(intermediate_terms)}]") - intermediate_terms.append(intermediate_term) - m.d.comb += intermediate_term.eq(value) - - # store mask in intermediary (simplifies graph) - part_mask = Signal(len(self.output), reset_less=True) - mask = self._reg_partition_points.as_mask(len(self.output)) + # copy reg part points and part ops to output + m.d.comb += self.o.reg_partition_points.eq(self.i.reg_partition_points) + m.d.comb += [self.o.part_ops[i].eq(self.i.part_ops[i]) + for i in range(len(self.i.part_ops))] + + # set up the partition mask (for the adders) + part_mask = Signal(self.output_width, reset_less=True) + + mask = self.i.reg_partition_points.as_mask(self.output_width) m.d.comb += part_mask.eq(mask) - # create full adders for this recursive level. - # this shrinks N terms to 2 * (N // 3) plus the remainder - for i in groups: - adder_i = MaskedFullAdder(len(self.output)) + # add and link the intermediate term modules + for i, (iidx, adder_i) in enumerate(self.adders): setattr(m.submodules, f"adder_{i}", adder_i) - m.d.comb += adder_i.in0.eq(self._resized_inputs[i]) - m.d.comb += adder_i.in1.eq(self._resized_inputs[i + 1]) - m.d.comb += adder_i.in2.eq(self._resized_inputs[i + 2]) + + m.d.comb += adder_i.in0.eq(self.i.inputs[iidx]) + m.d.comb += adder_i.in1.eq(self.i.inputs[iidx + 1]) + m.d.comb += adder_i.in2.eq(self.i.inputs[iidx + 2]) m.d.comb += adder_i.mask.eq(part_mask) + + return m + + def create_next_terms(self): + + _intermediate_terms = [] + + def add_intermediate_term(value): + _intermediate_terms.append(value) + + # create full adders for this recursive level. + # this shrinks N terms to 2 * (N // 3) plus the remainder + for i in self.groups: + adder_i = MaskedFullAdder(self.output_width) + self.adders.append((i, adder_i)) # add both the sum and the masked-carry to the next level. # 3 inputs have now been reduced to 2... add_intermediate_term(adder_i.sum) add_intermediate_term(adder_i.mcarry) # handle the remaining inputs. - if len(self.inputs) % FULL_ADDER_INPUT_COUNT == 1: - add_intermediate_term(self._resized_inputs[-1]) - elif len(self.inputs) % FULL_ADDER_INPUT_COUNT == 2: + if self.n_inputs % FULL_ADDER_INPUT_COUNT == 1: + add_intermediate_term(self.i.inputs[-1]) + elif self.n_inputs % FULL_ADDER_INPUT_COUNT == 2: # Just pass the terms to the next layer, since we wouldn't gain # anything by using a half adder since there would still be 2 terms # and just passing the terms to the next layer saves gates. - add_intermediate_term(self._resized_inputs[-2]) - add_intermediate_term(self._resized_inputs[-1]) + add_intermediate_term(self.i.inputs[-2]) + add_intermediate_term(self.i.inputs[-1]) else: - assert len(self.inputs) % FULL_ADDER_INPUT_COUNT == 0 + assert self.n_inputs % FULL_ADDER_INPUT_COUNT == 0 - return intermediate_terms, m + self._intermediate_terms = _intermediate_terms -class AddReduce(AddReduceSingle): +class AddReduce(Elaboratable): """Recursively Add list of numbers together. :attribute inputs: input ``Signal``s to be summed. Modification not @@ -421,7 +530,8 @@ class AddReduce(AddReduceSingle): supported, except for by ``Signal.eq``. """ - def __init__(self, inputs, output_width, register_levels, partition_points): + def __init__(self, inputs, output_width, register_levels, partition_points, + part_ops): """Create an ``AddReduce``. 
:param inputs: input ``Signal``s to be summed. @@ -430,28 +540,82 @@ class AddReduce(AddReduceSingle): pipeline registers. :param partition_points: the input partition points. """ - AddReduceSingle.__init__(self, inputs, output_width, register_levels, - partition_points) + self.inputs = inputs + self.part_ops = part_ops + n_parts = len(part_ops) + self.o = FinalReduceData(partition_points, output_width, n_parts) + self.output_width = output_width + self.register_levels = register_levels + self.partition_points = partition_points - def next_register_levels(self): + self.create_levels() + + @staticmethod + def get_max_level(input_count): + return AddReduceSingle.get_max_level(input_count) + + @staticmethod + def next_register_levels(register_levels): """``Iterable`` of ``register_levels`` for next recursive level.""" - for level in self.register_levels: + for level in register_levels: if level > 0: yield level - 1 + def create_levels(self): + """creates reduction levels""" + + mods = [] + next_levels = self.register_levels + partition_points = self.partition_points + part_ops = self.part_ops + n_parts = len(part_ops) + inputs = self.inputs + ilen = len(inputs) + while True: + next_level = AddReduceSingle(ilen, self.output_width, n_parts, + next_levels, partition_points) + mods.append(next_level) + next_levels = list(AddReduce.next_register_levels(next_levels)) + partition_points = next_level.i.reg_partition_points + inputs = next_level.o.inputs + ilen = len(inputs) + part_ops = next_level.i.part_ops + groups = AddReduceSingle.full_adder_groups(len(inputs)) + if len(groups) == 0: + break + + next_level = FinalAdd(ilen, self.output_width, n_parts, + next_levels, partition_points) + mods.append(next_level) + + self.levels = mods + def elaborate(self, platform): """Elaborate this module.""" - intermediate_terms, m = AddReduceSingle._elaborate(self, platform) - if intermediate_terms is None: - return m - - # recursive invocation of ``AddReduce`` - next_level = AddReduce(intermediate_terms, - len(self.output), - self.next_register_levels(), - self._reg_partition_points) - m.submodules.next_level = next_level - m.d.comb += self.output.eq(next_level.output) + m = Module() + + for i, next_level in enumerate(self.levels): + setattr(m.submodules, "next_level%d" % i, next_level) + + partition_points = self.partition_points + inputs = self.inputs + part_ops = self.part_ops + n_parts = len(part_ops) + n_inputs = len(inputs) + output_width = self.output_width + i = AddReduceData(partition_points, n_inputs, output_width, n_parts) + m.d.comb += i.eq_from(partition_points, inputs, part_ops) + for idx in range(len(self.levels)): + mcur = self.levels[idx] + if 0 in mcur.register_levels: + m.d.sync += mcur.i.eq(i) + else: + m.d.comb += mcur.i.eq(i) + i = mcur.o # for next loop + + # output comes from last module + m.d.comb += self.o.eq(i) + return m @@ -516,8 +680,8 @@ class ProductTerm(Elaboratable): bsb = Signal(self.width, reset_less=True) a_index, b_index = self.a_index, self.b_index pwidth = self.pwidth - m.d.comb += bsa.eq(self.a.bit_select(a_index * pwidth, pwidth)) - m.d.comb += bsb.eq(self.b.bit_select(b_index * pwidth, pwidth)) + m.d.comb += bsa.eq(self.a.part(a_index * pwidth, pwidth)) + m.d.comb += bsb.eq(self.b.part(b_index * pwidth, pwidth)) m.d.comb += self.ti.eq(bsa * bsb) m.d.comb += self.term.eq(get_term(self.ti, self.shift, self.enabled)) """ @@ -531,8 +695,8 @@ class ProductTerm(Elaboratable): asel = Signal(width, reset_less=True) bsel = Signal(width, reset_less=True) a_index, b_index = 
self.a_index, self.b_index - m.d.comb += asel.eq(self.a.bit_select(a_index * pwidth, pwidth)) - m.d.comb += bsel.eq(self.b.bit_select(b_index * pwidth, pwidth)) + m.d.comb += asel.eq(self.a.part(a_index * pwidth, pwidth)) + m.d.comb += bsel.eq(self.b.part(b_index * pwidth, pwidth)) m.d.comb += bsa.eq(get_term(asel, self.shift, self.enabled)) m.d.comb += bsb.eq(get_term(bsel, self.shift, self.enabled)) m.d.comb += self.ti.eq(bsa * bsb) @@ -576,6 +740,7 @@ class ProductTerms(Elaboratable): return m + class LSBNegTerm(Elaboratable): def __init__(self, bit_width): @@ -610,6 +775,45 @@ class LSBNegTerm(Elaboratable): return m +class Parts(Elaboratable): + + def __init__(self, pbwid, epps, n_parts): + self.pbwid = pbwid + # inputs + self.epps = PartitionPoints.like(epps, name="epps") # expanded points + # outputs + self.parts = [Signal(name=f"part_{i}") for i in range(n_parts)] + + def elaborate(self, platform): + m = Module() + + epps, parts = self.epps, self.parts + # collect part-bytes (double factor because the input is extended) + pbs = Signal(self.pbwid, reset_less=True) + tl = [] + for i in range(self.pbwid): + pb = Signal(name="pb%d" % i, reset_less=True) + m.d.comb += pb.eq(epps.part_byte(i, mfactor=2)) # double + tl.append(pb) + m.d.comb += pbs.eq(Cat(*tl)) + + # negated-temporary copy of partition bits + npbs = Signal.like(pbs, reset_less=True) + m.d.comb += npbs.eq(~pbs) + byte_count = 8 // len(parts) + for i in range(len(parts)): + pbl = [] + pbl.append(npbs[i * byte_count - 1]) + for j in range(i * byte_count, (i + 1) * byte_count - 1): + pbl.append(pbs[j]) + pbl.append(npbs[(i + 1) * byte_count - 1]) + value = Signal(len(pbl), name="value_%d" % i, reset_less=True) + m.d.comb += value.eq(Cat(*pbl)) + m.d.comb += parts[i].eq(~(value).bool()) + + return m + + class Part(Elaboratable): """ a key class which, depending on the partitioning, will determine what action to take when parts of the output are signed or unsigned. @@ -625,7 +829,10 @@ class Part(Elaboratable): the extra terms - as separate terms - are then thrown at the AddReduce alongside the multiplication part-results. 
""" - def __init__(self, width, n_parts, n_levels, pbwid): + def __init__(self, epps, width, n_parts, n_levels, pbwid): + + self.pbwid = pbwid + self.epps = epps # inputs self.a = Signal(64) @@ -636,13 +843,6 @@ class Part(Elaboratable): # outputs self.parts = [Signal(name=f"part_{i}") for i in range(n_parts)] - self.delayed_parts = [ - [Signal(name=f"delayed_part_{delay}_{i}") - for i in range(n_parts)] - for delay in range(n_levels)] - # XXX REALLY WEIRD BUG - have to take a copy of the last delayed_parts - self.dplast = [Signal(name=f"dplast_{i}") - for i in range(n_parts)] self.not_a_term = Signal(width) self.neg_lsb_a_term = Signal(width) @@ -652,28 +852,17 @@ class Part(Elaboratable): def elaborate(self, platform): m = Module() - pbs, parts, delayed_parts = self.pbs, self.parts, self.delayed_parts - # negated-temporary copy of partition bits - npbs = Signal.like(pbs, reset_less=True) - m.d.comb += npbs.eq(~pbs) + pbs, parts = self.pbs, self.parts + epps = self.epps + m.submodules.p = p = Parts(self.pbwid, epps, len(parts)) + m.d.comb += p.epps.eq(epps) + parts = p.parts + byte_count = 8 // len(parts) - for i in range(len(parts)): - pbl = [] - pbl.append(npbs[i * byte_count - 1]) - for j in range(i * byte_count, (i + 1) * byte_count - 1): - pbl.append(pbs[j]) - pbl.append(npbs[(i + 1) * byte_count - 1]) - value = Signal(len(pbl), name="value_%di" % i, reset_less=True) - m.d.comb += value.eq(Cat(*pbl)) - m.d.comb += parts[i].eq(~(value).bool()) - m.d.comb += delayed_parts[0][i].eq(parts[i]) - m.d.sync += [delayed_parts[j + 1][i].eq(delayed_parts[j][i]) - for j in range(len(delayed_parts)-1)] - m.d.comb += self.dplast[i].eq(delayed_parts[-1][i]) - not_a_term, neg_lsb_a_term, not_b_term, neg_lsb_b_term = \ - self.not_a_term, self.neg_lsb_a_term, \ - self.not_b_term, self.neg_lsb_b_term + not_a_term, neg_lsb_a_term, not_b_term, neg_lsb_b_term = ( + self.not_a_term, self.neg_lsb_a_term, + self.not_b_term, self.neg_lsb_b_term) byte_width = 8 // len(parts) # byte width bit_wid = 8 * byte_width # bit width @@ -683,7 +872,7 @@ class Part(Elaboratable): pa = LSBNegTerm(bit_wid) setattr(m.submodules, "lnt_%d_a_%d" % (bit_wid, i), pa) m.d.comb += pa.part.eq(parts[i]) - m.d.comb += pa.op.eq(self.a.bit_select(bit_wid * i, bit_wid)) + m.d.comb += pa.op.eq(self.a.part(bit_wid * i, bit_wid)) m.d.comb += pa.signed.eq(self.b_signed[i * byte_width]) # yes b m.d.comb += pa.msb.eq(self.b[(i + 1) * bit_wid - 1]) # really, b nat.append(pa.nt) @@ -693,7 +882,7 @@ class Part(Elaboratable): pb = LSBNegTerm(bit_wid) setattr(m.submodules, "lnt_%d_b_%d" % (bit_wid, i), pb) m.d.comb += pb.part.eq(parts[i]) - m.d.comb += pb.op.eq(self.b.bit_select(bit_wid * i, bit_wid)) + m.d.comb += pb.op.eq(self.b.part(bit_wid * i, bit_wid)) m.d.comb += pb.signed.eq(self.a_signed[i * byte_width]) # yes a m.d.comb += pb.msb.eq(self.a[(i + 1) * bit_wid - 1]) # really, a nbt.append(pb.nt) @@ -716,7 +905,7 @@ class IntermediateOut(Elaboratable): def __init__(self, width, out_wid, n_parts): self.width = width self.n_parts = n_parts - self.delayed_part_ops = [Signal(2, name="dpop%d" % i, reset_less=True) + self.part_ops = [Signal(2, name="dpop%d" % i, reset_less=True) for i in range(8)] self.intermed = Signal(out_wid, reset_less=True) self.output = Signal(out_wid//2, reset_less=True) @@ -730,9 +919,9 @@ class IntermediateOut(Elaboratable): for i in range(self.n_parts): op = Signal(w, reset_less=True, name="op%d_%d" % (w, i)) m.d.comb += op.eq( - Mux(self.delayed_part_ops[sel * i] == OP_MUL_LOW, - self.intermed.bit_select(i * w*2, w), - 
self.intermed.bit_select(i * w*2 + w, w))) + Mux(self.part_ops[sel * i] == OP_MUL_LOW, + self.intermed.part(i * w*2, w), + self.intermed.part(i * w*2 + w, w))) ol.append(op) m.d.comb += self.output.eq(Cat(*ol)) @@ -772,10 +961,10 @@ class FinalOut(Elaboratable): op = Signal(8, reset_less=True, name="op_%d" % i) m.d.comb += op.eq( Mux(self.d8[i] | self.d16[i // 2], - Mux(self.d8[i], self.i8.bit_select(i * 8, 8), - self.i16.bit_select(i * 8, 8)), - Mux(self.d32[i // 4], self.i32.bit_select(i * 8, 8), - self.i64.bit_select(i * 8, 8)))) + Mux(self.d8[i], self.i8.part(i * 8, 8), + self.i16.part(i * 8, 8)), + Mux(self.d32[i // 4], self.i32.part(i * 8, 8), + self.i64.part(i * 8, 8)))) ol.append(op) m.d.comb += self.out.eq(Cat(*ol)) return m @@ -874,12 +1063,6 @@ class Mul8_16_32_64(Elaboratable): # output self.output = Signal(64) - def _part_byte(self, index): - if index == -1 or index == 7: - return C(True, 1) - assert index >= 0 and index < 8 - return self.part_pts[index * 8 + 8] - def elaborate(self, platform): m = Module() @@ -888,10 +1071,17 @@ class Mul8_16_32_64(Elaboratable): tl = [] for i in range(8): pb = Signal(name="pb%d" % i, reset_less=True) - m.d.comb += pb.eq(self._part_byte(i)) + m.d.comb += pb.eq(self.part_pts.part_byte(i)) tl.append(pb) m.d.comb += pbs.eq(Cat(*tl)) + # create (doubled) PartitionPoints (output is double input width) + expanded_part_pts = eps = PartitionPoints() + for i, v in self.part_pts.items(): + ep = Signal(name=f"expanded_part_pts_{i*2}", reset_less=True) + expanded_part_pts[i * 2] = ep + m.d.comb += ep.eq(v) + # local variables signs = [] for i in range(8): @@ -900,20 +1090,11 @@ class Mul8_16_32_64(Elaboratable): setattr(m.submodules, "signs%d" % i, s) m.d.comb += s.part_ops.eq(self.part_ops[i]) - delayed_part_ops = [ - [Signal(2, name=f"_delayed_part_ops_{delay}_{i}") - for i in range(8)] - for delay in range(1 + len(self.register_levels))] - for i in range(len(self.part_ops)): - m.d.comb += delayed_part_ops[0][i].eq(self.part_ops[i]) - m.d.sync += [delayed_part_ops[j + 1][i].eq(delayed_part_ops[j][i]) - for j in range(len(self.register_levels))] - n_levels = len(self.register_levels)+1 - m.submodules.part_8 = part_8 = Part(128, 8, n_levels, 8) - m.submodules.part_16 = part_16 = Part(128, 4, n_levels, 8) - m.submodules.part_32 = part_32 = Part(128, 2, n_levels, 8) - m.submodules.part_64 = part_64 = Part(128, 1, n_levels, 8) + m.submodules.part_8 = part_8 = Part(eps, 128, 8, n_levels, 8) + m.submodules.part_16 = part_16 = Part(eps, 128, 4, n_levels, 8) + m.submodules.part_32 = part_32 = Part(eps, 128, 2, n_levels, 8) + m.submodules.part_64 = part_64 = Part(eps, 128, 1, n_levels, 8) nat_l, nbt_l, nla_l, nlb_l = [], [], [], [] for mod in [part_8, part_16, part_32, part_64]: m.d.comb += mod.a.eq(self.a) @@ -954,50 +1135,59 @@ class Mul8_16_32_64(Elaboratable): m.d.comb += mod.orin[i].eq(l[i]) terms.append(mod.orout) - expanded_part_pts = PartitionPoints() - for i, v in self.part_pts.items(): - signal = Signal(name=f"expanded_part_pts_{i*2}", reset_less=True) - expanded_part_pts[i * 2] = signal - m.d.comb += signal.eq(v) - add_reduce = AddReduce(terms, 128, self.register_levels, - expanded_part_pts) + expanded_part_pts, + self.part_ops) + + out_part_ops = add_reduce.o.part_ops + out_part_pts = add_reduce.o.reg_partition_points + m.submodules.add_reduce = add_reduce - m.d.comb += self._intermediate_output.eq(add_reduce.output) + m.d.comb += self._intermediate_output.eq(add_reduce.o.output) # create _output_64 m.submodules.io64 = io64 = IntermediateOut(64, 128, 
1) m.d.comb += io64.intermed.eq(self._intermediate_output) for i in range(8): - m.d.comb += io64.delayed_part_ops[i].eq(delayed_part_ops[-1][i]) + m.d.comb += io64.part_ops[i].eq(out_part_ops[i]) # create _output_32 m.submodules.io32 = io32 = IntermediateOut(32, 128, 2) m.d.comb += io32.intermed.eq(self._intermediate_output) for i in range(8): - m.d.comb += io32.delayed_part_ops[i].eq(delayed_part_ops[-1][i]) + m.d.comb += io32.part_ops[i].eq(out_part_ops[i]) # create _output_16 m.submodules.io16 = io16 = IntermediateOut(16, 128, 4) m.d.comb += io16.intermed.eq(self._intermediate_output) for i in range(8): - m.d.comb += io16.delayed_part_ops[i].eq(delayed_part_ops[-1][i]) + m.d.comb += io16.part_ops[i].eq(out_part_ops[i]) # create _output_8 m.submodules.io8 = io8 = IntermediateOut(8, 128, 8) m.d.comb += io8.intermed.eq(self._intermediate_output) for i in range(8): - m.d.comb += io8.delayed_part_ops[i].eq(delayed_part_ops[-1][i]) + m.d.comb += io8.part_ops[i].eq(out_part_ops[i]) + + m.submodules.p_8 = p_8 = Parts(8, eps, len(part_8.parts)) + m.submodules.p_16 = p_16 = Parts(8, eps, len(part_16.parts)) + m.submodules.p_32 = p_32 = Parts(8, eps, len(part_32.parts)) + m.submodules.p_64 = p_64 = Parts(8, eps, len(part_64.parts)) + + m.d.comb += p_8.epps.eq(out_part_pts) + m.d.comb += p_16.epps.eq(out_part_pts) + m.d.comb += p_32.epps.eq(out_part_pts) + m.d.comb += p_64.epps.eq(out_part_pts) # final output m.submodules.finalout = finalout = FinalOut(64) - for i in range(len(part_8.delayed_parts[-1])): - m.d.comb += finalout.d8[i].eq(part_8.dplast[i]) - for i in range(len(part_16.delayed_parts[-1])): - m.d.comb += finalout.d16[i].eq(part_16.dplast[i]) - for i in range(len(part_32.delayed_parts[-1])): - m.d.comb += finalout.d32[i].eq(part_32.dplast[i]) + for i in range(len(part_8.parts)): + m.d.comb += finalout.d8[i].eq(p_8.parts[i]) + for i in range(len(part_16.parts)): + m.d.comb += finalout.d16[i].eq(p_16.parts[i]) + for i in range(len(part_32.parts)): + m.d.comb += finalout.d32[i].eq(p_32.parts[i]) m.d.comb += finalout.i8.eq(io8.output) m.d.comb += finalout.i16.eq(io16.output) m.d.comb += finalout.i32.eq(io32.output)
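
Reviewer's note (not part of the patch): the reduction that this diff restructures into explicit AddReduceSingle/FinalAdd stages is easier to follow as a plain-Python behavioural model. The sketch below is illustrative only; the names as_mask_bits, masked_csa, partitioned_add and add_reduce are invented here and do not exist in ieee754fpu. "points" are the bit indices of *enabled* partition boundaries, in the same spirit as PartitionPoints.as_mask().

def as_mask_bits(points, width):
    """All-ones mask with a 0 at every enabled partition boundary."""
    mask = (1 << width) - 1
    for p in points:
        mask &= ~(1 << p)
    return mask

def masked_csa(a, b, c, mask, width):
    """3:2 compressor (cf. MaskedFullAdder): three terms become a sum and
    a carry; the carry is shifted *before* masking, so a cleared mask bit
    stops carries crossing that partition boundary."""
    s = (a ^ b ^ c) & ((1 << width) - 1)
    carry = ((a & b) | (b & c) | (c & a)) << 1
    return s, carry & mask

def partitioned_add(a, b, points, width):
    """Final 2-input add, one independent sum per partition.  (The real
    PartitionedAdder instead inserts one control bit per boundary, adds
    once, and discards the inserted bits; the result is the same.)"""
    bounds = [0] + sorted(points) + [width]
    out = 0
    for lo, hi in zip(bounds, bounds[1:]):
        lane = (1 << (hi - lo)) - 1
        out |= ((((a >> lo) & lane) + ((b >> lo) & lane)) & lane) << lo
    return out

def add_reduce(terms, points, width):
    """Wallace-tree-style reduction: layers of masked 3:2 compressors
    (cf. AddReduceSingle.create_next_terms) until two terms remain, then
    one partitioned add (cf. FinalAdd)."""
    mask = as_mask_bits(points, width)
    terms = [t & ((1 << width) - 1) for t in terms]
    while len(terms) > 2:
        nxt = []
        for i in range(0, len(terms) - 2, 3):       # full_adder_groups
            s, c = masked_csa(terms[i], terms[i + 1], terms[i + 2],
                              mask, width)
            nxt += [s, c]
        nxt += terms[len(terms) - len(terms) % 3:]  # 0, 1 or 2 left over
        terms = nxt
    if not terms:
        return 0
    if len(terms) == 1:
        return terms[0]
    return partitioned_add(terms[0], terms[1], points, width)

# e.g. two independent 16-bit lanes of a 32-bit datapath:
#   add_reduce([0x0001fff0, 0x00020011, 0x00030008], [16], 32)
#   == 0x00060009   (each half summed modulo 2**16, no carry at bit 16)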