diff --git a/src/ieee754/part_mul_add/multiply.py b/src/ieee754/part_mul_add/multiply.py
index 084f7dde..17eec4e5 100644
--- a/src/ieee754/part_mul_add/multiply.py
+++ b/src/ieee754/part_mul_add/multiply.py
@@ -50,15 +50,17 @@ class PartitionPoints(dict):
             raise ValueError("point must be a non-negative integer")
         self[point] = Value.wrap(enabled)
 
-    def like(self, name=None, src_loc_at=0):
+    def like(self, name=None, src_loc_at=0, mul=1):
         """Create a new ``PartitionPoints`` with ``Signal``s for all values.
 
         :param name: the base name for the new ``Signal``s.
+        :param mul: multiplication factor applied to each point index
         """
         if name is None:
             name = Signal(src_loc_at=1+src_loc_at).name  # get variable name
         retval = PartitionPoints()
         for point, enabled in self.items():
+            point *= mul
             retval[point] = Signal(enabled.shape(), name=f"{name}_{point}")
         return retval
 
@@ -298,6 +300,62 @@ class PartitionedAdder(Elaboratable):
 FULL_ADDER_INPUT_COUNT = 3
 
 
+class AddReduceData:
+
+    def __init__(self, ppoints, n_inputs, output_width, n_parts):
+        self.part_ops = [Signal(2, name=f"part_ops_{i}")
+                         for i in range(n_parts)]
+        self.inputs = [Signal(output_width, name=f"inputs[{i}]")
+                       for i in range(n_inputs)]
+        self.reg_partition_points = ppoints.like()
+
+    def eq(self, rhs):
+        return [self.reg_partition_points.eq(rhs.reg_partition_points)] + \
+               [self.inputs[i].eq(rhs.inputs[i])
+                for i in range(len(self.inputs))] + \
+               [self.part_ops[i].eq(rhs.part_ops[i])
+                for i in range(len(self.part_ops))]
+
+
+class FinalAdd(Elaboratable):
+    """ Final stage of add reduce: 0, 1 or 2 remaining terms
+    """
+
+    def __init__(self, n_inputs, output_width, n_parts, register_levels,
+                 partition_points):
+        self.i = AddReduceData(partition_points, n_inputs,
+                               output_width, n_parts)
+        self.n_inputs = n_inputs
+        self.n_parts = n_parts
+        self._resized_inputs = self.i.inputs
+        self.register_levels = list(register_levels)
+        self.output = Signal(output_width)
+        self.partition_points = PartitionPoints(partition_points)
+        if not self.partition_points.fits_in_width(output_width):
+            raise ValueError("partition_points doesn't fit in output_width")
+        self.intermediate_terms = []
+
+    def elaborate(self, platform):
+        """Elaborate this module."""
+        m = Module()
+
+        if self.n_inputs == 0:
+            # use 0 as the default output value
+            m.d.comb += self.output.eq(0)
+        elif self.n_inputs == 1:
+            # handle single input
+            m.d.comb += self.output.eq(self._resized_inputs[0])
+        else:
+            # base case for adding 2 inputs
+            assert self.n_inputs == 2
+            adder = PartitionedAdder(len(self.output),
+                                     self.i.reg_partition_points)
+            m.submodules.final_adder = adder
+            m.d.comb += adder.a.eq(self._resized_inputs[0])
+            m.d.comb += adder.b.eq(self._resized_inputs[1])
+            m.d.comb += self.output.eq(adder.output)
+        return m
+
 class AddReduceSingle(Elaboratable):
     """Add list of numbers together.
 
@@ -311,8 +369,8 @@ class AddReduceSingle(Elaboratable):
         supported, except for by ``Signal.eq``.
     """
 
-    def __init__(self, inputs, output_width, register_levels, partition_points,
-                 part_ops):
+    def __init__(self, n_inputs, output_width, n_parts, register_levels,
+                 partition_points):
         """Create an ``AddReduce``.
 
         :param inputs: input ``Signal``s to be summed.
@@ -321,21 +379,18 @@ class AddReduceSingle(Elaboratable):
             pipeline registers.
:param partition_points: the input partition points. """ - self.part_ops = part_ops - self.out_part_ops = [Signal(2, name=f"out_part_ops_{i}") - for i in range(len(part_ops))] - self.inputs = list(inputs) - self._resized_inputs = [ - Signal(output_width, name=f"resized_inputs[{i}]") - for i in range(len(self.inputs))] + self.n_inputs = n_inputs + self.n_parts = n_parts + self.output_width = output_width + self.i = AddReduceData(partition_points, n_inputs, + output_width, n_parts) + self._resized_inputs = self.i.inputs self.register_levels = list(register_levels) - self.output = Signal(output_width) self.partition_points = PartitionPoints(partition_points) if not self.partition_points.fits_in_width(output_width): raise ValueError("partition_points doesn't fit in output_width") - self._reg_partition_points = self.partition_points.like() - max_level = AddReduceSingle.get_max_level(len(self.inputs)) + max_level = AddReduceSingle.get_max_level(n_inputs) for level in self.register_levels: if level > max_level: raise ValueError( @@ -345,7 +400,7 @@ class AddReduceSingle(Elaboratable): # because we need to know what they are (in order to set up the # interconnects back in AddReduce), but cannot do the m.d.comb += # etc because this is not in elaboratable. - self.groups = AddReduceSingle.full_adder_groups(len(self.inputs)) + self.groups = AddReduceSingle.full_adder_groups(n_inputs) self._intermediate_terms = [] if len(self.groups) != 0: self.create_next_terms() @@ -377,45 +432,10 @@ class AddReduceSingle(Elaboratable): """Elaborate this module.""" m = Module() - # resize inputs to correct bit-width and optionally add in - # pipeline registers - resized_input_assignments = [self._resized_inputs[i].eq(self.inputs[i]) - for i in range(len(self.inputs))] - copy_part_ops = [self.out_part_ops[i].eq(self.part_ops[i]) - for i in range(len(self.part_ops))] - if 0 in self.register_levels: - m.d.sync += copy_part_ops - m.d.sync += resized_input_assignments - m.d.sync += self._reg_partition_points.eq(self.partition_points) - else: - m.d.comb += copy_part_ops - m.d.comb += resized_input_assignments - m.d.comb += self._reg_partition_points.eq(self.partition_points) - for (value, term) in self._intermediate_terms: m.d.comb += term.eq(value) - # if there are no full adders to create, then we handle the base cases - # and return, otherwise we go on to the recursive case - if len(self.groups) == 0: - if len(self.inputs) == 0: - # use 0 as the default output value - m.d.comb += self.output.eq(0) - elif len(self.inputs) == 1: - # handle single input - m.d.comb += self.output.eq(self._resized_inputs[0]) - else: - # base case for adding 2 inputs - assert len(self.inputs) == 2 - adder = PartitionedAdder(len(self.output), - self._reg_partition_points) - m.submodules.final_adder = adder - m.d.comb += adder.a.eq(self._resized_inputs[0]) - m.d.comb += adder.b.eq(self._resized_inputs[1]) - m.d.comb += self.output.eq(adder.output) - return m - - mask = self._reg_partition_points.as_mask(len(self.output)) + mask = self.i.reg_partition_points.as_mask(self.output_width) m.d.comb += self.part_mask.eq(mask) # add and link the intermediate term modules @@ -437,35 +457,35 @@ class AddReduceSingle(Elaboratable): def add_intermediate_term(value): intermediate_term = Signal( - len(self.output), + self.output_width, name=f"intermediate_terms[{len(intermediate_terms)}]") _intermediate_terms.append((value, intermediate_term)) intermediate_terms.append(intermediate_term) # store mask in intermediary (simplifies graph) - self.part_mask = 
Signal(len(self.output), reset_less=True) + self.part_mask = Signal(self.output_width, reset_less=True) # create full adders for this recursive level. # this shrinks N terms to 2 * (N // 3) plus the remainder self.adders = [] for i in self.groups: - adder_i = MaskedFullAdder(len(self.output)) + adder_i = MaskedFullAdder(self.output_width) self.adders.append((i, adder_i)) # add both the sum and the masked-carry to the next level. # 3 inputs have now been reduced to 2... add_intermediate_term(adder_i.sum) add_intermediate_term(adder_i.mcarry) # handle the remaining inputs. - if len(self.inputs) % FULL_ADDER_INPUT_COUNT == 1: + if self.n_inputs % FULL_ADDER_INPUT_COUNT == 1: add_intermediate_term(self._resized_inputs[-1]) - elif len(self.inputs) % FULL_ADDER_INPUT_COUNT == 2: + elif self.n_inputs % FULL_ADDER_INPUT_COUNT == 2: # Just pass the terms to the next layer, since we wouldn't gain # anything by using a half adder since there would still be 2 terms # and just passing the terms to the next layer saves gates. add_intermediate_term(self._resized_inputs[-2]) add_intermediate_term(self._resized_inputs[-1]) else: - assert len(self.inputs) % FULL_ADDER_INPUT_COUNT == 0 + assert self.n_inputs % FULL_ADDER_INPUT_COUNT == 0 self.intermediate_terms = intermediate_terms self._intermediate_terms = _intermediate_terms @@ -523,16 +543,25 @@ class AddReduce(Elaboratable): partition_points = self.partition_points inputs = self.inputs part_ops = self.part_ops + n_parts = len(part_ops) while True: - next_level = AddReduceSingle(inputs, self.output_width, next_levels, - partition_points, part_ops) + ilen = len(inputs) + next_level = AddReduceSingle(ilen, self.output_width, n_parts, + next_levels, partition_points) mods.append(next_level) - if len(next_level.groups) == 0: - break next_levels = list(AddReduce.next_register_levels(next_levels)) - partition_points = next_level._reg_partition_points + partition_points = next_level.i.reg_partition_points inputs = next_level.intermediate_terms - part_ops = next_level.out_part_ops + ilen = len(inputs) + part_ops = next_level.i.part_ops + groups = AddReduceSingle.full_adder_groups(len(inputs)) + if len(groups) == 0: + break + + if ilen != 0: + next_level = FinalAdd(ilen, self.output_width, n_parts, + next_levels, partition_points) + mods.append(next_level) self.levels = mods @@ -543,9 +572,30 @@ class AddReduce(Elaboratable): for i, next_level in enumerate(self.levels): setattr(m.submodules, "next_level%d" % i, next_level) + partition_points = self.partition_points + inputs = self.inputs + part_ops = self.part_ops + for i in range(len(self.levels)): + mcur = self.levels[i] + inassign = [mcur._resized_inputs[i].eq(inputs[i]) + for i in range(len(inputs))] + copy_part_ops = [mcur.i.part_ops[i].eq(part_ops[i]) + for i in range(len(part_ops))] + if 0 in mcur.register_levels: + m.d.sync += copy_part_ops + m.d.sync += inassign + m.d.sync += mcur.i.reg_partition_points.eq(partition_points) + else: + m.d.comb += copy_part_ops + m.d.comb += inassign + m.d.comb += mcur.i.reg_partition_points.eq(partition_points) + partition_points = mcur.i.reg_partition_points + inputs = mcur.intermediate_terms + part_ops = mcur.i.part_ops + # output comes from last module m.d.comb += self.output.eq(next_level.output) - copy_part_ops = [self.out_part_ops[i].eq(next_level.out_part_ops[i]) + copy_part_ops = [self.out_part_ops[i].eq(next_level.i.part_ops[i]) for i in range(len(self.part_ops))] m.d.comb += copy_part_ops @@ -708,6 +758,45 @@ class LSBNegTerm(Elaboratable): return m +class 
Parts(Elaboratable): + + def __init__(self, pbwid, epps, n_parts): + self.pbwid = pbwid + # inputs + self.epps = PartitionPoints.like(epps, name="epps") # expanded points + # outputs + self.parts = [Signal(name=f"part_{i}") for i in range(n_parts)] + + def elaborate(self, platform): + m = Module() + + epps, parts = self.epps, self.parts + # collect part-bytes (double factor because the input is extended) + pbs = Signal(self.pbwid, reset_less=True) + tl = [] + for i in range(self.pbwid): + pb = Signal(name="pb%d" % i, reset_less=True) + m.d.comb += pb.eq(epps.part_byte(i, mfactor=2)) # double + tl.append(pb) + m.d.comb += pbs.eq(Cat(*tl)) + + # negated-temporary copy of partition bits + npbs = Signal.like(pbs, reset_less=True) + m.d.comb += npbs.eq(~pbs) + byte_count = 8 // len(parts) + for i in range(len(parts)): + pbl = [] + pbl.append(npbs[i * byte_count - 1]) + for j in range(i * byte_count, (i + 1) * byte_count - 1): + pbl.append(pbs[j]) + pbl.append(npbs[(i + 1) * byte_count - 1]) + value = Signal(len(pbl), name="value_%d" % i, reset_less=True) + m.d.comb += value.eq(Cat(*pbl)) + m.d.comb += parts[i].eq(~(value).bool()) + + return m + + class Part(Elaboratable): """ a key class which, depending on the partitioning, will determine what action to take when parts of the output are signed or unsigned. @@ -723,7 +812,10 @@ class Part(Elaboratable): the extra terms - as separate terms - are then thrown at the AddReduce alongside the multiplication part-results. """ - def __init__(self, width, n_parts, n_levels, pbwid): + def __init__(self, epps, width, n_parts, n_levels, pbwid): + + self.pbwid = pbwid + self.epps = epps # inputs self.a = Signal(64) @@ -734,13 +826,6 @@ class Part(Elaboratable): # outputs self.parts = [Signal(name=f"part_{i}") for i in range(n_parts)] - self.delayed_parts = [ - [Signal(name=f"delayed_part_{delay}_{i}") - for i in range(n_parts)] - for delay in range(n_levels)] - # XXX REALLY WEIRD BUG - have to take a copy of the last delayed_parts - self.dplast = [Signal(name=f"dplast_{i}") - for i in range(n_parts)] self.not_a_term = Signal(width) self.neg_lsb_a_term = Signal(width) @@ -750,28 +835,17 @@ class Part(Elaboratable): def elaborate(self, platform): m = Module() - pbs, parts, delayed_parts = self.pbs, self.parts, self.delayed_parts - # negated-temporary copy of partition bits - npbs = Signal.like(pbs, reset_less=True) - m.d.comb += npbs.eq(~pbs) + pbs, parts = self.pbs, self.parts + epps = self.epps + m.submodules.p = p = Parts(self.pbwid, epps, len(parts)) + m.d.comb += p.epps.eq(epps) + parts = p.parts + byte_count = 8 // len(parts) - for i in range(len(parts)): - pbl = [] - pbl.append(npbs[i * byte_count - 1]) - for j in range(i * byte_count, (i + 1) * byte_count - 1): - pbl.append(pbs[j]) - pbl.append(npbs[(i + 1) * byte_count - 1]) - value = Signal(len(pbl), name="value_%di" % i, reset_less=True) - m.d.comb += value.eq(Cat(*pbl)) - m.d.comb += parts[i].eq(~(value).bool()) - m.d.comb += delayed_parts[0][i].eq(parts[i]) - m.d.sync += [delayed_parts[j + 1][i].eq(delayed_parts[j][i]) - for j in range(len(delayed_parts)-1)] - m.d.comb += self.dplast[i].eq(delayed_parts[-1][i]) - not_a_term, neg_lsb_a_term, not_b_term, neg_lsb_b_term = \ - self.not_a_term, self.neg_lsb_a_term, \ - self.not_b_term, self.neg_lsb_b_term + not_a_term, neg_lsb_a_term, not_b_term, neg_lsb_b_term = ( + self.not_a_term, self.neg_lsb_a_term, + self.not_b_term, self.neg_lsb_b_term) byte_width = 8 // len(parts) # byte width bit_wid = 8 * byte_width # bit width @@ -985,7 +1059,7 @@ class 
Mul8_16_32_64(Elaboratable): m.d.comb += pbs.eq(Cat(*tl)) # create (doubled) PartitionPoints (output is double input width) - expanded_part_pts = PartitionPoints() + expanded_part_pts = eps = PartitionPoints() for i, v in self.part_pts.items(): ep = Signal(name=f"expanded_part_pts_{i*2}", reset_less=True) expanded_part_pts[i * 2] = ep @@ -1000,10 +1074,10 @@ class Mul8_16_32_64(Elaboratable): m.d.comb += s.part_ops.eq(self.part_ops[i]) n_levels = len(self.register_levels)+1 - m.submodules.part_8 = part_8 = Part(128, 8, n_levels, 8) - m.submodules.part_16 = part_16 = Part(128, 4, n_levels, 8) - m.submodules.part_32 = part_32 = Part(128, 2, n_levels, 8) - m.submodules.part_64 = part_64 = Part(128, 1, n_levels, 8) + m.submodules.part_8 = part_8 = Part(eps, 128, 8, n_levels, 8) + m.submodules.part_16 = part_16 = Part(eps, 128, 4, n_levels, 8) + m.submodules.part_32 = part_32 = Part(eps, 128, 2, n_levels, 8) + m.submodules.part_64 = part_64 = Part(eps, 128, 1, n_levels, 8) nat_l, nbt_l, nla_l, nlb_l = [], [], [], [] for mod in [part_8, part_16, part_32, part_64]: m.d.comb += mod.a.eq(self.a) @@ -1050,7 +1124,8 @@ class Mul8_16_32_64(Elaboratable): expanded_part_pts, self.part_ops) - out_part_ops = add_reduce.levels[-1].out_part_ops + out_part_ops = add_reduce.levels[-1].i.part_ops + out_part_pts = add_reduce.levels[-1].i.reg_partition_points m.submodules.add_reduce = add_reduce m.d.comb += self._intermediate_output.eq(add_reduce.output) @@ -1078,14 +1153,24 @@ class Mul8_16_32_64(Elaboratable): for i in range(8): m.d.comb += io8.part_ops[i].eq(out_part_ops[i]) + m.submodules.p_8 = p_8 = Parts(8, eps, len(part_8.parts)) + m.submodules.p_16 = p_16 = Parts(8, eps, len(part_16.parts)) + m.submodules.p_32 = p_32 = Parts(8, eps, len(part_32.parts)) + m.submodules.p_64 = p_64 = Parts(8, eps, len(part_64.parts)) + + m.d.comb += p_8.epps.eq(out_part_pts) + m.d.comb += p_16.epps.eq(out_part_pts) + m.d.comb += p_32.epps.eq(out_part_pts) + m.d.comb += p_64.epps.eq(out_part_pts) + # final output m.submodules.finalout = finalout = FinalOut(64) - for i in range(len(part_8.delayed_parts[-1])): - m.d.comb += finalout.d8[i].eq(part_8.dplast[i]) - for i in range(len(part_16.delayed_parts[-1])): - m.d.comb += finalout.d16[i].eq(part_16.dplast[i]) - for i in range(len(part_32.delayed_parts[-1])): - m.d.comb += finalout.d32[i].eq(part_32.dplast[i]) + for i in range(len(part_8.parts)): + m.d.comb += finalout.d8[i].eq(p_8.parts[i]) + for i in range(len(part_16.parts)): + m.d.comb += finalout.d16[i].eq(p_16.parts[i]) + for i in range(len(part_32.parts)): + m.d.comb += finalout.d32[i].eq(p_32.parts[i]) m.d.comb += finalout.i8.eq(io8.output) m.d.comb += finalout.i16.eq(io16.output) m.d.comb += finalout.i32.eq(io32.output)
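
Some notes on the machinery this patch touches, each with a small pure-Python
sketch; the names defined in the sketches are illustrative and not part of
multiply.py.

First, the new ``mul`` argument to PartitionPoints.like(): the partition
points of the input operands must reappear at doubled bit indices in the
double-width product, which is what the expanded_part_pts loop in
Mul8_16_32_64 builds by hand and what like(mul=2) can now express directly.
A dict-based sketch of the index arithmetic, with plain booleans standing in
for the enable Signals:

# PartitionPoints maps bit index -> enable; like(mul=2) re-creates the
# mapping with every index multiplied, so a partition at bit 8 of the
# operands lands at bit 16 of the product.
points = {8: True, 16: False, 24: True}   # hypothetical 32-bit operand
expanded = {point * 2: enabled for point, enabled in points.items()}
assert expanded == {16: True, 32: False, 48: True}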
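
The reduction levels rest on the carry-save identity implemented by the full
adders: bitwise sum = a ^ b ^ c and carry = (a & b) | (a & c) | (b & c)
satisfy a + b + c == sum + (carry << 1) at any width, so three terms are
compressed to two with no carry propagation. MaskedFullAdder additionally
ANDs the shifted carry with part_mask so that carries cannot cross a
partition boundary; that masking is not modelled in this sketch.

import random

def csa(a, b, c):
    """3:2 carry-save compressor on plain integers."""
    s = a ^ b ^ c                        # bitwise sum, no carries
    carry = (a & b) | (a & c) | (b & c)  # bitwise majority = carry out
    return s, carry << 1                 # carry feeds the next column up

# spot-check the identity a + b + c == s + shifted carry
for _ in range(1000):
    a, b, c = (random.getrandbits(128) for _ in range(3))
    s, cy = csa(a, b, c)
    assert a + b + c == s + cy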
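
Applied in layers, that compression gives the schedule AddReduce now builds
explicitly: each AddReduceSingle turns N terms into 2 * (N // 3) plus the
0, 1 or 2 left-over terms (exactly as the comments in create_next_terms
describe), and the constructor loop stops as soon as full_adder_groups
returns no groups, handing the final two (or fewer) terms to FinalAdd. A
sketch of the term count per level:

FULL_ADDER_INPUT_COUNT = 3

def reduction_schedule(n_terms):
    """Yield the term count at each level until FinalAdd can take over."""
    yield n_terms
    while n_terms > 2:
        full_adders = n_terms // FULL_ADDER_INPUT_COUNT
        n_terms = 2 * full_adders + n_terms % FULL_ADDER_INPUT_COUNT
        yield n_terms

# e.g. 64 partial products: 64, 43, 29, 20, 14, 10, 7, 5, 4, 3, 2
print(list(reduction_schedule(64)))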
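
On pipelining: the input copies for each level (part_ops, the resized inputs,
the partition points) now happen in AddReduce.elaborate, through m.d.sync
when 0 is in that level's register_levels and through m.d.comb otherwise,
with next_register_levels shifting the list down between levels. Each number
in the original register_levels therefore requests one pipeline register at
exactly that depth of the tree. A sketch assuming next_register_levels simply
decrements and drops expired levels (a model of how it is used here, not a
copy of its body):

def next_register_levels(levels):
    """Assumed model: decrement each level, dropping the ones at zero."""
    return [level - 1 for level in levels if level > 0]

levels = [1, 3]       # request registers at depths 1 and 3
depth = 0
while levels:
    domain = "sync (pipeline register)" if 0 in levels else "comb (wires)"
    print(f"depth {depth}: {domain}")
    levels = next_register_levels(levels)
    depth += 1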
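
Finally, the new Parts module, which recomputes the per-size "part in use"
flags combinatorially from the current partition points instead of delaying
them alongside the pipeline (removing the delayed_parts/dplast arrays and the
"REALLY WEIRD BUG" workaround). Part i, spanning byte_count bytes, is in use
exactly when the partition bit just below its span and the bit at its top
boundary are both set while no partition bit falls inside the span; that is
what the Cat of npbs/pbs bits followed by ~value.bool() computes. A
pure-Python sketch over a plain list of partition-bit booleans (abstracting
away the doubled part_byte sampling); for i == 0 the lower index is -1,
wrapping to the last bit exactly as npbs[i * byte_count - 1] does:

def part_active(pbs, i, byte_count):
    """True when part i's span is closed at both ends and unbroken inside."""
    lo = i * byte_count - 1        # boundary bit below the span (-1 wraps)
    hi = (i + 1) * byte_count - 1  # boundary bit at the top of the span
    inner = pbs[i * byte_count:hi] # partition bits strictly inside the span
    return pbs[lo] and pbs[hi] and not any(inner)

pbs = [True] * 8                    # all partition bits set: 8 byte-parts
assert all(part_active(pbs, i, 1) for i in range(8))
assert not part_active(pbs, 0, 2)   # no 16-bit part while bit 0 is set

pbs[0] = False                      # merge bytes 0 and 1
assert part_active(pbs, 0, 2)       # 16-bit part 0 now in use
assert not part_active(pbs, 0, 1)   # byte parts 0 and 1 no longer are
assert not part_active(pbs, 1, 1)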