use part_ops not out_part_ops
diff --git a/src/ieee754/part_mul_add/multiply.py b/src/ieee754/part_mul_add/multiply.py
index a5c5e7064bfafdbeb37acf679d9f852447454549..17eec4e5d4e78d3d0d763a04734eb9d709e4110c 100644
--- a/src/ieee754/part_mul_add/multiply.py
+++ b/src/ieee754/part_mul_add/multiply.py
@@ -300,6 +300,62 @@ class PartitionedAdder(Elaboratable):
 
 FULL_ADDER_INPUT_COUNT = 3
 
+class AddReduceData:
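+    """Bundles the state passed between add-reduce levels: the 2-bit
+    operation code for each part, the input terms, and a copy of the
+    partition points (registered or combinatorial, depending on the
+    requested register_levels).
+    """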
+
+    def __init__(self, ppoints, n_inputs, output_width, n_parts):
+        self.part_ops = [Signal(2, name=f"part_ops_{i}")
+                         for i in range(n_parts)]
+        self.inputs = [Signal(output_width, name=f"inputs[{i}]")
+                       for i in range(n_inputs)]
+        self.reg_partition_points = ppoints.like()
+
+    def eq(self, rhs):
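+        """Returns a list of assignments, leaving the caller free to
+        choose the domain: m.d.comb += x.eq(y) or m.d.sync += x.eq(y).
+        """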
+        return [self.reg_partition_points.eq(rhs.reg_partition_points)] + \
+               [self.inputs[i].eq(rhs.inputs[i])
+                for i in range(len(self.inputs))] + \
+               [self.part_ops[i].eq(rhs.part_ops[i])
+                for i in range(len(self.part_ops))]
+
+
+class FinalAdd(Elaboratable):
+    """ Final stage of add reduce
+    """
+
+    def __init__(self, n_inputs, output_width, n_parts, register_levels,
+                       partition_points):
+        self.i = AddReduceData(partition_points, n_inputs,
+                               output_width, n_parts)
+        self.n_inputs = n_inputs
+        self.n_parts = n_parts
+        self._resized_inputs = self.i.inputs
+        self.register_levels = list(register_levels)
+        self.output = Signal(output_width)
+        self.partition_points = PartitionPoints(partition_points)
+        if not self.partition_points.fits_in_width(output_width):
+            raise ValueError("partition_points doesn't fit in output_width")
+        self.intermediate_terms = []
+
+    def elaborate(self, platform):
+        """Elaborate this module."""
+        m = Module()
+
+        if self.n_inputs == 0:
+            # use 0 as the default output value
+            m.d.comb += self.output.eq(0)
+        elif self.n_inputs == 1:
+            # handle single input
+            m.d.comb += self.output.eq(self._resized_inputs[0])
+        else:
+            # base case for adding 2 inputs
+            assert self.n_inputs == 2
+            adder = PartitionedAdder(len(self.output),
+                                     self.i.reg_partition_points)
+            m.submodules.final_adder = adder
+            m.d.comb += adder.a.eq(self._resized_inputs[0])
+            m.d.comb += adder.b.eq(self._resized_inputs[1])
+            m.d.comb += self.output.eq(adder.output)
+        return m
+
 
 class AddReduceSingle(Elaboratable):
     """Add list of numbers together.
@@ -313,8 +369,8 @@ class AddReduceSingle(Elaboratable):
         supported, except for by ``Signal.eq``.
     """
 
-    def __init__(self, inputs, output_width, register_levels, partition_points,
-                       part_ops):
+    def __init__(self, n_inputs, output_width, n_parts, register_levels,
+                       partition_points):
         """Create an ``AddReduce``.
 
-        :param inputs: input ``Signal``s to be summed.
+        :param n_inputs: number of input terms to be summed.
@@ -323,21 +379,18 @@ class AddReduceSingle(Elaboratable):
             pipeline registers.
         :param partition_points: the input partition points.
         """
-        self.part_ops = part_ops
-        self.out_part_ops = [Signal(2, name=f"out_part_ops_{i}")
-                          for i in range(len(part_ops))]
-        self.inputs = list(inputs)
-        self._resized_inputs = [
-            Signal(output_width, name=f"resized_inputs[{i}]")
-            for i in range(len(self.inputs))]
+        self.n_inputs = n_inputs
+        self.n_parts = n_parts
+        self.output_width = output_width
+        self.i = AddReduceData(partition_points, n_inputs,
+                               output_width, n_parts)
+        self._resized_inputs = self.i.inputs
         self.register_levels = list(register_levels)
-        self.output = Signal(output_width)
         self.partition_points = PartitionPoints(partition_points)
         if not self.partition_points.fits_in_width(output_width):
             raise ValueError("partition_points doesn't fit in output_width")
-        self._reg_partition_points = self.partition_points.like()
 
-        max_level = AddReduceSingle.get_max_level(len(self.inputs))
+        max_level = AddReduceSingle.get_max_level(n_inputs)
         for level in self.register_levels:
             if level > max_level:
                 raise ValueError(
@@ -347,7 +400,7 @@ class AddReduceSingle(Elaboratable):
         # because we need to know what they are (in order to set up the
         # interconnects back in AddReduce), but cannot do the m.d.comb +=
         # etc. because this code is not inside elaborate().
-        self.groups = AddReduceSingle.full_adder_groups(len(self.inputs))
+        self.groups = AddReduceSingle.full_adder_groups(n_inputs)
         self._intermediate_terms = []
         if len(self.groups) != 0:
             self.create_next_terms()
@@ -379,45 +432,10 @@ class AddReduceSingle(Elaboratable):
         """Elaborate this module."""
         m = Module()
 
-        # resize inputs to correct bit-width and optionally add in
-        # pipeline registers
-        resized_input_assignments = [self._resized_inputs[i].eq(self.inputs[i])
-                                     for i in range(len(self.inputs))]
-        copy_part_ops = [self.out_part_ops[i].eq(self.part_ops[i])
-                                     for i in range(len(self.part_ops))]
-        if 0 in self.register_levels:
-            m.d.sync += copy_part_ops
-            m.d.sync += resized_input_assignments
-            m.d.sync += self._reg_partition_points.eq(self.partition_points)
-        else:
-            m.d.comb += copy_part_ops
-            m.d.comb += resized_input_assignments
-            m.d.comb += self._reg_partition_points.eq(self.partition_points)
-
         for (value, term) in self._intermediate_terms:
             m.d.comb += term.eq(value)
 
-        # if there are no full adders to create, then we handle the base cases
-        # and return, otherwise we go on to the recursive case
-        if len(self.groups) == 0:
-            if len(self.inputs) == 0:
-                # use 0 as the default output value
-                m.d.comb += self.output.eq(0)
-            elif len(self.inputs) == 1:
-                # handle single input
-                m.d.comb += self.output.eq(self._resized_inputs[0])
-            else:
-                # base case for adding 2 inputs
-                assert len(self.inputs) == 2
-                adder = PartitionedAdder(len(self.output),
-                                         self._reg_partition_points)
-                m.submodules.final_adder = adder
-                m.d.comb += adder.a.eq(self._resized_inputs[0])
-                m.d.comb += adder.b.eq(self._resized_inputs[1])
-                m.d.comb += self.output.eq(adder.output)
-            return m
-
-        mask = self._reg_partition_points.as_mask(len(self.output))
+        mask = self.i.reg_partition_points.as_mask(self.output_width)
         m.d.comb += self.part_mask.eq(mask)
 
         # add and link the intermediate term modules
@@ -439,35 +457,35 @@ class AddReduceSingle(Elaboratable):
 
         def add_intermediate_term(value):
             intermediate_term = Signal(
-                len(self.output),
+                self.output_width,
                 name=f"intermediate_terms[{len(intermediate_terms)}]")
             _intermediate_terms.append((value, intermediate_term))
             intermediate_terms.append(intermediate_term)
 
         # store mask in intermediary (simplifies graph)
-        self.part_mask = Signal(len(self.output), reset_less=True)
+        self.part_mask = Signal(self.output_width, reset_less=True)
 
         # create full adders for this recursive level.
         # this shrinks N terms to 2 * (N // 3) plus the remainder
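+        # e.g. 9 terms -> 3 groups -> 6 terms; 7 terms -> 2*2 + 1 = 5 terms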
         self.adders = []
         for i in self.groups:
-            adder_i = MaskedFullAdder(len(self.output))
+            adder_i = MaskedFullAdder(self.output_width)
             self.adders.append((i, adder_i))
             # add both the sum and the masked-carry to the next level.
             # 3 inputs have now been reduced to 2...
             add_intermediate_term(adder_i.sum)
             add_intermediate_term(adder_i.mcarry)
         # handle the remaining inputs.
-        if len(self.inputs) % FULL_ADDER_INPUT_COUNT == 1:
+        if self.n_inputs % FULL_ADDER_INPUT_COUNT == 1:
             add_intermediate_term(self._resized_inputs[-1])
-        elif len(self.inputs) % FULL_ADDER_INPUT_COUNT == 2:
+        elif self.n_inputs % FULL_ADDER_INPUT_COUNT == 2:
             # Just pass the remaining two terms to the next layer:
             # a half adder would still leave 2 terms, so skipping it
             # saves gates.
             add_intermediate_term(self._resized_inputs[-2])
             add_intermediate_term(self._resized_inputs[-1])
         else:
-            assert len(self.inputs) % FULL_ADDER_INPUT_COUNT == 0
+            assert self.n_inputs % FULL_ADDER_INPUT_COUNT == 0
 
         self.intermediate_terms = intermediate_terms
         self._intermediate_terms = _intermediate_terms
@@ -525,16 +543,25 @@ class AddReduce(Elaboratable):
         partition_points = self.partition_points
         inputs = self.inputs
         part_ops = self.part_ops
+        n_parts = len(part_ops)
         while True:
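+            # each pass wires the current terms into a fresh level,
+            # compressing them 3:2 until fewer than 3 terms remain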
-            next_level = AddReduceSingle(inputs, self.output_width, next_levels,
-                                         partition_points, part_ops)
+            ilen = len(inputs)
+            next_level = AddReduceSingle(ilen, self.output_width, n_parts,
+                                         next_levels, partition_points)
             mods.append(next_level)
-            if len(next_level.groups) == 0:
-                break
             next_levels = list(AddReduce.next_register_levels(next_levels))
-            partition_points = next_level._reg_partition_points
+            partition_points = next_level.i.reg_partition_points
             inputs = next_level.intermediate_terms
-            part_ops = next_level.out_part_ops
+            ilen = len(inputs)
+            part_ops = next_level.i.part_ops
+            groups = AddReduceSingle.full_adder_groups(ilen)
+            if len(groups) == 0:
+                break
+
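+        # one or two terms are left over: add them in the final stage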
+        if ilen != 0:
+            next_level = FinalAdd(ilen, self.output_width, n_parts,
+                                  next_levels, partition_points)
+            mods.append(next_level)
 
         self.levels = mods
 
@@ -545,9 +572,30 @@ class AddReduce(Elaboratable):
         for i, next_level in enumerate(self.levels):
             setattr(m.submodules, "next_level%d" % i, next_level)
 
+        partition_points = self.partition_points
+        inputs = self.inputs
+        part_ops = self.part_ops
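+        # chain the levels: each level's copy of the partition points,
+        # its intermediate terms and its part_ops feed the next level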
+        for mcur in self.levels:
+            inassign = [mcur._resized_inputs[i].eq(inputs[i])
+                        for i in range(len(inputs))]
+            copy_part_ops = [mcur.i.part_ops[i].eq(part_ops[i])
+                             for i in range(len(part_ops))]
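+            # a 0 in register_levels requests a pipeline register at
+            # this level, so the copies go into the sync domain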
+            if 0 in mcur.register_levels:
+                m.d.sync += copy_part_ops
+                m.d.sync += inassign
+                m.d.sync += mcur.i.reg_partition_points.eq(partition_points)
+            else:
+                m.d.comb += copy_part_ops
+                m.d.comb += inassign
+                m.d.comb += mcur.i.reg_partition_points.eq(partition_points)
+            partition_points = mcur.i.reg_partition_points
+            inputs = mcur.intermediate_terms
+            part_ops = mcur.i.part_ops
+
         # output comes from last module
         m.d.comb += self.output.eq(next_level.output)
-        copy_part_ops = [self.out_part_ops[i].eq(next_level.out_part_ops[i])
+        copy_part_ops = [self.out_part_ops[i].eq(next_level.i.part_ops[i])
                                      for i in range(len(self.part_ops))]
         m.d.comb += copy_part_ops
 
@@ -793,12 +841,11 @@ class Part(Elaboratable):
         m.d.comb += p.epps.eq(epps)
         parts = p.parts
 
-        npbs = Signal.like(pbs, reset_less=True)
         byte_count = 8 // len(parts)
 
-        not_a_term, neg_lsb_a_term, not_b_term, neg_lsb_b_term = \
-                self.not_a_term, self.neg_lsb_a_term, \
-                self.not_b_term, self.neg_lsb_b_term
+        not_a_term, neg_lsb_a_term, not_b_term, neg_lsb_b_term = (
+                self.not_a_term, self.neg_lsb_a_term,
+                self.not_b_term, self.neg_lsb_b_term)
 
         byte_width = 8 // len(parts) # byte width
         bit_wid = 8 * byte_width     # bit width
@@ -1077,8 +1124,8 @@ class Mul8_16_32_64(Elaboratable):
                                expanded_part_pts,
                                self.part_ops)
 
-        out_part_ops = add_reduce.levels[-1].out_part_ops
-        out_part_pts = add_reduce.levels[-1]._reg_partition_points
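+        # the final level's input record carries the part_ops and
+        # partition points through the pipeline to the output stage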
+        out_part_ops = add_reduce.levels[-1].i.part_ops
+        out_part_pts = add_reduce.levels[-1].i.reg_partition_points
 
         m.submodules.add_reduce = add_reduce
         m.d.comb += self._intermediate_output.eq(add_reduce.output)