remove use of AddReduce, use AddReduceInternal instead
diff --git a/src/ieee754/part_mul_add/multiply.py b/src/ieee754/part_mul_add/multiply.py
index 3366a4f95187a491c724cb2a5cd7faa4f2a9ad4a..2c828c187f2747bde3285df64599636719e3be72 100644
--- a/src/ieee754/part_mul_add/multiply.py
+++ b/src/ieee754/part_mul_add/multiply.py
@@ -346,10 +346,9 @@ class FinalAdd(Elaboratable):
     """ Final stage of add reduce
     """
 
-    def __init__(self, n_inputs, output_width, n_parts, partition_points):
-        self.i = AddReduceData(partition_points, n_inputs,
-                               output_width, n_parts)
-        self.o = FinalReduceData(partition_points, output_width, n_parts)
+    def __init__(self, n_inputs, output_width, n_parts, partition_points,
+                       partition_step=1):
+        self.partition_step = partition_step
         self.output_width = output_width
         self.n_inputs = n_inputs
         self.n_parts = n_parts
@@ -357,6 +356,17 @@ class FinalAdd(Elaboratable):
         if not self.partition_points.fits_in_width(output_width):
             raise ValueError("partition_points doesn't fit in output_width")
 
+        self.i = self.ispec()
+        self.o = self.ospec()
+
+    def ispec(self):
+        return AddReduceData(self.partition_points, self.n_inputs,
+                             self.output_width, self.n_parts)
+
+    def ospec(self):
+        return FinalReduceData(self.partition_points,
+                               self.output_width, self.n_parts)
+
     def elaborate(self, platform):
         """Elaborate this module."""
         m = Module()
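
The new ispec()/ospec() pair follows the same naming the pipeline API uses elsewhere in this project: a block declares how to build its input and output records, and __init__ simply instantiates both, presumably so these blocks can later be used directly as pipeline stages. A minimal sketch of the shape, with an invented stage and plain Signals standing in for AddReduceData/FinalReduceData (illustration only, not code from multiply.py):

    # Minimal sketch of the ispec/ospec convention (invented example).
    from nmigen import Elaboratable, Module, Signal

    class DoubleStage(Elaboratable):
        def __init__(self, width):
            self.width = width
            self.i = self.ispec()      # build the input spec from stored params
            self.o = self.ospec()      # build the output spec the same way

        def ispec(self):
            return Signal(self.width)

        def ospec(self):
            return Signal(self.width + 1)

        def elaborate(self, platform):
            m = Module()
            m.d.comb += self.o.eq(self.i * 2)   # trivial "work" for the sketch
            return m
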
@@ -373,7 +383,7 @@ class FinalAdd(Elaboratable):
             # base case for adding 2 inputs
             assert self.n_inputs == 2
             adder = PartitionedAdder(output_width,
-                                     self.i.part_pts, 2)
+                                     self.i.part_pts, self.partition_step)
             m.submodules.final_adder = adder
             m.d.comb += adder.a.eq(self.i.terms[0])
             m.d.comb += adder.b.eq(self.i.terms[1])
@@ -398,25 +408,35 @@ class AddReduceSingle(Elaboratable):
         supported, except for by ``Signal.eq``.
     """
 
-    def __init__(self, n_inputs, output_width, n_parts, partition_points):
+    def __init__(self, n_inputs, output_width, n_parts, partition_points,
+                       partition_step=1):
         """Create an ``AddReduce``.
 
         :param inputs: input ``Signal``s to be summed.
         :param output_width: bit-width of ``output``.
         :param partition_points: the input partition points.
         """
+        self.partition_step = partition_step
         self.n_inputs = n_inputs
         self.n_parts = n_parts
         self.output_width = output_width
-        self.i = AddReduceData(partition_points, n_inputs,
-                               output_width, n_parts)
         self.partition_points = PartitionPoints(partition_points)
         if not self.partition_points.fits_in_width(output_width):
             raise ValueError("partition_points doesn't fit in output_width")
 
         self.groups = AddReduceSingle.full_adder_groups(n_inputs)
-        n_terms = AddReduceSingle.calc_n_inputs(n_inputs, self.groups)
-        self.o = AddReduceData(partition_points, n_terms, output_width, n_parts)
+        self.n_terms = AddReduceSingle.calc_n_inputs(n_inputs, self.groups)
+
+        self.i = self.ispec()
+        self.o = self.ospec()
+
+    def ispec(self):
+        return AddReduceData(self.partition_points, self.n_inputs,
+                             self.output_width, self.n_parts)
+
+    def ospec(self):
+        return AddReduceData(self.partition_points, self.n_terms,
+                             self.output_width, self.n_parts)
 
     @staticmethod
     def calc_n_inputs(n_inputs, groups):
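
full_adder_groups() and calc_n_inputs() are unchanged and not shown here; the count is hoisted into self.n_terms only because ospec() needs it. The count itself follows from carry-save reduction: each group of three terms is compressed by a full adder into two (sum and carry), and one or two left-over terms pass through untouched. A plain-Python sketch of that arithmetic, stated as an assumption about the helpers rather than their actual bodies:

    # Assumed arithmetic only -- not the real calc_n_inputs/full_adder_groups.
    def terms_after_one_level(n_inputs):
        groups = n_inputs // 3       # each group of 3 terms becomes sum + carry
        leftover = n_inputs % 3      # 0, 1 or 2 terms skip this level
        return groups * 2 + leftover

    # under this assumption 9 terms reduce 9 -> 6 -> 4 -> 3 -> 2, and the
    # final pair is summed by FinalAdd
    assert terms_after_one_level(9) == 6
    assert terms_after_one_level(3) == 2
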
@@ -500,7 +520,8 @@ class AddReduceSingle(Elaboratable):
         part_mask = Signal(self.output_width, reset_less=True)
 
         # get partition points as a mask
-        mask = self.i.part_pts.as_mask(self.output_width, mul=2)
+        mask = self.i.part_pts.as_mask(self.output_width,
+                                       mul=self.partition_step)
         m.d.comb += part_mask.eq(mask)
 
         # add and link the intermediate term modules
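
The only functional change in this hunk is the hard-coded mul=2 becoming mul=self.partition_step, mirroring the FinalAdd.elaborate hunk above where the 2 handed to PartitionedAdder is replaced the same way. The factor appears to exist because the reduction tree works in the 128-bit product space of 64-bit operands: a partition boundary at bit N of the inputs lands at bit 2*N of the terms being summed, so the mask has to be stretched by that step. A worked example of the scaling in plain Python (the real code does this on Signals via PartitionPoints.as_mask):

    # Worked example of the partition-point scaling (plain ints, not Signals).
    input_points = [8, 16, 24, 32, 40, 48, 56]   # byte boundaries of a 64-bit input
    partition_step = 2                           # terms live in a 128-bit space
    term_points = [p * partition_step for p in input_points]
    assert term_points == [16, 32, 48, 64, 80, 96, 112]
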
@@ -527,18 +548,19 @@ class AddReduceInternal:
         supported, except for by ``Signal.eq``.
     """
 
-    def __init__(self, inputs, output_width, partition_points,
-                       part_ops):
+    def __init__(self, i, output_width, partition_step=1):
         """Create an ``AddReduce``.
 
         :param inputs: input ``Signal``s to be summed.
         :param output_width: bit-width of ``output``.
         :param partition_points: the input partition points.
         """
-        self.inputs = inputs
-        self.part_ops = part_ops
+        self.i = i
+        self.inputs = i.terms
+        self.part_ops = i.part_ops
         self.output_width = output_width
-        self.partition_points = partition_points
+        self.partition_points = i.part_pts
+        self.partition_step = partition_step
 
         self.create_levels()
 
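
This is the heart of the commit: AddReduceInternal stays a plain, non-Elaboratable class whose only job is to compute self.levels, but it now accepts the upstream AddReduceData record as a single argument instead of three separate pieces, so whichever module owns the elaboration can build the reduction tree directly from a producer's .o record. The call-shape change, abbreviated:

    # Call-shape change (abbreviated; the Mul8_16_32_64 hunk at the end
    # shows the real usage, including the pipeline-register loop):
    #   before:  AddReduceInternal(inputs, output_width, partition_points, part_ops)
    #   after:   AddReduceInternal(prev.o, output_width, partition_step=2)
    # where prev.o is an AddReduceData record carrying terms, part_ops and
    # part_pts together.
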
@@ -556,7 +578,8 @@ class AddReduceInternal:
             if len(groups) == 0:
                 break
             next_level = AddReduceSingle(ilen, self.output_width, n_parts,
-                                         partition_points)
+                                         partition_points,
+                                         self.partition_step)
             mods.append(next_level)
             partition_points = next_level.i.part_pts
             inputs = next_level.o.terms
@@ -564,7 +587,7 @@ class AddReduceInternal:
             part_ops = next_level.i.part_ops
 
         next_level = FinalAdd(ilen, self.output_width, n_parts,
-                              partition_points)
+                              partition_points, self.partition_step)
         mods.append(next_level)
 
         self.levels = mods
@@ -582,8 +605,8 @@ class AddReduce(AddReduceInternal, Elaboratable):
         supported, except for by ``Signal.eq``.
     """
 
-    def __init__(self, inputs, output_width, register_levels, partition_points,
-                       part_ops):
+    def __init__(self, inputs, output_width, register_levels, part_pts,
+                       part_ops, partition_step=1):
         """Create an ``AddReduce``.
 
         :param inputs: input ``Signal``s to be summed.
@@ -592,10 +615,14 @@ class AddReduce(AddReduceInternal, Elaboratable):
             pipeline registers.
         :param partition_points: the input partition points.
         """
-        AddReduceInternal.__init__(self, inputs, output_width,
-                                   partition_points, part_ops)
+        self._inputs = inputs
+        self._part_pts = part_pts
+        self._part_ops = part_ops
         n_parts = len(part_ops)
-        self.o = FinalReduceData(partition_points, output_width, n_parts)
+        self.i = AddReduceData(part_pts, len(inputs),
+                               output_width, n_parts)
+        AddReduceInternal.__init__(self, self.i, output_width, partition_step)
+        self.o = FinalReduceData(part_pts, output_width, n_parts)
         self.register_levels = register_levels
 
     @staticmethod
@@ -609,48 +636,16 @@ class AddReduce(AddReduceInternal, Elaboratable):
             if level > 0:
                 yield level - 1
 
-    def create_levels(self):
-        """creates reduction levels"""
-
-        mods = []
-        partition_points = self.partition_points
-        part_ops = self.part_ops
-        n_parts = len(part_ops)
-        inputs = self.inputs
-        ilen = len(inputs)
-        while True:
-            groups = AddReduceSingle.full_adder_groups(len(inputs))
-            if len(groups) == 0:
-                break
-            next_level = AddReduceSingle(ilen, self.output_width, n_parts,
-                                         partition_points)
-            mods.append(next_level)
-            partition_points = next_level.i.part_pts
-            inputs = next_level.o.terms
-            ilen = len(inputs)
-            part_ops = next_level.i.part_ops
-
-        next_level = FinalAdd(ilen, self.output_width, n_parts,
-                              partition_points)
-        mods.append(next_level)
-
-        self.levels = mods
-
     def elaborate(self, platform):
         """Elaborate this module."""
         m = Module()
 
+        m.d.comb += self.i.eq_from(self._part_pts, self._inputs, self._part_ops)
+
         for i, next_level in enumerate(self.levels):
             setattr(m.submodules, "next_level%d" % i, next_level)
 
-        partition_points = self.partition_points
-        inputs = self.inputs
-        part_ops = self.part_ops
-        n_parts = len(part_ops)
-        n_inputs = len(inputs)
-        output_width = self.output_width
-        i = AddReduceData(partition_points, n_inputs, output_width, n_parts)
-        m.d.comb += i.eq_from(partition_points, inputs, part_ops)
+        i = self.i
         for idx in range(len(self.levels)):
             mcur = self.levels[idx]
             if idx in self.register_levels:
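
With create_levels() now inherited from AddReduceInternal, AddReduce.elaborate is reduced to copying its constructor arguments into self.i and chaining the levels, inserting a pipeline register wherever register_levels says so. The same idiom is inlined into Mul8_16_32_64 at the end of this diff. A self-contained sketch of the idiom, with an invented one-Signal stage standing in for the AddReduceSingle/FinalAdd levels:

    # Self-contained sketch of the level-chaining idiom (invented stage names).
    from nmigen import Elaboratable, Module, Signal

    class Inc(Elaboratable):               # stands in for one reduce level
        def __init__(self, width=8):
            self.i = Signal(width)
            self.o = Signal(width)

        def elaborate(self, platform):
            m = Module()
            m.d.comb += self.o.eq(self.i + 1)
            return m

    def chain(m, levels, first_i, register_levels):
        """Feed each level from the previous output, registering (sync
        domain) at the indices listed in register_levels."""
        i = first_i
        for idx, mcur in enumerate(levels):
            setattr(m.submodules, "level%d" % idx, mcur)
            if idx in register_levels:
                m.d.sync += mcur.i.eq(i)    # pipeline register here
            else:
                m.d.comb += mcur.i.eq(i)    # purely combinatorial link
            i = mcur.o                      # feeds the next level
        return i                            # the last level's output

    m = Module()
    first = Signal(8)
    result = chain(m, [Inc(), Inc(), Inc()], first, register_levels={1})
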
@@ -987,11 +982,18 @@ class FinalOut(Elaboratable):
     """
     def __init__(self, output_width, n_parts, part_pts):
         self.part_pts = part_pts
-        self.i = IntermediateData(part_pts, output_width, n_parts)
+        self.output_width = output_width
+        self.n_parts = n_parts
         self.out_wid = output_width//2
-        # output
-        self.out = Signal(self.out_wid, reset_less=True)
-        self.intermediate_output = Signal(output_width, reset_less=True)
+
+        self.i = self.ispec()
+        self.o = self.ospec()
+
+    def ispec(self):
+        return IntermediateData(self.part_pts, self.output_width, self.n_parts)
+
+    def ospec(self):
+        return OutputData()
 
     def elaborate(self, platform):
         m = Module()
@@ -1045,8 +1047,11 @@ class FinalOut(Elaboratable):
                     Mux(d32[i // 4], i32.bit_select(i * 8, 8),
                                       i64.bit_select(i * 8, 8))))
             ol.append(op)
-        m.d.comb += self.out.eq(Cat(*ol))
-        m.d.comb += self.intermediate_output.eq(self.i.intermediate_output)
+
+        # create outputs
+        m.d.comb += self.o.output.eq(Cat(*ol))
+        m.d.comb += self.o.intermediate_output.eq(self.i.intermediate_output)
+
         return m
 
 
@@ -1138,6 +1143,17 @@ class InputData:
         return self.eq_from(rhs.part_pts, rhs.a, rhs.b, rhs.part_ops)
 
 
+class OutputData:
+
+    def __init__(self):
+        self.intermediate_output = Signal(128) # needed for unit tests
+        self.output = Signal(64)
+
+    def eq(self, rhs):
+        return [self.intermediate_output.eq(rhs.intermediate_output),
+                self.output.eq(rhs.output)]
+
+
 class AllTerms(Elaboratable):
     """Set of terms to be added together
     """
@@ -1151,13 +1167,20 @@ class AllTerms(Elaboratable):
             pipeline registers.
         :param partition_points: the input partition points.
         """
-        self.i = InputData()
         self.register_levels = register_levels
         self.n_inputs = n_inputs
         self.n_parts = n_parts
         self.output_width = output_width
-        self.o = AddReduceData(self.i.part_pts, n_inputs,
-                               output_width, n_parts)
+
+        self.i = self.ispec()
+        self.o = self.ospec()
+
+    def ispec(self):
+        return InputData()
+
+    def ospec(self):
+        return AddReduceData(self.i.part_pts, self.n_inputs,
+                             self.output_width, self.n_parts)
 
     def elaborate(self, platform):
         m = Module()
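
One ordering subtlety in AllTerms: unlike the other blocks, its ospec() reads self.i.part_pts, so ispec() must already have run and assigned self.i before ospec() is called, which is exactly the order the constructor uses. The two constructor lines above, annotated:

    self.i = self.ispec()    # must come first: creates self.i (an InputData)
    self.o = self.ospec()    # reads self.i.part_pts, so it depends on self.i
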
@@ -1242,9 +1265,19 @@ class Intermediates(Elaboratable):
     """ Intermediate output modules
     """
 
-    def __init__(self, output_width, n_parts, partition_points):
-        self.i = FinalReduceData(partition_points, output_width, n_parts)
-        self.o = IntermediateData(partition_points, output_width, n_parts)
+    def __init__(self, output_width, n_parts, part_pts):
+        self.part_pts = part_pts
+        self.output_width = output_width
+        self.n_parts = n_parts
+
+        self.i = self.ispec()
+        self.o = self.ospec()
+
+    def ispec(self):
+        return FinalReduceData(self.part_pts, self.output_width, self.n_parts)
+
+    def ospec(self):
+        return IntermediateData(self.part_pts, self.output_width, self.n_parts)
 
     def elaborate(self, platform):
         m = Module()
@@ -1324,18 +1357,24 @@ class Mul8_16_32_64(Elaboratable):
         # parameter(s)
         self.register_levels = list(register_levels)
 
+        self.i = self.ispec()
+        self.o = self.ospec()
+
         # inputs
-        self.i = InputData()
         self.part_pts = self.i.part_pts
         self.part_ops = self.i.part_ops
         self.a = self.i.a
         self.b = self.i.b
 
-        # intermediates (needed for unit tests)
-        self.intermediate_output = Signal(128)
-
         # output
-        self.output = Signal(64)
+        self.intermediate_output = self.o.intermediate_output
+        self.output = self.o.output
+
+    def ispec(self):
+        return InputData()
+
+    def ospec(self):
+        return OutputData()
 
     def elaborate(self, platform):
         m = Module()
@@ -1350,26 +1389,26 @@ class Mul8_16_32_64(Elaboratable):
 
         terms = t.o.terms
 
-        add_reduce = AddReduce(terms,
-                               128,
-                               self.register_levels,
-                               t.o.part_pts,
-                               t.o.part_ops)
+        at = AddReduceInternal(t.o, 128, partition_step=2)
 
-        out_part_ops = add_reduce.o.part_ops
-        out_part_pts = add_reduce.o.part_pts
-
-        m.submodules.add_reduce = add_reduce
+        i = at.i
+        for idx in range(len(at.levels)):
+            mcur = at.levels[idx]
+            setattr(m.submodules, "addreduce_%d" % idx, mcur)
+            if idx in self.register_levels:
+                m.d.sync += mcur.i.eq(i)
+            else:
+                m.d.comb += mcur.i.eq(i)
+            i = mcur.o # for next loop
 
         interm = Intermediates(128, 8, part_pts)
         m.submodules.intermediates = interm
-        m.d.comb += interm.i.eq(add_reduce.o)
+        m.d.comb += interm.i.eq(i)
 
         # final output
         m.submodules.finalout = finalout = FinalOut(128, 8, part_pts)
         m.d.comb += finalout.i.eq(interm.o)
-        m.d.comb += self.output.eq(finalout.out)
-        m.d.comb += self.intermediate_output.eq(finalout.intermediate_output)
+        m.d.comb += self.o.eq(finalout.o)
 
         return m
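
After this change Mul8_16_32_64 instantiates and chains the AddReduceInternal levels itself, so the AddReduce Elaboratable is no longer used here, and the multiplier's external interface is the i/o record pair, with self.output and self.intermediate_output kept as aliases for existing unit tests. A hedged usage sketch of driving it from a parent module; the Mul8_16_32_64 constructor arguments are not shown in this diff, so an already-constructed instance is passed in, and part_ops/part_pts (which select the 8/16/32/64-bit lane configuration) would also need to be driven:

    # Hedged usage sketch -- only attributes visible in this diff are used.
    from nmigen import Elaboratable, Module, Signal

    class MulWrapper(Elaboratable):
        def __init__(self, mul):        # mul: an already-constructed Mul8_16_32_64
            self.mul = mul
            self.a = Signal(64)
            self.b = Signal(64)
            self.result = Signal(64)

        def elaborate(self, platform):
            m = Module()
            m.submodules.mul = mul = self.mul
            m.d.comb += [mul.i.a.eq(self.a),
                         mul.i.b.eq(self.b),
                         self.result.eq(mul.o.output)]
            # driving mul.i.part_ops and mul.i.part_pts is omitted here
            # for brevity; they choose the partitioning and per-lane op
            return m
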