From 2ea660fc0b945032b7818bf740da907393636698 Mon Sep 17 00:00:00 2001
From: Luke Kenneth Casson Leighton
Date: Wed, 21 Aug 2019 10:26:59 +0100
Subject: [PATCH 1/1] move input assignments (chain) out of AddReduceSingle

---
 src/ieee754/part_mul_add/multiply.py | 46 ++++++++++------------------
 1 file changed, 16 insertions(+), 30 deletions(-)

diff --git a/src/ieee754/part_mul_add/multiply.py b/src/ieee754/part_mul_add/multiply.py
index 6950a6d9..a1014a57 100644
--- a/src/ieee754/part_mul_add/multiply.py
+++ b/src/ieee754/part_mul_add/multiply.py
@@ -341,21 +341,6 @@ class FinalAdd(Elaboratable):
         """Elaborate this module."""
         m = Module()
 
-        # resize inputs to correct bit-width and optionally add in
-        # pipeline registers
-        resized_input_assignments = [self._resized_inputs[i].eq(self.inputs[i])
-                                     for i in range(len(self.inputs))]
-        copy_part_ops = [self.out_part_ops[i].eq(self.part_ops[i])
-                         for i in range(len(self.part_ops))]
-        if 0 in self.register_levels:
-            m.d.sync += copy_part_ops
-            m.d.sync += resized_input_assignments
-            m.d.sync += self._reg_partition_points.eq(self.partition_points)
-        else:
-            m.d.comb += copy_part_ops
-            m.d.comb += resized_input_assignments
-            m.d.comb += self._reg_partition_points.eq(self.partition_points)
-
         if len(self.inputs) == 0:
             # use 0 as the default output value
             m.d.comb += self.output.eq(0)
@@ -452,21 +437,6 @@ class AddReduceSingle(Elaboratable):
         """Elaborate this module."""
         m = Module()
 
-        # resize inputs to correct bit-width and optionally add in
-        # pipeline registers
-        resized_input_assignments = [self._resized_inputs[i].eq(self.inputs[i])
-                                     for i in range(len(self.inputs))]
-        copy_part_ops = [self.out_part_ops[i].eq(self.part_ops[i])
-                         for i in range(len(self.part_ops))]
-        if 0 in self.register_levels:
-            m.d.sync += copy_part_ops
-            m.d.sync += resized_input_assignments
-            m.d.sync += self._reg_partition_points.eq(self.partition_points)
-        else:
-            m.d.comb += copy_part_ops
-            m.d.comb += resized_input_assignments
-            m.d.comb += self._reg_partition_points.eq(self.partition_points)
-
         for (value, term) in self._intermediate_terms:
             m.d.comb += term.eq(value)
 
@@ -603,6 +573,22 @@ class AddReduce(Elaboratable):
         for i, next_level in enumerate(self.levels):
             setattr(m.submodules, "next_level%d" % i, next_level)
 
+        for i in range(len(self.levels)):
+            mcur = self.levels[i]
+            #mnext = self.levels[i+1]
+            inassign = [mcur._resized_inputs[i].eq(mcur.inputs[i])
+                        for i in range(len(mcur.inputs))]
+            copy_part_ops = [mcur.out_part_ops[i].eq(mcur.part_ops[i])
+                             for i in range(len(mcur.part_ops))]
+            if 0 in mcur.register_levels:
+                m.d.sync += copy_part_ops
+                m.d.sync += inassign
+                m.d.sync += mcur._reg_partition_points.eq(mcur.partition_points)
+            else:
+                m.d.comb += copy_part_ops
+                m.d.comb += inassign
+                m.d.comb += mcur._reg_partition_points.eq(mcur.partition_points)
+
         # output comes from last module
         m.d.comb += self.output.eq(next_level.output)
        copy_part_ops = [self.out_part_ops[i].eq(next_level.out_part_ops[i])
-- 
2.30.2
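
The hunk added to AddReduce.elaborate() above centralises a domain-selection
idiom: the same list of assignments is driven either from the sync domain
(inserting a pipeline register when 0 appears in register_levels) or from the
comb domain (a plain pass-through). Below is a minimal self-contained sketch
of that idiom, not taken from the patch: the CopyStage class, the "registered"
flag and the i/o signal names are illustrative, assuming the nmigen API
(Elaboratable, Module, Signal) used by the real multiply.py.

# Sketch only -- illustrative names, not code from the patch.
from nmigen import Elaboratable, Module, Signal

class CopyStage(Elaboratable):
    def __init__(self, width, registered):
        self.registered = registered   # True -> insert a pipeline register
        self.i = Signal(width)
        self.o = Signal(width)

    def elaborate(self, platform):
        m = Module()
        assigns = [self.o.eq(self.i)]
        if self.registered:
            m.d.sync += assigns        # registered: output updates on the clock edge
        else:
            m.d.comb += assigns        # combinatorial: output follows the input
        return m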